filename | code
---|---|
475929.c | /*
* Automatically Tuned Linear Algebra Software v3.10.3
* (C) Copyright 2001 R. Clint Whaley
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions, and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the ATLAS group or the names of its contributers may
* not be used to endorse or promote products derived from this
* software without specific written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ATLAS GROUP OR ITS CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "atlas_misc.h"
#include "atlas_prefetch.h"
#ifdef MB
#if (MB/4)*4 != MB
#error "MB must be multiple of 4!!"
#endif
#endif
#ifdef NB
#if (NB/4)*4 != NB
#error "NB must be multiple of 4!!"
#endif
#endif
#ifdef KB
#if (KB/8)*8 != KB || KB == 0
#error "KB must be multiple of 8!!"
#endif
#else
#error "KB must be a compile time constant!!"
#endif
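/*
 * Minimal reference sketch (not part of ATLAS; real/TREAL case, alpha assumed
 * to be one since this kernel never references it and only applies beta):
 * with A stored transposed (lda=KB) and B stored normally (ldb=KB), the
 * kernel computes C[i+j*ldc] = beta*C[i+j*ldc] + sum_k A[i*KB+k]*B[j*KB+k].
 */
#if 0
static void ATL_refmm_sketch(const int M, const int N, const int K,
                             const TYPE *A, const TYPE *B, const TYPE beta,
                             TYPE *C, const int ldc)
{
   int i, j, k;
   TYPE sum;
   for (j=0; j < N; j++)
   {
      for (i=0; i < M; i++)
      {
         sum = ATL_rzero;
         for (k=0; k < K; k++)   /* dot product of an A row panel and a B column panel */
            sum += A[i*KB + k] * B[j*KB + k];
         C[i + j*ldc] = beta*C[i + j*ldc] + sum;
      }
   }
}
#endif /* end reference sketch */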
void ATL_USERMM
(const int M, const int N, const int K, const TYPE alpha, const TYPE *A, const int lda, const TYPE *B, const int ldb, const TYPE beta, TYPE *C, const int ldc)
/*
* matmul with TA=T, TB=N, lda=KB, ldb=KB, ldc=0, mu=4, nu=4, ku=8
*/
{
const TYPE *stM = A + KB*M;
const TYPE *stN = B + KB*N;
const int incAm = KB3+8, incAn = -KB*M;
const int incBm = 8-KB;
#define incBn KB4
const int incCn = ((ldc<<2) - M)SHIFT;
const int Kstart=(KB>>3)-1;
TYPE *pC0=C, *pC1=pC0+(ldc SHIFT), *pC2=pC1+(ldc SHIFT),
*pC3=pC2+(ldc SHIFT);
const TYPE *pA0=A;
const TYPE *pB0=B;
register int k;
register TYPE rA0, rA1, rA2, rA3;
register TYPE rB0, rB1, rB2, rB3;
register TYPE m0, m1, m2, m3;
register TYPE rC0_0, rC1_0, rC2_0, rC3_0, rC0_1, rC1_1, rC2_1, rC3_1,
rC0_2, rC1_2, rC2_2, rC3_2, rC0_3, rC1_3, rC2_3, rC3_3;
do /* N-loop */
{
ATL_pfl1R(pB0); ATL_pfl1R(pB0+KB); ATL_pfl1R(pB0+KB2); ATL_pfl1R(pB0+KB3);
ATL_pfl1R(pB0+8); ATL_pfl1R(pB0+KB+8); ATL_pfl1R(pB0+KB2+8); ATL_pfl1R(pB0+KB3+8);
do /* M-loop */
{
#ifdef BETA0
rC0_0 = rC1_0 = rC2_0 = rC3_0 =
rC0_1 = rC1_1 = rC2_1 = rC3_1 =
rC0_2 = rC1_2 = rC2_2 = rC3_2 =
rC0_3 = rC1_3 = rC2_3 = rC3_3 = ATL_rzero;
/* ATL_pfl1R(pB0+8); ATL_pfl1R(pB0+KB+8); ATL_pfl1R(pB0+KB2+8); ATL_pfl1R(pB0+KB3+8); */
#else
#ifdef TREAL
rC0_0 = *pC0; rC0_1 = *pC1; rC0_2 = *pC2; rC0_3 = *pC3;
rC1_0 = pC0[1]; rC1_1 = pC1[1]; rC1_2 = pC2[1]; rC1_3 = pC3[1];
rC2_0 = pC0[2]; rC2_1 = pC1[2]; rC2_2 = pC2[2]; rC2_3 = pC3[2];
rC3_0 = pC0[3]; rC3_1 = pC1[3]; rC3_2 = pC2[3]; rC3_3 = pC3[3];
#else
rC0_0 = *pC0; rC0_1 = *pC1; rC0_2 = *pC2; rC0_3 = *pC3;
rC1_0 = pC0[2]; rC1_1 = pC1[2]; rC1_2 = pC2[2]; rC1_3 = pC3[2];
rC2_0 = pC0[4]; rC2_1 = pC1[4]; rC2_2 = pC2[4]; rC2_3 = pC3[4];
rC3_0 = pC0[6]; rC3_1 = pC1[6]; rC3_2 = pC2[6]; rC3_3 = pC3[6];
#endif
#ifdef BETAX
rB3 = beta;
rC0_0 *= rB3; rC0_1 *= rB3; rC0_2 *= rB3; rC0_3 *= rB3;
/* ATL_pfl1R(pB0+8); */
rC1_0 *= rB3; rC1_1 *= rB3; rC1_2 *= rB3; rC1_3 *= rB3;
/* ATL_pfl1R(pB0+KB+8); */
rC2_0 *= rB3; rC2_1 *= rB3; rC2_2 *= rB3; rC2_3 *= rB3;
/* ATL_pfl1R(pB0+KB2+8); */
rC3_0 *= rB3; rC3_1 *= rB3; rC3_2 *= rB3; rC3_3 *= rB3;
/* ATL_pfl1R(pB0+KB3+8); */
#else
/* ATL_pfl1R(pB0+8); ATL_pfl1R(pB0+KB+8); ATL_pfl1R(pB0+KB2+8); ATL_pfl1R(pB0+KB3+8); */
#endif
#endif
/*
* Start pipeline
*/
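/*
 * Note: the loop body is software pipelined: the loads of rA0..rA3 and
 * rB0..rB3 for the next k are issued alongside the multiplies for the
 * current k, and the products held in m0..m3 are accumulated into the rC
 * registers on the following step; the code after the K-loop drains the
 * pending products.
 */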
rA0 = *pA0; rB0 = *pB0;
rA1 = pA0[KB]; rA2 = pA0[KB2];
m0 = rA0 * rB0; rA3 = pA0[KB3];
m1 = rA1 * rB0; rB1 = pB0[KB];
m2 = rA2 * rB0; rB2 = pB0[KB2];
m3 = rA3 * rB0; rB3 = pB0[KB3];
for (k=Kstart; k; k--)
{
rC0_0 += m0; m0 = rA0 * rB1; rB0 = pB0[1];
rC1_0 += m1; m1 = rA1 * rB1;
rC2_0 += m2; m2 = rA2 * rB1;
rC3_0 += m3; m3 = rA3 * rB1; rB1 = pB0[KB+1];
rC0_1 += m0; m0 = rA0 * rB2;
rC1_1 += m1; m1 = rA1 * rB2; ATL_pfl1R(pA0+KB4);
rC2_1 += m2; m2 = rA2 * rB2;
rC3_1 += m3; m3 = rA3 * rB2; rB2 = pB0[KB2+1];
rC0_2 += m0; m0 = rA0 * rB3; rA0 = pA0[1];
rC1_2 += m1; m1 = rA1 * rB3; rA1 = pA0[KB+1];
rC2_2 += m2; m2 = rA2 * rB3; rA2 = pA0[KB2+1];
rC3_2 += m3; m3 = rA3 * rB3; rA3 = pA0[KB3+1];
rC0_3 += m0; m0 = rA0 * rB0; rB3 = pB0[KB3+1];
rC1_3 += m1; m1 = rA1 * rB0;
rC2_3 += m2; m2 = rA2 * rB0;
rC3_3 += m3; m3 = rA3 * rB0; rB0 = pB0[2];
rC0_0 += m0; m0 = rA0 * rB1;
rC1_0 += m1; m1 = rA1 * rB1;
rC2_0 += m2; m2 = rA2 * rB1;
rC3_0 += m3; m3 = rA3 * rB1; rB1 = pB0[KB+2];
rC0_1 += m0; m0 = rA0 * rB2;
rC1_1 += m1; m1 = rA1 * rB2; ATL_pfl1R(pA0+KB5);
rC2_1 += m2; m2 = rA2 * rB2;
rC3_1 += m3; m3 = rA3 * rB2; rB2 = pB0[KB2+2];
rC0_2 += m0; m0 = rA0 * rB3; rA0 = pA0[2];
rC1_2 += m1; m1 = rA1 * rB3; rA1 = pA0[KB+2];
rC2_2 += m2; m2 = rA2 * rB3; rA2 = pA0[KB2+2];
rC3_2 += m3; m3 = rA3 * rB3; rA3 = pA0[KB3+2];
rC0_3 += m0; m0 = rA0 * rB0; rB3 = pB0[KB3+2];
rC1_3 += m1; m1 = rA1 * rB0;
rC2_3 += m2; m2 = rA2 * rB0;
rC3_3 += m3; m3 = rA3 * rB0; rB0 = pB0[3];
rC0_0 += m0; m0 = rA0 * rB1;
rC1_0 += m1; m1 = rA1 * rB1;
rC2_0 += m2; m2 = rA2 * rB1;
rC3_0 += m3; m3 = rA3 * rB1; rB1 = pB0[KB+3];
rC0_1 += m0; m0 = rA0 * rB2;
rC1_1 += m1; m1 = rA1 * rB2; ATL_pfl1R(pA0+KB6);
rC2_1 += m2; m2 = rA2 * rB2;
rC3_1 += m3; m3 = rA3 * rB2; rB2 = pB0[KB2+3];
rC0_2 += m0; m0 = rA0 * rB3; rA0 = pA0[3];
rC1_2 += m1; m1 = rA1 * rB3; rA1 = pA0[KB+3];
rC2_2 += m2; m2 = rA2 * rB3; rA2 = pA0[KB2+3];
rC3_2 += m3; m3 = rA3 * rB3; rA3 = pA0[KB3+3];
rC0_3 += m0; m0 = rA0 * rB0; rB3 = pB0[KB3+3];
rC1_3 += m1; m1 = rA1 * rB0;
rC2_3 += m2; m2 = rA2 * rB0;
rC3_3 += m3; m3 = rA3 * rB0; rB0 = pB0[4];
rC0_0 += m0; m0 = rA0 * rB1;
rC1_0 += m1; m1 = rA1 * rB1;
rC2_0 += m2; m2 = rA2 * rB1;
rC3_0 += m3; m3 = rA3 * rB1; rB1 = pB0[KB+4];
rC0_1 += m0; m0 = rA0 * rB2;
rC1_1 += m1; m1 = rA1 * rB2; ATL_pfl1R(pA0+KB7);
rC2_1 += m2; m2 = rA2 * rB2;
rC3_1 += m3; m3 = rA3 * rB2; rB2 = pB0[KB2+4];
rC0_2 += m0; m0 = rA0 * rB3; rA0 = pA0[4];
rC1_2 += m1; m1 = rA1 * rB3; rA1 = pA0[KB+4];
rC2_2 += m2; m2 = rA2 * rB3; rA2 = pA0[KB2+4];
rC3_2 += m3; m3 = rA3 * rB3; rA3 = pA0[KB3+4];
rC0_3 += m0; m0 = rA0 * rB0; rB3 = pB0[KB3+4];
rC1_3 += m1; m1 = rA1 * rB0;
rC2_3 += m2; m2 = rA2 * rB0;
rC3_3 += m3; m3 = rA3 * rB0; rB0 = pB0[5];
rC0_0 += m0; m0 = rA0 * rB1;
rC1_0 += m1; m1 = rA1 * rB1;
rC2_0 += m2; m2 = rA2 * rB1;
rC3_0 += m3; m3 = rA3 * rB1; rB1 = pB0[KB+5];
rC0_1 += m0; m0 = rA0 * rB2;
rC1_1 += m1; m1 = rA1 * rB2; ATL_pfl1R(pB0+16);
rC2_1 += m2; m2 = rA2 * rB2;
rC3_1 += m3; m3 = rA3 * rB2; rB2 = pB0[KB2+5];
rC0_2 += m0; m0 = rA0 * rB3; rA0 = pA0[5];
rC1_2 += m1; m1 = rA1 * rB3; rA1 = pA0[KB+5];
rC2_2 += m2; m2 = rA2 * rB3; rA2 = pA0[KB2+5];
rC3_2 += m3; m3 = rA3 * rB3; rA3 = pA0[KB3+5];
rC0_3 += m0; m0 = rA0 * rB0; rB3 = pB0[KB3+5];
rC1_3 += m1; m1 = rA1 * rB0;
rC2_3 += m2; m2 = rA2 * rB0;
rC3_3 += m3; m3 = rA3 * rB0; rB0 = pB0[6];
rC0_0 += m0; m0 = rA0 * rB1;
rC1_0 += m1; m1 = rA1 * rB1;
rC2_0 += m2; m2 = rA2 * rB1;
rC3_0 += m3; m3 = rA3 * rB1; rB1 = pB0[KB+6];
rC0_1 += m0; m0 = rA0 * rB2;
rC1_1 += m1; m1 = rA1 * rB2; ATL_pfl1R(pB0+KB+16);
rC2_1 += m2; m2 = rA2 * rB2;
rC3_1 += m3; m3 = rA3 * rB2; rB2 = pB0[KB2+6];
rC0_2 += m0; m0 = rA0 * rB3; rA0 = pA0[6];
rC1_2 += m1; m1 = rA1 * rB3; rA1 = pA0[KB+6];
rC2_2 += m2; m2 = rA2 * rB3; rA2 = pA0[KB2+6];
rC3_2 += m3; m3 = rA3 * rB3; rA3 = pA0[KB3+6];
rC0_3 += m0; m0 = rA0 * rB0; rB3 = pB0[KB3+6];
rC1_3 += m1; m1 = rA1 * rB0;
rC2_3 += m2; m2 = rA2 * rB0;
rC3_3 += m3; m3 = rA3 * rB0; rB0 = pB0[7];
rC0_0 += m0; m0 = rA0 * rB1;
rC1_0 += m1; m1 = rA1 * rB1;
rC2_0 += m2; m2 = rA2 * rB1;
rC3_0 += m3; m3 = rA3 * rB1; rB1 = pB0[KB+7];
rC0_1 += m0; m0 = rA0 * rB2;
rC1_1 += m1; m1 = rA1 * rB2; ATL_pfl1R(pB0+KB2+16);
rC2_1 += m2; m2 = rA2 * rB2;
rC3_1 += m3; m3 = rA3 * rB2; rB2 = pB0[KB2+7];
rC0_2 += m0; m0 = rA0 * rB3; rA0 = pA0[7];
rC1_2 += m1; m1 = rA1 * rB3; rA1 = pA0[KB+7];
rC2_2 += m2; m2 = rA2 * rB3; rA2 = pA0[KB2+7];
rC3_2 += m3; m3 = rA3 * rB3; rA3 = pA0[KB3+7];
rC0_3 += m0; m0 = rA0 * rB0; rB3 = pB0[KB3+7];
rC1_3 += m1; m1 = rA1 * rB0;
rC2_3 += m2; m2 = rA2 * rB0;
rC3_3 += m3; m3 = rA3 * rB0; rB0 = pB0[8];
rC0_0 += m0; m0 = rA0 * rB1;
rC1_0 += m1; m1 = rA1 * rB1;
rC2_0 += m2; m2 = rA2 * rB1;
rC3_0 += m3; m3 = rA3 * rB1; rB1 = pB0[KB+8];
rC0_1 += m0; m0 = rA0 * rB2;
rC1_1 += m1; m1 = rA1 * rB2; ATL_pfl1R(pB0+KB3+16);
rC2_1 += m2; m2 = rA2 * rB2;
rC3_1 += m3; m3 = rA3 * rB2; rB2 = pB0[KB2+8];
rC0_2 += m0; m0 = rA0 * rB3; rA0 = pA0[8];
rC1_2 += m1; m1 = rA1 * rB3; rA1 = pA0[KB+8];
rC2_2 += m2; m2 = rA2 * rB3; rA2 = pA0[KB2+8];
rC3_2 += m3; m3 = rA3 * rB3; rA3 = pA0[KB3+8];
rC0_3 += m0; m0 = rA0 * rB0; rB3 = pB0[KB3+8];
rC1_3 += m1; m1 = rA1 * rB0; pA0 += 8;
rC2_3 += m2; m2 = rA2 * rB0; pB0 += 8;
rC3_3 += m3; m3 = rA3 * rB0;
}
rC0_0 += m0; m0 = rA0 * rB1; rB0 = pB0[1];
rC1_0 += m1; m1 = rA1 * rB1;
rC2_0 += m2; m2 = rA2 * rB1;
rC3_0 += m3; m3 = rA3 * rB1; rB1 = pB0[KB+1];
rC0_1 += m0; m0 = rA0 * rB2;
rC1_1 += m1; m1 = rA1 * rB2; ATL_pfl1R(pA0+KB4);
rC2_1 += m2; m2 = rA2 * rB2;
rC3_1 += m3; m3 = rA3 * rB2; rB2 = pB0[KB2+1];
rC0_2 += m0; m0 = rA0 * rB3; rA0 = pA0[1];
rC1_2 += m1; m1 = rA1 * rB3; rA1 = pA0[KB+1];
rC2_2 += m2; m2 = rA2 * rB3; rA2 = pA0[KB2+1];
rC3_2 += m3; m3 = rA3 * rB3; rA3 = pA0[KB3+1];
rC0_3 += m0; m0 = rA0 * rB0; rB3 = pB0[KB3+1];
rC1_3 += m1; m1 = rA1 * rB0;
rC2_3 += m2; m2 = rA2 * rB0;
rC3_3 += m3; m3 = rA3 * rB0; rB0 = pB0[2];
rC0_0 += m0; m0 = rA0 * rB1;
rC1_0 += m1; m1 = rA1 * rB1;
rC2_0 += m2; m2 = rA2 * rB1;
rC3_0 += m3; m3 = rA3 * rB1; rB1 = pB0[KB+2];
rC0_1 += m0; m0 = rA0 * rB2;
rC1_1 += m1; m1 = rA1 * rB2; ATL_pfl1R(pA0+KB5);
rC2_1 += m2; m2 = rA2 * rB2;
rC3_1 += m3; m3 = rA3 * rB2; rB2 = pB0[KB2+2];
rC0_2 += m0; m0 = rA0 * rB3; rA0 = pA0[2];
rC1_2 += m1; m1 = rA1 * rB3; rA1 = pA0[KB+2];
rC2_2 += m2; m2 = rA2 * rB3; rA2 = pA0[KB2+2];
rC3_2 += m3; m3 = rA3 * rB3; rA3 = pA0[KB3+2];
rC0_3 += m0; m0 = rA0 * rB0; rB3 = pB0[KB3+2];
rC1_3 += m1; m1 = rA1 * rB0;
rC2_3 += m2; m2 = rA2 * rB0;
rC3_3 += m3; m3 = rA3 * rB0; rB0 = pB0[3];
rC0_0 += m0; m0 = rA0 * rB1;
rC1_0 += m1; m1 = rA1 * rB1;
rC2_0 += m2; m2 = rA2 * rB1;
rC3_0 += m3; m3 = rA3 * rB1; rB1 = pB0[KB+3];
rC0_1 += m0; m0 = rA0 * rB2;
rC1_1 += m1; m1 = rA1 * rB2; ATL_pfl1R(pA0+KB6);
rC2_1 += m2; m2 = rA2 * rB2;
rC3_1 += m3; m3 = rA3 * rB2; rB2 = pB0[KB2+3];
rC0_2 += m0; m0 = rA0 * rB3; rA0 = pA0[3];
rC1_2 += m1; m1 = rA1 * rB3; rA1 = pA0[KB+3];
rC2_2 += m2; m2 = rA2 * rB3; rA2 = pA0[KB2+3];
rC3_2 += m3; m3 = rA3 * rB3; rA3 = pA0[KB3+3];
rC0_3 += m0; m0 = rA0 * rB0; rB3 = pB0[KB3+3];
rC1_3 += m1; m1 = rA1 * rB0;
rC2_3 += m2; m2 = rA2 * rB0;
rC3_3 += m3; m3 = rA3 * rB0; rB0 = pB0[4];
rC0_0 += m0; m0 = rA0 * rB1;
rC1_0 += m1; m1 = rA1 * rB1;
rC2_0 += m2; m2 = rA2 * rB1;
rC3_0 += m3; m3 = rA3 * rB1; rB1 = pB0[KB+4];
rC0_1 += m0; m0 = rA0 * rB2;
rC1_1 += m1; m1 = rA1 * rB2; ATL_pfl1R(pA0+KB7);
rC2_1 += m2; m2 = rA2 * rB2;
rC3_1 += m3; m3 = rA3 * rB2; rB2 = pB0[KB2+4];
rC0_2 += m0; m0 = rA0 * rB3; rA0 = pA0[4];
rC1_2 += m1; m1 = rA1 * rB3; rA1 = pA0[KB+4];
rC2_2 += m2; m2 = rA2 * rB3; rA2 = pA0[KB2+4];
rC3_2 += m3; m3 = rA3 * rB3; rA3 = pA0[KB3+4];
rC0_3 += m0; m0 = rA0 * rB0; rB3 = pB0[KB3+4];
rC1_3 += m1; m1 = rA1 * rB0;
rC2_3 += m2; m2 = rA2 * rB0;
rC3_3 += m3; m3 = rA3 * rB0; rB0 = pB0[5];
rC0_0 += m0; m0 = rA0 * rB1;
rC1_0 += m1; m1 = rA1 * rB1;
rC2_0 += m2; m2 = rA2 * rB1;
rC3_0 += m3; m3 = rA3 * rB1; rB1 = pB0[KB+5];
rC0_1 += m0; m0 = rA0 * rB2;
rC1_1 += m1; m1 = rA1 * rB2; ATL_pfl1R(pB0-KB+8);
rC2_1 += m2; m2 = rA2 * rB2;
rC3_1 += m3; m3 = rA3 * rB2; rB2 = pB0[KB2+5];
rC0_2 += m0; m0 = rA0 * rB3; rA0 = pA0[5];
rC1_2 += m1; m1 = rA1 * rB3; rA1 = pA0[KB+5];
rC2_2 += m2; m2 = rA2 * rB3; rA2 = pA0[KB2+5];
rC3_2 += m3; m3 = rA3 * rB3; rA3 = pA0[KB3+5];
rC0_3 += m0; m0 = rA0 * rB0; rB3 = pB0[KB3+5];
rC1_3 += m1; m1 = rA1 * rB0;
rC2_3 += m2; m2 = rA2 * rB0;
rC3_3 += m3; m3 = rA3 * rB0; rB0 = pB0[6];
rC0_0 += m0; m0 = rA0 * rB1;
rC1_0 += m1; m1 = rA1 * rB1;
rC2_0 += m2; m2 = rA2 * rB1;
rC3_0 += m3; m3 = rA3 * rB1; rB1 = pB0[KB+6];
rC0_1 += m0; m0 = rA0 * rB2;
rC1_1 += m1; m1 = rA1 * rB2; ATL_pfl1R(pB0+8);
rC2_1 += m2; m2 = rA2 * rB2;
rC3_1 += m3; m3 = rA3 * rB2; rB2 = pB0[KB2+6];
rC0_2 += m0; m0 = rA0 * rB3; rA0 = pA0[6];
rC1_2 += m1; m1 = rA1 * rB3; rA1 = pA0[KB+6];
rC2_2 += m2; m2 = rA2 * rB3; rA2 = pA0[KB2+6];
rC3_2 += m3; m3 = rA3 * rB3; rA3 = pA0[KB3+6];
rC0_3 += m0; m0 = rA0 * rB0; rB3 = pB0[KB3+6];
rC1_3 += m1; m1 = rA1 * rB0;
rC2_3 += m2; m2 = rA2 * rB0;
rC3_3 += m3; m3 = rA3 * rB0; rB0 = pB0[7];
rC0_0 += m0; m0 = rA0 * rB1;
rC1_0 += m1; m1 = rA1 * rB1;
rC2_0 += m2; m2 = rA2 * rB1;
rC3_0 += m3; m3 = rA3 * rB1; rB1 = pB0[KB+7];
rC0_1 += m0; m0 = rA0 * rB2;
rC1_1 += m1; m1 = rA1 * rB2; ATL_pfl1R(pB0+KB+8);
rC2_1 += m2; m2 = rA2 * rB2;
rC3_1 += m3; m3 = rA3 * rB2; rB2 = pB0[KB2+7];
rC0_2 += m0; m0 = rA0 * rB3; rA0 = pA0[7];
rC1_2 += m1; m1 = rA1 * rB3; rA1 = pA0[KB+7];
rC2_2 += m2; m2 = rA2 * rB3; rA2 = pA0[KB2+7];
rC3_2 += m3; m3 = rA3 * rB3; rA3 = pA0[KB3+7];
rC0_3 += m0; m0 = rA0 * rB0; rB3 = pB0[KB3+7];
rC1_3 += m1; m1 = rA1 * rB0; pB0 += incBm;
rC2_3 += m2; m2 = rA2 * rB0; pA0 += incAm;
rC3_3 += m3; m3 = rA3 * rB0; ATL_pfl1W(pC0);
/*
* Drain pipe on last iteration of K-loop
*/
rC0_0 += m0; m0 = rA0 * rB1;
rC1_0 += m1; m1 = rA1 * rB1; ATL_pfl1W(pC1);
rC2_0 += m2; m2 = rA2 * rB1;
rC3_0 += m3; m3 = rA3 * rB1; ATL_pfl1W(pC2);
rC0_1 += m0; m0 = rA0 * rB2;
rC1_1 += m1; m1 = rA1 * rB2; ATL_pfl1W(pC3);
rC2_1 += m2; m2 = rA2 * rB2;
rC3_1 += m3; m3 = rA3 * rB2; ATL_pfl1R(pC0+4);
rC0_2 += m0; m0 = rA0 * rB3;
rC1_2 += m1; m1 = rA1 * rB3; ATL_pfl1R(pC1+4);
rC2_2 += m2; m2 = rA2 * rB3;
rC3_2 += m3; m3 = rA3 * rB3; ATL_pfl1R(pC2+4);
rC0_3 += m0;
rC1_3 += m1; ATL_pfl1R(pC3+4);
rC2_3 += m2;
rC3_3 += m3; ATL_pfl1R(pB0+KB3+8);
#ifdef TREAL
*pC0 = rC0_0; pC0[1] = rC1_0; pC0[2] = rC2_0; pC0[3] = rC3_0; pC0 += 4;
*pC1 = rC0_1; pC1[1] = rC1_1; pC1[2] = rC2_1; pC1[3] = rC3_1; pC1 += 4;
*pC2 = rC0_2; pC2[1] = rC1_2; pC2[2] = rC2_2; pC2[3] = rC3_2; pC2 += 4;
*pC3 = rC0_3; pC3[1] = rC1_3; pC3[2] = rC2_3; pC3[3] = rC3_3; pC3 += 4;
#else
*pC0 = rC0_0; pC0[2] = rC1_0; pC0[4] = rC2_0; pC0[6] = rC3_0; pC0 += 8;
*pC1 = rC0_1; pC1[2] = rC1_1; pC1[4] = rC2_1; pC1[6] = rC3_1; pC1 += 8;
*pC2 = rC0_2; pC2[2] = rC1_2; pC2[4] = rC2_2; pC2[6] = rC3_2; pC2 += 8;
*pC3 = rC0_3; pC3[2] = rC1_3; pC3[4] = rC2_3; pC3[6] = rC3_3; pC3 += 8;
#endif
}
while(pA0 != stM);
pC0 += incCn; pC1 += incCn; pC2 += incCn; pC3 += incCn;
pA0 += incAn; pB0 += incBn;
}
while(pB0 != stN);
}
#undef incBn
|
968255.c | inherit ROOM;
void create()
{
set("short", "寒山寺");
set("long", @LONG
你走出西门约三十里地处,就到了枫桥镇,在唐元和年间,
有高僧寒山子曾来此居住,后希迁禅师在此建寺院时,更名寒
山寺。唐开元年间,诗人张继赴京应试落第,返途舟船停靠枫
桥,赋诗一首:“月落乌啼霜满天,江枫渔火对愁眠,姑苏城
外寒山寺,夜半钟声到客船”。从此,诗韵钟声,脍炙人口,
寺因诗而名扬天下。
LONG );
set("no_clean_up", 0);
set("exits", ([
"south" : __DIR__"road4",
"enter" : __DIR__"zhengdian",
]));
setup();
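// replace_program() swaps this object's program for the plain ROOM program
// once setup is complete, a common LPMud idiom for saving memory.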
replace_program(ROOM);
}
|
204559.c | #include "nemu.h"
#include "cpu/register.h"
static void nemu_init(int argc, char **argv, NEMU *nes) {
cpu_define_inst();
cpubus_init(&nes->cpubus, &nes->ram, &nes->ppu, &nes->cpu.apu, nes->cassette, &nes->pad);
cpu_init(&nes->cpu, &nes->cpubus);
ppubus_init(&nes->ppubus, nes->cassette);
ppu_init(&nes->ppu, &nes->ppubus);
gui_init(&nes->gui);
joypad_init(&nes->pad);
audio_init(argc, argv, &nes->audio);
apu_init(&nes->cpu.apu);
}
void nemu_close(NEMU *nes) {
gui_close(&nes->gui);
audio_close(&nes->audio);
}
void nemu_boot(int argc, char **argv, NEMU *nes, Cassette *cas) {
nes->cassette = cas;
nemu_init(argc, argv, nes);
cpu_reset(&nes->cpu);
for(;;) {
if(!request_frame(&nes->gui)) // 60 FPS
break;
int pcycle = 0;
/* draw 1frame */
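/* NTSC frame timing: 262 scanlines x 341 PPU dots per frame; the PPU runs
 * 3 dots per CPU cycle, and an OAM DMA write stalls the CPU ~513 cycles. */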
while(pcycle < 262 * 341) {
int ccycle = cpu_step(&nes->cpu);
if(nes->ppu.dma_write_flag) {
ccycle += 513;
nes->ppu.dma_write_flag = 0;
}
pcycle += ccycle * 3;
int nmi = ppu_step(&nes->ppu, nes->screen, ccycle * 3);
if(nmi) cpu_interrupt(&nes->cpu, NMI);
apu_step(&nes->cpu.apu, &nes->audio, ccycle);
if(nes->cpu.apu.irq) cpu_interrupt(&nes->cpu, IRQ);
}
gui_render(nes->screen);
audio_update(&nes->audio);
}
}
|
642075.c | /* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE191_Integer_Underflow__char_fscanf_sub_08.c
Label Definition File: CWE191_Integer_Underflow.label.xml
Template File: sources-sinks-08.tmpl.c
*/
/*
* @description
* CWE: 191 Integer Underflow
* BadSource: fscanf Read data from the console using fscanf()
* GoodSource: Set data to a small, non-zero number (negative two)
* Sinks: sub
* GoodSink: Ensure there will not be an underflow before subtracting 1 from data
* BadSink : Subtract 1 from data, which can cause an Underflow
* Flow Variant: 08 Control flow: if(staticReturnsTrue()) and if(staticReturnsFalse())
*
* */
#include "std_testcase.h"
/* The two functions below always return the same value, so a tool
should be able to identify that calls to the functions will always
return a fixed value. */
static int staticReturnsTrue()
{
return 1;
}
static int staticReturnsFalse()
{
return 0;
}
#ifndef OMITBAD
void CWE191_Integer_Underflow__char_fscanf_sub_08_bad()
{
char data;
data = ' ';
if(staticReturnsTrue())
{
/* POTENTIAL FLAW: Use a value input from the console */
fscanf (stdin, "%c", &data);
}
if(staticReturnsTrue())
{
{
/* POTENTIAL FLAW: Subtracting 1 from data could cause an underflow */
char result = data - 1;
printHexCharLine(result);
}
}
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* goodB2G1() - use badsource and goodsink by changing the second staticReturnsTrue() to staticReturnsFalse() */
static void goodB2G1()
{
char data;
data = ' ';
if(staticReturnsTrue())
{
/* POTENTIAL FLAW: Use a value input from the console */
fscanf (stdin, "%c", &data);
}
if(staticReturnsFalse())
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
printLine("Benign, fixed string");
}
else
{
/* FIX: Add a check to prevent an underflow from occurring */
if (data > CHAR_MIN)
{
char result = data - 1;
printHexCharLine(result);
}
else
{
printLine("data value is too large to perform subtraction.");
}
}
}
/* goodB2G2() - use badsource and goodsink by reversing the blocks in the second if */
static void goodB2G2()
{
char data;
data = ' ';
if(staticReturnsTrue())
{
/* POTENTIAL FLAW: Use a value input from the console */
fscanf (stdin, "%c", &data);
}
if(staticReturnsTrue())
{
/* FIX: Add a check to prevent an underflow from occurring */
if (data > CHAR_MIN)
{
char result = data - 1;
printHexCharLine(result);
}
else
{
printLine("data value is too large to perform subtraction.");
}
}
}
/* goodG2B1() - use goodsource and badsink by changing the first staticReturnsTrue() to staticReturnsFalse() */
static void goodG2B1()
{
char data;
data = ' ';
if(staticReturnsFalse())
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
printLine("Benign, fixed string");
}
else
{
/* FIX: Use a small, non-zero value that will not cause an underflow in the sinks */
data = -2;
}
if(staticReturnsTrue())
{
{
/* POTENTIAL FLAW: Subtracting 1 from data could cause an underflow */
char result = data - 1;
printHexCharLine(result);
}
}
}
/* goodG2B2() - use goodsource and badsink by reversing the blocks in the first if */
static void goodG2B2()
{
char data;
data = ' ';
if(staticReturnsTrue())
{
/* FIX: Use a small, non-zero value that will not cause an underflow in the sinks */
data = -2;
}
if(staticReturnsTrue())
{
{
/* POTENTIAL FLAW: Subtracting 1 from data could cause an underflow */
char result = data - 1;
printHexCharLine(result);
}
}
}
void CWE191_Integer_Underflow__char_fscanf_sub_08_good()
{
goodB2G1();
goodB2G2();
goodG2B1();
goodG2B2();
}
#endif /* OMITGOOD */
/* Below is the main(). It is only used when building this testcase on
its own for testing or for building a binary to use in testing binary
analysis tools. It is not used when compiling all the testcases as one
application, which is how source code analysis tools are tested. */
#ifdef INCLUDEMAIN
int main(int argc, char * argv[])
{
/* seed randomness */
srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
printLine("Calling good()...");
CWE191_Integer_Underflow__char_fscanf_sub_08_good();
printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
printLine("Calling bad()...");
CWE191_Integer_Underflow__char_fscanf_sub_08_bad();
printLine("Finished bad()");
#endif /* OMITBAD */
return 0;
}
#endif
|
976091.c | /*!
* @file main.c
* @brief 02. Variables and Data Types - 02. Console input
* @author Javier Balloffet <[email protected]>
* @date Sep 7, 2018
*/
#include <stdio.h>
int main(void)
{
// Several variables can be declared on a single line.
char char_value;
int int_value1, int_value2;
float float_value;
double double_value;
// Read the values of the variables from the console.
printf("Ingrese el valor a almacenar en char_value\n");
scanf("%c", &char_value);
printf("Ingrese el valor a almacenar en int_value1\n");
scanf("%d", &int_value1);
printf("Ingrese el valor a almacenar en int_value2\n");
scanf("%d", &int_value2);
printf("Ingrese el valor a almacenar en myDecimalVariable\n");
scanf("%f", &float_value);
printf("Ingrese el valor a almacenar en double_value\n");
scanf("%lf", &double_value);
// Print the contents of the variables to the screen.
printf("El contenido de char_value es %c\n", char_value);
printf("El contenido de int_value1 es %d\n", int_value1);
printf("El contenido de int_value2 es %d\n", int_value2);
printf("El contenido de float_value es %f\n", float_value);
printf("El contenido de double_value es %lf\n", double_value);
return 0;
}
|
665733.c | /* Created RJudd September 30, 2000 */
/* SPAWARSYSCEN D857 */
/**********************************************************************
// For TASP VSIPL Documentation and Code neither the United States /
// Government, the United States Navy, nor any of their employees, /
// makes any warranty, express or implied, including the warranties /
// of merchantability and fitness for a particular purpose, or /
// assumes any legal liability or responsibility for the accuracy, /
// completeness, or usefulness of any information, apparatus, /
// product, or process disclosed, or represents that its use would /
// not infringe privately owned rights /
**********************************************************************/
/* $Id: vsip_vputstride_mi.c,v 2.0 2003/02/22 15:19:17 judd Exp $ */
#include<vsip.h>
#include<vsip_vviewattributes_mi.h>
vsip_vview_mi* (vsip_vputstride_mi)(
vsip_vview_mi* v,
vsip_stride s) {
v->stride = s;
return v;
}
|
425561.c | /*
* Copyright (c) 2017 Intel Corporation.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
#include <toolchain.h>
#include <sections.h>
#include <string.h>
#include <errno.h>
#include <shell/shell.h>
#include <misc/printk.h>
#include <net/net_core.h>
#include <net/net_l2.h>
#include <net/net_if.h>
#include <net/bt.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#define BT_SHELL_MODULE "net_bt"
static int char2hex(const char *c, uint8_t *x)
{
if (*c >= '0' && *c <= '9') {
*x = *c - '0';
} else if (*c >= 'a' && *c <= 'f') {
*x = *c - 'a' + 10;
} else if (*c >= 'A' && *c <= 'F') {
*x = *c - 'A' + 10;
} else {
return -EINVAL;
}
return 0;
}
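/*
 * Parse an address of the form "XX:XX:XX:XX:XX:XX". The string gives the
 * most significant byte first, while bt_addr_le_t stores the least
 * significant byte first, so addr->a.val is filled from index 5 down to 0.
 */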
static int str2bt_addr_le(const char *str, const char *type, bt_addr_le_t *addr)
{
int i, j;
uint8_t tmp;
if (strlen(str) != 17) {
return -EINVAL;
}
for (i = 5, j = 1; *str != '\0'; str++, j++) {
if (!(j % 3) && (*str != ':')) {
return -EINVAL;
} else if (*str == ':') {
i--;
continue;
}
addr->a.val[i] = addr->a.val[i] << 4;
if (char2hex(str, &tmp) < 0) {
return -EINVAL;
}
addr->a.val[i] |= tmp;
}
if (!strcmp(type, "public") || !strcmp(type, "(public)")) {
addr->type = BT_ADDR_LE_PUBLIC;
} else if (!strcmp(type, "random") || !strcmp(type, "(random)")) {
addr->type = BT_ADDR_LE_RANDOM;
} else {
return -EINVAL;
}
return 0;
}
static int shell_cmd_connect(int argc, char *argv[])
{
int err;
bt_addr_le_t addr;
struct net_if *iface = net_if_get_default();
if (argc < 3) {
return -EINVAL;
}
err = str2bt_addr_le(argv[1], argv[2], &addr);
if (err) {
printk("Invalid peer address (err %d)\n", err);
return 0;
}
if (net_mgmt(NET_REQUEST_BT_CONNECT, iface, &addr, sizeof(addr))) {
printk("Connection failed\n");
} else {
printk("Connection pending\n");
}
return 0;
}
static int shell_cmd_scan(int argc, char *argv[])
{
struct net_if *iface = net_if_get_default();
if (argc < 2) {
return -EINVAL;
}
if (net_mgmt(NET_REQUEST_BT_SCAN, iface, argv[1], strlen(argv[1]))) {
printk("Scan failed\n");
} else {
printk("Scan in progress\n");
}
return 0;
}
static int shell_cmd_disconnect(int argc, char *argv[])
{
struct net_if *iface = net_if_get_default();
if (net_mgmt(NET_REQUEST_BT_DISCONNECT, iface, NULL, 0)) {
printk("Disconnect failed\n");
} else {
printk("Disconnected\n");
}
return 0;
}
static struct shell_cmd bt_commands[] = {
{ "connect", shell_cmd_connect,
"<address: XX:XX:XX:XX:XX:XX> <type: (public|random)>" },
{ "scan", shell_cmd_scan, "<on/off/active/passive>" },
{ "disconnect", shell_cmd_disconnect, "" },
{ NULL, NULL, NULL },
};
SHELL_REGISTER(BT_SHELL_MODULE, bt_commands);
|
402227.c | /*
* corePKCS11 V3.0.0
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://aws.amazon.com/freertos
* http://www.FreeRTOS.org
*/
/**
* @file core_pkcs11_mbedtls.c
* @brief mbedTLS-based PKCS#11 implementation for software keys. This
* file deviates from the FreeRTOS style standard for some function names and
* data types in order to maintain compliance with the PKCS #11 standard.
*/
/* PKCS #11 includes. */
#include "core_pkcs11_config.h"
#include "core_pkcs11.h"
#include "core_pkcs11_pal.h"
#include "core_pki_utils.h"
/* mbedTLS includes. */
#include "mbedtls/pk.h"
#include "mbedtls/pk_internal.h"
#include "mbedtls/x509_crt.h"
#include "mbedtls/ctr_drbg.h"
#include "mbedtls/entropy.h"
#include "mbedtls/sha256.h"
#include "mbedtls/platform.h"
#include "mbedtls/threading.h"
/* Custom mbedtls utils include. */
#include "mbedtls_error.h"
/* C runtime includes. */
#include <string.h>
#define LogDebug(X)
#define LogError(X)
#define LogWarn(X)
#define LogInfo(X)
/*-----------------------------------------------------------*/
/**
* @defgroup pkcs11_macros PKCS #11 Implementation Macros
* @brief Macros for PKCS #11 software implementation.
*/
/**
* @defgroup pkcs11_datatypes PKCS #11 Datatypes
* @brief Internal datatypes for PKCS #11 software implementation.
*/
#ifndef DISABLE_LOGGING
/**
* @brief Represents string to be logged when mbedTLS returned error
* does not contain a high-level code.
*/
static const char * pNoHighLevelMbedTlsCodeStr = "<No-High-Level-Code>";
/**
* @brief Represents string to be logged when mbedTLS returned error
* does not contain a low-level code.
*/
static const char * pNoLowLevelMbedTlsCodeStr = "<No-Low-Level-Code>";
/**
* @brief Utility for converting the high-level code in an mbedTLS error to string,
* if the code contains a high-level code; otherwise, using a default string.
*/
#define mbedtlsHighLevelCodeOrDefault( mbedTlsCode ) \
( mbedtls_strerror_highlevel( mbedTlsCode ) != NULL ) ? \
mbedtls_strerror_highlevel( mbedTlsCode ) : pNoHighLevelMbedTlsCodeStr
/**
* @brief Utility for converting the low-level code in an mbedTLS error to string,
* if the code contains a low-level code; otherwise, using a default string.
*/
#define mbedtlsLowLevelCodeOrDefault( mbedTlsCode ) \
( mbedtls_strerror_lowlevel( mbedTlsCode ) != NULL ) ? \
mbedtls_strerror_lowlevel( mbedTlsCode ) : pNoLowLevelMbedTlsCodeStr
#endif /* ifndef DISABLE_LOGGING */
/**
* @ingroup pkcs11_macros
* @brief Delay to wait on acquiring a mutex, in ms.
*/
#define pkcs11MUTEX_WAIT_MS ( pdMS_TO_TICKS( 5000U ) )
/**
* @ingroup pkcs11_macros
* @brief Indicates that no PKCS #11 operation is underway for given session.
*/
#define pkcs11NO_OPERATION ( ( CK_MECHANISM_TYPE ) 0xFFFFFFFFUL )
/**
* @ingroup pkcs11_macros
* @brief size of a prime256v1 EC private key in bytes, when encoded in DER.
*/
#define pkcs11_PRIVATE_EC_PRIME_256_DER_SIZE 160
/**
* @ingroup pkcs11_macros
* @brief size of a prime256v1 EC public key in bytes, when encoded in DER.
*/
#define pkcs11_PUBLIC_EC_PRIME_256_DER_SIZE 100
/**
* @ingroup pkcs11_macros
* @brief size of a 2048 bit RSA public key in bytes, when encoded in DER.
*/
#define pkcs11_PUBLIC_RSA_2048_DER_SIZE 300
/**
* @ingroup pkcs11_macros
* @brief size of a 2048 bit RSA private key in bytes, in DER encoding.
*/
#define pkcs11_PRIVATE_RSA_2048_DER_SIZE 1200
/**
* @ingroup pkcs11_macros
* @brief Max size of an EC public key in bytes, in DER encoding.
*/
#define pkcs11_MAX_EC_PUBLIC_KEY_DER_SIZE pkcs11_PUBLIC_EC_PRIME_256_DER_SIZE
/**
* @ingroup pkcs11_macros
* @brief Max size of an EC private key in bytes, in DER encoding.
*/
#define pkcs11_MAX_EC_PRIVATE_KEY_DER_SIZE pkcs11_PRIVATE_EC_PRIME_256_DER_SIZE
/**
* @ingroup pkcs11_macros
* @brief Length of bytes to contain an EC point.
*
* This port currently only uses prime256v1, in which the fields are 32 bytes in
* length. The public EC point is as long as the curve's fields * 2 + 1,
* so the EC point for this port is (32 * 2) + 1 bytes in length.
*
* mbed TLS encodes the length of the point in the first byte of the buffer it
* receives, so an additional 1 byte in length is added to account for this.
*
* In addition to this, an additional 1 byte is added to store information
* indicating that the point is uncompressed.
*
* @note This length needs to be updated if using a different curve.
*
* To summarize:
* two coordinates of 32 bytes each + 1 point length byte, 1 length byte, and
* 1 type (uncompressed) byte
*/
#define pkcs11EC_POINT_LENGTH ( ( 32UL * 2UL ) + 1UL + 1UL + 1UL )
/**
* @ingroup pkcs11_macros
* @brief Max size of a public key.
* This macro defines the size of a key in bytes, in DER encoding.
*
* @note The largest RSA public key is used because EC keys are smaller.
*/
#define pkcs11_MAX_PUBLIC_KEY_DER_SIZE pkcs11_PUBLIC_RSA_2048_DER_SIZE
/**
* @ingroup pkcs11_macros
* @brief Max length of a key.
* This macro defines the size of a key in bytes, in DER format.
*
* Currently the largest key type supported by this port is a 2048 bit
* RSA private key.
*
* @note The largest RSA private key is used because EC keys are smaller and
* the RSA public key is smaller.
*/
#define pkcs11_MAX_PRIVATE_KEY_DER_SIZE pkcs11_PRIVATE_RSA_2048_DER_SIZE
/**
* @ingroup pkcs11_macros
* @brief The size of the buffer malloc'ed for the exported public key in C_GenerateKeyPair.
*/
#define pkcs11KEY_GEN_MAX_DER_SIZE 200
/**
* @ingroup pkcs11_macros
* @brief The slot ID to be returned by this PKCS #11 implementation.
*
* @note that this implementation does not have a concept of "slots" so this number is arbitrary.
*/
#define pkcs11SLOT_ID 1
/**
* @ingroup pkcs11_macros
* @brief Private defines for checking that attribute templates are complete.
*/
#define LABEL_IN_TEMPLATE ( 1U ) /**< Bit set for label in template. */
#define PRIVATE_IN_TEMPLATE ( 1U << 1 ) /**< Bit set for private key in in template. */
#define SIGN_IN_TEMPLATE ( 1U << 2 ) /**< Bit set for sign in template. */
#define EC_PARAMS_IN_TEMPLATE ( 1U << 3 ) /**< Bit set for EC params in template. */
#define VERIFY_IN_TEMPLATE ( 1U << 4 ) /**< Bit set for verify in template. */
/**
* @ingroup pkcs11_macros
* @brief Macro to signify an invalid PKCS #11 key type.
*/
#define PKCS11_INVALID_KEY_TYPE ( ( CK_KEY_TYPE ) 0xFFFFFFFFUL )
/**
* @ingroup pkcs11_datatypes
* @brief PKCS #11 object container.
*
* Maps a PKCS #11 object handle to its label.
*/
typedef struct P11Object_t
{
CK_OBJECT_HANDLE xHandle; /**< @brief The "PAL Handle". */
CK_ULONG xLabelSize; /**< @brief Size of label. */
CK_BYTE xLabel[ pkcs11configMAX_LABEL_LENGTH + 1 ]; /**< @brief Plus 1 for the null terminator. */
} P11Object_t;
/**
* @ingroup pkcs11_datatypes
* @brief PKCS #11 object container list.
*
* This structure helps the core_pkcs11_mbedtls.c maintain a mapping of all objects in one place.
* Because some objects exist in device NVM and must be called by their "PAL Handles", and other
* objects do not have designated NVM storage locations, the ObjectList maintains a list
* of what object handles are available.
*/
typedef struct P11ObjectList_t
{
mbedtls_threading_mutex_t xMutex; /**< @brief Mutex that protects write operations to the xObjects array. */
P11Object_t xObjects[ pkcs11configMAX_NUM_OBJECTS ]; /**< @brief List of PKCS #11 objects. */
} P11ObjectList_t;
/**
* @ingroup pkcs11_datatypes
* @brief PKCS #11 Module Object
*/
typedef struct P11Struct_t
{
CK_BBOOL xIsInitialized; /**< @brief Indicates whether PKCS #11 module has been initialized with a call to C_Initialize. */
mbedtls_ctr_drbg_context xMbedDrbgCtx; /**< @brief CTR-DRBG context for PKCS #11 module - used to generate pseudo-random numbers. */
mbedtls_entropy_context xMbedEntropyContext; /**< @brief Entropy context for PKCS #11 module - used to collect entropy for RNG. */
mbedtls_threading_mutex_t xSessionMutex; /**< @brief Mutex that protects write operations to the pxSession array. */
P11ObjectList_t xObjectList; /**< @brief List of PKCS #11 objects that have been found/created since module initialization.
* The array position indicates the "App Handle" */
} P11Struct_t;
/**
* @ingroup pkcs11_datatypes
* @brief Session structure.
*/
typedef struct P11Session
{
CK_ULONG ulState; /**< @brief Stores the session flags. */
CK_BBOOL xOpened; /**< @brief Set to CK_TRUE upon opening PKCS #11 session. */
CK_MECHANISM_TYPE xOperationDigestMechanism; /**< @brief Indicates if a digest operation is in progress. */
CK_BYTE * pxFindObjectLabel; /**< @brief Pointer to the label for the search in progress. Should be NULL if no search in progress. */
CK_ULONG xFindObjectLabelLen; /**< @brief Size of current search label. */
CK_MECHANISM_TYPE xOperationVerifyMechanism; /**< @brief The mechanism of verify operation in progress. Set during C_VerifyInit. */
mbedtls_threading_mutex_t xVerifyMutex; /**< @brief Protects the verification key from being modified while in use. */
CK_OBJECT_HANDLE xVerifyKeyHandle; /**< @brief Object handle to the verification key. */
mbedtls_pk_context xVerifyKey; /**< @brief Verification key. Set during C_VerifyInit. */
CK_MECHANISM_TYPE xOperationSignMechanism; /**< @brief Mechanism of the sign operation in progress. Set during C_SignInit. */
mbedtls_threading_mutex_t xSignMutex; /**< @brief Protects the signing key from being modified while in use. */
CK_OBJECT_HANDLE xSignKeyHandle; /**< @brief Object handle to the signing key. */
mbedtls_pk_context xSignKey; /**< @brief Signing key. Set during C_SignInit. */
mbedtls_sha256_context xSHA256Context; /**< @brief Context for in progress digest operation. */
} P11Session_t;
/*-----------------------------------------------------------*/
/**
* @brief The global PKCS #11 module object.
* Entropy/randomness and object lists are shared across PKCS #11 sessions.
*/
static P11Struct_t xP11Context;
/**
* @brief The global PKCS #11 session list.
*/
static P11Session_t pxP11Sessions[ pkcs11configMAX_SESSIONS ] = { 0 };
/**
* @brief Helper to check if the current session is initialized and valid.
*/
static CK_RV prvCheckValidSessionAndModule( const P11Session_t * pxSession )
{
CK_RV xResult = CKR_OK;
/** MISRA Rule 10.5 - Cannot cast from unsigned to signed.
* Rule 10.5 is violated because the boolean macros defined by PKCS #11 have
* the values 0 and 1, which are signed integers, while the underlying
* type of CK_BBOOL is an unsigned char.
*
* This means that our implementation conforms to the exception provided by MISRA.
* To quote MISRA: "An integer constant expression with the value 0 or 1 of either signedness
* may be cast to a type which is defined as essentially Boolean.
* This allows the implementation of non-C99 Boolean models."
*/
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xP11Context.xIsInitialized == ( CK_BBOOL ) CK_FALSE )
{
LogDebug( ( "Could not get a valid session. PKCS #11 was not initialized since xP11Context.xIsInitialized was CK_FALSE." ) );
xResult = CKR_CRYPTOKI_NOT_INITIALIZED;
}
else if( pxSession == NULL )
{
LogDebug( ( "Could not get a valid session. PKCS #11 session handle pSession was NULL." ) );
xResult = CKR_SESSION_HANDLE_INVALID;
}
/* coverity[misra_c_2012_rule_10_5_violation] */
else if( pxSession->xOpened == ( CK_BBOOL ) CK_FALSE )
{
LogDebug( ( "Could not get a valid session. PKCS #11 session handle was not initialized with a previous call to C_OpenSession." ) );
xResult = CKR_SESSION_HANDLE_INVALID;
}
else
{
/* Session is initialized and valid. */
}
return xResult;
}
/**
* @brief Maps an opaque caller session handle into its internal state structure.
*/
static P11Session_t * prvSessionPointerFromHandle( CK_SESSION_HANDLE xSession )
{
P11Session_t * pxSession = NULL;
if( ( xSession >= 1UL ) && ( xSession <= pkcs11configMAX_SESSIONS ) )
{
/* Decrement by 1, invalid handles in PKCS #11 are defined to be 0. */
pxSession = &pxP11Sessions[ xSession - 1UL ];
}
else
{
LogDebug( ( "Could not convert from CK_SESSION_HANDLE to P11Session_t pointer. Session handle was out of the valid range. Session handle was: %lu.", ( unsigned long int ) xSession ) );
}
return pxSession;
}
/**
* @brief Determines if an operation is in progress.
*/
static CK_BBOOL prvOperationActive( const P11Session_t * pxSession )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
CK_BBOOL xResult = ( CK_BBOOL ) CK_FALSE;
if( ( pxSession->xOperationDigestMechanism < pkcs11NO_OPERATION ) == CK_TRUE )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
xResult = ( CK_BBOOL ) CK_TRUE;
}
else if( ( pxSession->xOperationSignMechanism < pkcs11NO_OPERATION ) == CK_TRUE )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
xResult = ( CK_BBOOL ) CK_TRUE;
}
else if( ( pxSession->xOperationVerifyMechanism < pkcs11NO_OPERATION ) == CK_TRUE )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
xResult = ( CK_BBOOL ) CK_TRUE;
}
else if( pxSession->pxFindObjectLabel != NULL )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
xResult = ( CK_BBOOL ) CK_TRUE;
}
else
{
/* MISRA */
}
return xResult;
}
/**
* @brief Initialize mbedTLS.
*/
static CK_RV prvMbedTLS_Initialize( void )
{
CK_RV xResult = CKR_OK;
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
int32_t lMbedTLSResult = 0;
( void ) memset( &xP11Context, 0, sizeof( xP11Context ) );
mbedtls_mutex_init( &xP11Context.xObjectList.xMutex );
mbedtls_mutex_init( &xP11Context.xSessionMutex );
/* Initialize the entropy source and DRBG for the PKCS#11 module */
mbedtls_entropy_init( &xP11Context.xMbedEntropyContext );
mbedtls_ctr_drbg_init( &xP11Context.xMbedDrbgCtx );
lMbedTLSResult = mbedtls_ctr_drbg_seed( &xP11Context.xMbedDrbgCtx,
mbedtls_entropy_func,
&xP11Context.xMbedEntropyContext,
NULL,
0 );
if( 0 != lMbedTLSResult )
{
LogError( ( "Could not initialize PKCS #11. Failed to seed the DRBG: mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
xResult = CKR_FUNCTION_FAILED;
}
else
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
xP11Context.xIsInitialized = ( CK_BBOOL ) CK_TRUE;
LogDebug( ( "PKCS #11 module was successfully initialized." ) );
}
return xResult;
}
/**
* @brief Searches a template for the CKA_CLASS attribute.
*/
static CK_RV prvGetObjectClass( const CK_ATTRIBUTE * pxTemplate,
CK_ULONG ulCount,
CK_OBJECT_CLASS * pxClass )
{
CK_RV xResult = CKR_TEMPLATE_INCOMPLETE;
CK_ULONG ulIndex = 0;
/* Search template for class attribute. */
for( ulIndex = 0; ulIndex < ulCount; ulIndex++ )
{
if( ( pxTemplate[ ulIndex ].type == CKA_CLASS ) &&
( pxTemplate[ ulIndex ].ulValueLen == sizeof( CK_OBJECT_CLASS ) ) )
{
LogDebug( ( "Successfully found object class attribute." ) );
( void ) memcpy( pxClass, pxTemplate[ ulIndex ].pValue,
sizeof( CK_OBJECT_CLASS ) );
xResult = CKR_OK;
break;
}
}
return xResult;
}
/**
* @brief Parses attribute values for a certificate.
*/
static CK_RV prvCertAttParse( CK_ATTRIBUTE * pxAttribute,
CK_CERTIFICATE_TYPE * pxCertificateType,
CK_BYTE_PTR * ppxCertificateValue,
CK_ULONG * pxCertificateLength,
CK_ATTRIBUTE ** ppxLabel )
{
CK_RV xResult = CKR_OK;
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
CK_BBOOL xBool = ( CK_BBOOL ) CK_FALSE;
switch( pxAttribute->type )
{
case ( CKA_VALUE ):
*ppxCertificateValue = pxAttribute->pValue;
*pxCertificateLength = pxAttribute->ulValueLen;
break;
case ( CKA_LABEL ):
if( pxAttribute->ulValueLen <= pkcs11configMAX_LABEL_LENGTH )
{
*ppxLabel = pxAttribute;
}
else
{
LogError( ( "Failed parsing certificate template. Label length "
"was not in the valid range. Found %lu and expected %lu. "
"Consider updating pkcs11configMAX_LABEL_LENGTH.",
( unsigned long int ) pxAttribute->ulValueLen,
( unsigned long int ) pkcs11configMAX_LABEL_LENGTH ) );
xResult = CKR_DATA_LEN_RANGE;
}
break;
case ( CKA_CERTIFICATE_TYPE ):
if( pxAttribute->ulValueLen == sizeof( CK_CERTIFICATE_TYPE ) )
{
( void ) memcpy( pxCertificateType, pxAttribute->pValue, sizeof( CK_CERTIFICATE_TYPE ) );
}
if( *pxCertificateType != CKC_X_509 )
{
LogError( ( "Failed parsing certificate template. Certificate type was invalid. "
"Expected CKC_X_509, but found 0x%0lX.", ( unsigned long int ) *pxCertificateType ) );
xResult = CKR_ATTRIBUTE_VALUE_INVALID;
}
break;
case ( CKA_TOKEN ):
if( pxAttribute->ulValueLen == sizeof( CK_BBOOL ) )
{
( void ) memcpy( &xBool, pxAttribute->pValue, sizeof( CK_BBOOL ) );
}
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xBool != ( CK_BBOOL ) CK_TRUE )
{
xResult = CKR_ATTRIBUTE_VALUE_INVALID;
}
break;
case ( CKA_CLASS ):
case ( CKA_SUBJECT ):
/* Do nothing. This was already parsed out of the template previously. */
break;
default:
LogError( ( "Failed parsing certificate template. Received an unknown "
"template type with value 0x%0lX.", ( unsigned long int ) pxAttribute->type ) );
xResult = CKR_ATTRIBUTE_TYPE_INVALID;
break;
}
return xResult;
}
/**
* @brief Parses attribute values for an RSA private Key.
*/
static CK_RV prvRsaPrivKeyAttParse( const CK_ATTRIBUTE * pxAttribute )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
CK_BBOOL xBool = ( CK_BBOOL ) CK_FALSE;
CK_RV xResult = CKR_OK;
if( pxAttribute->type == CKA_SIGN )
{
if( pxAttribute->ulValueLen == sizeof( CK_BBOOL ) )
{
( void ) memcpy( &xBool, pxAttribute->pValue, pxAttribute->ulValueLen );
}
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xBool == ( CK_BBOOL ) CK_FALSE )
{
xResult = CKR_ATTRIBUTE_VALUE_INVALID;
LogError( ( "Failed to parse RSA private key. Expected sign permissions to be supported." ) );
}
}
return xResult;
}
/**
* @brief Parses attribute values for an RSA public Key.
*/
static CK_RV prvRsaPubKeyAttParse( const CK_ATTRIBUTE * pxAttribute )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
CK_BBOOL xBool = ( CK_BBOOL ) CK_FALSE;
CK_RV xResult = CKR_OK;
if( pxAttribute->type == CKA_VERIFY )
{
if( pxAttribute->ulValueLen == sizeof( CK_BBOOL ) )
{
( void ) memcpy( &xBool, pxAttribute->pValue, pxAttribute->ulValueLen );
}
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xBool == ( CK_BBOOL ) CK_FALSE )
{
xResult = CKR_ATTRIBUTE_VALUE_INVALID;
LogError( ( "Failed to parse RSA public key. Expected verify permissions to be supported." ) );
}
}
return xResult;
}
/**
* @brief Parses attribute values for an RSA key and puts them in the mbed TLS context.
*/
static CK_RV prvRsaContextParse( const CK_ATTRIBUTE * pxAttribute,
mbedtls_rsa_context * pxRsaContext )
{
CK_RV xResult = CKR_OK;
int32_t lMbedTLSResult = 0;
switch( pxAttribute->type )
{
case ( CKA_MODULUS ):
lMbedTLSResult = mbedtls_rsa_import_raw( pxRsaContext,
pxAttribute->pValue, pxAttribute->ulValueLen, /* N */
NULL, 0, /* P */
NULL, 0, /* Q */
NULL, 0, /* D */
NULL, 0 ); /* E */
break;
case ( CKA_PUBLIC_EXPONENT ):
lMbedTLSResult = mbedtls_rsa_import_raw( pxRsaContext,
NULL, 0, /* N */
NULL, 0, /* P */
NULL, 0, /* Q */
NULL, 0, /* D */
pxAttribute->pValue, pxAttribute->ulValueLen ); /* E */
break;
case ( CKA_PRIME_1 ):
lMbedTLSResult = mbedtls_rsa_import_raw( pxRsaContext,
NULL, 0, /* N */
pxAttribute->pValue, pxAttribute->ulValueLen, /* P */
NULL, 0, /* Q */
NULL, 0, /* D */
NULL, 0 ); /* E */
break;
case ( CKA_PRIME_2 ):
lMbedTLSResult = mbedtls_rsa_import_raw( pxRsaContext,
NULL, 0, /* N */
NULL, 0, /* P */
pxAttribute->pValue, pxAttribute->ulValueLen, /* Q */
NULL, 0, /* D */
NULL, 0 ); /* E */
break;
case ( CKA_PRIVATE_EXPONENT ):
lMbedTLSResult = mbedtls_rsa_import_raw( pxRsaContext,
NULL, 0, /* N */
NULL, 0, /* P */
NULL, 0, /* Q */
pxAttribute->pValue, pxAttribute->ulValueLen, /* D */
NULL, 0 ); /* E */
break;
case ( CKA_EXPONENT_1 ):
lMbedTLSResult = mbedtls_mpi_read_binary( &pxRsaContext->DP, pxAttribute->pValue, pxAttribute->ulValueLen );
break;
case ( CKA_EXPONENT_2 ):
lMbedTLSResult = mbedtls_mpi_read_binary( &pxRsaContext->DQ, pxAttribute->pValue, pxAttribute->ulValueLen );
break;
case ( CKA_COEFFICIENT ):
lMbedTLSResult = mbedtls_mpi_read_binary( &pxRsaContext->QP, pxAttribute->pValue, pxAttribute->ulValueLen );
break;
default:
/* This should never be reached, as only the above attribute types cause this function to be called.
* Nevertheless this is an error case, and MISRA requires a default statement. */
xResult = CKR_ATTRIBUTE_TYPE_INVALID;
break;
}
if( lMbedTLSResult != 0 )
{
LogError( ( "Failed to parse RSA private key template: mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
xResult = CKR_FUNCTION_FAILED;
}
return xResult;
}
/**
* @brief Parses attribute values for an RSA Key.
*/
static CK_RV prvRsaKeyAttParse( const CK_ATTRIBUTE * pxAttribute,
mbedtls_rsa_context * pxRsaContext,
CK_BBOOL xIsPrivate )
{
CK_RV xResult = CKR_OK;
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
CK_BBOOL xBool = ( CK_BBOOL ) CK_FALSE;
switch( pxAttribute->type )
{
case ( CKA_CLASS ):
case ( CKA_KEY_TYPE ):
case ( CKA_LABEL ):
/* Do nothing. These values were parsed previously. */
break;
case ( CKA_TOKEN ):
if( pxAttribute->ulValueLen == sizeof( CK_BBOOL ) )
{
( void ) memcpy( &xBool, pxAttribute->pValue, sizeof( CK_BBOOL ) );
}
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xBool != ( CK_BBOOL ) CK_TRUE )
{
xResult = CKR_ATTRIBUTE_VALUE_INVALID;
}
break;
case ( CKA_VERIFY ):
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xIsPrivate == ( CK_BBOOL ) CK_FALSE )
{
xResult = prvRsaPubKeyAttParse( pxAttribute );
}
else
{
xResult = CKR_ATTRIBUTE_VALUE_INVALID;
}
break;
case ( CKA_SIGN ):
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xIsPrivate == ( CK_BBOOL ) CK_TRUE )
{
xResult = prvRsaPrivKeyAttParse( pxAttribute );
}
else
{
xResult = CKR_ATTRIBUTE_VALUE_INVALID;
}
break;
case ( CKA_MODULUS ):
case ( CKA_PUBLIC_EXPONENT ):
case ( CKA_PRIME_1 ):
case ( CKA_PRIME_2 ):
case ( CKA_PRIVATE_EXPONENT ):
case ( CKA_EXPONENT_1 ):
case ( CKA_EXPONENT_2 ):
case ( CKA_COEFFICIENT ):
xResult = prvRsaContextParse( pxAttribute, pxRsaContext );
break;
default:
xResult = CKR_ATTRIBUTE_TYPE_INVALID;
break;
}
return xResult;
}
/**
* @brief Parses attribute values for a private EC Key.
*/
#ifndef pkcs11configSUPPRESS_ECDSA_MECHANISM
static CK_RV prvEcPrivKeyAttParse( const CK_ATTRIBUTE * pxAttribute,
const mbedtls_pk_context * pxMbedContext )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
CK_BBOOL xBool = ( CK_BBOOL ) CK_FALSE;
int32_t lMbedTLSResult = 0;
CK_RV xResult = CKR_OK;
mbedtls_ecp_keypair * pxKeyPair = ( mbedtls_ecp_keypair * ) pxMbedContext->pk_ctx;
if( pxAttribute->type == CKA_SIGN )
{
if( pxAttribute->ulValueLen == sizeof( CK_BBOOL ) )
{
( void ) memcpy( &xBool, pxAttribute->pValue, sizeof( CK_BBOOL ) );
}
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xBool == ( CK_BBOOL ) CK_FALSE )
{
xResult = CKR_ATTRIBUTE_VALUE_INVALID;
LogError( ( "Failed to parse EC private key. Only private keys with signing privileges are supported." ) );
}
}
else
{
lMbedTLSResult = mbedtls_mpi_read_binary( &pxKeyPair->d,
pxAttribute->pValue,
pxAttribute->ulValueLen );
if( lMbedTLSResult != 0 )
{
xResult = CKR_FUNCTION_FAILED;
LogError( ( "Failed to parse EC private key. MPI read binary failed: mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
}
}
return xResult;
}
#endif /* ifndef pkcs11configSUPPRESS_ECDSA_MECHANISM */
/**
* @brief Parses attribute values for a public EC Key.
*/
#ifndef pkcs11configSUPPRESS_ECDSA_MECHANISM
static CK_RV prvEcPubKeyAttParse( const CK_ATTRIBUTE * pxAttribute,
const mbedtls_pk_context * pxMbedContext )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
CK_BBOOL xBool = ( CK_BBOOL ) CK_FALSE;
int32_t lMbedTLSResult = 0;
CK_RV xResult = CKR_OK;
mbedtls_ecp_keypair * pxKeyPair = ( mbedtls_ecp_keypair * ) pxMbedContext->pk_ctx;
if( pxAttribute->type == CKA_VERIFY )
{
if( pxAttribute->ulValueLen == sizeof( CK_BBOOL ) )
{
( void ) memcpy( &xBool, pxAttribute->pValue, pxAttribute->ulValueLen );
}
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xBool == ( CK_BBOOL ) CK_FALSE )
{
xResult = CKR_ATTRIBUTE_VALUE_INVALID;
LogError( ( "Failed to parse EC public key. Expected verify permissions to be supported." ) );
}
}
else
{
/* Strip the ASN.1 encoding of type and length. Otherwise mbed TLS
* won't be able to parse the binary EC point. */
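/*
 * For reference (assumption for prime256v1): the CKA_EC_POINT value is a
 * DER OCTET STRING, i.e. a 0x04 tag byte and a 0x41 (65) length byte
 * followed by the 65-byte uncompressed point (0x04 || X || Y), so skipping
 * two bytes leaves the raw point for mbedtls_ecp_point_read_binary().
 */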
if( pxAttribute->ulValueLen >= 2UL )
{
lMbedTLSResult = mbedtls_ecp_point_read_binary( &pxKeyPair->grp,
&pxKeyPair->Q,
( ( uint8_t * ) ( pxAttribute->pValue ) + 2U ),
( pxAttribute->ulValueLen - 2U ) );
if( lMbedTLSResult != 0 )
{
xResult = CKR_FUNCTION_FAILED;
LogError( ( "Failed to parse EC public key. mbedtls_ecp_point_read_binary failed: mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
}
}
else
{
xResult = CKR_ATTRIBUTE_VALUE_INVALID;
}
}
return xResult;
}
#endif /* ifndef pkcs11configSUPPRESS_ECDSA_MECHANISM */
/**
* @brief Parses attribute values for an EC Key.
*/
#ifndef pkcs11configSUPPRESS_ECDSA_MECHANISM
static CK_RV prvEcKeyAttParse( const CK_ATTRIBUTE * pxAttribute,
const mbedtls_pk_context * pxMbedContext,
CK_BBOOL xIsPrivate )
{
CK_RV xResult = CKR_OK;
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
CK_BBOOL xBool = ( CK_BBOOL ) CK_FALSE;
const CK_BYTE pxEcCurve[] = pkcs11DER_ENCODED_OID_P256;
const CK_BYTE * pxEcAttVal = NULL;
const CK_BBOOL * pxEcBoolAtt = NULL;
/* Common EC key attributes. */
switch( pxAttribute->type )
{
case ( CKA_CLASS ):
case ( CKA_KEY_TYPE ):
case ( CKA_LABEL ):
break;
case ( CKA_TOKEN ):
pxEcBoolAtt = ( CK_BBOOL * ) pxAttribute->pValue;
if( pxAttribute->ulValueLen == sizeof( CK_BBOOL ) )
{
( void ) memcpy( &xBool, pxEcBoolAtt, sizeof( CK_BBOOL ) );
}
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xBool != ( CK_BBOOL ) CK_TRUE )
{
LogError( ( "Failed parsing EC key template. Expected token type to be true, but it was false." ) );
xResult = CKR_ATTRIBUTE_VALUE_INVALID;
}
break;
case ( CKA_EC_PARAMS ):
pxEcAttVal = ( CK_BYTE * ) pxAttribute->pValue;
if( pxAttribute->ulValueLen == sizeof( pxEcCurve ) )
{
if( memcmp( pxEcCurve, pxEcAttVal, sizeof( pxEcCurve ) ) != 0 )
{
xResult = CKR_TEMPLATE_INCONSISTENT;
LogError( ( "Failed parsing EC key template. The elliptic curve was wrong. Expected elliptic curve P-256." ) );
}
}
break;
case ( CKA_VERIFY ):
case ( CKA_EC_POINT ):
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xIsPrivate == ( CK_BBOOL ) CK_FALSE )
{
xResult = prvEcPubKeyAttParse( pxAttribute, pxMbedContext );
}
else
{
LogError( ( "Failed parsing EC key template. The key type "
"did not match the template parameters. Expected "
"a public key for CKA_VERIFY or CKA_EC_POINT." ) );
xResult = CKR_ATTRIBUTE_VALUE_INVALID;
}
break;
case ( CKA_SIGN ):
case ( CKA_VALUE ):
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xIsPrivate == ( CK_BBOOL ) CK_TRUE )
{
xResult = prvEcPrivKeyAttParse( pxAttribute, pxMbedContext );
}
else
{
LogError( ( "Failed parsing EC key template. The key type "
"did not match the template parameters. Expected "
"a private key for CKA_SIGN or CKA_VALUE." ) );
xResult = CKR_ATTRIBUTE_VALUE_INVALID;
}
break;
default:
LogError( ( "Failed parsing EC key template. Unknown attribute "
"0x%0lX found for an EC key.", ( unsigned long int ) pxAttribute->type ) );
xResult = CKR_ATTRIBUTE_TYPE_INVALID;
break;
}
return xResult;
}
#endif /* ifndef pkcs11configSUPPRESS_ECDSA_MECHANISM */
/*-----------------------------------------------------------------------*/
/* Functions for maintaining the PKCS #11 module's label-handle lookups. */
/*-----------------------------------------------------------------------*/
/**
* @brief Searches the PKCS #11 module's object list for label and provides handle.
*
* @param[in] pcLabel Array containing label.
* @param[in] xLabelLength Length of the label, in bytes.
* @param[out] pxPalHandle Pointer to the PAL handle to be provided.
* CK_INVALID_HANDLE if no object found.
* @param[out] pxAppHandle Pointer to the application handle to be provided.
* CK_INVALID_HANDLE if no object found.
*/
static void prvFindObjectInListByLabel( const CK_BYTE * pcLabel,
CK_ULONG xLabelLength,
CK_OBJECT_HANDLE_PTR pxPalHandle,
CK_OBJECT_HANDLE_PTR pxAppHandle )
{
uint32_t ulIndex;
*pxPalHandle = CK_INVALID_HANDLE;
*pxAppHandle = CK_INVALID_HANDLE;
for( ulIndex = 0; ulIndex < pkcs11configMAX_NUM_OBJECTS; ulIndex++ )
{
if( 0 == memcmp( pcLabel, xP11Context.xObjectList.xObjects[ ulIndex ].xLabel, xLabelLength ) )
{
LogDebug( ( "Found object in object list matching label." ) );
*pxPalHandle = xP11Context.xObjectList.xObjects[ ulIndex ].xHandle;
*pxAppHandle = ulIndex + 1UL; /* Zero is not a valid handle, so let's offset by 1. */
break;
}
}
}
/**
* @brief Looks up a PKCS #11 object's label and PAL handle given an application handle.
*
* @param[in] xAppHandle The application's handle for the object being looked up.
* @param[out] pxPalHandle Pointer to the PAL handle corresponding to xAppHandle.
* @param[out] ppcLabel Pointer to an array containing label. NULL if object not found.
* @param[out] pxLabelLength Pointer to label length (includes a string null terminator).
* 0 if no object found.
*/
static void prvFindObjectInListByHandle( CK_OBJECT_HANDLE xAppHandle,
CK_OBJECT_HANDLE_PTR pxPalHandle,
CK_BYTE_PTR * ppcLabel,
CK_ULONG_PTR pxLabelLength )
{
CK_OBJECT_HANDLE ulIndex = xAppHandle - ( ( CK_OBJECT_HANDLE ) 1 );
*ppcLabel = NULL;
*pxLabelLength = 0;
*pxPalHandle = CK_INVALID_HANDLE;
if( ulIndex < pkcs11configMAX_NUM_OBJECTS )
{
if( xP11Context.xObjectList.xObjects[ ulIndex ].xHandle != CK_INVALID_HANDLE )
{
LogDebug( ( "Found object in list by handle." ) );
*ppcLabel = xP11Context.xObjectList.xObjects[ ulIndex ].xLabel;
*pxLabelLength = xP11Context.xObjectList.xObjects[ ulIndex ].xLabelSize;
*pxPalHandle = xP11Context.xObjectList.xObjects[ ulIndex ].xHandle;
}
}
}
/**
* @brief Removes an object from the module object list (xP11Context.xObjectList)
*
* @warning This does not delete the object from NVM.
*
* @param[in] xPalHandle PAL handle of the object to be deleted.
*/
static CK_RV prvDeleteObjectFromList( CK_OBJECT_HANDLE xPalHandle )
{
CK_RV xResult = CKR_OK;
int32_t lGotSemaphore = ( int32_t ) 0;
uint32_t ulIndex = 0;
lGotSemaphore = mbedtls_mutex_lock( &xP11Context.xObjectList.xMutex );
if( lGotSemaphore == 0 )
{
/* Remove all references that have the same PAL handle, as it has now
* been deleted in the PKCS11_PAL. */
for( ulIndex = 0; ulIndex < pkcs11configMAX_NUM_OBJECTS; ulIndex++ )
{
if( xP11Context.xObjectList.xObjects[ ulIndex ].xHandle == xPalHandle )
{
( void ) memset( &xP11Context.xObjectList.xObjects[ ulIndex ], 0, sizeof( P11Object_t ) );
}
}
( void ) mbedtls_mutex_unlock( &xP11Context.xObjectList.xMutex );
}
else
{
LogError( ( "Failed to remove an object from internal object list. "
"Could not take the xObjectList mutex." ) );
xResult = CKR_CANT_LOCK;
}
return xResult;
}
/**
* @brief Add an object that exists in NVM to the application object array.
*
* @param[in] xPalHandle The handle used by the PKCS #11 PAL for object.
* @param[out] pxAppHandle Updated to contain the application handle corresponding to xPalHandle.
* @param[in] pcLabel Pointer to object label.
* @param[in] xLabelLength Length of the PKCS #11 label.
*
*/
static CK_RV prvAddObjectToList( CK_OBJECT_HANDLE xPalHandle,
CK_OBJECT_HANDLE_PTR pxAppHandle,
const CK_BYTE * pcLabel,
CK_ULONG xLabelLength )
{
CK_RV xResult = CKR_HOST_MEMORY;
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
CK_BBOOL xObjectFound = ( CK_BBOOL ) CK_FALSE;
uint32_t ulSearchIndex = 0;
if( 0 == mbedtls_mutex_lock( &xP11Context.xObjectList.xMutex ) )
{
for( ulSearchIndex = 0; ulSearchIndex < pkcs11configMAX_NUM_OBJECTS; ulSearchIndex++ )
{
if( xResult == CKR_OK )
{
break;
}
if( xP11Context.xObjectList.xObjects[ ulSearchIndex ].xHandle == xPalHandle )
{
/* Object already exists in list. */
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
xResult = CKR_OK;
xObjectFound = ( CK_BBOOL ) CK_TRUE;
}
else if( xP11Context.xObjectList.xObjects[ ulSearchIndex ].xHandle == CK_INVALID_HANDLE )
{
xResult = CKR_OK;
}
else
{
/* Cannot find a free object. */
}
}
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( ( xResult == CKR_OK ) && ( xObjectFound == ( CK_BBOOL ) CK_FALSE ) && ( xLabelLength <= pkcs11configMAX_LABEL_LENGTH ) )
{
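/* The loop above advances ulSearchIndex once more after finding a free
 * slot before it breaks, so the slot to populate is ulSearchIndex - 1. */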
xP11Context.xObjectList.xObjects[ ulSearchIndex - 1UL ].xHandle = xPalHandle;
( void ) memcpy( xP11Context.xObjectList.xObjects[ ulSearchIndex - 1UL ].xLabel, pcLabel, xLabelLength );
xP11Context.xObjectList.xObjects[ ulSearchIndex - 1UL ].xLabelSize = xLabelLength;
*pxAppHandle = ulSearchIndex;
}
( void ) mbedtls_mutex_unlock( &xP11Context.xObjectList.xMutex );
}
else
{
LogError( ( "Failed to add object to internal object list. Could not "
"take xObjectList mutex." ) );
xResult = CKR_CANT_LOCK;
}
return xResult;
}
/**
* @brief Append an empty public key to DER formatted EC private key.
*/
static CK_RV prvAppendEmptyECDerKey( uint8_t * pusECPrivateKey,
uint32_t ulDerBufSize,
int32_t lDerKeyLength,
uint32_t * pulActualKeyLength )
{
CK_RV xResult = CKR_OK;
const uint8_t emptyPubKey[ 6 ] = { 0xa1, 0x04, 0x03, 0x02, 0x00, 0x00 };
int32_t lCompare = 0;
/*
* mbedtls_pk_write_key_der appends empty public
* key data when saving EC private key
* that does not have a public key associated with it.
* a1 04 -> Context-specific constructed tag [1], length 4
* 03 02 -> Bit string of length 2
* 00 00 -> "Public key"
* https://forums.mbed.com/t/how-do-i-write-an-ec-private-key-w-no-public-key-to-der-format/4728 */
/* If there was no public key in the structure, this byte
* array will be appended to the valid private key.
* It must be removed so that we can read the private
* key back at a later time. */
lCompare = memcmp( &pusECPrivateKey[ ulDerBufSize - 6UL ], emptyPubKey, sizeof( emptyPubKey ) );
if( ( lCompare == 0 ) && ( *pulActualKeyLength >= 6UL ) )
{
/* Do not write the last 6 bytes to key storage. */
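/* The key was written at the end of the buffer, so the outer SEQUENCE tag
 * sits at offset ( ulDerBufSize - lDerKeyLength ) and the byte after it is
 * the single-byte DER length octet; shrink it by the 6 dropped bytes. */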
pusECPrivateKey[ ulDerBufSize - ( uint32_t ) lDerKeyLength + ( ( uint32_t ) 1 ) ] -= ( uint8_t ) 6;
*pulActualKeyLength -= ( ( uint32_t ) 6 );
}
return xResult;
}
/**
* @brief Save a DER formatted key in the PKCS #11 PAL.
*/
static CK_RV prvSaveDerKeyToPal( mbedtls_pk_context * pxMbedContext,
CK_OBJECT_HANDLE_PTR pxObject,
CK_ATTRIBUTE * pxLabel,
CK_KEY_TYPE xKeyType,
CK_BBOOL xIsPrivate )
{
CK_RV xResult = CKR_OK;
CK_BYTE_PTR pxDerKey = NULL;
int32_t lDerKeyLength = 0;
uint32_t ulActualKeyLength = 0;
CK_OBJECT_HANDLE xPalHandle = CK_INVALID_HANDLE;
uint32_t ulDerBufSize = 0;
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xIsPrivate == ( CK_BBOOL ) CK_TRUE )
{
LogDebug( ( "Key was private type." ) );
if( xKeyType == CKK_EC )
{
LogDebug( ( "Received EC key type." ) );
ulDerBufSize = pkcs11_MAX_EC_PRIVATE_KEY_DER_SIZE;
}
else
{
LogDebug( ( "Received RSA key type." ) );
ulDerBufSize = pkcs11_MAX_PRIVATE_KEY_DER_SIZE;
}
}
else
{
LogDebug( ( "Key was public type." ) );
if( xKeyType == CKK_EC )
{
LogDebug( ( "Received EC key type." ) );
ulDerBufSize = pkcs11_MAX_EC_PUBLIC_KEY_DER_SIZE;
}
else
{
LogDebug( ( "Received RSA key type." ) );
ulDerBufSize = pkcs11_PUBLIC_RSA_2048_DER_SIZE;
}
}
LogDebug( ( "Allocating a %lu bytes sized buffer to write the key to.", ( unsigned long int ) ulDerBufSize ) );
pxDerKey = mbedtls_calloc( 1, ulDerBufSize );
if( pxDerKey == NULL )
{
LogError( ( "Failed saving DER formatted key to flash. Failed to malloc a buffer to contain the key for the mbed TLS context." ) );
xResult = CKR_HOST_MEMORY;
}
else
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xIsPrivate == ( CK_BBOOL ) CK_TRUE )
{
lDerKeyLength = mbedtls_pk_write_key_der( pxMbedContext, pxDerKey, ulDerBufSize );
}
else
{
lDerKeyLength = mbedtls_pk_write_pubkey_der( pxMbedContext, pxDerKey, ulDerBufSize );
}
}
if( lDerKeyLength < 0 )
{
LogError( ( "Failed saving DER formatted key to flash. mbed TLS pk_write failed: mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lDerKeyLength ),
mbedtlsLowLevelCodeOrDefault( lDerKeyLength ) ) );
xResult = CKR_FUNCTION_FAILED;
}
else
{
/* Cast to unsigned int as the result was not negative. */
ulActualKeyLength = ( uint32_t ) lDerKeyLength;
}
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( ( xResult == CKR_OK ) && ( xIsPrivate == ( CK_BBOOL ) CK_TRUE ) && ( xKeyType == CKK_EC ) )
{
xResult = prvAppendEmptyECDerKey( pxDerKey, ulDerBufSize, lDerKeyLength, &ulActualKeyLength );
}
if( ( xResult == CKR_OK ) && ( lDerKeyLength > 0 ) && ( ( uint32_t ) lDerKeyLength < ulDerBufSize ) )
{
xPalHandle = PKCS11_PAL_SaveObject( pxLabel,
pxDerKey + ( ulDerBufSize - ( uint32_t ) lDerKeyLength ),
ulActualKeyLength );
if( xPalHandle == CK_INVALID_HANDLE )
{
LogError( ( "Failed saving DER formatted key to flash. Failed to write DER formatted key to the PKCS #11 PAL." ) );
xResult = CKR_DEVICE_MEMORY;
}
}
if( xResult == CKR_OK )
{
xResult = prvAddObjectToList( xPalHandle, pxObject, pxLabel->pValue, pxLabel->ulValueLen );
}
mbedtls_free( pxDerKey );
return xResult;
}
/*-------------------------------------------------------------*/
/**
* @brief Initializes Cryptoki.
*
* @note C_Initialize is not thread-safe.
*
* C_Initialize should be called (and allowed to return) before
* any additional PKCS #11 operations are invoked.
*
* In this implementation, all arguments are ignored.
* Thread protection for the rest of the PKCS #11 functions
* defaults to FreeRTOS primitives.
*
* @param[in] pInitArgs This parameter is ignored.
*
* @return CKR_OK if successful.
* CKR_CRYPTOKI_ALREADY_INITIALIZED if C_Initialize was previously called.
* All other errors indicate that the PKCS #11 module is not ready to be used.
*/
/* @[declare_pkcs11_mbedtls_c_initialize] */
CK_DECLARE_FUNCTION( CK_RV, C_Initialize )( CK_VOID_PTR pInitArgs )
{
CK_RV xResult = CKR_OK;
( void ) ( pInitArgs );
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xP11Context.xIsInitialized != ( CK_BBOOL ) CK_TRUE )
{
xResult = PKCS11_PAL_Initialize();
if( xResult == CKR_OK )
{
xResult = prvMbedTLS_Initialize();
}
else
{
LogError( ( "Failed to initialize PKCS #11. PAL failed with error code: 0x%0lX", ( unsigned long int ) xResult ) );
}
}
else
{
xResult = CKR_CRYPTOKI_ALREADY_INITIALIZED;
LogWarn( ( "Failed to initialize PKCS #11. PKCS #11 was already initialized." ) );
}
if( xResult == CKR_OK )
{
LogInfo( ( "PKCS #11 successfully initialized." ) );
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_initialize] */
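/*
 * Illustrative usage sketch (not part of this module): initialize the module
 * before any other PKCS #11 operation. pInitArgs is ignored by this port, so
 * NULL is passed; a CKR_CRYPTOKI_ALREADY_INITIALIZED return can usually be
 * treated as success by the caller.
 *
 *   CK_RV xRv = C_Initialize( NULL );
 *
 *   if( xRv == CKR_CRYPTOKI_ALREADY_INITIALIZED )
 *   {
 *       xRv = CKR_OK;
 *   }
 */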
/**
* @brief Clean up miscellaneous Cryptoki-associated resources.
*/
/* @[declare_pkcs11_mbedtls_c_finalize] */
CK_DECLARE_FUNCTION( CK_RV, C_Finalize )( CK_VOID_PTR pReserved )
{
CK_RV xResult = CKR_OK;
if( pReserved != NULL )
{
xResult = CKR_ARGUMENTS_BAD;
LogError( ( "Failed to un-initialize PKCS #11. Received bad arguments. "
"Parameters must be NULL, but were not." ) );
}
if( xResult == CKR_OK )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xP11Context.xIsInitialized == ( CK_BBOOL ) CK_FALSE )
{
xResult = CKR_CRYPTOKI_NOT_INITIALIZED;
LogWarn( ( "PKCS #11 was already uninitialized." ) );
}
}
if( xResult == CKR_OK )
{
mbedtls_entropy_free( &xP11Context.xMbedEntropyContext );
mbedtls_ctr_drbg_free( &xP11Context.xMbedDrbgCtx );
mbedtls_mutex_free( &xP11Context.xObjectList.xMutex );
mbedtls_mutex_free( &xP11Context.xSessionMutex );
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
xP11Context.xIsInitialized = ( CK_BBOOL ) CK_FALSE;
LogInfo( ( "PKCS #11 was successfully uninitialized." ) );
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_finalize] */
/**
* @brief Obtains entry points of Cryptoki library functions.
*
* All other PKCS #11 functions should be invoked using the returned
* function list.
*
* @warning Do not overwrite the function list.
*
* \param[out] ppFunctionList Pointer to the location where the
* pointer to the function list will be placed.
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_getfunctionlist] */
CK_DECLARE_FUNCTION( CK_RV, C_GetFunctionList )( CK_FUNCTION_LIST_PTR_PTR ppFunctionList )
{
CK_RV xResult = CKR_OK;
static CK_FUNCTION_LIST prvP11FunctionList =
{
{ CRYPTOKI_VERSION_MAJOR, CRYPTOKI_VERSION_MINOR },
C_Initialize,
C_Finalize,
NULL, /*C_GetInfo */
C_GetFunctionList,
C_GetSlotList,
NULL, /*C_GetSlotInfo*/
C_GetTokenInfo,
NULL, /*C_GetMechanismList*/
C_GetMechanismInfo,
C_InitToken,
NULL, /*C_InitPIN*/
NULL, /*C_SetPIN*/
C_OpenSession,
C_CloseSession,
NULL, /*C_CloseAllSessions*/
NULL, /*C_GetSessionInfo*/
NULL, /*C_GetOperationState*/
NULL, /*C_SetOperationState*/
C_Login, /*C_Login*/
NULL, /*C_Logout*/
C_CreateObject,
NULL, /*C_CopyObject*/
C_DestroyObject,
NULL, /*C_GetObjectSize*/
C_GetAttributeValue,
NULL, /*C_SetAttributeValue*/
C_FindObjectsInit,
C_FindObjects,
C_FindObjectsFinal,
NULL, /*C_EncryptInit*/
NULL, /*C_Encrypt*/
NULL, /*C_EncryptUpdate*/
NULL, /*C_EncryptFinal*/
NULL, /*C_DecryptInit*/
NULL, /*C_Decrypt*/
NULL, /*C_DecryptUpdate*/
NULL, /*C_DecryptFinal*/
C_DigestInit,
NULL, /*C_Digest*/
C_DigestUpdate,
NULL, /* C_DigestKey*/
C_DigestFinal,
C_SignInit,
C_Sign,
NULL, /*C_SignUpdate*/
NULL, /*C_SignFinal*/
NULL, /*C_SignRecoverInit*/
NULL, /*C_SignRecover*/
C_VerifyInit,
C_Verify,
NULL, /*C_VerifyUpdate*/
NULL, /*C_VerifyFinal*/
NULL, /*C_VerifyRecoverInit*/
NULL, /*C_VerifyRecover*/
NULL, /*C_DigestEncryptUpdate*/
NULL, /*C_DecryptDigestUpdate*/
NULL, /*C_SignEncryptUpdate*/
NULL, /*C_DecryptVerifyUpdate*/
NULL, /*C_GenerateKey*/
C_GenerateKeyPair,
NULL, /*C_WrapKey*/
NULL, /*C_UnwrapKey*/
NULL, /*C_DeriveKey*/
NULL, /*C_SeedRandom*/
C_GenerateRandom,
NULL, /*C_GetFunctionStatus*/
NULL, /*C_CancelFunction*/
NULL /*C_WaitForSlotEvent*/
};
if( NULL == ppFunctionList )
{
xResult = CKR_ARGUMENTS_BAD;
LogError( ( "Failed to return function pointer list. Expected a valid "
"pointer to a CK_FUNCTION_LIST, but the pointer was NULL." ) );
}
else
{
*ppFunctionList = &prvP11FunctionList;
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_getfunctionlist] */
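/*
 * Illustrative usage sketch (not part of this module): obtain the function
 * list once and dispatch subsequent calls through it, as recommended above.
 *
 *   CK_FUNCTION_LIST_PTR pxFunctionList = NULL;
 *   CK_RV xRv = C_GetFunctionList( &pxFunctionList );
 *
 *   if( ( xRv == CKR_OK ) && ( pxFunctionList != NULL ) )
 *   {
 *       xRv = pxFunctionList->C_Initialize( NULL );
 *   }
 */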
/**
* @brief Obtains a list of slots in the system.
*
* This port does not implement the concept of separate slots/tokens.
*
* \param[in] tokenPresent This parameter is unused by this port.
* \param[out] pSlotList Pointer to an array of slot IDs.
* At this time, only 1 slot is implemented.
* \param[in,out] pulCount Length of the slot list pSlotList. Updated
* to contain the actual number of slots written
* to the list.
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_getslotlist] */
CK_DECLARE_FUNCTION( CK_RV, C_GetSlotList )( CK_BBOOL tokenPresent,
CK_SLOT_ID_PTR pSlotList,
CK_ULONG_PTR pulCount )
{
CK_RV xResult = CKR_OK;
/* Since the mbedTLS implementation of PKCS#11 does not depend
* on a physical token, this parameter is ignored. */
( void ) ( tokenPresent );
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xP11Context.xIsInitialized != ( CK_BBOOL ) CK_TRUE )
{
xResult = CKR_CRYPTOKI_NOT_INITIALIZED;
LogError( ( "Failed to get slot list. PKCS #11 must be initialized "
"before any operations." ) );
}
if( NULL == pulCount )
{
xResult = CKR_ARGUMENTS_BAD;
LogError( ( "Failed to get slot list. Count pointer was NULL." ) );
}
if( xResult == CKR_OK )
{
if( NULL == pSlotList )
{
*pulCount = 1;
}
else
{
if( 0u == *pulCount )
{
xResult = CKR_BUFFER_TOO_SMALL;
LogWarn( ( "The buffer is too small to contain the slot list." ) );
}
else
{
pSlotList[ 0 ] = pkcs11SLOT_ID;
*pulCount = 1;
LogDebug( ( "Successfully Returned a PKCS #11 slot with ID "
"%lu with a count of %lu.", ( unsigned long int ) pkcs11SLOT_ID, ( unsigned long int ) *pulCount ) );
}
}
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_getslotlist] */
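/*
 * Illustrative usage sketch (not part of this module): the usual two-call
 * idiom. The first call with a NULL pSlotList only reports the slot count,
 * which is always 1 for this port; the second call fetches the slot ID.
 *
 *   CK_ULONG ulSlotCount = 0;
 *   CK_SLOT_ID xSlotId = 0;
 *   CK_RV xRv = C_GetSlotList( CK_TRUE, NULL, &ulSlotCount );
 *
 *   if( ( xRv == CKR_OK ) && ( ulSlotCount == 1UL ) )
 *   {
 *       xRv = C_GetSlotList( CK_TRUE, &xSlotId, &ulSlotCount );
 *   }
 */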
/**
* @brief Obtains information about a particular token.
*
* @param[in] slotID This parameter is unused in this port.
* @param[out] pInfo This parameter is unused in this port.
*
* C_GetTokenInfo() is only implemented for compatibility with other ports.
* All inputs to this function are ignored, and calling this
* function on this port does not provide any information about
* the PKCS #11 token.
*
* @return CKR_OK.
*/
/* @[declare_pkcs11_mbedtls_c_gettokeninfo] */
CK_DECLARE_FUNCTION( CK_RV, C_GetTokenInfo )( CK_SLOT_ID slotID,
CK_TOKEN_INFO_PTR pInfo )
{
/* Avoid compiler warnings about unused variables. */
( void ) slotID;
( void ) pInfo;
LogWarn( ( "C_GetTokenInfo is not implemented." ) );
return CKR_OK;
}
/* @[declare_pkcs11_mbedtls_c_gettokeninfo] */
/**
* @brief Obtains information about a particular mechanism.
*
* \param[in] slotID This parameter is unused in this port.
* \param[in] type The cryptographic capability for which support
* information is being queried.
* \param[out] pInfo Algorithm sizes and flags for the requested
* mechanism, if supported.
*
* @return CKR_OK if the mechanism is supported. Otherwise, CKR_MECHANISM_INVALID.
*/
/* @[declare_pkcs11_mbedtls_c_getmechanisminfo] */
CK_DECLARE_FUNCTION( CK_RV, C_GetMechanismInfo )( CK_SLOT_ID slotID,
CK_MECHANISM_TYPE type,
CK_MECHANISM_INFO_PTR pInfo )
{
CK_RV xResult = CKR_MECHANISM_INVALID;
struct CryptoMechanisms
{
CK_MECHANISM_TYPE xType;
CK_MECHANISM_INFO xInfo;
}
pxSupportedMechanisms[] =
{
{ CKM_RSA_PKCS, { 2048, 2048, CKF_SIGN } },
{ CKM_RSA_X_509, { 2048, 2048, CKF_VERIFY } },
#ifndef pkcs11configSUPPRESS_ECDSA_MECHANISM
{ CKM_ECDSA, { 256, 256, CKF_SIGN | CKF_VERIFY } },
{ CKM_EC_KEY_PAIR_GEN, { 256, 256, CKF_GENERATE_KEY_PAIR } },
#endif
{ CKM_SHA256, { 0, 0, CKF_DIGEST } }
};
uint32_t ulMech = 0;
( void ) slotID;
if( pInfo == NULL )
{
xResult = CKR_ARGUMENTS_BAD;
LogError( ( "Failed to get mechanism info. pInfo was NULL. Expected a "
"pointer to a valid CK_MECHANISM_INFO struct." ) );
}
else
{
/* Look for the requested mechanism in the above table. */
for( ; ulMech < sizeof( pxSupportedMechanisms ) / sizeof( pxSupportedMechanisms[ 0 ] ); ulMech++ )
{
if( pxSupportedMechanisms[ ulMech ].xType == type )
{
/* The mechanism is supported. Copy out the details and break
* out of the loop. */
( void ) memcpy( pInfo, &( pxSupportedMechanisms[ ulMech ].xInfo ), sizeof( CK_MECHANISM_INFO ) );
xResult = CKR_OK;
break;
}
}
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_getmechanisminfo] */
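/*
 * Illustrative usage sketch (not part of this module): query whether SHA-256
 * digesting is available. A CKR_OK return means the mechanism is in the table
 * above, and xMechInfo then holds its sizes and flags (CKF_DIGEST here).
 *
 *   CK_MECHANISM_INFO xMechInfo = { 0 };
 *   CK_RV xRv = C_GetMechanismInfo( pkcs11SLOT_ID, CKM_SHA256, &xMechInfo );
 */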
/**
* @brief Initializes a token. This function is not implemented for this port.
*
* C_InitToken() is only implemented for compatibility with other ports.
* All inputs to this function are ignored, and calling this
* function on this port does not add any security.
*
* @return CKR_OK.
*/
/* @[declare_pkcs11_mbedtls_c_inittoken] */
CK_DECLARE_FUNCTION( CK_RV, C_InitToken )( CK_SLOT_ID slotID,
CK_UTF8CHAR_PTR pPin,
CK_ULONG ulPinLen,
CK_UTF8CHAR_PTR pLabel )
{
/* Avoid compiler warnings about unused variables. */
( void ) slotID;
( void ) pPin;
( void ) ulPinLen;
( void ) pLabel;
LogWarn( ( "C_InitToken is not implemented." ) );
return CKR_OK;
}
/* @[declare_pkcs11_mbedtls_c_inittoken] */
/**
* @brief Opens a connection between an application and a particular token or sets up an application callback for token insertion.
*
* \note PKCS #11 module must have been previously initialized with a call to
* C_Initialize() before calling C_OpenSession().
*
*
* \param[in] slotID This parameter is unused in this port.
* \param[in] flags Session flags - CKF_SERIAL_SESSION is a
* mandatory flag.
* \param[in] pApplication This parameter is unused in this port.
* \param[in] Notify This parameter is unused in this port.
* \param[out] phSession Pointer to the location where the created
* session's handle will be placed.
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_opensession] */
CK_DECLARE_FUNCTION( CK_RV, C_OpenSession )( CK_SLOT_ID slotID,
CK_FLAGS flags,
CK_VOID_PTR pApplication,
CK_NOTIFY Notify,
CK_SESSION_HANDLE_PTR phSession )
{
CK_RV xResult = CKR_OK;
P11Session_t * pxSessionObj = NULL;
uint32_t ulSessionCount = 0;
( void ) ( slotID );
( void ) ( pApplication );
/* Allow unused parameters to be cast to void to silence compiler warnings,
* even if they are function pointers. */
/* coverity[misra_c_2012_rule_11_1_violation] */
( void ) Notify;
/* Check that the PKCS #11 module is initialized. */
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xP11Context.xIsInitialized != ( CK_BBOOL ) CK_TRUE )
{
xResult = CKR_CRYPTOKI_NOT_INITIALIZED;
LogError( ( "Could not open a session. PKCS #11 must be initialized "
"before any operations." ) );
}
/* Check arguments. */
if( NULL == phSession )
{
xResult = CKR_ARGUMENTS_BAD;
LogError( ( "Could not open a session. phSession cannot be a NULL pointer." ) );
}
/* For legacy reasons, the CKF_SERIAL_SESSION bit MUST always be set. */
if( ( CKR_OK == xResult ) && ( 0UL == ( CKF_SERIAL_SESSION & flags ) ) )
{
xResult = CKR_SESSION_PARALLEL_NOT_SUPPORTED;
LogError( ( "Could not open a session. CKR_SESSION_PARALLEL_NOT_SUPPORTED "
"must always be a set flag." ) );
}
/*
* Make space for the context.
*/
if( CKR_OK == xResult )
{
/* Get next open session slot. */
if( mbedtls_mutex_lock( &xP11Context.xSessionMutex ) == 0 )
{
for( ulSessionCount = 0; ulSessionCount < pkcs11configMAX_SESSIONS; ++ulSessionCount )
{
/* coverity[misra_c_2012_rule_10_5_violation] */
if( pxP11Sessions[ ulSessionCount ].xOpened == ( CK_BBOOL ) CK_FALSE )
{
xResult = CKR_OK;
pxSessionObj = &pxP11Sessions[ ulSessionCount ];
/* coverity[misra_c_2012_rule_10_5_violation] */
pxSessionObj->xOpened = ( CK_BBOOL ) CK_TRUE;
break;
}
else
{
xResult = CKR_SESSION_COUNT;
}
}
( void ) mbedtls_mutex_unlock( &xP11Context.xSessionMutex );
}
else
{
xResult = CKR_FUNCTION_FAILED;
LogError( ( "Could not open a session. Unsuccessful in taking xSessionMutex." ) );
}
if( CKR_OK == xResult )
{
mbedtls_mutex_init( &pxSessionObj->xSignMutex );
mbedtls_mutex_init( &pxSessionObj->xVerifyMutex );
}
}
if( CKR_OK == xResult )
{
/*
* Assign the session.
*/
pxSessionObj->ulState =
( 0UL != ( flags & CKF_RW_SESSION ) ) ? CKS_RW_PUBLIC_SESSION : CKS_RO_PUBLIC_SESSION;
LogDebug( ( "Assigned a 0x%0lX Type Session.", ( unsigned long int ) pxSessionObj->ulState ) );
}
/*
* Initialize the operation in progress.
*/
if( CKR_OK == xResult )
{
pxSessionObj->xOperationDigestMechanism = pkcs11NO_OPERATION;
pxSessionObj->xOperationVerifyMechanism = pkcs11NO_OPERATION;
pxSessionObj->xOperationSignMechanism = pkcs11NO_OPERATION;
LogDebug( ( "Assigned Mechanisms to no operation in progress." ) );
}
if( xResult == CKR_SESSION_COUNT )
{
/* No available session. */
LogError( ( "Could not open a session. All sessions have "
"been taken. Consider increasing value of "
"pkcs11configMAX_SESSIONS." ) );
}
if( CKR_OK == xResult )
{
/* Increment by one, as invalid handles in PKCS #11 are 0. */
++ulSessionCount;
*phSession = ulSessionCount;
LogDebug( ( "Current session count at %lu", ( unsigned long int ) ( ulSessionCount - 1UL ) ) );
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_opensession] */
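/*
 * Illustrative usage sketch (not part of this module): open a read/write
 * session. CKF_SERIAL_SESSION is mandatory; pApplication and Notify are
 * unused by this port, so NULL is passed for both.
 *
 *   CK_SESSION_HANDLE xSession = CK_INVALID_HANDLE;
 *   CK_RV xRv = C_OpenSession( pkcs11SLOT_ID,
 *                              CKF_SERIAL_SESSION | CKF_RW_SESSION,
 *                              NULL,
 *                              NULL,
 *                              &xSession );
 */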
/**
* @brief Closes a session.
*
* @param[in] hSession The session handle to
* be terminated.
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_closesession] */
CK_DECLARE_FUNCTION( CK_RV, C_CloseSession )( CK_SESSION_HANDLE hSession )
{
P11Session_t * pxSession = prvSessionPointerFromHandle( hSession );
CK_RV xResult = CKR_OK;
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xP11Context.xIsInitialized == ( CK_BBOOL ) CK_FALSE )
{
xResult = CKR_CRYPTOKI_NOT_INITIALIZED;
LogError( ( "Could not close a session. PKCS #11 must be initialized "
"before any operations." ) );
}
else if( pxSession == NULL )
{
xResult = CKR_SESSION_HANDLE_INVALID;
LogError( ( "Could not close a session. The PKCS #11 session handle "
"was invalid." ) );
}
/* coverity[misra_c_2012_rule_10_5_violation] */
else if( pxSession->xOpened == ( CK_BBOOL ) CK_TRUE )
{
/*
* Tear down the session.
*/
mbedtls_pk_free( &pxSession->xSignKey );
pxSession->xSignKeyHandle = CK_INVALID_HANDLE;
mbedtls_mutex_free( &pxSession->xSignMutex );
/* Free the public key context if it exists. */
mbedtls_pk_free( &pxSession->xVerifyKey );
pxSession->xVerifyKeyHandle = CK_INVALID_HANDLE;
mbedtls_mutex_free( &pxSession->xVerifyMutex );
mbedtls_sha256_free( &pxSession->xSHA256Context );
/* memset clears the open flag, so there is no need to set it to CK_FALSE */
( void ) memset( pxSession, 0, sizeof( P11Session_t ) );
LogInfo( ( "Successfully closed PKCS #11 session." ) );
}
else
{
/* MISRA */
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_closesession] */
/**
* @brief Logs into a token. This function is not implemented for this port.
*
* C_Login() is only implemented for compatibility with other ports.
* All inputs to this function are ignored, and calling this
* function on this port does not add any security.
*
* @return CKR_OK.
*/
/* @[declare_pkcs11_mbedtls_c_login] */
CK_DECLARE_FUNCTION( CK_RV, C_Login )( CK_SESSION_HANDLE hSession,
CK_USER_TYPE userType,
CK_UTF8CHAR_PTR pPin,
CK_ULONG ulPinLen )
{
/* Avoid warnings about unused parameters. */
( void ) hSession;
( void ) userType;
( void ) pPin;
( void ) ulPinLen;
LogWarn( ( "C_Login is not implemented." ) );
/* THIS FUNCTION IS NOT IMPLEMENTED FOR MBEDTLS-BASED PORTS.
* If login capability is required, implement it here.
* Defined for compatibility with other PKCS #11 ports. */
return CKR_OK;
}
/* @[declare_pkcs11_mbedtls_c_login] */
/**
* @brief Helper function for parsing the templates of device certificates for C_CreateObject.
*
* @param[in] pxTemplate Pointer to PKCS #11 attribute template.
* @param[in] ulCount length of templates array.
* @param[in] pxObject Pointer to PKCS #11 object.
* @return CKR_OK.
*/
static CK_RV prvCreateCertificate( CK_ATTRIBUTE * pxTemplate,
CK_ULONG ulCount,
CK_OBJECT_HANDLE_PTR pxObject )
{
CK_RV xResult = CKR_OK;
CK_BYTE_PTR pxCertificateValue = NULL;
CK_ULONG xCertificateLength = 0;
CK_ATTRIBUTE_PTR pxLabel = NULL;
CK_OBJECT_HANDLE xPalHandle = CK_INVALID_HANDLE;
CK_CERTIFICATE_TYPE xCertificateType = 0;
CK_ULONG ulIndex = 0;
/* Search for the pointer to the certificate VALUE. */
for( ulIndex = 0; ulIndex < ulCount; ulIndex++ )
{
xResult = prvCertAttParse( &pxTemplate[ ulIndex ], &xCertificateType,
&pxCertificateValue, &xCertificateLength,
&pxLabel );
if( xResult != CKR_OK )
{
break;
}
}
if( ( xResult == CKR_OK ) && ( ( pxCertificateValue == NULL ) || ( pxLabel == NULL ) ) )
{
LogError( ( "Failed to create certificate. Could not parse the "
"certificate value or label from the template." ) );
xResult = CKR_TEMPLATE_INCOMPLETE;
}
if( xResult == CKR_OK )
{
xPalHandle = PKCS11_PAL_SaveObject( pxLabel, pxCertificateValue, xCertificateLength );
if( xPalHandle == 0UL ) /*Invalid handle. */
{
LogError( ( "Failed to create certificate. Could not save the "
"certificate to the PKCS #11 PAL." ) );
xResult = CKR_DEVICE_MEMORY;
}
}
if( xResult == CKR_OK )
{
xResult = prvAddObjectToList( xPalHandle, pxObject, pxLabel->pValue, pxLabel->ulValueLen );
}
return xResult;
}
/**
* @brief Helper to search an attribute for the key type attribute.
*
* @param[out] pxKeyType pointer to key type.
* @param[in] pxTemplate templates to search for a key in.
* @param[in] ulCount length of templates array.
*/
static void prvGetKeyType( CK_KEY_TYPE * pxKeyType,
const CK_ATTRIBUTE * pxTemplate,
CK_ULONG ulCount )
{
uint32_t ulIndex;
CK_ATTRIBUTE xAttribute;
*pxKeyType = PKCS11_INVALID_KEY_TYPE;
for( ulIndex = 0; ulIndex < ulCount; ulIndex++ )
{
xAttribute = pxTemplate[ ulIndex ];
if( ( xAttribute.type == CKA_KEY_TYPE ) && ( xAttribute.ulValueLen == sizeof( CK_KEY_TYPE ) ) )
{
LogDebug( ( "Successfully found the key type in the template." ) );
( void ) memcpy( pxKeyType, xAttribute.pValue, sizeof( CK_KEY_TYPE ) );
break;
}
}
}
/**
* @brief Helper to search a template for the label attribute.
*
* @param[out] ppxLabel pointer to label.
* @param[in] pxTemplate templates to search for a key in.
* @param[in] ulCount length of templates array.
*/
static void prvGetLabel( CK_ATTRIBUTE ** ppxLabel,
CK_ATTRIBUTE * pxTemplate,
CK_ULONG ulCount )
{
CK_ATTRIBUTE xAttribute;
CK_ULONG ulIndex;
*ppxLabel = NULL;
for( ulIndex = 0; ulIndex < ulCount; ulIndex++ )
{
xAttribute = pxTemplate[ ulIndex ];
if( xAttribute.type == CKA_LABEL )
{
LogDebug( ( "Successfully found the label in the template." ) );
*ppxLabel = &pxTemplate[ ulIndex ];
break;
}
}
}
#ifndef pkcs11configSUPPRESS_ECDSA_MECHANISM
/**
* @brief Helper to load an existing key component (public or private) from the PAL, given its label.
*
* @param[in] pxPalHandle opaque handle to PKCS #11 object.
* @param[in] pxMbedContext mbedTLS pk context for parsing.
* @param[in] pxLabel label of PKCS #11 object.
*
* @note Because public and private keys are stored in the same slot for this port,
* importing one after the other requires a read of what was previously in the slot,
* combination of the public and private key in DER format, and re-import of the
* combination.
*/
static CK_RV prvGetExistingKeyComponent( CK_OBJECT_HANDLE_PTR pxPalHandle,
mbedtls_pk_context * pxMbedContext,
const CK_ATTRIBUTE * pxLabel )
{
CK_BYTE_PTR pucData = NULL;
CK_ULONG ulDataLength = 0;
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
CK_BBOOL xIsPrivate = ( CK_BBOOL ) CK_TRUE;
CK_RV xResult = CKR_OK;
int32_t lMbedTLSResult = 0;
CK_BYTE pxPubKeyLabel[] = { pkcs11configLABEL_DEVICE_PUBLIC_KEY_FOR_TLS };
CK_BYTE pxPrivKeyLabel[] = { pkcs11configLABEL_DEVICE_PRIVATE_KEY_FOR_TLS };
*pxPalHandle = CK_INVALID_HANDLE;
if( 0 == strncmp( pxLabel->pValue, pkcs11configLABEL_DEVICE_PRIVATE_KEY_FOR_TLS, pxLabel->ulValueLen ) )
{
*pxPalHandle = PKCS11_PAL_FindObject( pxPrivKeyLabel, pxLabel->ulValueLen );
}
else if( 0 == strncmp( pxLabel->pValue, pkcs11configLABEL_DEVICE_PUBLIC_KEY_FOR_TLS, pxLabel->ulValueLen ) )
{
*pxPalHandle = PKCS11_PAL_FindObject( pxPubKeyLabel, pxLabel->ulValueLen );
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
xIsPrivate = ( CK_BBOOL ) CK_FALSE;
}
else
{
/* Unknown label passed to function */
LogWarn( ( "Unknown label found." ) );
}
if( *pxPalHandle != CK_INVALID_HANDLE )
{
xResult = PKCS11_PAL_GetObjectValue( *pxPalHandle, &pucData, &ulDataLength, &xIsPrivate );
}
else
{
LogDebug( ( "Could not find an existing PKCS #11 PAL object." ) );
}
if( ( xResult == CKR_OK ) && ( *pxPalHandle != CK_INVALID_HANDLE ) )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xIsPrivate == ( CK_BBOOL ) CK_TRUE )
{
lMbedTLSResult = mbedtls_pk_parse_key( pxMbedContext, pucData, ulDataLength, NULL, 0 );
}
else
{
lMbedTLSResult = mbedtls_pk_parse_public_key( pxMbedContext, pucData, ulDataLength );
}
PKCS11_PAL_GetObjectValueCleanup( pucData, ulDataLength );
}
if( lMbedTLSResult != 0 )
{
LogError( ( "Failed to get existing object value. mbedTLS pk parse "
"failed with mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
*pxPalHandle = CK_INVALID_HANDLE;
}
return xResult;
}
/**
* @brief Helper function to load an EC group to the mbed TLS pk context.
* @param[in] pxMbedContext mbedtls context used to load EC group params.
*
*/
static CK_RV prvLoadEcGroup( mbedtls_pk_context * pxMbedContext )
{
CK_RV xResult = CKR_OK;
mbedtls_ecp_keypair * pxKeyPair;
int32_t lMbedTLSResult = 0;
pxKeyPair = mbedtls_calloc( 1, sizeof( mbedtls_ecp_keypair ) );
if( pxKeyPair != NULL )
{
/* Initialize the info. */
pxMbedContext->pk_info = &mbedtls_eckey_info;
/* Initialize the context. */
pxMbedContext->pk_ctx = pxKeyPair;
mbedtls_ecp_keypair_init( pxKeyPair );
mbedtls_ecp_group_init( &pxKeyPair->grp );
/* At this time, only P-256 curves are supported. */
lMbedTLSResult = mbedtls_ecp_group_load( &pxKeyPair->grp,
MBEDTLS_ECP_DP_SECP256R1 );
if( lMbedTLSResult != 0 )
{
LogError( ( "Failed creating an EC key. "
"mbedtls_ecp_group_load failed: mbed "
"TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
xResult = CKR_FUNCTION_FAILED;
}
}
else
{
LogError( ( "Failed creating an EC key. Could not allocate a "
"mbedtls_ecp_keypair struct." ) );
xResult = CKR_HOST_MEMORY;
}
return xResult;
}
/**
* @brief Helper function for importing elliptic curve keys from
* template using C_CreateObject.
* @param[in] pxTemplate templates to search for a key in.
* @param[in] ulCount length of templates array.
* @param[in] pxObject PKCS #11 object handle.
* @param[in] xIsPrivate boolean indicating whether the key is private or public.
*/
static CK_RV prvCreateECKey( CK_ATTRIBUTE * pxTemplate,
CK_ULONG ulCount,
CK_OBJECT_HANDLE_PTR pxObject,
CK_BBOOL xIsPrivate )
{
CK_RV xResult = CKR_OK;
uint32_t ulIndex;
CK_ATTRIBUTE_PTR pxLabel = NULL;
CK_OBJECT_HANDLE xPalHandle = CK_INVALID_HANDLE;
mbedtls_pk_context xMbedContext;
mbedtls_pk_init( &xMbedContext );
prvGetLabel( &pxLabel, pxTemplate, ulCount );
if( pxLabel == NULL )
{
LogError( ( "Failed to create an EC key. Received a NULL label pointer." ) );
xResult = CKR_ARGUMENTS_BAD;
}
else
{
xResult = prvGetExistingKeyComponent( &xPalHandle, &xMbedContext, pxLabel );
}
if( ( xResult == CKR_OK ) && ( xPalHandle == CK_INVALID_HANDLE ) )
{
/* An mbedTLS key consists of two pieces of data: an "info" and a "context".
* Since a valid key was not found by prvGetExistingKeyComponent, we are going to initialize
* the structure so that the mbedTLS structures will look the same as they would if a key
* had been found, minus the private key component. */
/* If a key had been found by prvGetExistingKeyComponent, the keypair context
* would have been malloc'ed. */
xResult = prvLoadEcGroup( &xMbedContext );
}
/* Key will be assembled in the mbedTLS key context and then exported to DER for storage. */
if( xResult == CKR_OK )
{
for( ulIndex = 0; ulIndex < ulCount; ulIndex++ )
{
xResult = prvEcKeyAttParse( &pxTemplate[ ulIndex ], &xMbedContext, xIsPrivate );
if( xResult != CKR_OK )
{
break;
}
}
}
if( xResult == CKR_OK )
{
xResult = prvSaveDerKeyToPal( &xMbedContext,
pxObject,
pxLabel,
CKK_EC,
xIsPrivate );
}
/* Clean up the mbedTLS key context. */
mbedtls_pk_free( &xMbedContext );
return xResult;
}
#endif /* ifndef pkcs11configSUPPRESS_ECDSA_MECHANISM */
/**
* @brief Helper function for parsing RSA Private Key attribute templates
* for C_CreateObject.
* @param[in] pxTemplate templates to search for a key in.
* @param[in] ulCount length of templates array.
* @param[in] pxObject PKCS #11 object handle.
* @param[in] xIsPrivate boolean indicating whether the key is private or public.
*/
static CK_RV prvCreateRsaKey( CK_ATTRIBUTE * pxTemplate,
CK_ULONG ulCount,
CK_OBJECT_HANDLE_PTR pxObject,
CK_BBOOL xIsPrivate )
{
CK_RV xResult = CKR_OK;
mbedtls_pk_context xMbedContext = { 0 };
uint32_t ulIndex;
CK_ATTRIBUTE_PTR pxLabel = NULL;
/* mbedtls_rsa_context must be malloc'ed to use with mbedtls_pk_free function. */
mbedtls_rsa_context * pxRsaCtx = mbedtls_calloc( 1, sizeof( mbedtls_rsa_context ) );
prvGetLabel( &pxLabel, pxTemplate, ulCount );
if( pxLabel == NULL )
{
LogError( ( "Failed creating an RSA key. Label was a NULL pointer." ) );
xResult = CKR_ARGUMENTS_BAD;
}
if( pxRsaCtx != NULL )
{
mbedtls_pk_init( &xMbedContext );
xMbedContext.pk_ctx = pxRsaCtx;
xMbedContext.pk_info = &mbedtls_rsa_info;
mbedtls_rsa_init( pxRsaCtx, MBEDTLS_RSA_PKCS_V15, 0 /*ignored.*/ );
}
else
{
LogError( ( "Failed creating an RSA key. Could not malloc a "
"mbedtls_rsa_context struct." ) );
xResult = CKR_HOST_MEMORY;
}
if( xResult == CKR_OK )
{
/* Parse template and collect the relevant parts. */
for( ulIndex = 0; ulIndex < ulCount; ulIndex++ )
{
xResult = prvRsaKeyAttParse( &pxTemplate[ ulIndex ], xMbedContext.pk_ctx, xIsPrivate );
if( xResult != CKR_OK )
{
break;
}
}
}
if( xResult == CKR_OK )
{
xResult = prvSaveDerKeyToPal( &xMbedContext,
pxObject,
pxLabel,
CKK_RSA,
xIsPrivate );
}
/* Clean up the mbedTLS key context. */
mbedtls_pk_free( &xMbedContext );
return xResult;
}
/**
* @brief Helper function for importing private keys using
* C_CreateObject.
* @param[in] pxTemplate templates to search for a key in.
* @param[in] ulCount length of templates array.
* @param[in] pxObject PKCS #11 object handle.
*/
static CK_RV prvCreatePrivateKey( CK_ATTRIBUTE * pxTemplate,
CK_ULONG ulCount,
CK_OBJECT_HANDLE_PTR pxObject )
{
CK_RV xResult = CKR_OK;
CK_KEY_TYPE xKeyType;
prvGetKeyType( &xKeyType, pxTemplate, ulCount );
if( xKeyType == CKK_RSA )
{
xResult = prvCreateRsaKey( pxTemplate,
ulCount,
pxObject,
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
( CK_BBOOL ) CK_TRUE );
}
#ifndef pkcs11configSUPPRESS_ECDSA_MECHANISM
/* CKK_EC = CKK_ECDSA. */
else if( xKeyType == CKK_EC )
{
xResult = prvCreateECKey( pxTemplate,
ulCount,
pxObject,
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
( CK_BBOOL ) CK_TRUE );
}
#endif /* pkcs11configSUPPRESS_ECDSA_MECHANISM */
else
{
LogError( ( "Failed to create a key. Tried to create a key with an "
"invalid or unknown mechanism." ) );
xResult = CKR_MECHANISM_INVALID;
}
return xResult;
}
/**
* @brief Helper function for importing public keys using
* C_CreateObject.
* @param[in] pxTemplate templates to search for a key in.
* @param[in] ulCount length of templates array.
* @param[in] pxObject PKCS #11 object handle.
*/
static CK_RV prvCreatePublicKey( CK_ATTRIBUTE * pxTemplate,
CK_ULONG ulCount,
CK_OBJECT_HANDLE_PTR pxObject )
{
CK_KEY_TYPE xKeyType = 0;
CK_RV xResult = CKR_OK;
prvGetKeyType( &xKeyType, pxTemplate, ulCount );
if( xKeyType == CKK_RSA )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
xResult = prvCreateRsaKey( pxTemplate, ulCount, pxObject, ( CK_BBOOL ) CK_FALSE );
}
#ifndef pkcs11configSUPPRESS_ECDSA_MECHANISM
else if( xKeyType == CKK_EC ) /* CKK_EC = CKK_ECDSA. */
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
xResult = prvCreateECKey( pxTemplate, ulCount, pxObject, ( CK_BBOOL ) CK_FALSE );
}
#endif /* ifndef pkcs11configSUPPRESS_ECDSA_MECHANISM */
else
{
LogError( ( "Failed to create public key. Received an invalid mechanism. "
"Invalid key type 0x%0lX", ( unsigned long int ) xKeyType ) );
xResult = CKR_MECHANISM_INVALID;
}
return xResult;
}
/**
* @brief Creates an object.
*
* @param[in] hSession Handle of a valid PKCS #11 session.
* @param[in] pTemplate List of attributes of the object to
* be created.
* @param[in] ulCount Number of attributes in pTemplate.
* @param[out] phObject Pointer to the location where the created
* object's handle will be placed.
*
* <table>
* <tr><th> Object Type <th> Template Attributes
* <tr><td rowspan="6">Certificate<td>CKA_CLASS
* <tr> <td>CKA_VALUE
* <tr> <td>CKA_TOKEN
* <tr> <td>CKA_LABEL
* <tr> <td>CKA_CERTIFICATE_TYPE
* <tr> <td>CKA_VALUE
* <tr><td rowspan="7">EC Private Key<td>CKA_CLASS
* <tr> <td>CKA_KEY_TYPE
* <tr> <td>CKA_TOKEN
* <tr> <td>CKA_LABEL
* <tr> <td>CKA_SIGN
* <tr> <td>CKA_EC_PARAMS
* <tr> <td>CKA_VALUE
* <tr><td rowspan="7">EC Public Key<td>CKA_CLASS
* <tr> <td>CKA_KEY_TYPE
* <tr> <td>CKA_TOKEN
* <tr> <td>CKA_VERIFY
* <tr> <td>CKA_LABEL
* <tr> <td>CKA_EC_PARAMS
* <tr> <td>CKA_EC_POINT
* <tr><td rowspan="13">RSA Private Key<td>CKA_CLASS
* <tr> <td>CKA_KEY_TYPE
* <tr> <td>CKA_TOKEN
* <tr> <td>CKA_LABEL
* <tr> <td>CKA_SIGN
* <tr> <td>CKA_MODULUS
* <tr> <td>CKA_PUBLIC_EXPONENT
* <tr> <td>CKA_PRIME_1
* <tr> <td>CKA_PRIME_2
* <tr> <td>CKA_PRIVATE_EXPONENT
* <tr> <td>CKA_EXPONENT_1
* <tr> <td>CKA_EXPONENT_2
* <tr> <td>CKA_COEFFICIENT
* </table>
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_createobject] */
CK_DECLARE_FUNCTION( CK_RV, C_CreateObject )( CK_SESSION_HANDLE hSession,
CK_ATTRIBUTE_PTR pTemplate,
CK_ULONG ulCount,
CK_OBJECT_HANDLE_PTR phObject )
{
CK_OBJECT_CLASS xClass = 0;
const P11Session_t * pxSession = prvSessionPointerFromHandle( hSession );
CK_RV xResult = prvCheckValidSessionAndModule( pxSession );
if( ( NULL == pTemplate ) ||
( NULL == phObject ) )
{
LogError( ( "Failed to create object. Received a NULL template or "
"object pointer." ) );
xResult = CKR_ARGUMENTS_BAD;
}
if( xResult == CKR_OK )
{
xResult = prvGetObjectClass( pTemplate, ulCount, &xClass );
}
if( xResult == CKR_OK )
{
LogInfo( ( "Creating a 0x%0lX type object.", ( unsigned long int ) xClass ) );
switch( xClass )
{
case CKO_CERTIFICATE:
xResult = prvCreateCertificate( pTemplate, ulCount, phObject );
break;
case CKO_PRIVATE_KEY:
xResult = prvCreatePrivateKey( pTemplate, ulCount, phObject );
break;
case CKO_PUBLIC_KEY:
xResult = prvCreatePublicKey( pTemplate, ulCount, phObject );
break;
default:
xResult = CKR_ATTRIBUTE_VALUE_INVALID;
break;
}
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_createobject] */
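/*
 * Illustrative usage sketch (not part of this module): a certificate template
 * matching the table above. xSession is an open session handle, ucCert and
 * ulCertLen stand in for a caller-owned DER encoded certificate, and the
 * label string is a placeholder chosen for the example only.
 *
 *   CK_OBJECT_CLASS xCertClass = CKO_CERTIFICATE;
 *   CK_CERTIFICATE_TYPE xCertType = CKC_X_509;
 *   CK_BBOOL xTokenStorage = CK_TRUE;
 *   CK_OBJECT_HANDLE xCertHandle = CK_INVALID_HANDLE;
 *   CK_ATTRIBUTE xTemplate[] =
 *   {
 *       { CKA_CLASS,            &xCertClass,    sizeof( xCertClass )          },
 *       { CKA_CERTIFICATE_TYPE, &xCertType,     sizeof( xCertType )           },
 *       { CKA_TOKEN,            &xTokenStorage, sizeof( xTokenStorage )       },
 *       { CKA_LABEL,            "Device Cert",  sizeof( "Device Cert" ) - 1UL },
 *       { CKA_VALUE,            ucCert,         ulCertLen                     }
 *   };
 *   CK_RV xRv = C_CreateObject( xSession, xTemplate,
 *                               sizeof( xTemplate ) / sizeof( xTemplate[ 0 ] ),
 *                               &xCertHandle );
 */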
/**
* @brief Destroys an object.
*
* @param[in] hSession Handle of a valid PKCS #11 session.
* @param[in] hObject Handle of the object to be destroyed.
*
* @warning In this implementation, if either the device public key or the device
* private key (labels pkcs11configLABEL_DEVICE_PUBLIC_KEY_FOR_TLS and
* pkcs11configLABEL_DEVICE_PRIVATE_KEY_FOR_TLS) are deleted, both keys will
* be destroyed.
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_destroyobject] */
CK_DECLARE_FUNCTION( CK_RV, C_DestroyObject )( CK_SESSION_HANDLE hSession,
CK_OBJECT_HANDLE hObject )
{
const P11Session_t * pxSession = prvSessionPointerFromHandle( hSession );
CK_RV xResult = prvCheckValidSessionAndModule( pxSession );
CK_OBJECT_HANDLE xPalHandle = CK_INVALID_HANDLE;
CK_BYTE_PTR pcLabel = NULL;
CK_ULONG xLabelLength = 0;
prvFindObjectInListByHandle( hObject, &xPalHandle, &pcLabel, &xLabelLength );
if( xPalHandle == CK_INVALID_HANDLE )
{
xResult = CKR_OBJECT_HANDLE_INVALID;
}
if( xResult == CKR_OK )
{
xResult = PKCS11_PAL_DestroyObject( xPalHandle );
if( xResult == CKR_OK )
{
xResult = prvDeleteObjectFromList( xPalHandle );
}
else
{
LogError( ( "Failed to destroy object. PKCS11_PAL_DestroyObject failed." ) );
}
}
else
{
LogError( ( "Failed to destroy object. The session was invalid." ) );
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_destroyobject] */
/**
* @brief Obtains an attribute value of an object.
* @param[in] hSession Handle of a valid PKCS #11 session.
* @param[in] hObject PKCS #11 object handle to be queried.
* @param[in,out] pTemplate Attribute template.
* pTemplate[ i ].type should be set to the attribute
* to be queried. pTemplate[ i ].pValue should point to
* the buffer that will receive the attribute value, and
* pTemplate[ i ].ulValueLen should be set to the length
* of the buffer allocated at pTemplate[ i ].pValue; it
* will be updated to contain the actual length of the
* data copied.
* If the required length is unknown,
* pTemplate[ i ].pValue may be set to NULL, and
* this function will set the required buffer length
* in pTemplate[ i ].ulValueLen.
* @param[in] ulCount The number of attributes in the template.
*
* <table>
* <tr><th> Object Type <th> Queryable Attributes
* <tr><td rowspan="2">Certificate<td>CKA_CLASS
* <tr> <td>CKA_VALUE
* <tr><td rowspan="3">EC Private Key<td>CKA_CLASS
* <tr> <td>CKA_KEY_TYPE
* <tr> <td>CKA_EC_PARAMS
* <tr><td rowspan="4">EC Public Key<td>CKA_CLASS
* <tr> <td>CKA_KEY_TYPE
* <tr> <td>CKA_EC_PARAMS
* <tr> <td>CKA_EC_POINT
* <tr><td rowspan="2">RSA Private Key<td>CKA_CLASS
* <tr> <td>CKA_KEY_TYPE
* <tr><td rowspan="2">RSA Public Key<td>CKA_CLASS
* <tr> <td>CKA_KEY_TYPE
* </table>
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_getattributevalue] */
CK_DECLARE_FUNCTION( CK_RV, C_GetAttributeValue )( CK_SESSION_HANDLE hSession,
CK_OBJECT_HANDLE hObject,
CK_ATTRIBUTE_PTR pTemplate,
CK_ULONG ulCount )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
CK_BBOOL xIsPrivate = ( CK_BBOOL ) CK_TRUE;
CK_ULONG iAttrib;
mbedtls_pk_context xKeyContext = { 0 };
mbedtls_x509_crt xMbedX509Context = { 0 };
mbedtls_pk_type_t xKeyType;
const mbedtls_ecp_keypair * pxKeyPair;
CK_KEY_TYPE xPkcsKeyType = ( CK_KEY_TYPE ) ~0UL;
CK_OBJECT_CLASS xClass = ~0UL;
CK_BYTE_PTR pxObjectValue = NULL;
CK_ULONG ulLength = 0;
const CK_BYTE ucP256Oid[] = pkcs11DER_ENCODED_OID_P256;
int32_t lMbedTLSResult = 0;
CK_OBJECT_HANDLE xPalHandle = CK_INVALID_HANDLE;
CK_ULONG xSize = 0;
size_t xMbedSize = 0;
CK_BYTE_PTR pcLabel = NULL;
const P11Session_t * pxSession = prvSessionPointerFromHandle( hSession );
CK_RV xResult = prvCheckValidSessionAndModule( pxSession );
if( ( CKR_OK == xResult ) && ( ( ( NULL == pTemplate ) ) || ( 0UL == ulCount ) ) )
{
LogError( ( "Failed to get attribute. The template was a NULL pointer "
"or the attribute count was 0." ) );
xResult = CKR_ARGUMENTS_BAD;
}
if( ( CKR_OK == xResult ) && ( CK_INVALID_HANDLE == hObject ) )
{
LogError( ( "Failed to get attribute. The object handle was invalid." ) );
xResult = CKR_OBJECT_HANDLE_INVALID;
}
if( xResult == CKR_OK )
{
/*
* Copy the object into a buffer.
*/
prvFindObjectInListByHandle( hObject, &xPalHandle, &pcLabel, &xSize ); /*pcLabel and xSize are ignored. */
if( xPalHandle != CK_INVALID_HANDLE )
{
xResult = PKCS11_PAL_GetObjectValue( xPalHandle, &pxObjectValue, &ulLength, &xIsPrivate );
}
else
{
LogError( ( "Failed to get attribute. Could not find a valid "
"PKCS #11 PAL handle." ) );
xResult = CKR_OBJECT_HANDLE_INVALID;
}
}
/* Determine what kind of object we are dealing with. */
if( xResult == CKR_OK )
{
/* Initialize mbed TLS key context. */
mbedtls_pk_init( &xKeyContext );
/* Initialize mbed TLS x509 context. */
mbedtls_x509_crt_init( &xMbedX509Context );
if( 0 == mbedtls_pk_parse_key( &xKeyContext, pxObjectValue, ulLength, NULL, 0 ) )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xIsPrivate == ( CK_BBOOL ) CK_TRUE )
{
LogDebug( ( "Object was a private key." ) );
xClass = CKO_PRIVATE_KEY;
}
else
{
LogDebug( ( "Object was a public key." ) );
xClass = CKO_PUBLIC_KEY;
}
}
else if( 0 == mbedtls_pk_parse_public_key( &xKeyContext, pxObjectValue, ulLength ) )
{
LogDebug( ( "Object was a public key." ) );
xClass = CKO_PUBLIC_KEY;
}
else if( 0 == mbedtls_x509_crt_parse( &xMbedX509Context, pxObjectValue, ulLength ) )
{
LogDebug( ( "Object was a certificate." ) );
xClass = CKO_CERTIFICATE;
}
else
{
LogDebug( ( "Object handle was invalid." ) );
xResult = CKR_OBJECT_HANDLE_INVALID;
}
}
if( xResult == CKR_OK )
{
for( iAttrib = 0; iAttrib < ulCount; iAttrib++ )
{
if( xResult != CKR_OK )
{
break;
}
switch( pTemplate[ iAttrib ].type )
{
case CKA_CLASS:
if( pTemplate[ iAttrib ].pValue == NULL )
{
pTemplate[ iAttrib ].ulValueLen = sizeof( CK_OBJECT_CLASS );
}
else
{
if( pTemplate[ iAttrib ].ulValueLen == sizeof( CK_OBJECT_CLASS ) )
{
( void ) memcpy( pTemplate[ iAttrib ].pValue, &xClass, sizeof( CK_OBJECT_CLASS ) );
}
else
{
LogError( ( "Failed to parse attribute template. "
"Received a buffer smaller than CK_OBJECT_CLASS." ) );
xResult = CKR_BUFFER_TOO_SMALL;
}
}
break;
case CKA_PUBLIC_KEY_INFO:
case CKA_VALUE:
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xIsPrivate == ( CK_BBOOL ) CK_TRUE )
{
pTemplate[ iAttrib ].ulValueLen = CK_UNAVAILABLE_INFORMATION;
LogError( ( "Failed to parse attribute. This data is "
"sensitive and the value will not be returned." ) );
xResult = CKR_ATTRIBUTE_SENSITIVE;
}
else
{
if( pTemplate[ iAttrib ].pValue == NULL )
{
pTemplate[ iAttrib ].ulValueLen = ulLength;
}
else if( pTemplate[ iAttrib ].ulValueLen < ulLength )
{
LogError( ( "Failed to parse attribute. Buffer was "
"too small to contain data. Expected %lu "
"but got %lu.",
( unsigned long int ) ulLength,
( unsigned long int ) pTemplate[ iAttrib ].ulValueLen ) );
xResult = CKR_BUFFER_TOO_SMALL;
}
else
{
( void ) memcpy( pTemplate[ iAttrib ].pValue, pxObjectValue, ulLength );
}
}
break;
case CKA_KEY_TYPE:
if( pTemplate[ iAttrib ].pValue == NULL )
{
pTemplate[ iAttrib ].ulValueLen = sizeof( CK_KEY_TYPE );
}
else if( pTemplate[ iAttrib ].ulValueLen < sizeof( CK_KEY_TYPE ) )
{
LogError( ( "Failed to parse attribute. Expected buffer "
"of size CK_KEY_TYPE." ) );
xResult = CKR_BUFFER_TOO_SMALL;
}
else
{
xKeyType = mbedtls_pk_get_type( &xKeyContext );
switch( xKeyType )
{
case MBEDTLS_PK_RSA:
case MBEDTLS_PK_RSA_ALT:
case MBEDTLS_PK_RSASSA_PSS:
xPkcsKeyType = CKK_RSA;
break;
case MBEDTLS_PK_ECKEY:
case MBEDTLS_PK_ECKEY_DH:
xPkcsKeyType = CKK_EC;
break;
case MBEDTLS_PK_ECDSA:
xPkcsKeyType = CKK_ECDSA;
break;
default:
LogError( ( "Failed to parse attribute. "
"Could not parse key type." ) );
xResult = CKR_ATTRIBUTE_VALUE_INVALID;
break;
}
( void ) memcpy( pTemplate[ iAttrib ].pValue, &xPkcsKeyType, sizeof( CK_KEY_TYPE ) );
}
break;
case CKA_PRIVATE_EXPONENT:
LogError( ( "Failed to parse attribute. "
"CKA_PRIVATE_EXPONENT is private data." ) );
xResult = CKR_ATTRIBUTE_SENSITIVE;
break;
case CKA_EC_PARAMS:
if( pTemplate[ iAttrib ].pValue == NULL )
{
pTemplate[ iAttrib ].ulValueLen = sizeof( ucP256Oid );
}
else
{
if( pTemplate[ iAttrib ].ulValueLen < sizeof( ucP256Oid ) )
{
LogError( ( "Failed to parse attribute. "
"CKA_EC_PARAMS buffer too small." ) );
xResult = CKR_BUFFER_TOO_SMALL;
}
else
{
( void ) memcpy( pTemplate[ iAttrib ].pValue, ucP256Oid, sizeof( ucP256Oid ) );
}
}
break;
case CKA_EC_POINT:
if( pTemplate[ iAttrib ].pValue == NULL )
{
pTemplate[ iAttrib ].ulValueLen = pkcs11EC_POINT_LENGTH;
}
else
{
if( pTemplate[ iAttrib ].ulValueLen == pkcs11EC_POINT_LENGTH )
{
pxKeyPair = ( mbedtls_ecp_keypair * ) xKeyContext.pk_ctx;
*( ( uint8_t * ) pTemplate[ iAttrib ].pValue ) = 0x04; /* Mark the point as uncompressed. */
/* Copy xSize value to avoid casting a CK_ULONG size pointer
* to a size_t sized pointer. */
xMbedSize = xSize;
lMbedTLSResult = mbedtls_ecp_tls_write_point( &pxKeyPair->grp,
&pxKeyPair->Q,
MBEDTLS_ECP_PF_UNCOMPRESSED,
&xMbedSize,
( uint8_t * ) pTemplate[ iAttrib ].pValue + 1,
pTemplate[ iAttrib ].ulValueLen - 1UL );
xSize = xMbedSize;
}
else
{
xResult = CKR_BUFFER_TOO_SMALL;
}
if( ( xResult == CKR_OK ) && ( lMbedTLSResult < 0 ) )
{
if( lMbedTLSResult == MBEDTLS_ERR_ECP_BUFFER_TOO_SMALL )
{
LogError( ( "Failed to extract EC point. "
"CKA_EC_POINT buffer was too small." ) );
xResult = CKR_BUFFER_TOO_SMALL;
}
else
{
LogError( ( "Failed to extract EC point. "
"mbedtls_ecp_tls_write_point failed: "
"mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
xResult = CKR_FUNCTION_FAILED;
}
}
else
{
pTemplate[ iAttrib ].ulValueLen = xSize + 1UL;
}
}
break;
default:
LogError( ( "Failed to parse attribute. Received unknown "
"attribute type." ) );
xResult = CKR_ATTRIBUTE_TYPE_INVALID;
break;
}
}
/* Free the buffer where object was stored. */
PKCS11_PAL_GetObjectValueCleanup( pxObjectValue, ulLength );
/* Free the mbedTLS structure used to parse the key. */
mbedtls_pk_free( &xKeyContext );
/* Free the mbedTLS structure used to parse the certificate. */
mbedtls_x509_crt_free( &xMbedX509Context );
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_getattributevalue] */
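/*
 * Illustrative usage sketch (not part of this module): the length-query idiom
 * described above. The first call with pValue set to NULL reports the needed
 * buffer size; the second call copies the data. xSession and xObject are
 * assumed to be valid handles owned by the caller.
 *
 *   CK_BYTE ucEcParams[ 16 ] = { 0 };
 *   CK_ATTRIBUTE xAttr = { CKA_EC_PARAMS, NULL, 0 };
 *   CK_RV xRv = C_GetAttributeValue( xSession, xObject, &xAttr, 1 );
 *
 *   if( ( xRv == CKR_OK ) && ( xAttr.ulValueLen <= sizeof( ucEcParams ) ) )
 *   {
 *       xAttr.pValue = ucEcParams;
 *       xRv = C_GetAttributeValue( xSession, xObject, &xAttr, 1 );
 *   }
 */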
/**
* @brief Initializes an object search operation.
*
* \sa C_FindObjects() and C_FindObjectsFinal() which must be called
* after C_FindObjectsInit().
*
* \note FindObjects parameters are shared by a session. Calling
* C_FindObjectsInit(), C_FindObjects(), and C_FindObjectsFinal() with the
* same session across different tasks may lead to unexpected results.
*
* @param[in] hSession Handle of a valid PKCS #11 session.
* @param[in] pTemplate Pointer to a template which specifies
* the object attributes to match.
* In this port, the only searchable attribute
* is object label. All other attributes will
* be ignored.
* @param[in] ulCount The number of attributes in pTemplate.
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_findobjectsinit] */
CK_DECLARE_FUNCTION( CK_RV, C_FindObjectsInit )( CK_SESSION_HANDLE hSession,
CK_ATTRIBUTE_PTR pTemplate,
CK_ULONG ulCount )
{
P11Session_t * pxSession = prvSessionPointerFromHandle( hSession );
CK_RV xResult = prvCheckValidSessionAndModule( pxSession );
CK_BYTE * pxFindObjectLabel = NULL;
uint32_t ulIndex;
CK_ATTRIBUTE xAttribute;
if( NULL == pTemplate )
{
xResult = CKR_ARGUMENTS_BAD;
}
if( ( ulCount != 1UL ) && ( ulCount != 2UL ) )
{
xResult = CKR_ARGUMENTS_BAD;
LogError( ( "Failed to initialize find object operation. Find objects "
"does not support searching by %lu attributes. Expected to "
"search with either 1 or 2 attributes.", ( unsigned long int ) ulCount ) );
}
if( xResult == CKR_OK )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( prvOperationActive( pxSession ) == ( CK_BBOOL ) CK_TRUE )
{
xResult = CKR_OPERATION_ACTIVE;
LogError( ( "Failed to initialize find object operation. Find "
"object operation was already in progress." ) );
}
}
/* Search template for label.
* NOTE: This port only supports looking up objects by CKA_LABEL and all
* other search attributes are ignored. */
if( xResult == CKR_OK )
{
xResult = CKR_TEMPLATE_INCOMPLETE;
for( ulIndex = 0; ulIndex < ulCount; ulIndex++ )
{
xAttribute = pTemplate[ ulIndex ];
if( ( xAttribute.type == CKA_LABEL ) && ( xAttribute.ulValueLen <= pkcs11configMAX_LABEL_LENGTH ) )
{
/* Plus one to leave room for a NULL terminator. */
pxFindObjectLabel = mbedtls_calloc( 1, xAttribute.ulValueLen + 1UL );
if( pxFindObjectLabel != NULL )
{
pxSession->xFindObjectLabelLen = xAttribute.ulValueLen;
pxSession->pxFindObjectLabel = pxFindObjectLabel;
( void ) memcpy( pxSession->pxFindObjectLabel, xAttribute.pValue, xAttribute.ulValueLen );
xResult = CKR_OK;
}
else
{
LogError( ( "Failed to initialize find object operation. Failed to "
"allocate %lu bytes.", ( unsigned long int ) xAttribute.ulValueLen + 1UL ) );
xResult = CKR_HOST_MEMORY;
}
}
else
{
LogDebug( ( "Search parameters other than label are ignored." ) );
}
}
}
/* Clean up memory if there was an error parsing the template. */
if( ( pxSession != NULL ) && ( xResult != CKR_OK ) && ( xResult != CKR_OPERATION_ACTIVE ) )
{
mbedtls_free( pxFindObjectLabel );
pxSession->pxFindObjectLabel = NULL;
pxSession->xFindObjectLabelLen = 0;
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_findobjectsinit] */
/**
* @brief Continues an object search operation.
*
* \sa C_FindObjectsInit() which must be called before calling C_FindObjects()
* and C_FindObjectsFinal(), which must be called after.
*
* \note FindObjects parameters are shared by a session. Calling
* C_FindObjectsInit(), C_FindObjects(), and C_FindObjectsFinal() with the
* same session across different tasks may lead to unexpected results.
*
* @param[in] hSession Handle of a valid PKCS #11 session.
* @param[out] phObject Points to the handle of the object to
* be found.
* @param[in] ulMaxObjectCount The size of the phObject object handle
* array. In this port, this value should
* always be set to 1, as searching for
* multiple objects is not supported.
* @param[out] pulObjectCount The actual number of objects that are
* found. In this port, if an object is found
* this value will be 1, otherwise if the
* object is not found, it will be set to 0.
*
* \note In the event that an object does not exist, CKR_OK will be returned, but
* pulObjectCount will be set to 0.
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_findobjects] */
CK_DECLARE_FUNCTION( CK_RV, C_FindObjects )( CK_SESSION_HANDLE hSession,
CK_OBJECT_HANDLE_PTR phObject,
CK_ULONG ulMaxObjectCount,
CK_ULONG_PTR pulObjectCount )
{
P11Session_t * pxSession = prvSessionPointerFromHandle( hSession );
CK_RV xResult = prvCheckValidSessionAndModule( pxSession );
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
CK_OBJECT_HANDLE xPalHandle = CK_INVALID_HANDLE;
/*
* Check parameters.
*/
if( ( NULL == phObject ) ||
( NULL == pulObjectCount ) )
{
LogError( ( "Failed to find objects. The object handle or the object "
"count pointer was NULL." ) );
xResult = CKR_ARGUMENTS_BAD;
}
if( xResult == CKR_OK )
{
if( pxSession->pxFindObjectLabel == NULL )
{
LogError( ( "Failed to find objects. The PKCS #11 stack must be "
"initialized before any operations." ) );
xResult = CKR_OPERATION_NOT_INITIALIZED;
}
if( 1u != ulMaxObjectCount )
{
xResult = CKR_ARGUMENTS_BAD;
LogError( ( "Failed to find objects. Searching for anything other "
"than 1 object at a time is not supported." ) );
}
}
if( xResult == CKR_OK )
{
/* Try to find the object in module's list first. */
prvFindObjectInListByLabel( pxSession->pxFindObjectLabel, pxSession->xFindObjectLabelLen, &xPalHandle, phObject );
/* Check with the PAL if the object was previously stored. */
if( *phObject == CK_INVALID_HANDLE )
{
LogDebug( ( "Could not find the object handle in the list. "
"Trying to search PKCS #11 PAL for object." ) );
xPalHandle = PKCS11_PAL_FindObject( pxSession->pxFindObjectLabel, pxSession->xFindObjectLabelLen );
}
if( xPalHandle != CK_INVALID_HANDLE )
{
LogDebug( ( "Found object in PAL. Adding object handle to list." ) );
xResult = prvAddObjectToList( xPalHandle, phObject, pxSession->pxFindObjectLabel, pxSession->xFindObjectLabelLen );
*pulObjectCount = 1;
}
else
{
/* Note: Objects living in header files are not destroyed. */
/* According to the PKCS #11 standard, not finding an object results in a CKR_OK return value with an object count of 0. */
*pulObjectCount = 0;
}
}
/* Clean up memory if there was an error finding the object. */
if( xResult != CKR_OK )
{
if( pxSession != NULL )
{
mbedtls_free( pxSession->pxFindObjectLabel );
pxSession->pxFindObjectLabel = NULL;
pxSession->xFindObjectLabelLen = 0;
}
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_findobjects] */
/**
* @brief Finishes an object search operation.
*
* \sa C_FindObjectsInit(), C_FindObjects() which must be called before
* calling C_FindObjectsFinal().
*
* \note FindObjects parameters are shared by a session. Calling
* C_FindObjectsInit(), C_FindObjects(), and C_FindObjectsFinal() with the
* same session across different tasks may lead to unexpected results.
*
*
* @param[in] hSession Handle of a valid PKCS #11 session.
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_findobjectsfinal] */
CK_DECLARE_FUNCTION( CK_RV, C_FindObjectsFinal )( CK_SESSION_HANDLE hSession )
{
P11Session_t * pxSession = prvSessionPointerFromHandle( hSession );
CK_RV xResult = prvCheckValidSessionAndModule( pxSession );
/*
* Check parameters.
*/
if( xResult == CKR_OK )
{
if( pxSession->pxFindObjectLabel == NULL )
{
LogError( ( "Failed to end find objects operation. Find operation "
"must be initialized." ) );
xResult = CKR_OPERATION_NOT_INITIALIZED;
}
}
if( xResult == CKR_OK )
{
/*
* Clean-up find objects state.
*/
mbedtls_free( pxSession->pxFindObjectLabel );
pxSession->pxFindObjectLabel = NULL;
pxSession->xFindObjectLabelLen = 0;
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_findobjectsfinal] */
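/*
 * Illustrative usage sketch (not part of the original module, compiled out):
 * a full C_FindObjectsInit() / C_FindObjects() / C_FindObjectsFinal()
 * sequence for this port, which returns at most one object per search. The
 * session handle, label, and error handling are illustrative only.
 */
#if 0
static CK_RV prvExampleFindObjectByLabel( CK_SESSION_HANDLE xSession,
                                          CK_BYTE_PTR pucLabel,
                                          CK_ULONG ulLabelLen,
                                          CK_OBJECT_HANDLE_PTR pxObject )
{
    CK_RV xResult;
    CK_ULONG ulCount = 0;
    CK_ATTRIBUTE xTemplate = { CKA_LABEL, pucLabel, ulLabelLen };

    *pxObject = CK_INVALID_HANDLE;
    xResult = C_FindObjectsInit( xSession, &xTemplate, 1UL );

    if( xResult == CKR_OK )
    {
        /* ulMaxObjectCount must be 1 in this port. A CKR_OK return with a
         * count of 0 means the object does not exist. */
        xResult = C_FindObjects( xSession, pxObject, 1UL, &ulCount );
    }

    if( xResult == CKR_OK )
    {
        /* Always end the search so the session's label buffer is freed. */
        xResult = C_FindObjectsFinal( xSession );
    }

    return xResult;
}
#endif /* Usage sketch only. */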
/**
* @brief Initializes a message-digesting operation.
*
* \sa C_DigestUpdate(), C_DigestFinal()
*
* \note Digest parameters are shared by a session. Calling
* C_DigestInit(), C_DigestUpdate(), and C_DigestFinal() with the
* same session across different tasks may lead to unexpected results.
*
*
* @param[in] hSession Handle of a valid PKCS #11 session.
* @param[in] pMechanism Digesting mechanism. This port only supports
* the mechanism CKM_SHA256.
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_digestinit] */
CK_DECLARE_FUNCTION( CK_RV, C_DigestInit )( CK_SESSION_HANDLE hSession,
CK_MECHANISM_PTR pMechanism )
{
P11Session_t * pxSession = prvSessionPointerFromHandle( hSession );
CK_RV xResult = prvCheckValidSessionAndModule( pxSession );
int32_t lMbedTLSResult = 0;
if( pMechanism == NULL )
{
LogError( ( "Failed to initialize digest operation. Mechanism pointer "
"was NULL." ) );
xResult = CKR_ARGUMENTS_BAD;
}
if( xResult == CKR_OK )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( prvOperationActive( pxSession ) == ( CK_BBOOL ) CK_TRUE )
{
LogError( ( "Failed to initialize digest operation. An operation "
"was already active." ) );
xResult = CKR_OPERATION_ACTIVE;
}
}
if( xResult == CKR_OK )
{
if( pMechanism->mechanism != CKM_SHA256 )
{
LogError( ( "Failed to initialize digest operation. Currently only "
"the CKM_SHA256 mechanism is supported." ) );
xResult = CKR_MECHANISM_INVALID;
}
}
/*
* Initialize the requested hash type
*/
if( xResult == CKR_OK )
{
mbedtls_sha256_init( &pxSession->xSHA256Context );
lMbedTLSResult = mbedtls_sha256_starts_ret( &pxSession->xSHA256Context, 0 );
if( 0 != lMbedTLSResult )
{
LogError( ( "Failed to initialize digest operation. "
"mbedtls_sha256_starts_ret failed with: mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
xResult = CKR_FUNCTION_FAILED;
}
else
{
pxSession->xOperationDigestMechanism = pMechanism->mechanism;
}
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_digestinit] */
/**
* @brief Continues a multiple-part digesting operation.
*
* \sa C_DigestInit(), C_DigestFinal()
*
* \note Digest parameters are shared by a session. Calling
* C_DigestInit(), C_DigestUpdate(), and C_DigestFinal() with the
* same session across different tasks may lead to unexpected results.
*
*
* @param[in] hSession Handle of a valid PKCS #11 session.
* @param[in] pPart Pointer to the data to be added to the digest.
* @param[in] ulPartLen Length of the data located at pPart.
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_digestupdate] */
CK_DECLARE_FUNCTION( CK_RV, C_DigestUpdate )( CK_SESSION_HANDLE hSession,
CK_BYTE_PTR pPart,
CK_ULONG ulPartLen )
{
P11Session_t * pxSession = prvSessionPointerFromHandle( hSession );
CK_RV xResult = prvCheckValidSessionAndModule( pxSession );
int32_t lMbedTLSResult = 0;
if( pPart == NULL )
{
LogError( ( "Failed to start digest operation. Received a NULL pointer "
"in digest request." ) );
xResult = CKR_ARGUMENTS_BAD;
}
if( xResult == CKR_OK )
{
if( pxSession->xOperationDigestMechanism != CKM_SHA256 )
{
LogError( ( "Failed to start digest operation. CKM_SHA256 is the "
"expected digest mechanism." ) );
xResult = CKR_OPERATION_NOT_INITIALIZED;
}
}
if( xResult == CKR_OK )
{
lMbedTLSResult = mbedtls_sha256_update_ret( &pxSession->xSHA256Context, pPart, ulPartLen );
if( 0 != lMbedTLSResult )
{
LogError( ( "Failed to perform digest operation. "
"mbedtls_sha256_update_ret failed: mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
pxSession->xOperationDigestMechanism = pkcs11NO_OPERATION;
xResult = CKR_FUNCTION_FAILED;
}
}
if( ( xResult != CKR_OK ) && ( xResult != CKR_SESSION_HANDLE_INVALID ) &&
( xResult != CKR_OPERATION_NOT_INITIALIZED ) )
{
LogDebug( ( "Tearing down operation due to errors." ) );
pxSession->xOperationDigestMechanism = pkcs11NO_OPERATION;
mbedtls_sha256_free( &pxSession->xSHA256Context );
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_digestupdate] */
/**
* @brief Finishes a multiple-part digesting operation.
*
* \sa C_DigestInit(), C_DigestUpdate()
*
* \note Digest parameters are shared by a session. Calling
* C_DigestInit(), C_DigestUpdate(), and C_DigestFinal() with the
* same session across different tasks may lead to unexpected results.
*
*
* @param[in] hSession Handle of a valid PKCS #11 session.
* @param[out] pDigest Pointer to the location that receives
* the message digest. Memory must be allocated
* by the caller.
* Providing NULL for this input will cause
* pulDigestLen to be updated with the length of
* the buffer required.
* @param[in,out] pulDigestLen Points to the location that holds the length
* of the message digest. If pDigest is NULL,
* this value is updated to contain the length
* of the buffer needed to hold the digest. Else
* it is updated to contain the actual length of
* the digest placed in pDigest.
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_digestfinal] */
CK_DECLARE_FUNCTION( CK_RV, C_DigestFinal )( CK_SESSION_HANDLE hSession,
CK_BYTE_PTR pDigest,
CK_ULONG_PTR pulDigestLen )
{
P11Session_t * pxSession = prvSessionPointerFromHandle( hSession );
CK_RV xResult = prvCheckValidSessionAndModule( pxSession );
int32_t lMbedTLSResult = 0;
if( pulDigestLen == NULL )
{
LogError( ( "Failed to finish digest operation. Digest Length pointer "
"was NULL." ) );
xResult = CKR_ARGUMENTS_BAD;
}
if( xResult == CKR_OK )
{
if( pxSession->xOperationDigestMechanism != CKM_SHA256 )
{
LogError( ( "Failed to finish digest operation. Digest operation "
"was not initialized." ) );
xResult = CKR_OPERATION_NOT_INITIALIZED;
pxSession->xOperationDigestMechanism = pkcs11NO_OPERATION;
}
}
if( xResult == CKR_OK )
{
if( pDigest == NULL )
{
/* Supply the required buffer size. */
*pulDigestLen = ( CK_ULONG ) pkcs11SHA256_DIGEST_LENGTH;
}
else
{
if( *pulDigestLen == ( CK_ULONG ) pkcs11SHA256_DIGEST_LENGTH )
{
lMbedTLSResult = mbedtls_sha256_finish_ret( &pxSession->xSHA256Context, pDigest );
if( 0 != lMbedTLSResult )
{
LogError( ( "Failed to finish digest operation. "
"mbedtls_sha256_finish_ret failed: mbed TLS "
"error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
xResult = CKR_FUNCTION_FAILED;
}
pxSession->xOperationDigestMechanism = pkcs11NO_OPERATION;
}
else
{
LogError( ( "Failed to finish digest operation. Received a "
"buffer that was an unexpected size. Expected %lu and "
"received %lu.",
( unsigned long int ) pkcs11SHA256_DIGEST_LENGTH,
( unsigned long int ) *pulDigestLen ) );
xResult = CKR_BUFFER_TOO_SMALL;
}
}
}
if( ( xResult != CKR_OK ) && ( xResult != CKR_BUFFER_TOO_SMALL ) &&
( xResult != CKR_SESSION_HANDLE_INVALID ) &&
( xResult != CKR_OPERATION_NOT_INITIALIZED ) )
{
LogDebug( ( "Error occurred, tearing down digest operation." ) );
pxSession->xOperationDigestMechanism = pkcs11NO_OPERATION;
mbedtls_sha256_free( &pxSession->xSHA256Context );
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_digestfinal] */
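/*
 * Illustrative usage sketch (not part of the original module, compiled out):
 * hashing a buffer with the C_DigestInit() / C_DigestUpdate() /
 * C_DigestFinal() sequence, querying the required digest length first by
 * passing a NULL output buffer. The session handle and input are hypothetical.
 */
#if 0
static CK_RV prvExampleSha256( CK_SESSION_HANDLE xSession,
                               CK_BYTE_PTR pucData,
                               CK_ULONG ulDataLen,
                               CK_BYTE pucDigest[ pkcs11SHA256_DIGEST_LENGTH ] )
{
    CK_RV xResult;
    CK_ULONG ulDigestLen = 0;
    CK_MECHANISM xMechanism = { CKM_SHA256, NULL, 0 };

    xResult = C_DigestInit( xSession, &xMechanism );

    if( xResult == CKR_OK )
    {
        xResult = C_DigestUpdate( xSession, pucData, ulDataLen );
    }

    if( xResult == CKR_OK )
    {
        /* A NULL buffer only reports the required length (32 bytes here). */
        xResult = C_DigestFinal( xSession, NULL, &ulDigestLen );
    }

    if( ( xResult == CKR_OK ) && ( ulDigestLen == pkcs11SHA256_DIGEST_LENGTH ) )
    {
        xResult = C_DigestFinal( xSession, pucDigest, &ulDigestLen );
    }

    return xResult;
}
#endif /* Usage sketch only. */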
/**
* @brief Initializes a signature operation.
*
* \sa C_Sign() completes signatures initiated by C_SignInit().
*
* \note C_Sign() parameters are shared by a session. Calling
* C_SignInit() & C_Sign() with the same session across different
* tasks may lead to unexpected results.
*
*
* @param[in] hSession Handle of a valid PKCS #11 session.
* @param[in] pMechanism Mechanism used to sign.
* This port supports the following mechanisms:
* - CKM_RSA_PKCS for RSA signatures
* - CKM_ECDSA for elliptic curve signatures
* Note that neither of these mechanisms perform
* hash operations.
* @param[in] hKey The handle of the private key to be used for
* signature. Key must be compatible with the
* mechanism chosen by pMechanism.
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_signinit] */
CK_DECLARE_FUNCTION( CK_RV, C_SignInit )( CK_SESSION_HANDLE hSession,
CK_MECHANISM_PTR pMechanism,
CK_OBJECT_HANDLE hKey )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
CK_BBOOL xIsPrivate = ( CK_BBOOL ) CK_TRUE;
CK_OBJECT_HANDLE xPalHandle;
CK_BYTE_PTR pxLabel = NULL;
CK_ULONG xLabelLength = 0;
mbedtls_pk_type_t xKeyType;
P11Session_t * pxSession = prvSessionPointerFromHandle( hSession );
CK_RV xResult = prvCheckValidSessionAndModule( pxSession );
CK_BYTE_PTR pulKeyData = NULL;
CK_ULONG ulKeyDataLength = 0;
int32_t lMbedTLSResult = 0;
if( NULL == pMechanism )
{
LogError( ( "Failed to initialize sign operation. NULL pointer to "
"signing mechanism provided." ) );
xResult = CKR_ARGUMENTS_BAD;
}
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( ( xResult == CKR_OK ) && ( prvOperationActive( pxSession ) == ( CK_BBOOL ) CK_TRUE ) )
{
LogError( ( "Failed to initialize sign operation. Operation already active." ) );
xResult = CKR_OPERATION_ACTIVE;
}
/* Retrieve key value from storage. */
if( xResult == CKR_OK )
{
prvFindObjectInListByHandle( hKey,
&xPalHandle,
&pxLabel,
&xLabelLength );
if( xPalHandle != CK_INVALID_HANDLE )
{
xResult = PKCS11_PAL_GetObjectValue( xPalHandle, &pulKeyData, &ulKeyDataLength, &xIsPrivate );
if( xResult != CKR_OK )
{
LogError( ( "Failed to initialize sign operation. Unable to "
"retrieve value of private key for signing 0x%0lX.", ( unsigned long int ) xResult ) );
xResult = CKR_KEY_HANDLE_INVALID;
}
}
else
{
LogDebug( ( "Could not find PKCS #11 PAL Handle." ) );
xResult = CKR_KEY_HANDLE_INVALID;
}
}
/* Check that a private key was retrieved. */
if( xResult == CKR_OK )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xIsPrivate != ( CK_BBOOL ) CK_TRUE )
{
LogError( ( "Failed to initialize sign operation. Sign operation "
"attempted with public key." ) );
xResult = CKR_KEY_TYPE_INCONSISTENT;
}
}
/* Convert the private key from storage format to mbedTLS usable format. */
if( xResult == CKR_OK )
{
/* Grab the sign mutex. This ensures that no signing operation
* is underway on another thread where modification of key would lead to hard fault.*/
if( 0 == mbedtls_mutex_lock( &pxSession->xSignMutex ) )
{
if( ( pxSession->xSignKeyHandle == CK_INVALID_HANDLE ) || ( pxSession->xSignKeyHandle != hKey ) )
{
pxSession->xSignKeyHandle = CK_INVALID_HANDLE;
mbedtls_pk_free( &pxSession->xSignKey );
mbedtls_pk_init( &pxSession->xSignKey );
lMbedTLSResult = mbedtls_pk_parse_key( &pxSession->xSignKey, pulKeyData, ulKeyDataLength, NULL, 0 );
if( lMbedTLSResult != 0 )
{
LogError( ( "Failed to initialize sign operation. "
"mbedtls_pk_parse_key failed: mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
xResult = CKR_KEY_HANDLE_INVALID;
}
else
{
pxSession->xSignKeyHandle = hKey;
}
}
( void ) mbedtls_mutex_unlock( &pxSession->xSignMutex );
/* Key has been parsed into mbedTLS pk structure.
* Free the memory allocated to copy the key out of flash. */
PKCS11_PAL_GetObjectValueCleanup( pulKeyData, ulKeyDataLength );
}
else
{
LogError( ( "Failed to initialize sign operation. Could not "
"take xSignMutex." ) );
xResult = CKR_CANT_LOCK;
}
}
/* Check that the mechanism and key type are compatible, supported. */
if( xResult == CKR_OK )
{
xKeyType = mbedtls_pk_get_type( &pxSession->xSignKey );
if( pMechanism->mechanism == CKM_RSA_PKCS )
{
if( xKeyType != MBEDTLS_PK_RSA )
{
LogError( ( "Failed to initialize sign operation. Signing key "
"type (0x%0lX) does not match RSA mechanism.", ( unsigned long int ) xKeyType ) );
xResult = CKR_KEY_TYPE_INCONSISTENT;
}
}
else if( pMechanism->mechanism == CKM_ECDSA )
{
if( ( xKeyType != MBEDTLS_PK_ECDSA ) && ( xKeyType != MBEDTLS_PK_ECKEY ) )
{
LogError( ( "Failed to initialize sign operation. Signing key "
"type (0x%0lX) does not match ECDSA mechanism.", ( unsigned long int ) xKeyType ) );
xResult = CKR_KEY_TYPE_INCONSISTENT;
}
}
else
{
LogError( ( "Failed to initialize sign operation. Unsupported "
"mechanism type (0x%0lX).", ( unsigned long int ) pMechanism->mechanism ) );
xResult = CKR_MECHANISM_INVALID;
}
}
if( xResult == CKR_OK )
{
pxSession->xOperationSignMechanism = pMechanism->mechanism;
LogDebug( ( "Successfully started sign operation." ) );
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_signinit] */
/**
* @brief Signs single-part data.
*
* \sa C_SignInit() which initiates the signature creation operation.
*
* \note C_Sign() parameters are shared by a session. Calling
* C_SignInit() & C_Sign() with the same session across different
* tasks may lead to unexpected results.
*
*
* @param[in] hSession Handle of a valid PKCS #11 session.
* @param[in] pData Data to be signed.
* Note: Some applications may require this data to
* be hashed before passing to C_Sign().
* @param[in] ulDataLen Length of pData, in bytes.
* @param[out] pSignature Buffer where signature will be placed.
* Caller is responsible for allocating memory.
* Providing NULL for this input will cause
* pulSignatureLen to be updated for length of
* buffer required.
* @param[in,out] pulSignatureLen Length of the pSignature buffer.
* If pSignature is non-NULL, pulSignatureLen is
* updated to contain the actual signature length.
* If pSignature is NULL, pulSignatureLen is
* updated to the buffer length required for signature
* data.
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_sign] */
CK_DECLARE_FUNCTION( CK_RV, C_Sign )( CK_SESSION_HANDLE hSession,
CK_BYTE_PTR pData,
CK_ULONG ulDataLen,
CK_BYTE_PTR pSignature,
CK_ULONG_PTR pulSignatureLen )
{
P11Session_t * pxSessionObj = prvSessionPointerFromHandle( hSession );
CK_RV xResult = prvCheckValidSessionAndModule( pxSessionObj );
CK_ULONG xSignatureLength = 0;
size_t xExpectedInputLength = 0;
CK_BYTE_PTR pxSignatureBuffer = pSignature;
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
CK_BBOOL xSignatureGenerated = ( CK_BBOOL ) CK_FALSE;
/* 8 bytes added to hold ASN.1 encoding information. */
uint8_t ecSignature[ pkcs11ECDSA_P256_SIGNATURE_LENGTH + 8 ];
int32_t lMbedTLSResult;
mbedtls_md_type_t xHashType = MBEDTLS_MD_NONE;
if( ( NULL == pulSignatureLen ) || ( NULL == pData ) )
{
LogError( ( "Failed sign operation. Received a NULL pointer." ) );
xResult = CKR_ARGUMENTS_BAD;
}
if( CKR_OK == xResult )
{
/* Update the signature length. */
if( pxSessionObj->xOperationSignMechanism == CKM_RSA_PKCS )
{
xSignatureLength = pkcs11RSA_2048_SIGNATURE_LENGTH;
xExpectedInputLength = pkcs11RSA_SIGNATURE_INPUT_LENGTH;
}
else if( pxSessionObj->xOperationSignMechanism == CKM_ECDSA )
{
xSignatureLength = pkcs11ECDSA_P256_SIGNATURE_LENGTH;
xExpectedInputLength = pkcs11SHA256_DIGEST_LENGTH;
pxSignatureBuffer = ecSignature;
xHashType = MBEDTLS_MD_SHA256;
}
else
{
LogError( ( "Failed sign operation. The sign operation was not "
"initialized with a call to C_SignInit." ) );
xResult = CKR_OPERATION_NOT_INITIALIZED;
}
}
if( xResult == CKR_OK )
{
/* If the caller passed a NULL pSignature, skip signing and only report the required buffer length below. Otherwise, validate the buffers and sign. */
if( NULL != pSignature )
{
/* Check that the signature buffer is long enough. */
if( *pulSignatureLen < xSignatureLength )
{
LogError( ( "Failed sign operation. The signature buffer was "
"too small. Expected: %lu bytes and received %lu "
"bytes.",
( unsigned long int ) xSignatureLength,
( unsigned long int ) *pulSignatureLen ) );
xResult = CKR_BUFFER_TOO_SMALL;
}
/* Check that input data to be signed is the expected length. */
if( CKR_OK == xResult )
{
if( xExpectedInputLength != ulDataLen )
{
LogError( ( "Failed sign operation. The data buffer was "
"too small. Expected: %lu bytes and received "
"%lu bytes.",
( unsigned long int ) xExpectedInputLength,
( unsigned long int ) ulDataLen ) );
xResult = CKR_DATA_LEN_RANGE;
}
}
/* Sign the data.*/
if( CKR_OK == xResult )
{
if( 0 == mbedtls_mutex_lock( &pxSessionObj->xSignMutex ) )
{
/* Per mbed TLS documentation, if using RSA, md_alg should
* be MBEDTLS_MD_NONE. If ECDSA, md_alg should never be
* MBEDTLS_MD_NONE. SHA-256 will be used for ECDSA for
* consistency with the rest of the port.
*/
lMbedTLSResult = mbedtls_pk_sign( &pxSessionObj->xSignKey,
xHashType,
pData,
ulDataLen,
pxSignatureBuffer,
&xExpectedInputLength,
mbedtls_ctr_drbg_random,
&xP11Context.xMbedDrbgCtx );
if( lMbedTLSResult != 0 )
{
LogError( ( "Failed sign operation. mbedtls_pk_sign "
"failed: mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
xResult = CKR_FUNCTION_FAILED;
}
( void ) mbedtls_mutex_unlock( &pxSessionObj->xSignMutex );
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
xSignatureGenerated = ( CK_BBOOL ) CK_TRUE;
}
else
{
LogError( ( "Failed sign operation. Could not take xSignMutex." ) );
xResult = CKR_CANT_LOCK;
}
}
}
}
if( xResult == CKR_OK )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( ( pxSessionObj->xOperationSignMechanism == CKM_ECDSA ) && ( xSignatureGenerated == ( CK_BBOOL ) CK_TRUE ) )
{
/* If this an EC signature, reformat from ASN.1 encoded to 64-byte R & S components */
lMbedTLSResult = PKI_mbedTLSSignatureToPkcs11Signature( pSignature, ecSignature );
if( lMbedTLSResult != 0 )
{
LogError( ( "Failed sign operation. Failed to convert from "
"ASN.1 encoding to 64 byte R & S components." ) );
xResult = CKR_FUNCTION_FAILED;
}
}
}
if( ( xResult == CKR_OK ) || ( xResult == CKR_BUFFER_TOO_SMALL ) )
{
*pulSignatureLen = xSignatureLength;
}
/* Complete the operation in the context. */
if( ( xResult != CKR_BUFFER_TOO_SMALL ) && ( xResult != CKR_SESSION_HANDLE_INVALID ) )
{
LogDebug( ( "Ended Sign operation." ) );
pxSessionObj->xOperationSignMechanism = pkcs11NO_OPERATION;
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_sign] */
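/*
 * Illustrative usage sketch (not part of the original module, compiled out):
 * signing a 32-byte SHA-256 digest with an EC private key through
 * C_SignInit() and C_Sign(). The key handle is assumed to have been obtained
 * from a prior object search; CKM_ECDSA does not hash, so the caller supplies
 * the digest.
 */
#if 0
static CK_RV prvExampleSignDigest( CK_SESSION_HANDLE xSession,
                                   CK_OBJECT_HANDLE hPrivateKey,
                                   CK_BYTE pucDigest[ pkcs11SHA256_DIGEST_LENGTH ],
                                   CK_BYTE pucSignature[ pkcs11ECDSA_P256_SIGNATURE_LENGTH ],
                                   CK_ULONG_PTR pulSignatureLen )
{
    CK_RV xResult;
    CK_MECHANISM xMechanism = { CKM_ECDSA, NULL, 0 };

    /* The buffer must hold the full 64-byte R & S signature. */
    *pulSignatureLen = pkcs11ECDSA_P256_SIGNATURE_LENGTH;
    xResult = C_SignInit( xSession, &xMechanism, hPrivateKey );

    if( xResult == CKR_OK )
    {
        xResult = C_Sign( xSession, pucDigest, pkcs11SHA256_DIGEST_LENGTH,
                          pucSignature, pulSignatureLen );
    }

    return xResult;
}
#endif /* Usage sketch only. */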
/**
* @brief Initializes a verification operation.
*
* \sa C_Verify() completes verifications initiated by C_VerifyInit().
*
* \note C_Verify() parameters are shared by a session. Calling
* C_VerifyInit() & C_Verify() with the same session across different
* tasks may lead to unexpected results.
*
*
* @param[in] hSession Handle of a valid PKCS #11 session.
* @param[in] pMechanism Mechanism used to verify signature.
* This port supports the following mechanisms:
* - CKM_RSA_X_509 for RSA verifications
* - CKM_ECDSA for elliptic curve verifications
* @param[in] hKey The handle of the public key to be used for
* verification. Key must be compatible with the
* mechanism chosen by pxMechanism.
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_verifyinit] */
CK_DECLARE_FUNCTION( CK_RV, C_VerifyInit )( CK_SESSION_HANDLE hSession,
CK_MECHANISM_PTR pMechanism,
CK_OBJECT_HANDLE hKey )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
CK_BBOOL xIsPrivate = ( CK_BBOOL ) CK_TRUE;
P11Session_t * pxSession;
CK_BYTE_PTR pucKeyData = NULL;
CK_ULONG ulKeyDataLength = 0;
mbedtls_pk_type_t xKeyType;
CK_OBJECT_HANDLE xPalHandle = CK_INVALID_HANDLE;
CK_BYTE_PTR pxLabel = NULL;
CK_ULONG xLabelLength = 0;
int32_t lMbedTLSResult = 0;
CK_RV xResult = CKR_OK;
pxSession = prvSessionPointerFromHandle( hSession );
xResult = prvCheckValidSessionAndModule( pxSession );
if( NULL == pMechanism )
{
LogError( ( "Failed to initialize verify operation. Null verification "
"mechanism provided." ) );
xResult = CKR_ARGUMENTS_BAD;
}
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( ( xResult == CKR_OK ) && ( prvOperationActive( pxSession ) == ( CK_BBOOL ) CK_TRUE ) )
{
LogError( ( "Failed to initialize verify operation. An operation was "
"already active." ) );
xResult = CKR_OPERATION_ACTIVE;
}
/* Retrieve key value from storage. */
if( xResult == CKR_OK )
{
prvFindObjectInListByHandle( hKey,
&xPalHandle,
&pxLabel,
&xLabelLength );
if( xPalHandle != CK_INVALID_HANDLE )
{
xResult = PKCS11_PAL_GetObjectValue( xPalHandle, &pucKeyData, &ulKeyDataLength, &xIsPrivate );
if( xResult != CKR_OK )
{
LogError( ( "Failed to initialize verify operation. Unable to "
"retrieve value of private key for signing 0x%0lX.",
( unsigned long int ) xResult ) );
xResult = CKR_KEY_HANDLE_INVALID;
}
}
else
{
LogError( ( "Failed to initialize verify operation. Couldn't find "
"a valid PKCS #11 PAL Handle." ) );
xResult = CKR_KEY_HANDLE_INVALID;
}
}
/* Check that a public key was retrieved. */
if( xResult == CKR_OK )
{
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xIsPrivate != ( CK_BBOOL ) CK_FALSE )
{
LogError( ( "Failed to initialize verify operation. Verify "
"operation attempted with private key." ) );
xResult = CKR_KEY_TYPE_INCONSISTENT;
}
}
if( xResult == CKR_OK )
{
if( 0 == mbedtls_mutex_lock( &pxSession->xVerifyMutex ) )
{
if( ( pxSession->xVerifyKeyHandle == CK_INVALID_HANDLE ) || ( pxSession->xVerifyKeyHandle != hKey ) )
{
pxSession->xVerifyKeyHandle = CK_INVALID_HANDLE;
mbedtls_pk_free( &pxSession->xVerifyKey );
mbedtls_pk_init( &pxSession->xVerifyKey );
lMbedTLSResult = mbedtls_pk_parse_public_key( &pxSession->xVerifyKey, pucKeyData, ulKeyDataLength );
if( 0 != lMbedTLSResult )
{
lMbedTLSResult = mbedtls_pk_parse_key( &pxSession->xVerifyKey, pucKeyData, ulKeyDataLength, NULL, 0 );
if( 0 != lMbedTLSResult )
{
LogError( ( "Failed to initialize verify operation. "
"mbedtls_pk_parse_key failed: mbed TLS "
"error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
xResult = CKR_KEY_HANDLE_INVALID;
}
else
{
LogDebug( ( "Found verify key handle." ) );
pxSession->xVerifyKeyHandle = hKey;
}
}
else
{
LogDebug( ( "Found verify key handle." ) );
pxSession->xVerifyKeyHandle = hKey;
}
}
( void ) mbedtls_mutex_unlock( &pxSession->xVerifyMutex );
PKCS11_PAL_GetObjectValueCleanup( pucKeyData, ulKeyDataLength );
}
else
{
LogError( ( "Failed to initialize verify operation. Could not "
"take xVerifyMutex." ) );
xResult = CKR_CANT_LOCK;
}
}
/* Check that the mechanism and key type are compatible, supported. */
if( xResult == CKR_OK )
{
xKeyType = mbedtls_pk_get_type( &pxSession->xVerifyKey );
if( pMechanism->mechanism == CKM_RSA_X_509 )
{
if( xKeyType != MBEDTLS_PK_RSA )
{
LogError( ( "Failed to initialize verify operation. "
"Verification key type (0x%0lX) does not match "
"RSA mechanism.",
( unsigned long int ) xKeyType ) );
xResult = CKR_KEY_TYPE_INCONSISTENT;
}
}
else if( pMechanism->mechanism == CKM_ECDSA )
{
if( ( xKeyType != MBEDTLS_PK_ECDSA ) && ( xKeyType != MBEDTLS_PK_ECKEY ) )
{
LogError( ( "Failed to initialize verify operation. "
"Verification key type (0x%0lX) does not match "
"ECDSA mechanism.",
( unsigned long int ) xKeyType ) );
xResult = CKR_KEY_TYPE_INCONSISTENT;
}
}
else
{
LogError( ( "Failed to initialize verify operation. Unsupported "
"mechanism type 0x%0lX",
( unsigned long int ) pMechanism->mechanism ) );
xResult = CKR_MECHANISM_INVALID;
}
}
if( xResult == CKR_OK )
{
LogDebug( ( "Verify mechanism set to 0x%0lX.", ( unsigned long int ) pMechanism->mechanism ) );
pxSession->xOperationVerifyMechanism = pMechanism->mechanism;
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_verifyinit] */
/**
* @brief Verifies a signature on single-part data.
*
* \note C_VerifyInit() must have been called previously.
*
* \note C_Verify() parameters are shared by a session. Calling
* C_VerifyInit() & C_Verify() with the same session across different
* tasks may lead to unexpected results.
*
*
* @param[in] hSession Handle of a valid PKCS #11 session.
* @param[in] pData Data whose signature is to be verified.
* Note: In this implementation, this is generally
* expected to be the hash of the data.
* @param[in] ulDataLen Length of pData, in bytes.
* @param[in] pSignature The signature to be verified.
* @param[in] ulSignatureLen Length of pSignature, in bytes.
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_verify] */
CK_DECLARE_FUNCTION( CK_RV, C_Verify )( CK_SESSION_HANDLE hSession,
CK_BYTE_PTR pData,
CK_ULONG ulDataLen,
CK_BYTE_PTR pSignature,
CK_ULONG ulSignatureLen )
{
P11Session_t * pxSessionObj;
int32_t lMbedTLSResult;
CK_RV xResult = CKR_OK;
pxSessionObj = prvSessionPointerFromHandle( hSession );
xResult = prvCheckValidSessionAndModule( pxSessionObj );
/* Check parameters. */
if( ( NULL == pData ) ||
( NULL == pSignature ) )
{
LogError( ( "Failed verify operation. Received a NULL pointer." ) );
xResult = CKR_ARGUMENTS_BAD;
}
/* Check that the signature and data are the expected length.
* These PKCS #11 mechanism expect data to be pre-hashed/formatted. */
if( xResult == CKR_OK )
{
if( pxSessionObj->xOperationVerifyMechanism == CKM_RSA_X_509 )
{
LogDebug( ( "CKM_RSA_X_509 verify mechanism." ) );
if( ulDataLen != pkcs11RSA_2048_SIGNATURE_LENGTH )
{
LogError( ( "Failed verify operation. Data Length was too "
"short for pkcs11RSA_2048_SIGNATURE_LENGTH." ) );
xResult = CKR_DATA_LEN_RANGE;
}
if( ulSignatureLen != pkcs11RSA_2048_SIGNATURE_LENGTH )
{
LogError( ( "Failed verify operation. Signature Length was too "
"short for pkcs11RSA_2048_SIGNATURE_LENGTH." ) );
xResult = CKR_SIGNATURE_LEN_RANGE;
}
}
else if( pxSessionObj->xOperationVerifyMechanism == CKM_ECDSA )
{
LogDebug( ( "CKM_ECDSA verify mechanism." ) );
if( ulDataLen != pkcs11SHA256_DIGEST_LENGTH )
{
LogError( ( "Failed verify operation. Data Length was too "
"short for pkcs11SHA256_DIGEST_LENGTH." ) );
xResult = CKR_DATA_LEN_RANGE;
}
if( ulSignatureLen != pkcs11ECDSA_P256_SIGNATURE_LENGTH )
{
LogError( ( "Failed verify operation. Data Length was too "
"short for pkcs11ECDSA_P256_SIGNATURE_LENGTH." ) );
xResult = CKR_SIGNATURE_LEN_RANGE;
}
}
else
{
LogError( ( "Failed verify operation. A C_Verify operation must be "
"initialized by a preceding call to C_VerifyInit. "
"This must happen before every call to C_Verify." ) );
xResult = CKR_OPERATION_NOT_INITIALIZED;
}
}
/* Verification step. */
if( xResult == CKR_OK )
{
/* Perform an RSA verification. */
if( pxSessionObj->xOperationVerifyMechanism == CKM_RSA_X_509 )
{
if( 0 == mbedtls_mutex_lock( &pxSessionObj->xVerifyMutex ) )
{
/* Verify the signature. If a public key is present, use it. */
if( NULL != pxSessionObj->xVerifyKey.pk_ctx )
{
lMbedTLSResult = mbedtls_pk_verify( &pxSessionObj->xVerifyKey,
MBEDTLS_MD_SHA256,
pData,
ulDataLen,
pSignature,
ulSignatureLen );
if( 0 != lMbedTLSResult )
{
LogError( ( "Failed verify operation. mbedtls_pk_verify "
"failed: mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
xResult = CKR_SIGNATURE_INVALID;
}
}
( void ) mbedtls_mutex_unlock( &pxSessionObj->xVerifyMutex );
}
else
{
LogError( ( "Failed verify operation. Could not take verify mutex." ) );
xResult = CKR_CANT_LOCK;
}
}
/* Perform an ECDSA verification. */
else if( pxSessionObj->xOperationVerifyMechanism == CKM_ECDSA )
{
/* An ECDSA signature is comprised of 2 components - R & S. C_Sign returns them one after another. */
mbedtls_ecdsa_context * pxEcdsaContext;
mbedtls_mpi xR;
mbedtls_mpi xS;
mbedtls_mpi_init( &xR );
mbedtls_mpi_init( &xS );
lMbedTLSResult = mbedtls_mpi_read_binary( &xR, &pSignature[ 0 ], 32 );
if( lMbedTLSResult != 0 )
{
xResult = CKR_SIGNATURE_INVALID;
LogError( ( "Failed verify operation. Failed to parse R in EC "
"signature: mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
}
else
{
lMbedTLSResult = mbedtls_mpi_read_binary( &xS, &pSignature[ 32 ], 32 );
if( lMbedTLSResult != 0 )
{
xResult = CKR_SIGNATURE_INVALID;
LogError( ( "Failed verify operation. Failed to parse S in "
"EC signature: mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
}
}
if( xResult == CKR_OK )
{
if( 0 == mbedtls_mutex_lock( &pxSessionObj->xVerifyMutex ) )
{
/* Verify the signature. If a public key is present, use it. */
if( NULL != pxSessionObj->xVerifyKey.pk_ctx )
{
pxEcdsaContext = pxSessionObj->xVerifyKey.pk_ctx;
lMbedTLSResult = mbedtls_ecdsa_verify( &pxEcdsaContext->grp, pData, ulDataLen, &pxEcdsaContext->Q, &xR, &xS );
}
( void ) mbedtls_mutex_unlock( &pxSessionObj->xVerifyMutex );
if( lMbedTLSResult != 0 )
{
xResult = CKR_SIGNATURE_INVALID;
LogError( ( "Failed verify operation. "
"mbedtls_ecdsa_verify failed: mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
}
}
else
{
LogError( ( "Failed verify operation. Could not take verify mutex." ) );
xResult = CKR_CANT_LOCK;
}
}
mbedtls_mpi_free( &xR );
mbedtls_mpi_free( &xS );
}
else
{
LogError( ( "Failed verify operation. Received an unexpected mechanism." ) );
}
}
if( xResult != CKR_SESSION_HANDLE_INVALID )
{
LogDebug( ( "Reset Verify mechanism to pkcs11NO_OPERATION." ) );
pxSessionObj->xOperationVerifyMechanism = pkcs11NO_OPERATION;
}
/* Return the signature verification result. */
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_verify] */
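/*
 * Illustrative usage sketch (not part of the original module, compiled out):
 * verifying a 64-byte R & S formatted ECDSA signature over a 32-byte SHA-256
 * digest with C_VerifyInit() and C_Verify(). The public key handle is assumed
 * to have been obtained from a prior object search.
 */
#if 0
static CK_RV prvExampleVerifyDigest( CK_SESSION_HANDLE xSession,
                                     CK_OBJECT_HANDLE hPublicKey,
                                     CK_BYTE pucDigest[ pkcs11SHA256_DIGEST_LENGTH ],
                                     CK_BYTE pucSignature[ pkcs11ECDSA_P256_SIGNATURE_LENGTH ] )
{
    CK_RV xResult;
    CK_MECHANISM xMechanism = { CKM_ECDSA, NULL, 0 };

    xResult = C_VerifyInit( xSession, &xMechanism, hPublicKey );

    if( xResult == CKR_OK )
    {
        /* CKR_OK means the signature matched; CKR_SIGNATURE_INVALID otherwise. */
        xResult = C_Verify( xSession, pucDigest, pkcs11SHA256_DIGEST_LENGTH,
                            pucSignature, pkcs11ECDSA_P256_SIGNATURE_LENGTH );
    }

    return xResult;
}
#endif /* Usage sketch only. */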
/**
* @brief Checks that the private key template provided for C_GenerateKeyPair
* contains all necessary attributes, and does not contain any invalid
* attributes.
*
* @param[out] ppxLabel Pointer to PKCS #11 label.
* @param[in] pxAttribute PKCS #11 attribute to search.
* @param[in,out] pulAttributeMap Bitmap used to track whether all required
* attributes are present in the key generation template.
* @return CKR_OK if successful.
*/
static CK_RV prvCheckGenerateKeyPairPrivateTemplate( CK_ATTRIBUTE ** ppxLabel,
CK_ATTRIBUTE * pxAttribute,
uint32_t * pulAttributeMap )
{
CK_RV xResult = CKR_OK;
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
CK_BBOOL xBool = ( CK_BBOOL ) CK_FALSE;
CK_ULONG xTemp = 0;
switch( pxAttribute->type )
{
case ( CKA_LABEL ):
*ppxLabel = pxAttribute;
*pulAttributeMap |= LABEL_IN_TEMPLATE;
break;
case ( CKA_KEY_TYPE ):
if( pxAttribute->ulValueLen == sizeof( CK_ULONG ) )
{
( void ) memcpy( &xTemp, pxAttribute->pValue, sizeof( CK_ULONG ) );
}
if( xTemp != CKK_EC )
{
LogError( ( "Failed parsing private key template. Only EC key "
"pair generation is supported." ) );
xResult = CKR_TEMPLATE_INCONSISTENT;
}
break;
case ( CKA_SIGN ):
if( pxAttribute->ulValueLen == sizeof( CK_BBOOL ) )
{
( void ) memcpy( &xBool, pxAttribute->pValue, sizeof( CK_BBOOL ) );
}
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xBool != ( CK_BBOOL ) CK_TRUE )
{
LogError( ( "Failed parsing private key template. Generating "
"private keys that cannot sign is not supported." ) );
xResult = CKR_TEMPLATE_INCONSISTENT;
}
LogDebug( ( "CKA_SIGN was in template." ) );
*pulAttributeMap |= SIGN_IN_TEMPLATE;
break;
case ( CKA_PRIVATE ):
if( pxAttribute->ulValueLen == sizeof( CK_BBOOL ) )
{
( void ) memcpy( &xBool, pxAttribute->pValue, sizeof( CK_BBOOL ) );
}
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xBool != ( CK_BBOOL ) CK_TRUE )
{
LogError( ( "Failed parsing private key template. Private must "
"be set to true in order to generate a private key." ) );
xResult = CKR_TEMPLATE_INCONSISTENT;
}
LogDebug( ( "CKA_PRIVATE was in template." ) );
*pulAttributeMap |= PRIVATE_IN_TEMPLATE;
break;
case ( CKA_TOKEN ):
if( pxAttribute->ulValueLen == sizeof( CK_BBOOL ) )
{
( void ) memcpy( &xBool, pxAttribute->pValue, sizeof( CK_BBOOL ) );
}
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xBool != ( CK_BBOOL ) CK_TRUE )
{
LogError( ( "Failed parsing private key template. Generating "
"private keys that are false for attribute CKA_TOKEN "
"is not supported." ) );
xResult = CKR_TEMPLATE_INCONSISTENT;
}
break;
default:
LogError( ( "Failed parsing private key template. Found an unknown "
"attribute type." ) );
xResult = CKR_ATTRIBUTE_TYPE_INVALID;
break;
}
return xResult;
}
/**
* @brief Checks that the public key template provided for C_GenerateKeyPair
* contains all necessary attributes, and does not contain any invalid
* attributes.
*
* @param[out] ppxLabel Pointer to PKCS #11 label.
* @param[in] pxAttribute PKCS #11 attribute to search.
* @param[in,out] pulAttributeMap Bitmap used to track whether all required
* attributes are present in the key generation template.
*
* @return CKR_OK if successful.
*/
static CK_RV prvCheckGenerateKeyPairPublicTemplate( CK_ATTRIBUTE ** ppxLabel,
CK_ATTRIBUTE * pxAttribute,
uint32_t * pulAttributeMap )
{
CK_RV xResult = CKR_OK;
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
CK_BBOOL xBool = ( CK_BBOOL ) CK_TRUE;
CK_KEY_TYPE xKeyType = 0xFFFFFFFFUL;
const CK_BYTE pxEcParams[] = pkcs11DER_ENCODED_OID_P256;
const CK_BYTE * pxEcAttVal = NULL;
switch( pxAttribute->type )
{
case ( CKA_LABEL ):
*ppxLabel = pxAttribute;
*pulAttributeMap |= LABEL_IN_TEMPLATE;
break;
case ( CKA_KEY_TYPE ):
if( pxAttribute->ulValueLen == sizeof( CK_KEY_TYPE ) )
{
( void ) memcpy( &xKeyType, ( CK_KEY_TYPE * ) pxAttribute->pValue, sizeof( CK_KEY_TYPE ) );
}
if( xKeyType != CKK_EC )
{
LogError( ( "Failed parsing public key template. Only EC key "
"pair generation is supported." ) );
xResult = CKR_TEMPLATE_INCONSISTENT;
}
break;
case ( CKA_EC_PARAMS ):
pxEcAttVal = ( CK_BYTE * ) pxAttribute->pValue;
if( pxAttribute->ulValueLen == sizeof( pxEcParams ) )
{
if( memcmp( pxEcParams, pxEcAttVal, sizeof( pxEcParams ) ) != 0 )
{
LogError( ( "Failed parsing public key template. Only P-256 key "
"generation is supported." ) );
xResult = CKR_TEMPLATE_INCONSISTENT;
}
}
*pulAttributeMap |= EC_PARAMS_IN_TEMPLATE;
break;
case ( CKA_VERIFY ):
if( pxAttribute->ulValueLen == sizeof( CK_BBOOL ) )
{
( void ) memcpy( &xBool, pxAttribute->pValue, sizeof( CK_BBOOL ) );
}
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xBool != ( CK_BBOOL ) CK_TRUE )
{
LogError( ( "Failed parsing public key template. Generating public "
"keys that have a value of CK_FALSE for attribute "
"CKA_VERIFY is not supported." ) );
xResult = CKR_TEMPLATE_INCONSISTENT;
}
*pulAttributeMap |= VERIFY_IN_TEMPLATE;
break;
case ( CKA_TOKEN ):
if( pxAttribute->ulValueLen == sizeof( CK_BBOOL ) )
{
( void ) memcpy( &xBool, pxAttribute->pValue, sizeof( CK_BBOOL ) );
}
/* See explanation in prvCheckValidSessionAndModule for this exception. */
/* coverity[misra_c_2012_rule_10_5_violation] */
if( xBool != ( CK_BBOOL ) CK_TRUE )
{
xResult = CKR_TEMPLATE_INCONSISTENT;
}
break;
default:
xResult = CKR_TEMPLATE_INCONSISTENT;
break;
}
return xResult;
}
/**
* @brief Generates a public-key/private-key pair.
*
* This port only supports generating elliptic curve P-256
* key pairs.
*
* @param[in] hSession Handle of a valid PKCS #11 session.
* @param[in] pMechanism Pointer to a mechanism. At this time,
* CKM_EC_KEY_PAIR_GEN is the only supported mechanism.
* @param[in] pPublicKeyTemplate Pointer to a list of attributes that the generated
* public key should possess.
* Public key template must have the following attributes:
* - CKA_LABEL
* - Label should be no longer than pkcs11configMAX_LABEL_LENGTH
* and must be supported by port's PKCS #11 PAL.
* - CKA_EC_PARAMS
* - Must equal pkcs11DER_ENCODED_OID_P256.
* Only P-256 keys are supported.
* - CKA_VERIFY
* - Must be set to true. Only public keys used
* for verification are supported.
* Public key templates may have the following attributes:
* - CKA_KEY_TYPE
* - Must be set to CKK_EC. Only elliptic curve key
* generation is supported.
* - CKA_TOKEN
* - Must be set to CK_TRUE.
* @param[in] ulPublicKeyAttributeCount Number of attributes in pPublicKeyTemplate.
* @param[in] pPrivateKeyTemplate Pointer to a list of attributes that the generated
* private key should possess.
* Private key template must have the following attributes:
* - CKA_LABEL
* - Label should be no longer than pkcs11configMAX_LABEL_LENGTH
* and must be supported by port's PKCS #11 PAL.
* - CKA_PRIVATE
* - Must be set to true.
* - CKA_SIGN
* - Must be set to true. Only private keys used
* for signing are supported.
* Private key template may have the following attributes:
* - CKA_KEY_TYPE
* - Must be set to CKK_EC. Only elliptic curve key
* generation is supported.
* - CKA_TOKEN
* - Must be set to CK_TRUE.
*
* @param[in] ulPrivateKeyAttributeCount Number of attributes in pPrivateKeyTemplate.
* @param[out] phPublicKey Pointer to the handle of the public key to be created.
* @param[out] phPrivateKey Pointer to the handle of the private key to be created.
*
* \note Not all attributes specified by the PKCS #11 standard are supported.
* \note CKA_LOCAL attribute is not supported.
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_generatekeypair] */
CK_DECLARE_FUNCTION( CK_RV, C_GenerateKeyPair )( CK_SESSION_HANDLE hSession,
CK_MECHANISM_PTR pMechanism,
CK_ATTRIBUTE_PTR pPublicKeyTemplate,
CK_ULONG ulPublicKeyAttributeCount,
CK_ATTRIBUTE_PTR pPrivateKeyTemplate,
CK_ULONG ulPrivateKeyAttributeCount,
CK_OBJECT_HANDLE_PTR phPublicKey,
CK_OBJECT_HANDLE_PTR phPrivateKey )
{
uint8_t * pucDerFile = mbedtls_calloc( 1, pkcs11KEY_GEN_MAX_DER_SIZE );
int32_t lMbedTLSResult = 0;
uint32_t ulIndex = 0;
mbedtls_pk_context xCtx = { 0 };
CK_ATTRIBUTE_PTR pxPrivateLabel = NULL;
CK_ATTRIBUTE_PTR pxPublicLabel = NULL;
CK_OBJECT_HANDLE xPalPublic = CK_INVALID_HANDLE;
CK_OBJECT_HANDLE xPalPrivate = CK_INVALID_HANDLE;
uint32_t xPublicRequiredAttributeMap = ( LABEL_IN_TEMPLATE | EC_PARAMS_IN_TEMPLATE | VERIFY_IN_TEMPLATE );
uint32_t xPrivateRequiredAttributeMap = ( LABEL_IN_TEMPLATE | PRIVATE_IN_TEMPLATE | SIGN_IN_TEMPLATE );
uint32_t xAttributeMap = 0;
CK_RV xAddObjectListResult = CKR_OK;
const P11Session_t * pxSession = prvSessionPointerFromHandle( hSession );
CK_RV xResult = prvCheckValidSessionAndModule( pxSession );
#ifdef pkcs11configSUPPRESS_ECDSA_MECHANISM
if( xResult == CKR_OK )
{
LogDebug( ( "ECDSA Mechanism is suppressed on this port." ) );
xResult = CKR_MECHANISM_INVALID;
}
#endif
if( xResult == CKR_OK )
{
if( ( pPublicKeyTemplate == NULL ) ||
( pPrivateKeyTemplate == NULL ) ||
( phPublicKey == NULL ) ||
( phPrivateKey == NULL ) ||
( pMechanism == NULL ) )
{
LogError( ( "Failed generating a key pair. One of the arguments "
"was NULL." ) );
xResult = CKR_ARGUMENTS_BAD;
}
}
if( xResult == CKR_OK )
{
if( pucDerFile == NULL )
{
LogError( ( "Failed generating a key pair. Could not allocated a "
"buffer of size %u bytes.", ( unsigned int ) pkcs11KEY_GEN_MAX_DER_SIZE ) );
xResult = CKR_HOST_MEMORY;
}
}
if( xResult == CKR_OK )
{
if( CKM_EC_KEY_PAIR_GEN != pMechanism->mechanism )
{
LogError( ( "Failed generating a key pair. CKM_EC_KEY_PAIR_GEN is "
"the only valid key generation mechanism currently." ) );
xResult = CKR_MECHANISM_INVALID;
}
}
if( xResult == CKR_OK )
{
for( ulIndex = 0; ulIndex < ulPrivateKeyAttributeCount; ++ulIndex )
{
xResult = prvCheckGenerateKeyPairPrivateTemplate( &pxPrivateLabel,
&pPrivateKeyTemplate[ ulIndex ],
&xAttributeMap );
if( xResult != CKR_OK )
{
break;
}
}
if( ( xResult == CKR_OK ) && ( ( xAttributeMap & xPrivateRequiredAttributeMap ) != xPrivateRequiredAttributeMap ) )
{
LogError( ( "Failed generating a key pair. Attributes were missing "
"in the private key template." ) );
xResult = CKR_TEMPLATE_INCOMPLETE;
}
}
if( xResult == CKR_OK )
{
xAttributeMap = 0;
for( ulIndex = 0; ulIndex < ulPublicKeyAttributeCount; ++ulIndex )
{
xResult = prvCheckGenerateKeyPairPublicTemplate( &pxPublicLabel,
&pPublicKeyTemplate[ ulIndex ],
&xAttributeMap );
if( xResult != CKR_OK )
{
break;
}
}
if( ( xResult == CKR_OK ) && ( ( xAttributeMap & xPublicRequiredAttributeMap ) != xPublicRequiredAttributeMap ) )
{
LogError( ( "Failed generating a key pair. Attributes were missing "
"in the public key template." ) );
xResult = CKR_TEMPLATE_INCOMPLETE;
}
}
if( xResult == CKR_OK )
{
mbedtls_pk_init( &xCtx );
lMbedTLSResult = mbedtls_pk_setup( &xCtx, mbedtls_pk_info_from_type( MBEDTLS_PK_ECKEY ) );
if( lMbedTLSResult != 0 )
{
LogError( ( "Failed generating a key pair. mbedtls_pk_setup failed: "
"mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
xResult = CKR_FUNCTION_FAILED;
}
else
{
LogDebug( ( "mbedtls_pk_setup was successful." ) );
}
}
if( xResult == CKR_OK )
{
lMbedTLSResult = mbedtls_ecp_gen_key( MBEDTLS_ECP_DP_SECP256R1,
mbedtls_pk_ec( xCtx ),
mbedtls_ctr_drbg_random,
&xP11Context.xMbedDrbgCtx );
if( 0 != lMbedTLSResult )
{
LogError( ( "Failed generating a key pair. mbedtls_ecp_gen_key "
"failed: mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
xResult = CKR_FUNCTION_FAILED;
}
}
if( xResult == CKR_OK )
{
lMbedTLSResult = mbedtls_pk_write_pubkey_der( &xCtx, pucDerFile, pkcs11KEY_GEN_MAX_DER_SIZE );
if( lMbedTLSResult > 0 )
{
xPalPublic = PKCS11_PAL_SaveObject( pxPublicLabel, pucDerFile + pkcs11KEY_GEN_MAX_DER_SIZE - lMbedTLSResult, ( uint32_t ) lMbedTLSResult );
LogDebug( ( "PKCS11_PAL_SaveObject returned a %lu PAL handle value "
"for the public key.", ( unsigned long int ) xPalPublic ) );
}
else
{
LogError( ( "Failed generating a key pair. "
"mbedtls_pk_write_pubkey_der failed: mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
xResult = CKR_GENERAL_ERROR;
}
}
if( xResult == CKR_OK )
{
lMbedTLSResult = mbedtls_pk_write_key_der( &xCtx, pucDerFile, pkcs11KEY_GEN_MAX_DER_SIZE );
if( lMbedTLSResult > 0 )
{
xPalPrivate = PKCS11_PAL_SaveObject( pxPrivateLabel, pucDerFile + pkcs11KEY_GEN_MAX_DER_SIZE - lMbedTLSResult, ( uint32_t ) lMbedTLSResult );
LogDebug( ( "PKCS11_PAL_SaveObject returned a %lu PAL handle value "
"for the private key.", ( unsigned long int ) xPalPrivate ) );
}
else
{
LogError( ( "Failed generating a key pair. mbedtls_pk_write_key_der "
"failed: mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
xResult = CKR_GENERAL_ERROR;
}
}
if( ( xPalPublic != CK_INVALID_HANDLE ) && ( xPalPrivate != CK_INVALID_HANDLE ) )
{
xAddObjectListResult = prvAddObjectToList( xPalPrivate, phPrivateKey, pxPrivateLabel->pValue, pxPrivateLabel->ulValueLen );
if( xAddObjectListResult == CKR_OK )
{
xAddObjectListResult = prvAddObjectToList( xPalPublic, phPublicKey, pxPublicLabel->pValue, pxPublicLabel->ulValueLen );
}
if( xAddObjectListResult != CKR_OK )
{
LogError( ( "Could not add private key to object list failed with (0x%0lX). Cleaning up PAL objects.", xResult ) );
xResult = PKCS11_PAL_DestroyObject( xPalPrivate );
if( xResult != CKR_OK )
{
LogError( ( "Could not clean up private key. PKCS11_PAL_DestroyObject failed with (0x%0lX).", xResult ) );
}
xResult = prvDeleteObjectFromList( xPalPrivate );
if( xResult != CKR_OK )
{
LogError( ( "Could not remove private key object from internal list. Failed with (0x%0lX).", xResult ) );
}
xResult = PKCS11_PAL_DestroyObject( xPalPublic );
if( xResult != CKR_OK )
{
LogError( ( "Could not clean up public key. PKCS11_PAL_DestroyObject failed with (0x%0lX).", xResult ) );
}
xResult = prvDeleteObjectFromList( xPalPublic );
if( xResult != CKR_OK )
{
LogError( ( "Could not remove private key object from internal list. Failed with (0x%0lX).", xResult ) );
}
if( xResult == CKR_OK )
{
xResult = xAddObjectListResult;
}
}
}
/* Clean up. */
mbedtls_free( pucDerFile );
mbedtls_pk_free( &xCtx );
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_generatekeypair] */
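/*
 * Illustrative usage sketch (not part of the original module, compiled out):
 * minimal public and private key templates for C_GenerateKeyPair() with
 * CKM_EC_KEY_PAIR_GEN, following the required attributes described above.
 * The label strings are hypothetical placeholders.
 */
#if 0
static CK_RV prvExampleGenerateKeyPair( CK_SESSION_HANDLE xSession,
                                        CK_OBJECT_HANDLE_PTR pxPublicKey,
                                        CK_OBJECT_HANDLE_PTR pxPrivateKey )
{
    CK_MECHANISM xMechanism = { CKM_EC_KEY_PAIR_GEN, NULL, 0 };
    CK_BYTE pucEcParams[] = pkcs11DER_ENCODED_OID_P256;
    CK_KEY_TYPE xKeyType = CKK_EC;
    CK_BBOOL xTrue = CK_TRUE;
    static CK_BYTE pucPublicLabel[] = "Example Public Key";   /* Hypothetical. */
    static CK_BYTE pucPrivateLabel[] = "Example Private Key"; /* Hypothetical. */

    CK_ATTRIBUTE xPublicTemplate[] =
    {
        { CKA_KEY_TYPE,  &xKeyType,      sizeof( xKeyType )             },
        { CKA_VERIFY,    &xTrue,         sizeof( xTrue )                },
        { CKA_EC_PARAMS, pucEcParams,    sizeof( pucEcParams )          },
        { CKA_LABEL,     pucPublicLabel, sizeof( pucPublicLabel ) - 1UL }
    };
    CK_ATTRIBUTE xPrivateTemplate[] =
    {
        { CKA_KEY_TYPE, &xKeyType,       sizeof( xKeyType )              },
        { CKA_SIGN,     &xTrue,          sizeof( xTrue )                 },
        { CKA_PRIVATE,  &xTrue,          sizeof( xTrue )                 },
        { CKA_LABEL,    pucPrivateLabel, sizeof( pucPrivateLabel ) - 1UL }
    };

    return C_GenerateKeyPair( xSession, &xMechanism,
                              xPublicTemplate,
                              sizeof( xPublicTemplate ) / sizeof( CK_ATTRIBUTE ),
                              xPrivateTemplate,
                              sizeof( xPrivateTemplate ) / sizeof( CK_ATTRIBUTE ),
                              pxPublicKey, pxPrivateKey );
}
#endif /* Usage sketch only. */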
/**
* @brief Generates random data.
*
* @param[in] hSession Handle of a valid PKCS #11 session.
* @param[out] RandomData Pointer to location that random data will be placed.
* It is the responsibility of the application to allocate
* this memory.
* @param[in] ulRandomLen Length of data (in bytes) to be generated.
*
* @return CKR_OK if successful.
*/
/* @[declare_pkcs11_mbedtls_c_generate_random] */
CK_DECLARE_FUNCTION( CK_RV, C_GenerateRandom )( CK_SESSION_HANDLE hSession,
CK_BYTE_PTR RandomData,
CK_ULONG ulRandomLen )
{
CK_RV xResult = CKR_OK;
int32_t lMbedTLSResult = 0;
const P11Session_t * pxSession = prvSessionPointerFromHandle( hSession );
xResult = prvCheckValidSessionAndModule( pxSession );
if( ( NULL == RandomData ) ||
( ulRandomLen == 0UL ) )
{
LogError( ( "Failed to generate random bytes. The buffer to store "
"random numbers in was NULL or the length of the buffer was 0." ) );
xResult = CKR_ARGUMENTS_BAD;
}
if( xResult == CKR_OK )
{
lMbedTLSResult = mbedtls_ctr_drbg_random( &xP11Context.xMbedDrbgCtx, RandomData, ulRandomLen );
if( lMbedTLSResult != 0 )
{
LogError( ( "Failed to generate random bytes. mbed TLS DRBG failed "
"to generate a random number: mbed TLS error = %s : %s.",
mbedtlsHighLevelCodeOrDefault( lMbedTLSResult ),
mbedtlsLowLevelCodeOrDefault( lMbedTLSResult ) ) );
xResult = CKR_FUNCTION_FAILED;
}
else
{
LogDebug( ( "Successfully generated %lu random bytes.", ( unsigned long int ) ulRandomLen ) );
}
}
return xResult;
}
/* @[declare_pkcs11_mbedtls_c_generate_random] */
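/*
 * Illustrative usage sketch (not part of the original module, compiled out):
 * filling a small, hypothetical nonce buffer with C_GenerateRandom().
 */
#if 0
static CK_RV prvExampleGenerateNonce( CK_SESSION_HANDLE xSession,
                                      CK_BYTE pucNonce[ 16 ] )
{
    /* The underlying DRBG is seeded when the module is initialized. */
    return C_GenerateRandom( xSession, pucNonce, 16UL );
}
#endif /* Usage sketch only. */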
|
14860.c | /****************************************************************************
* boards/arm/stm32f0l0g0/stm32f072-discovery/src/board_buttons.c
*
* Copyright (C) 2017 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <[email protected]>
* Alan Carvalho de Assis <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <errno.h>
#include <nuttx/arch.h>
#include <nuttx/board.h>
#include <arch/board/board.h>
#include "stm32f072-discovery.h"
#ifdef CONFIG_ARCH_BUTTONS
/****************************************************************************
* Private Data
****************************************************************************/
/* Pin configuration for each STM32F072 Discovery button. This array is
* indexed by the BUTTON_* definitions in board.h
*/
static const uint32_t g_buttons[NUM_BUTTONS] =
{
GPIO_BTN_USER
};
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: board_button_initialize
*
* Description:
* board_button_initialize() must be called to initialize button resources.
* After that, board_buttons() may be called to collect the current state
* of all buttons or board_button_irq() may be called to register button
* interrupt handlers.
*
****************************************************************************/
uint32_t board_button_initialize(void)
{
int i;
/* Configure the GPIO pins as inputs. NOTE that EXTI interrupts are
* configured for all pins.
*/
for (i = 0; i < NUM_BUTTONS; i++)
{
stm32_configgpio(g_buttons[i]);
}
return NUM_BUTTONS;
}
/****************************************************************************
* Name: board_buttons
****************************************************************************/
uint8_t board_buttons(void)
{
uint8_t ret = 0;
int i;
/* Check the state of each key */
for (i = 0; i < NUM_BUTTONS; i++)
{
/* A LOW value means that the key is pressed. */
bool released = stm32_gpioread(g_buttons[i]);
/* Accumulate the set of depressed (not released) keys */
if (!released)
{
ret |= (1 << i);
}
}
return ret;
}
/****************************************************************************
* Button support.
*
* Description:
* board_button_initialize() must be called to initialize button resources.
* After that, board_buttons() may be called to collect the current state
* of all buttons or board_button_irq() may be called to register button
* interrupt handlers.
*
* After board_button_initialize() has been called, board_buttons() may be
* called to collect the state of all buttons. board_buttons() returns an
* 8-bit bit set with each bit associated with a button. See the
* BUTTON_*_BIT definitions in board.h for the meaning of each bit.
*
* board_button_irq() may be called to register an interrupt handler that
* will be called when a button is depressed or released. The ID value is a
* button enumeration value that uniquely identifies a button resource. See
* the BUTTON_* definitions in board.h for the meaning of enumeration
* value.
*
****************************************************************************/
#ifdef CONFIG_ARCH_IRQBUTTONS
int board_button_irq(int id, xcpt_t irqhandler, FAR void *arg)
{
int ret = -EINVAL;
/* The following should be atomic */
if (id >= MIN_IRQBUTTON && id <= MAX_IRQBUTTON)
{
ret = stm32_gpiosetevent(g_buttons[id], true, true, true,
irqhandler, arg);
}
return ret;
}
#endif
#endif /* CONFIG_ARCH_BUTTONS */
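/****************************************************************************
 * Usage sketch (illustrative only, compiled out): how application code might
 * combine the interfaces above. BUTTON_USER and BUTTON_USER_BIT are assumed
 * to be provided by this board's board.h; the handler body is hypothetical.
 ****************************************************************************/

#if 0
static int example_button_handler(int irq, FAR void *context, FAR void *arg)
{
  /* Invoked on both press and release edges of the user button */

  return 0;
}

static void example_button_usage(void)
{
  uint8_t state;

  /* Configure the button GPIO, then sample the current state */

  (void)board_button_initialize();
  state = board_buttons();

  if ((state & BUTTON_USER_BIT) != 0)
    {
      /* The user button is currently pressed (the GPIO reads LOW) */
    }

#ifdef CONFIG_ARCH_IRQBUTTONS
  /* Get notified of future presses and releases */

  (void)board_button_irq(BUTTON_USER, example_button_handler, NULL);
#endif
}
#endif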
|
563810.c | #include <stdio.h>
#include <stdlib.h>
int main()
{
int n, i;
int *a;
printf("n = ");
scanf("%d", &n); // dimensiune vector
a = (int*) calloc(n, sizeof(int));
printf("Componente vector: \n");
for (i = 0; i < n; i++) {
scanf("%d", &a[i]);
}
for (i = 0; i < n; i++) {
printf("%d ", a[i]);
}
printf("\n");
free(a);
return 0;
} |
222197.c | /*-
* Copyright (C) 2012-2013 Michael Tuexen
* Copyright (C) 2012-2013 Irene Ruengeler
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the project nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: rtcweb.c,v 1.26 2012-07-17 13:50:02 tuexen Exp $
*/
/*
* gcc -Wall -std=c99 -pedantic -o rtcweb rtcweb.c -lusrsctp
*/
#include <sys/types.h>
#ifdef _WIN32
#define _CRT_SECURE_NO_WARNINGS
#include <winsock2.h>
#include <ws2tcpip.h>
#include <crtdbg.h>
#else
#include <sys/socket.h>
#include <sys/select.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <pthread.h>
#include <unistd.h>
#include <stdint.h>
#endif
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <usrsctp.h>
#include "programs_helper.h"
#define LINE_LENGTH (1024)
#define BUFFER_SIZE (1<<16)
#define NUMBER_OF_CHANNELS (100)
#define NUMBER_OF_STREAMS (100)
#define DATA_CHANNEL_PPID_CONTROL 50
#define DATA_CHANNEL_PPID_DOMSTRING 51
#define DATA_CHANNEL_PPID_BINARY 52
#define DATA_CHANNEL_CLOSED 0
#define DATA_CHANNEL_CONNECTING 1
#define DATA_CHANNEL_OPEN 2
#define DATA_CHANNEL_CLOSING 3
#define DATA_CHANNEL_FLAGS_SEND_REQ 0x00000001
#define DATA_CHANNEL_FLAGS_SEND_RSP 0x00000002
#define DATA_CHANNEL_FLAGS_SEND_ACK 0x00000004
struct channel {
uint32_t id;
uint32_t pr_value;
uint16_t pr_policy;
uint16_t i_stream;
uint16_t o_stream;
uint8_t unordered;
uint8_t state;
uint32_t flags;
};
struct peer_connection {
struct channel channels[NUMBER_OF_CHANNELS];
struct channel *i_stream_channel[NUMBER_OF_STREAMS];
struct channel *o_stream_channel[NUMBER_OF_STREAMS];
uint16_t o_stream_buffer[NUMBER_OF_STREAMS];
uint32_t o_stream_buffer_counter;
#ifdef _WIN32
CRITICAL_SECTION mutex;
#else
pthread_mutex_t mutex;
#endif
struct socket *sock;
} peer_connection;
#define DATA_CHANNEL_OPEN_REQUEST 0
#define DATA_CHANNEL_OPEN_RESPONSE 1
#define DATA_CHANNEL_ACK 2
#define DATA_CHANNEL_RELIABLE 0
#define DATA_CHANNEL_RELIABLE_STREAM 1
#define DATA_CHANNEL_UNRELIABLE 2
#define DATA_CHANNEL_PARTIAL_RELIABLE_REXMIT 3
#define DATA_CHANNEL_PARTIAL_RELIABLE_TIMED 4
#define DATA_CHANNEL_FLAG_OUT_OF_ORDER_ALLOWED 0x0001
#ifndef _WIN32
#define SCTP_PACKED __attribute__((packed))
#else
#pragma pack (push, 1)
#define SCTP_PACKED
#endif
#if defined(_WIN32) && !defined(__MINGW32__)
#pragma warning( push )
#pragma warning( disable : 4200 )
#endif /* defined(_WIN32) && !defined(__MINGW32__) */
struct rtcweb_datachannel_open_request {
uint8_t msg_type; /* DATA_CHANNEL_OPEN_REQUEST */
uint8_t channel_type;
uint16_t flags;
uint16_t reliability_params;
int16_t priority;
char label[];
} SCTP_PACKED;
#if defined(_WIN32) && !defined(__MINGW32__)
#pragma warning( pop )
#endif /* defined(_WIN32) && !defined(__MINGW32__) */
struct rtcweb_datachannel_open_response {
uint8_t msg_type; /* DATA_CHANNEL_OPEN_RESPONSE */
uint8_t error;
uint16_t flags;
uint16_t reverse_stream;
} SCTP_PACKED;
struct rtcweb_datachannel_ack {
uint8_t msg_type; /* DATA_CHANNEL_ACK */
} SCTP_PACKED;
#ifdef _WIN32
#pragma pack(pop)
#endif
#undef SCTP_PACKED
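/*
 * Handshake as implemented below: an OPEN_REQUEST is sent on a free outgoing
 * stream; the peer answers with an OPEN_RESPONSE whose reverse_stream names
 * its own outgoing stream; that is acknowledged with an ACK, or implicitly by
 * the first user message received on the stream.
 */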
static void
lock_peer_connection(struct peer_connection *);
static void
unlock_peer_connection(struct peer_connection *);
static void
init_peer_connection(struct peer_connection *pc)
{
uint32_t i;
struct channel *channel;
#ifdef _WIN32
InitializeCriticalSection(&(pc->mutex));
#else
pthread_mutex_init(&pc->mutex, NULL);
#endif
lock_peer_connection(pc);
for (i = 0; i < NUMBER_OF_CHANNELS; i++) {
channel = &(pc->channels[i]);
channel->id = i;
channel->state = DATA_CHANNEL_CLOSED;
channel->pr_policy = SCTP_PR_SCTP_NONE;
channel->pr_value = 0;
channel->i_stream = 0;
channel->o_stream = 0;
channel->unordered = 0;
channel->flags = 0;
}
for (i = 0; i < NUMBER_OF_STREAMS; i++) {
pc->i_stream_channel[i] = NULL;
pc->o_stream_channel[i] = NULL;
pc->o_stream_buffer[i] = 0;
}
pc->o_stream_buffer_counter = 0;
pc->sock = NULL;
unlock_peer_connection(pc);
}
static void
lock_peer_connection(struct peer_connection *pc)
{
#ifdef _WIN32
EnterCriticalSection(&(pc->mutex));
#else
pthread_mutex_lock(&pc->mutex);
#endif
}
static void
unlock_peer_connection(struct peer_connection *pc)
{
#ifdef _WIN32
LeaveCriticalSection(&(pc->mutex));
#else
pthread_mutex_unlock(&pc->mutex);
#endif
}
static struct channel *
find_channel_by_i_stream(struct peer_connection *pc, uint16_t i_stream)
{
if (i_stream < NUMBER_OF_STREAMS) {
return (pc->i_stream_channel[i_stream]);
} else {
return (NULL);
}
}
static struct channel *
find_channel_by_o_stream(struct peer_connection *pc, uint16_t o_stream)
{
if (o_stream < NUMBER_OF_STREAMS) {
return (pc->o_stream_channel[o_stream]);
} else {
return (NULL);
}
}
static struct channel *
find_free_channel(struct peer_connection *pc)
{
uint32_t i;
for (i = 0; i < NUMBER_OF_CHANNELS; i++) {
if (pc->channels[i].state == DATA_CHANNEL_CLOSED) {
break;
}
}
if (i == NUMBER_OF_CHANNELS) {
return (NULL);
} else {
return (&(pc->channels[i]));
}
}
static uint16_t
find_free_o_stream(struct peer_connection *pc)
{
struct sctp_status status;
uint32_t i, limit;
socklen_t len;
len = (socklen_t)sizeof(struct sctp_status);
if (usrsctp_getsockopt(pc->sock, IPPROTO_SCTP, SCTP_STATUS, &status, &len) < 0) {
perror("getsockopt");
return (0);
}
if (status.sstat_outstrms < NUMBER_OF_STREAMS) {
limit = status.sstat_outstrms;
} else {
limit = NUMBER_OF_STREAMS;
}
/* stream id 0 is reserved */
for (i = 1; i < limit; i++) {
if (pc->o_stream_channel[i] == NULL) {
break;
}
}
if (i == limit) {
return (0);
} else {
return ((uint16_t)i);
}
}
static void
request_more_o_streams(struct peer_connection *pc)
{
struct sctp_status status;
struct sctp_add_streams sas;
uint32_t i, o_streams_needed;
socklen_t len;
o_streams_needed = 0;
for (i = 0; i < NUMBER_OF_CHANNELS; i++) {
if ((pc->channels[i].state == DATA_CHANNEL_CONNECTING) &&
(pc->channels[i].o_stream == 0)) {
o_streams_needed++;
}
}
len = (socklen_t)sizeof(struct sctp_status);
if (usrsctp_getsockopt(pc->sock, IPPROTO_SCTP, SCTP_STATUS, &status, &len) < 0) {
perror("getsockopt");
return;
}
if (status.sstat_outstrms + o_streams_needed > NUMBER_OF_STREAMS) {
o_streams_needed = NUMBER_OF_STREAMS - status.sstat_outstrms;
}
if (o_streams_needed == 0) {
return;
}
memset(&sas, 0, sizeof(struct sctp_add_streams));
sas.sas_instrms = 0;
sas.sas_outstrms = (uint16_t)o_streams_needed; /* XXX error handling */
if (usrsctp_setsockopt(pc->sock, IPPROTO_SCTP, SCTP_ADD_STREAMS, &sas, (socklen_t)sizeof(struct sctp_add_streams)) < 0) {
perror("setsockopt");
}
return;
}
static int
send_open_request_message(struct socket *sock, uint16_t o_stream, uint8_t unordered, uint16_t pr_policy, uint32_t pr_value)
{
/* XXX: This should be encoded in a better way */
struct rtcweb_datachannel_open_request req;
struct sctp_sndinfo sndinfo;
memset(&req, 0, sizeof(struct rtcweb_datachannel_open_request));
req.msg_type = DATA_CHANNEL_OPEN_REQUEST;
switch (pr_policy) {
case SCTP_PR_SCTP_NONE:
/* XXX: What about DATA_CHANNEL_RELIABLE_STREAM */
req.channel_type = DATA_CHANNEL_RELIABLE;
break;
case SCTP_PR_SCTP_TTL:
/* XXX: What about DATA_CHANNEL_UNRELIABLE */
req.channel_type = DATA_CHANNEL_PARTIAL_RELIABLE_TIMED;
break;
case SCTP_PR_SCTP_RTX:
req.channel_type = DATA_CHANNEL_PARTIAL_RELIABLE_REXMIT;
break;
default:
return (0);
}
req.flags = htons(0);
if (unordered) {
req.flags |= htons(DATA_CHANNEL_FLAG_OUT_OF_ORDER_ALLOWED);
}
req.reliability_params = htons((uint16_t)pr_value); /* XXX Why 16-bit */
req.priority = htons(0); /* XXX: add support */
memset(&sndinfo, 0, sizeof(struct sctp_sndinfo));
sndinfo.snd_sid = o_stream;
sndinfo.snd_flags = SCTP_EOR;
sndinfo.snd_ppid = htonl(DATA_CHANNEL_PPID_CONTROL);
if (usrsctp_sendv(sock,
&req, sizeof(struct rtcweb_datachannel_open_request),
NULL, 0,
&sndinfo, (socklen_t)sizeof(struct sctp_sndinfo),
SCTP_SENDV_SNDINFO, 0) < 0) {
perror("sctp_sendv");
return (0);
} else {
return (1);
}
}
static int
send_open_response_message(struct socket *sock, uint16_t o_stream, uint16_t i_stream)
{
/* XXX: This should be encoded in a better way */
struct rtcweb_datachannel_open_response rsp;
struct sctp_sndinfo sndinfo;
memset(&rsp, 0, sizeof(struct rtcweb_datachannel_open_response));
rsp.msg_type = DATA_CHANNEL_OPEN_RESPONSE;
rsp.error = 0;
rsp.flags = htons(0);
rsp.reverse_stream = htons(i_stream);
memset(&sndinfo, 0, sizeof(struct sctp_sndinfo));
sndinfo.snd_sid = o_stream;
sndinfo.snd_flags = SCTP_EOR;
sndinfo.snd_ppid = htonl(DATA_CHANNEL_PPID_CONTROL);
if (usrsctp_sendv(sock,
&rsp, sizeof(struct rtcweb_datachannel_open_response),
NULL, 0,
&sndinfo, (socklen_t)sizeof(struct sctp_sndinfo),
SCTP_SENDV_SNDINFO, 0) < 0) {
perror("sctp_sendv");
return (0);
} else {
return (1);
}
}
static int
send_open_ack_message(struct socket *sock, uint16_t o_stream)
{
/* XXX: This should be encoded in a better way */
struct rtcweb_datachannel_ack ack;
struct sctp_sndinfo sndinfo;
memset(&ack, 0, sizeof(struct rtcweb_datachannel_ack));
ack.msg_type = DATA_CHANNEL_ACK;
memset(&sndinfo, 0, sizeof(struct sctp_sndinfo));
sndinfo.snd_sid = o_stream;
sndinfo.snd_flags = SCTP_EOR;
sndinfo.snd_ppid = htonl(DATA_CHANNEL_PPID_CONTROL);
if (usrsctp_sendv(sock,
&ack, sizeof(struct rtcweb_datachannel_ack),
NULL, 0,
&sndinfo, (socklen_t)sizeof(struct sctp_sndinfo),
SCTP_SENDV_SNDINFO, 0) < 0) {
perror("sctp_sendv");
return (0);
} else {
return (1);
}
}
static void
send_deferred_messages(struct peer_connection *pc)
{
uint32_t i;
struct channel *channel;
for (i = 0; i < NUMBER_OF_CHANNELS; i++) {
channel = &(pc->channels[i]);
if (channel->flags & DATA_CHANNEL_FLAGS_SEND_REQ) {
if (send_open_request_message(pc->sock, channel->o_stream, channel->unordered, channel->pr_policy, channel->pr_value)) {
channel->flags &= ~DATA_CHANNEL_FLAGS_SEND_REQ;
} else {
if (errno != EAGAIN) {
/* XXX: error handling */
}
}
}
if (channel->flags & DATA_CHANNEL_FLAGS_SEND_RSP) {
if (send_open_response_message(pc->sock, channel->o_stream, channel->i_stream)) {
channel->flags &= ~DATA_CHANNEL_FLAGS_SEND_RSP;
} else {
if (errno != EAGAIN) {
/* XXX: error handling */
}
}
}
if (channel->flags & DATA_CHANNEL_FLAGS_SEND_ACK) {
if (send_open_ack_message(pc->sock, channel->o_stream)) {
channel->flags &= ~DATA_CHANNEL_FLAGS_SEND_ACK;
} else {
if (errno != EAGAIN) {
/* XXX: error handling */
}
}
}
}
return;
}
static struct channel *
open_channel(struct peer_connection *pc, uint8_t unordered, uint16_t pr_policy, uint32_t pr_value)
{
struct channel *channel;
uint16_t o_stream;
if ((pr_policy != SCTP_PR_SCTP_NONE) &&
(pr_policy != SCTP_PR_SCTP_TTL) &&
(pr_policy != SCTP_PR_SCTP_RTX)) {
return (NULL);
}
if ((unordered != 0) && (unordered != 1)) {
return (NULL);
}
if ((pr_policy == SCTP_PR_SCTP_NONE) && (pr_value != 0)) {
return (NULL);
}
if ((channel = find_free_channel(pc)) == NULL) {
return (NULL);
}
o_stream = find_free_o_stream(pc);
channel->state = DATA_CHANNEL_CONNECTING;
channel->unordered = unordered;
channel->pr_policy = pr_policy;
channel->pr_value = pr_value;
channel->o_stream = o_stream;
channel->flags = 0;
if (o_stream == 0) {
request_more_o_streams(pc);
} else {
if (send_open_request_message(pc->sock, o_stream, unordered, pr_policy, pr_value)) {
pc->o_stream_channel[o_stream] = channel;
} else {
if (errno == EAGAIN) {
pc->o_stream_channel[o_stream] = channel;
channel->flags |= DATA_CHANNEL_FLAGS_SEND_REQ;
} else {
channel->state = DATA_CHANNEL_CLOSED;
channel->unordered = 0;
channel->pr_policy = 0;
channel->pr_value = 0;
channel->o_stream = 0;
channel->flags = 0;
channel = NULL;
}
}
}
return (channel);
}
static int
send_user_message(struct peer_connection *pc, struct channel *channel, char *message, size_t length)
{
struct sctp_sendv_spa spa;
if (channel == NULL) {
return (0);
}
if ((channel->state != DATA_CHANNEL_OPEN) &&
(channel->state != DATA_CHANNEL_CONNECTING)) {
/* XXX: What to do in other states */
return (0);
}
memset(&spa, 0, sizeof(struct sctp_sendv_spa));
spa.sendv_sndinfo.snd_sid = channel->o_stream;
if ((channel->state == DATA_CHANNEL_OPEN) &&
(channel->unordered)) {
spa.sendv_sndinfo.snd_flags = SCTP_EOR | SCTP_UNORDERED;
} else {
spa.sendv_sndinfo.snd_flags = SCTP_EOR;
}
spa.sendv_sndinfo.snd_ppid = htonl(DATA_CHANNEL_PPID_DOMSTRING);
spa.sendv_flags = SCTP_SEND_SNDINFO_VALID;
if ((channel->pr_policy == SCTP_PR_SCTP_TTL) ||
(channel->pr_policy == SCTP_PR_SCTP_RTX)) {
spa.sendv_prinfo.pr_policy = channel->pr_policy;
spa.sendv_prinfo.pr_value = channel->pr_value;
spa.sendv_flags |= SCTP_SEND_PRINFO_VALID;
}
if (usrsctp_sendv(pc->sock,
message, length,
NULL, 0,
&spa, (socklen_t)sizeof(struct sctp_sendv_spa),
SCTP_SENDV_SPA, 0) < 0) {
perror("sctp_sendv");
return (0);
} else {
return (1);
}
}
static void
reset_outgoing_stream(struct peer_connection *pc, uint16_t o_stream)
{
uint32_t i;
for (i = 0; i < pc->o_stream_buffer_counter; i++) {
if (pc->o_stream_buffer[i] == o_stream) {
return;
}
}
pc->o_stream_buffer[pc->o_stream_buffer_counter++] = o_stream;
return;
}
static void
send_outgoing_stream_reset(struct peer_connection *pc)
{
struct sctp_reset_streams *srs;
uint32_t i;
size_t len;
if (pc->o_stream_buffer_counter == 0) {
return;
}
len = sizeof(sctp_assoc_t) + (2 + pc->o_stream_buffer_counter) * sizeof(uint16_t);
srs = (struct sctp_reset_streams *)malloc(len);
if (srs == NULL) {
return;
}
memset(srs, 0, len);
srs->srs_flags = SCTP_STREAM_RESET_OUTGOING;
srs->srs_number_streams = pc->o_stream_buffer_counter;
for (i = 0; i < pc->o_stream_buffer_counter; i++) {
srs->srs_stream_list[i] = pc->o_stream_buffer[i];
}
if (usrsctp_setsockopt(pc->sock, IPPROTO_SCTP, SCTP_RESET_STREAMS, srs, (socklen_t)len) < 0) {
perror("setsockopt");
} else {
for (i = 0; i < pc->o_stream_buffer_counter; i++) {
srs->srs_stream_list[i] = 0;
}
pc->o_stream_buffer_counter = 0;
}
free(srs);
return;
}
static void
close_channel(struct peer_connection *pc, struct channel *channel)
{
if (channel == NULL) {
return;
}
if (channel->state != DATA_CHANNEL_OPEN) {
return;
}
reset_outgoing_stream(pc, channel->o_stream);
send_outgoing_stream_reset(pc);
channel->state = DATA_CHANNEL_CLOSING;
return;
}
static void
handle_open_request_message(struct peer_connection *pc,
struct rtcweb_datachannel_open_request *req,
size_t length,
uint16_t i_stream)
{
struct channel *channel;
uint32_t pr_value;
uint16_t pr_policy;
uint16_t o_stream;
uint8_t unordered;
if ((channel = find_channel_by_i_stream(pc, i_stream))) {
printf("handle_open_request_message: channel %u is in state %u instead of CLOSED.\n",
channel->id, channel->state);
/* XXX: some error handling */
return;
}
if ((channel = find_free_channel(pc)) == NULL) {
/* XXX: some error handling */
return;
}
switch (req->channel_type) {
case DATA_CHANNEL_RELIABLE:
pr_policy = SCTP_PR_SCTP_NONE;
break;
/* XXX Doesn't make sense */
case DATA_CHANNEL_RELIABLE_STREAM:
pr_policy = SCTP_PR_SCTP_NONE;
break;
/* XXX Doesn't make sense */
case DATA_CHANNEL_UNRELIABLE:
pr_policy = SCTP_PR_SCTP_TTL;
break;
case DATA_CHANNEL_PARTIAL_RELIABLE_REXMIT:
pr_policy = SCTP_PR_SCTP_RTX;
break;
case DATA_CHANNEL_PARTIAL_RELIABLE_TIMED:
pr_policy = SCTP_PR_SCTP_TTL;
break;
default:
pr_policy = SCTP_PR_SCTP_NONE;
/* XXX error handling */
break;
}
pr_value = ntohs(req->reliability_params);
if (ntohs(req->flags) & DATA_CHANNEL_FLAG_OUT_OF_ORDER_ALLOWED) {
unordered = 1;
} else {
unordered = 0;
}
o_stream = find_free_o_stream(pc);
channel->state = DATA_CHANNEL_CONNECTING;
channel->unordered = unordered;
channel->pr_policy = pr_policy;
channel->pr_value = pr_value;
channel->i_stream = i_stream;
channel->o_stream = o_stream;
channel->flags = 0;
pc->i_stream_channel[i_stream] = channel;
if (o_stream == 0) {
request_more_o_streams(pc);
} else {
if (send_open_response_message(pc->sock, o_stream, i_stream)) {
pc->o_stream_channel[o_stream] = channel;
} else {
if (errno == EAGAIN) {
channel->flags |= DATA_CHANNEL_FLAGS_SEND_RSP;
pc->o_stream_channel[o_stream] = channel;
} else {
/* XXX: Signal error to the other end. */
pc->i_stream_channel[i_stream] = NULL;
channel->state = DATA_CHANNEL_CLOSED;
channel->unordered = 0;
channel->pr_policy = 0;
channel->pr_value = 0;
channel->i_stream = 0;
channel->o_stream = 0;
channel->flags = 0;
}
}
}
}
static void
handle_open_response_message(struct peer_connection *pc,
struct rtcweb_datachannel_open_response *rsp,
size_t length, uint16_t i_stream)
{
uint16_t o_stream;
struct channel *channel;
o_stream = ntohs(rsp->reverse_stream);
channel = find_channel_by_o_stream(pc, o_stream);
if (channel == NULL) {
/* XXX: improve error handling */
printf("handle_open_response_message: Can't find channel for outgoing steam %d.\n", o_stream);
return;
}
if (channel->state != DATA_CHANNEL_CONNECTING) {
/* XXX: improve error handling */
printf("handle_open_response_message: Channel with id %u for outgoing steam %u is in state %u.\n", channel->id, o_stream, channel->state);
return;
}
if (find_channel_by_i_stream(pc, i_stream)) {
/* XXX: improve error handling */
printf("handle_open_response_message: Channel collision for channel with id %u and streams (in/out) = (%u/%u).\n", channel->id, i_stream, o_stream);
return;
}
channel->i_stream = i_stream;
channel->state = DATA_CHANNEL_OPEN;
pc->i_stream_channel[i_stream] = channel;
if (send_open_ack_message(pc->sock, o_stream)) {
channel->flags = 0;
} else {
channel->flags |= DATA_CHANNEL_FLAGS_SEND_ACK;
}
return;
}
static void
handle_open_ack_message(struct peer_connection *pc,
struct rtcweb_datachannel_ack *ack,
size_t length, uint16_t i_stream)
{
struct channel *channel;
channel = find_channel_by_i_stream(pc, i_stream);
if (channel == NULL) {
/* XXX: some error handling */
return;
}
if (channel->state == DATA_CHANNEL_OPEN) {
return;
}
if (channel->state != DATA_CHANNEL_CONNECTING) {
/* XXX: error handling */
return;
}
channel->state = DATA_CHANNEL_OPEN;
return;
}
static void
handle_unknown_message(char *msg, size_t length, uint16_t i_stream)
{
/* XXX: Send an error message */
return;
}
static void
handle_data_message(struct peer_connection *pc,
char *buffer, size_t length, uint16_t i_stream)
{
struct channel *channel;
channel = find_channel_by_i_stream(pc, i_stream);
if (channel == NULL) {
/* XXX: Some error handling */
return;
}
if (channel->state == DATA_CHANNEL_CONNECTING) {
/* Implicit ACK */
channel->state = DATA_CHANNEL_OPEN;
}
if (channel->state != DATA_CHANNEL_OPEN) {
/* XXX: What about other states? */
/* XXX: Some error handling */
return;
} else {
/* Assuming DATA_CHANNEL_PPID_DOMSTRING */
/* XXX: Protect for non 0 terminated buffer */
printf("Message received of length %zu on channel with id %u: %.*s\n",
length, channel->id, (int)length, buffer);
}
return;
}
static void
handle_message(struct peer_connection *pc, char *buffer, size_t length, uint32_t ppid, uint16_t i_stream)
{
struct rtcweb_datachannel_open_request *req;
struct rtcweb_datachannel_open_response *rsp;
struct rtcweb_datachannel_ack *ack, *msg;
switch (ppid) {
case DATA_CHANNEL_PPID_CONTROL:
if (length < sizeof(struct rtcweb_datachannel_ack)) {
return;
}
msg = (struct rtcweb_datachannel_ack *)buffer;
switch (msg->msg_type) {
case DATA_CHANNEL_OPEN_REQUEST:
if (length < sizeof(struct rtcweb_datachannel_open_request)) {
/* XXX: error handling? */
return;
}
req = (struct rtcweb_datachannel_open_request *)buffer;
handle_open_request_message(pc, req, length, i_stream);
break;
case DATA_CHANNEL_OPEN_RESPONSE:
if (length < sizeof(struct rtcweb_datachannel_open_response)) {
/* XXX: error handling? */
return;
}
rsp = (struct rtcweb_datachannel_open_response *)buffer;
handle_open_response_message(pc, rsp, length, i_stream);
break;
case DATA_CHANNEL_ACK:
if (length < sizeof(struct rtcweb_datachannel_ack)) {
/* XXX: error handling? */
return;
}
ack = (struct rtcweb_datachannel_ack *)buffer;
handle_open_ack_message(pc, ack, length, i_stream);
break;
default:
handle_unknown_message(buffer, length, i_stream);
break;
}
break;
case DATA_CHANNEL_PPID_DOMSTRING:
case DATA_CHANNEL_PPID_BINARY:
handle_data_message(pc, buffer, length, i_stream);
break;
default:
printf("Message of length %zu, PPID %u on stream %u received.\n",
length, ppid, i_stream);
break;
}
}
static void
handle_association_change_event(struct sctp_assoc_change *sac)
{
unsigned int i, n;
printf("Association change ");
switch (sac->sac_state) {
case SCTP_COMM_UP:
printf("SCTP_COMM_UP");
break;
case SCTP_COMM_LOST:
printf("SCTP_COMM_LOST");
break;
case SCTP_RESTART:
printf("SCTP_RESTART");
break;
case SCTP_SHUTDOWN_COMP:
printf("SCTP_SHUTDOWN_COMP");
break;
case SCTP_CANT_STR_ASSOC:
printf("SCTP_CANT_STR_ASSOC");
break;
default:
printf("UNKNOWN");
break;
}
printf(", streams (in/out) = (%u/%u)",
sac->sac_inbound_streams, sac->sac_outbound_streams);
n = sac->sac_length - sizeof(struct sctp_assoc_change);
if (((sac->sac_state == SCTP_COMM_UP) ||
(sac->sac_state == SCTP_RESTART)) && (n > 0)) {
printf(", supports");
for (i = 0; i < n; i++) {
switch (sac->sac_info[i]) {
case SCTP_ASSOC_SUPPORTS_PR:
printf(" PR");
break;
case SCTP_ASSOC_SUPPORTS_AUTH:
printf(" AUTH");
break;
case SCTP_ASSOC_SUPPORTS_ASCONF:
printf(" ASCONF");
break;
case SCTP_ASSOC_SUPPORTS_MULTIBUF:
printf(" MULTIBUF");
break;
case SCTP_ASSOC_SUPPORTS_RE_CONFIG:
printf(" RE-CONFIG");
break;
case SCTP_ASSOC_SUPPORTS_INTERLEAVING:
printf(" INTERLEAVING");
break;
default:
printf(" UNKNOWN(0x%02x)", sac->sac_info[i]);
break;
}
}
} else if (((sac->sac_state == SCTP_COMM_LOST) ||
(sac->sac_state == SCTP_CANT_STR_ASSOC)) && (n > 0)) {
printf(", ABORT =");
for (i = 0; i < n; i++) {
printf(" 0x%02x", sac->sac_info[i]);
}
}
printf(".\n");
if ((sac->sac_state == SCTP_CANT_STR_ASSOC) ||
(sac->sac_state == SCTP_SHUTDOWN_COMP) ||
(sac->sac_state == SCTP_COMM_LOST)) {
exit(0);
}
return;
}
static void
handle_peer_address_change_event(struct sctp_paddr_change *spc)
{
char addr_buf[INET6_ADDRSTRLEN];
const char *addr;
struct sockaddr_in *sin;
struct sockaddr_in6 *sin6;
switch (spc->spc_aaddr.ss_family) {
case AF_INET:
sin = (struct sockaddr_in *)&spc->spc_aaddr;
addr = inet_ntop(AF_INET, &sin->sin_addr, addr_buf, INET_ADDRSTRLEN);
break;
case AF_INET6:
sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
addr = inet_ntop(AF_INET6, &sin6->sin6_addr, addr_buf, INET6_ADDRSTRLEN);
break;
default:
#ifdef _WIN32
if (_snprintf(addr_buf, INET6_ADDRSTRLEN, "Unknown family %d", spc->spc_aaddr.ss_family) < 0) {
#else
if (snprintf(addr_buf, INET6_ADDRSTRLEN, "Unknown family %d", spc->spc_aaddr.ss_family) < 0) {
#endif
addr_buf[0] = '\0';
}
addr = addr_buf;
break;
}
printf("Peer address %s is now ", addr);
switch (spc->spc_state) {
case SCTP_ADDR_AVAILABLE:
printf("SCTP_ADDR_AVAILABLE");
break;
case SCTP_ADDR_UNREACHABLE:
printf("SCTP_ADDR_UNREACHABLE");
break;
case SCTP_ADDR_REMOVED:
printf("SCTP_ADDR_REMOVED");
break;
case SCTP_ADDR_ADDED:
printf("SCTP_ADDR_ADDED");
break;
case SCTP_ADDR_MADE_PRIM:
printf("SCTP_ADDR_MADE_PRIM");
break;
case SCTP_ADDR_CONFIRMED:
printf("SCTP_ADDR_CONFIRMED");
break;
default:
printf("UNKNOWN");
break;
}
printf(" (error = 0x%08x).\n", spc->spc_error);
return;
}
static void
handle_adaptation_indication(struct sctp_adaptation_event *sai)
{
printf("Adaptation indication: %x.\n", sai-> sai_adaptation_ind);
return;
}
static void
handle_shutdown_event(struct sctp_shutdown_event *sse)
{
printf("Shutdown event.\n");
/* XXX: notify all channels. */
return;
}
static void
handle_stream_reset_event(struct peer_connection *pc, struct sctp_stream_reset_event *strrst)
{
uint32_t n, i;
struct channel *channel;
n = (strrst->strreset_length - sizeof(struct sctp_stream_reset_event)) / sizeof(uint16_t);
printf("Stream reset event: flags = %x, ", strrst->strreset_flags);
if (strrst->strreset_flags & SCTP_STREAM_RESET_INCOMING_SSN) {
if (strrst->strreset_flags & SCTP_STREAM_RESET_OUTGOING_SSN) {
printf("incoming/outgoing ");
} else {
printf("incoming ");
}
} else if (strrst->strreset_flags & SCTP_STREAM_RESET_OUTGOING_SSN) {
printf("outgoing ");
}
printf("stream ids = ");
for (i = 0; i < n; i++) {
if (i > 0) {
printf(", ");
}
printf("%d", strrst->strreset_stream_list[i]);
}
printf(".\n");
if (!(strrst->strreset_flags & SCTP_STREAM_RESET_DENIED) &&
!(strrst->strreset_flags & SCTP_STREAM_RESET_FAILED)) {
for (i = 0; i < n; i++) {
if (strrst->strreset_flags & SCTP_STREAM_RESET_INCOMING_SSN) {
channel = find_channel_by_i_stream(pc, strrst->strreset_stream_list[i]);
if (channel != NULL) {
pc->i_stream_channel[channel->i_stream] = NULL;
channel->i_stream = 0;
if (channel->o_stream == 0) {
channel->pr_policy = SCTP_PR_SCTP_NONE;
channel->pr_value = 0;
channel->unordered = 0;
channel->flags = 0;
channel->state = DATA_CHANNEL_CLOSED;
} else {
if (channel->state == DATA_CHANNEL_OPEN) {
reset_outgoing_stream(pc, channel->o_stream);
channel->state = DATA_CHANNEL_CLOSING;
} else {
/* XXX: What to do? */
}
}
}
}
if (strrst->strreset_flags & SCTP_STREAM_RESET_OUTGOING_SSN) {
channel = find_channel_by_o_stream(pc, strrst->strreset_stream_list[i]);
if (channel != NULL) {
pc->o_stream_channel[channel->o_stream] = NULL;
channel->o_stream = 0;
if (channel->i_stream == 0) {
channel->pr_policy = SCTP_PR_SCTP_NONE;
channel->pr_value = 0;
channel->unordered = 0;
channel->flags = 0;
channel->state = DATA_CHANNEL_CLOSED;
}
}
}
}
}
return;
}
static void
handle_stream_change_event(struct peer_connection *pc, struct sctp_stream_change_event *strchg)
{
uint16_t o_stream;
uint32_t i;
struct channel *channel;
printf("Stream change event: streams (in/out) = (%u/%u), flags = %x.\n",
strchg->strchange_instrms, strchg->strchange_outstrms, strchg->strchange_flags);
for (i = 0; i < NUMBER_OF_CHANNELS; i++) {
channel = &(pc->channels[i]);
if ((channel->state == DATA_CHANNEL_CONNECTING) &&
(channel->o_stream == 0)) {
if ((strchg->strchange_flags & SCTP_STREAM_CHANGE_DENIED) ||
(strchg->strchange_flags & SCTP_STREAM_CHANGE_FAILED)) {
/* XXX: Signal to the other end. */
if (channel->i_stream != 0) {
pc->i_stream_channel[channel->i_stream] = NULL;
}
channel->unordered = 0;
channel->pr_policy = SCTP_PR_SCTP_NONE;
channel->pr_value = 0;
channel->i_stream = 0;
channel->o_stream = 0;
channel->flags = 0;
channel->state = DATA_CHANNEL_CLOSED;
} else {
o_stream = find_free_o_stream(pc);
if (o_stream != 0) {
channel->o_stream = o_stream;
pc->o_stream_channel[o_stream] = channel;
if (channel->i_stream == 0) {
channel->flags |= DATA_CHANNEL_FLAGS_SEND_REQ;
} else {
channel->flags |= DATA_CHANNEL_FLAGS_SEND_RSP;
}
} else {
/* We will not find more ... */
break;
}
}
}
}
return;
}
static void
handle_remote_error_event(struct sctp_remote_error *sre)
{
size_t i, n;
n = sre->sre_length - sizeof(struct sctp_remote_error);
printf("Remote Error (error = 0x%04x): ", sre->sre_error);
for (i = 0; i < n; i++) {
printf(" 0x%02x", sre-> sre_data[i]);
}
printf(".\n");
return;
}
static void
handle_send_failed_event(struct sctp_send_failed_event *ssfe)
{
size_t i, n;
if (ssfe->ssfe_flags & SCTP_DATA_UNSENT) {
printf("Unsent ");
}
if (ssfe->ssfe_flags & SCTP_DATA_SENT) {
printf("Sent ");
}
if (ssfe->ssfe_flags & ~(SCTP_DATA_SENT | SCTP_DATA_UNSENT)) {
printf("(flags = %x) ", ssfe->ssfe_flags);
}
printf("message with PPID = %u, SID = %u, flags: 0x%04x due to error = 0x%08x",
ntohl(ssfe->ssfe_info.snd_ppid), ssfe->ssfe_info.snd_sid,
ssfe->ssfe_info.snd_flags, ssfe->ssfe_error);
n = ssfe->ssfe_length - sizeof(struct sctp_send_failed_event);
for (i = 0; i < n; i++) {
printf(" 0x%02x", ssfe->ssfe_data[i]);
}
printf(".\n");
return;
}
static void
handle_notification_rtcweb(struct peer_connection *pc, union sctp_notification *notif, size_t n)
{
if (notif->sn_header.sn_length != (uint32_t)n) {
return;
}
switch (notif->sn_header.sn_type) {
case SCTP_ASSOC_CHANGE:
handle_association_change_event(&(notif->sn_assoc_change));
break;
case SCTP_PEER_ADDR_CHANGE:
handle_peer_address_change_event(&(notif->sn_paddr_change));
break;
case SCTP_REMOTE_ERROR:
handle_remote_error_event(&(notif->sn_remote_error));
break;
case SCTP_SHUTDOWN_EVENT:
handle_shutdown_event(&(notif->sn_shutdown_event));
break;
case SCTP_ADAPTATION_INDICATION:
handle_adaptation_indication(&(notif->sn_adaptation_event));
break;
case SCTP_PARTIAL_DELIVERY_EVENT:
break;
case SCTP_AUTHENTICATION_EVENT:
break;
case SCTP_SENDER_DRY_EVENT:
break;
case SCTP_NOTIFICATIONS_STOPPED_EVENT:
break;
case SCTP_SEND_FAILED_EVENT:
handle_send_failed_event(&(notif->sn_send_failed_event));
break;
case SCTP_STREAM_RESET_EVENT:
handle_stream_reset_event(pc, &(notif->sn_strreset_event));
send_deferred_messages(pc);
send_outgoing_stream_reset(pc);
request_more_o_streams(pc);
break;
case SCTP_ASSOC_RESET_EVENT:
break;
case SCTP_STREAM_CHANGE_EVENT:
handle_stream_change_event(pc, &(notif->sn_strchange_event));
send_deferred_messages(pc);
send_outgoing_stream_reset(pc);
request_more_o_streams(pc);
break;
default:
break;
}
}
static void
print_status(struct peer_connection *pc)
{
struct sctp_status status;
socklen_t len;
uint32_t i;
struct channel *channel;
len = (socklen_t)sizeof(struct sctp_status);
if (usrsctp_getsockopt(pc->sock, IPPROTO_SCTP, SCTP_STATUS, &status, &len) < 0) {
perror("getsockopt");
return;
}
printf("Association state: ");
switch (status.sstat_state) {
case SCTP_CLOSED:
printf("CLOSED\n");
break;
case SCTP_BOUND:
printf("BOUND\n");
break;
case SCTP_LISTEN:
printf("LISTEN\n");
break;
case SCTP_COOKIE_WAIT:
printf("COOKIE_WAIT\n");
break;
case SCTP_COOKIE_ECHOED:
printf("COOKIE_ECHOED\n");
break;
case SCTP_ESTABLISHED:
printf("ESTABLISHED\n");
break;
case SCTP_SHUTDOWN_PENDING:
printf("SHUTDOWN_PENDING\n");
break;
case SCTP_SHUTDOWN_SENT:
printf("SHUTDOWN_SENT\n");
break;
case SCTP_SHUTDOWN_RECEIVED:
printf("SHUTDOWN_RECEIVED\n");
break;
case SCTP_SHUTDOWN_ACK_SENT:
printf("SHUTDOWN_ACK_SENT\n");
break;
default:
printf("UNKNOWN\n");
break;
}
printf("Number of streams (i/o) = (%u/%u)\n",
status.sstat_instrms, status.sstat_outstrms);
for (i = 0; i < NUMBER_OF_CHANNELS; i++) {
channel = &(pc->channels[i]);
if (channel->state == DATA_CHANNEL_CLOSED) {
continue;
}
printf("Channel with id = %u: state ", channel->id);
switch (channel->state) {
case DATA_CHANNEL_CLOSED:
printf("CLOSED");
break;
case DATA_CHANNEL_CONNECTING:
printf("CONNECTING");
break;
case DATA_CHANNEL_OPEN:
printf("OPEN");
break;
case DATA_CHANNEL_CLOSING:
printf("CLOSING");
break;
default:
printf("UNKNOWN(%d)", channel->state);
break;
}
printf(", flags = 0x%08x, stream id (in/out): (%u/%u), ",
channel->flags,
channel->i_stream,
channel->o_stream);
if (channel->unordered) {
printf("unordered, ");
} else {
printf("ordered, ");
}
switch (channel->pr_policy) {
case SCTP_PR_SCTP_NONE:
printf("reliable.\n");
break;
case SCTP_PR_SCTP_TTL:
printf("unreliable (timeout %ums).\n", channel->pr_value);
break;
case SCTP_PR_SCTP_RTX:
printf("unreliable (max. %u rtx).\n", channel->pr_value);
break;
default:
printf("unknown policy %u.\n", channel->pr_policy);
break;
}
}
}
static int
receive_cb(struct socket *sock, union sctp_sockstore addr, void *data,
size_t datalen, struct sctp_rcvinfo rcv, int flags, void *ulp_info)
{
struct peer_connection *pc;
pc = (struct peer_connection *)ulp_info;
if (data) {
lock_peer_connection(pc);
if (flags & MSG_NOTIFICATION) {
handle_notification_rtcweb(pc, (union sctp_notification *)data, datalen);
} else {
handle_message(pc, data, datalen, ntohl(rcv.rcv_ppid), rcv.rcv_sid);
}
unlock_peer_connection(pc);
}
return (1);
}
int
main(int argc, char *argv[])
{
struct socket *sock;
struct sockaddr_in addr;
socklen_t addr_len;
char line[LINE_LENGTH + 1];
unsigned int unordered, policy, value, id, seconds;
unsigned int i;
struct channel *channel;
const int on = 1;
struct sctp_assoc_value av;
struct sctp_event event;
struct sctp_udpencaps encaps;
struct sctp_initmsg initmsg;
uint16_t event_types[] = {SCTP_ASSOC_CHANGE,
SCTP_PEER_ADDR_CHANGE,
SCTP_REMOTE_ERROR,
SCTP_SHUTDOWN_EVENT,
SCTP_ADAPTATION_INDICATION,
SCTP_SEND_FAILED_EVENT,
SCTP_STREAM_RESET_EVENT,
SCTP_STREAM_CHANGE_EVENT};
char addrbuf[INET_ADDRSTRLEN];
if (argc > 1) {
usrsctp_init(atoi(argv[1]), NULL, debug_printf_stack);
} else {
usrsctp_init(9899, NULL, debug_printf_stack);
}
#ifdef SCTP_DEBUG
usrsctp_sysctl_set_sctp_debug_on(SCTP_DEBUG_NONE);
#endif
usrsctp_sysctl_set_sctp_blackhole(2);
usrsctp_sysctl_set_sctp_no_csum_on_loopback(0);
if ((sock = usrsctp_socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP, receive_cb, NULL, 0, &peer_connection)) == NULL) {
perror("socket");
}
init_peer_connection(&peer_connection);
if (argc > 2) {
memset(&encaps, 0, sizeof(struct sctp_udpencaps));
encaps.sue_address.ss_family = AF_INET6;
encaps.sue_port = htons(atoi(argv[2]));
if (usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_REMOTE_UDP_ENCAPS_PORT, (const void*)&encaps, (socklen_t)sizeof(struct sctp_udpencaps)) < 0) {
perror("setsockopt");
}
}
if (usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_RECVRCVINFO, &on, sizeof(int)) < 0) {
perror("setsockopt SCTP_RECVRCVINFO");
}
if (usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_EXPLICIT_EOR, &on, sizeof(int)) < 0) {
perror("setsockopt SCTP_EXPLICIT_EOR");
}
/* Allow resetting streams. */
av.assoc_id = SCTP_ALL_ASSOC;
av.assoc_value = SCTP_ENABLE_RESET_STREAM_REQ | SCTP_ENABLE_CHANGE_ASSOC_REQ;
if (usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_ENABLE_STREAM_RESET, &av, sizeof(struct sctp_assoc_value)) < 0) {
perror("setsockopt SCTP_ENABLE_STREAM_RESET");
}
/* Enable the events of interest. */
memset(&event, 0, sizeof(event));
event.se_assoc_id = SCTP_ALL_ASSOC;
event.se_on = 1;
for (i = 0; i < sizeof(event_types)/sizeof(uint16_t); i++) {
event.se_type = event_types[i];
if (usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_EVENT, &event, sizeof(event)) < 0) {
perror("setsockopt SCTP_EVENT");
}
}
memset(&initmsg, 0, sizeof(struct sctp_initmsg));
initmsg.sinit_num_ostreams = 5;
initmsg.sinit_max_instreams = 65535;
if (usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_INITMSG, &initmsg, sizeof(struct sctp_initmsg)) < 0) {
perror("setsockopt SCTP_INITMSG");
}
if (argc == 5) {
/* operating as client */
memset(&addr, 0, sizeof(struct sockaddr_in));
addr.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
addr.sin_len = sizeof(struct sockaddr_in);
#endif
if (!inet_pton(AF_INET, argv[3], &addr.sin_addr.s_addr)){
printf("error: invalid address\n");
exit(1);
}
addr.sin_port = htons(atoi(argv[4]));
if (usrsctp_connect(sock, (struct sockaddr *)&addr, sizeof(struct sockaddr_in)) < 0) {
perror("connect");
}
printf("Connected to %s:%d.\n", inet_ntop(AF_INET, &(addr.sin_addr), addrbuf, INET_ADDRSTRLEN), ntohs(addr.sin_port));
} else if (argc == 4) {
struct socket *conn_sock;
/* operating as server */
memset(&addr, 0, sizeof(struct sockaddr_in));
addr.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
addr.sin_len = sizeof(struct sockaddr_in);
#endif
addr.sin_addr.s_addr = INADDR_ANY;
addr.sin_port = htons(atoi(argv[3]));
if (usrsctp_bind(sock, (struct sockaddr *)&addr, sizeof(struct sockaddr_in)) < 0) {
perror("bind");
}
if (usrsctp_listen(sock, 1) < 0) {
perror("listen");
}
addr_len = (socklen_t)sizeof(struct sockaddr_in);
memset(&addr, 0, sizeof(struct sockaddr_in));
if ((conn_sock = usrsctp_accept(sock, (struct sockaddr *)&addr, &addr_len)) == NULL) {
perror("accept");
}
usrsctp_close(sock);
sock = conn_sock;
printf("Connected to %s:%d.\n", inet_ntop(AF_INET, &(addr.sin_addr), addrbuf, INET_ADDRSTRLEN), ntohs(addr.sin_port));
} else {
printf("Usage: %s local_udp_port remote_udp_port local_port when operating as server\n"
" %s local_udp_port remote_udp_port remote_addr remote_port when operating as client\n",
argv[0], argv[0]);
return (0);
}
lock_peer_connection(&peer_connection);
peer_connection.sock = sock;
unlock_peer_connection(&peer_connection);
for (;;) {
#if defined(_WIN32) && !defined(__MINGW32__)
if (gets_s(line, LINE_LENGTH) == NULL) {
#else
if (fgets(line, LINE_LENGTH, stdin) == NULL) {
#endif
if (usrsctp_shutdown(sock, SHUT_WR) < 0) {
perror("usrsctp_shutdown");
}
while (usrsctp_finish() != 0) {
#ifdef _WIN32
Sleep(1000);
#else
sleep(1);
#endif
}
break;
}
if (strncmp(line, "?", strlen("?")) == 0 ||
strncmp(line, "help", strlen("help")) == 0) {
printf("Commands:\n"
"open unordered pr_policy pr_value - opens a channel\n"
"close channel - closes the channel\n"
"send channel:string - sends string using channel\n"
"status - prints the status\n"
"sleep n - sleep for n seconds\n"
"help - this message\n");
} else if (strncmp(line, "status", strlen("status")) == 0) {
lock_peer_connection(&peer_connection);
print_status(&peer_connection);
unlock_peer_connection(&peer_connection);
} else if (strncmp(line, "quit", strlen("quit")) == 0) {
if (usrsctp_shutdown(sock, SHUT_WR) < 0) {
perror("usrsctp_shutdown");
}
while (usrsctp_finish() != 0) {
#ifdef _WIN32
Sleep(1000);
#else
sleep(1);
#endif
}
break;
} else if (sscanf(line, "open %u %u %u", &unordered, &policy, &value) == 3) {
lock_peer_connection(&peer_connection);
channel = open_channel(&peer_connection, (uint8_t)unordered, (uint16_t)policy, (uint32_t)value);
unlock_peer_connection(&peer_connection);
if (channel == NULL) {
printf("Creating channel failed.\n");
} else {
printf("Channel with id %u created.\n", channel->id);
}
} else if (sscanf(line, "close %u", &id) == 1) {
if (id < NUMBER_OF_CHANNELS) {
lock_peer_connection(&peer_connection);
close_channel(&peer_connection, &peer_connection.channels[id]);
unlock_peer_connection(&peer_connection);
}
} else if (sscanf(line, "send %u", &id) == 1) {
if (id < NUMBER_OF_CHANNELS) {
char *msg;
msg = strstr(line, ":");
if (msg) {
msg++;
lock_peer_connection(&peer_connection);
#ifdef _WIN32
if (send_user_message(&peer_connection, &peer_connection.channels[id], msg, strlen(msg))) {
#else
if (send_user_message(&peer_connection, &peer_connection.channels[id], msg, strlen(msg) - 1)) {
#endif
printf("Message sent.\n");
} else {
printf("Message sending failed.\n");
}
unlock_peer_connection(&peer_connection);
}
}
} else if (sscanf(line, "sleep %u", &seconds) == 1) {
#ifdef _WIN32
Sleep(seconds * 1000);
#else
sleep(seconds);
#endif
} else {
printf("Unknown command: %s", line);
}
}
return (0);
}
|
685275.c | /*----------------------------------------------------------------------------*/
/* Hobbit message daemon. . */
/* */
/* This is a hobbitd worker module for the "stachg" channel. */
/* This module implements the file-based history logging, and keeps the */
/* historical logfiles in bbvar/hist/ and bbvar/histlogs/ updated to keep */
/* track of the status changes. */
/* */
/* Copyright (C) 2004-2009 Henrik Storner <[email protected]> */
/* */
/* This program is released under the GNU General Public License (GPL), */
/* version 2. See the file "COPYING" for details. */
/* */
/*----------------------------------------------------------------------------*/
static char rcsid[] = "$Id$";
#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/stat.h>
#include <errno.h>
#include <signal.h>
#include <dirent.h>
#include <sys/wait.h>
#include <time.h>
#include <limits.h>
#include "libbbgen.h"
#include "hobbitd_worker.h"
int rotatefiles = 0;
void sig_handler(int signum)
{
/*
* Why this? Because we must have our own signal handler installed to call wait()
*/
switch (signum) {
case SIGCHLD:
break;
case SIGHUP:
rotatefiles = 1;
break;
}
}
typedef struct columndef_t {
char *name;
int saveit;
} columndef_t;
RbtHandle columndefs;
int main(int argc, char *argv[])
{
time_t starttime = gettimer();
char *histdir = NULL;
char *histlogdir = NULL;
char *msg;
int argi, seq;
int save_allevents = 1;
int save_hostevents = 1;
int save_statusevents = 1;
int save_histlogs = 1, defaultsaveop = 1;
FILE *alleventsfd = NULL;
int running = 1;
struct sigaction sa;
char newcol2[3];
char oldcol2[3];
char alleventsfn[PATH_MAX];
char pidfn[PATH_MAX];
MEMDEFINE(pidfn);
MEMDEFINE(alleventsfn);
MEMDEFINE(newcol2);
MEMDEFINE(oldcol2);
/* Don't save the error buffer */
save_errbuf = 0;
if (xgetenv("BBALLHISTLOG")) save_allevents = (strcmp(xgetenv("BBALLHISTLOG"), "TRUE") == 0);
if (xgetenv("BBHOSTHISTLOG")) save_hostevents = (strcmp(xgetenv("BBHOSTHISTLOG"), "TRUE") == 0);
if (xgetenv("SAVESTATUSLOG")) save_histlogs = (strncmp(xgetenv("SAVESTATUSLOG"), "FALSE", 5) != 0);
for (argi = 1; (argi < argc); argi++) {
if (argnmatch(argv[argi], "--histdir=")) {
histdir = strchr(argv[argi], '=')+1;
}
else if (argnmatch(argv[argi], "--histlogdir=")) {
histlogdir = strchr(argv[argi], '=')+1;
}
else if (argnmatch(argv[argi], "--debug")) {
debug = 1;
}
}
if (xgetenv("BBHIST") && (histdir == NULL)) {
histdir = strdup(xgetenv("BBHIST"));
}
if (histdir == NULL) {
errprintf("No history directory given, aborting\n");
return 1;
}
if (save_histlogs && (histlogdir == NULL) && xgetenv("BBHISTLOGS")) {
histlogdir = strdup(xgetenv("BBHISTLOGS"));
}
if (save_histlogs && (histlogdir == NULL)) {
errprintf("No history-log directory given, aborting\n");
return 1;
}
columndefs = rbtNew(string_compare);
{
char *defaultsave, *tok;
char *savelist;
columndef_t *newrec;
savelist = strdup(xgetenv("SAVESTATUSLOG"));
defaultsave = strtok(savelist, ",");
/*
* TRUE: Save everything by default; may list some that are not saved.
* ONLY: Save nothing by default; may list some that are saved.
* FALSE: Save nothing.
*/
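/*
 * Example settings (column names are hypothetical):
 *   SAVESTATUSLOG="TRUE,!http,!conn"  - save histlogs for every column
 *                                       except "http" and "conn".
 *   SAVESTATUSLOG="ONLY,disk,cpu"     - save histlogs only for "disk"
 *                                       and "cpu".
 */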
defaultsaveop = (strcasecmp(defaultsave, "TRUE") == 0);
tok = strtok(NULL, ",");
while (tok) {
newrec = (columndef_t *)malloc(sizeof(columndef_t));
if (*tok == '!') {
newrec->saveit = 0;
newrec->name = strdup(tok+1);
}
else {
newrec->saveit = 1;
newrec->name = strdup(tok);
}
rbtInsert(columndefs, newrec->name, newrec);
tok = strtok(NULL, ",");
}
xfree(savelist);
}
sprintf(pidfn, "%s/hobbitd_history.pid", xgetenv("BBSERVERLOGS"));
{
FILE *pidfd = fopen(pidfn, "w");
if (pidfd) {
fprintf(pidfd, "%d\n", getpid());
fclose(pidfd);
}
}
sprintf(alleventsfn, "%s/allevents", histdir);
if (save_allevents) {
alleventsfd = fopen(alleventsfn, "a");
if (alleventsfd == NULL) {
errprintf("Cannot open the all-events file '%s'\n", alleventsfn);
}
else {
setvbuf(alleventsfd, (char *)NULL, _IOLBF, 0);
}
}
/* For picking up lost children */
setup_signalhandler("hobbitd_history");
memset(&sa, 0, sizeof(sa));
sa.sa_handler = sig_handler;
sigaction(SIGCHLD, &sa, NULL);
sigaction(SIGHUP, &sa, NULL);
signal(SIGPIPE, SIG_DFL);
while (running) {
char *metadata[20] = { NULL, };
int metacount;
char *p;
char *statusdata = "";
char *hostname, *hostnamecommas, *testname, *dismsg;
time_t tstamp, lastchg, disabletime, clienttstamp;
int tstamp_i, lastchg_i;
int newcolor, oldcolor;
int downtimeactive;
struct tm tstamptm;
int trend;
int childstat;
/* Pickup any finished child processes to avoid zombies */
while (wait3(&childstat, WNOHANG, NULL) > 0) ;
if (rotatefiles && alleventsfd) {
fclose(alleventsfd);
alleventsfd = fopen(alleventsfn, "a");
if (alleventsfd == NULL) {
errprintf("Cannot re-open the all-events file '%s'\n", alleventsfn);
}
else {
setvbuf(alleventsfd, (char *)NULL, _IOLBF, 0);
}
}
msg = get_hobbitd_message(C_STACHG, "hobbitd_history", &seq, NULL);
if (msg == NULL) {
running = 0;
continue;
}
p = strchr(msg, '\n');
if (p) {
*p = '\0';
statusdata = msg_data(p+1);
}
metacount = 0;
memset(&metadata, 0, sizeof(metadata));
p = gettok(msg, "|");
while (p && (metacount < 20)) {
metadata[metacount++] = p;
p = gettok(NULL, "|");
}
if ((metacount > 9) && (strncmp(metadata[0], "@@stachg", 8) == 0)) {
RbtIterator handle;
columndef_t *saveit = NULL;
/* @@stachg#seq|timestamp|sender|origin|hostname|testname|expiretime|color|prevcolor|changetime|disabletime|disablemsg|downtimeactive|clienttstamp */
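/*
 * Illustrative record (all field values are made up):
 * @@stachg#1234|1097383782.000000|10.0.0.1|hobbitd|www.example.com|conn|1097384082|red|green|1097380000|0|-|0|0
 */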
sscanf(metadata[1], "%d.%*d", &tstamp_i); tstamp = tstamp_i;
hostname = metadata[4];
testname = metadata[5];
newcolor = parse_color(metadata[7]);
oldcolor = parse_color(metadata[8]);
lastchg = atoi(metadata[9]);
disabletime = atoi(metadata[10]);
dismsg = metadata[11];
downtimeactive = (atoi(metadata[12]) > 0);
clienttstamp = atoi(metadata[13]);
if (newcolor == -1) {
errprintf("Bad message: newcolor is unknown '%s'\n", metadata[7]);
continue;
}
p = hostnamecommas = strdup(hostname); while ((p = strchr(p, '.')) != NULL) *p = ',';
handle = rbtFind(columndefs, testname);
if (handle == rbtEnd(columndefs)) {
saveit = (columndef_t *)malloc(sizeof(columndef_t));
saveit->name = strdup(testname);
saveit->saveit = defaultsaveop;
rbtInsert(columndefs, saveit->name, saveit);
}
else {
saveit = (columndef_t *) gettreeitem(columndefs, handle);
}
if (save_statusevents) {
char statuslogfn[PATH_MAX];
int logexists;
FILE *statuslogfd;
char oldcol[100];
char timestamp[40];
struct stat st;
MEMDEFINE(statuslogfn);
MEMDEFINE(oldcol);
MEMDEFINE(timestamp);
sprintf(statuslogfn, "%s/%s.%s", histdir, hostnamecommas, testname);
stat(statuslogfn, &st);
statuslogfd = fopen(statuslogfn, "r+");
logexists = (statuslogfd != NULL);
*oldcol = '\0';
if (logexists) {
/*
* There is a fair chance hobbitd has not been
* running all the time while this system was monitored.
* So get the time of the latest status change from the file,
* instead of relying on the "lastchange" value we get
* from hobbitd. This is also needed when migrating from
* standard bbd to hobbitd.
*/
off_t pos = -1;
char l[1024];
int gotit;
MEMDEFINE(l);
fseeko(statuslogfd, 0, SEEK_END);
if (ftello(statuslogfd) > 512) {
/* Go back 512 from EOF, and skip to start of a line */
fseeko(statuslogfd, -512, SEEK_END);
gotit = (fgets(l, sizeof(l)-1, statuslogfd) == NULL);
}
else {
/* Read from beginning of file */
fseeko(statuslogfd, 0, SEEK_SET);
gotit = 0;
}
while (!gotit) {
off_t tmppos = ftello(statuslogfd);
time_t dur;
int dur_i;
if (fgets(l, sizeof(l)-1, statuslogfd)) {
/* Sun Oct 10 06:49:42 2004 red 1097383782 602 */
if ((strlen(l) > 24) &&
(sscanf(l+24, " %s %d %d", oldcol, &lastchg_i, &dur_i) == 2) &&
(parse_color(oldcol) != -1)) {
/*
* Record the start location of this line. Only the final,
* still-open record matches here (exactly two fields after
* the timestamp: color and change-time), because its
* duration is not written until the next status change.
*/
pos = tmppos;
lastchg = lastchg_i;
dur = dur_i;
}
}
else {
gotit = 1;
}
}
if (pos == -1) {
/*
* Couldn't find anything in the log.
* Take lastchg from the timestamp of the logfile,
* and just append the data.
*/
lastchg = st.st_mtime;
fseeko(statuslogfd, 0, SEEK_END);
}
else {
/*
* lastchg was updated above.
* Seek to where the last line starts.
*/
fseeko(statuslogfd, pos, SEEK_SET);
}
MEMUNDEFINE(l);
}
else {
/*
* Logfile does not exist.
*/
lastchg = tstamp;
statuslogfd = fopen(statuslogfn, "a");
if (statuslogfd == NULL) {
errprintf("Cannot open status historyfile '%s' : %s\n",
statuslogfn, strerror(errno));
}
}
if (strcmp(oldcol, colorname(newcolor)) == 0) {
/* We won't update history unless the color changed. */
if ((gettimer() - starttime) > 300) {
errprintf("Will not update %s - color unchanged (%s)\n",
statuslogfn, oldcol);
}
if (hostnamecommas) xfree(hostnamecommas);
if (statuslogfd) fclose(statuslogfd);
MEMUNDEFINE(statuslogfn);
MEMUNDEFINE(oldcol);
MEMUNDEFINE(timestamp);
continue;
}
if (statuslogfd) {
if (logexists) {
struct tm oldtm;
/* Re-print the old record, now with the final duration */
memcpy(&oldtm, localtime(&lastchg), sizeof(oldtm));
strftime(timestamp, sizeof(timestamp), "%a %b %e %H:%M:%S %Y", &oldtm);
fprintf(statuslogfd, "%s %s %d %d\n",
timestamp, oldcol, (int)lastchg, (int)(tstamp - lastchg));
}
/* And the new record. */
memcpy(&tstamptm, localtime(&tstamp), sizeof(tstamptm));
strftime(timestamp, sizeof(timestamp), "%a %b %e %H:%M:%S %Y", &tstamptm);
fprintf(statuslogfd, "%s %s %d", timestamp, colorname(newcolor), (int)tstamp);
fclose(statuslogfd);
}
MEMUNDEFINE(statuslogfn);
MEMUNDEFINE(oldcol);
MEMUNDEFINE(timestamp);
}
if (save_histlogs && saveit->saveit) {
char *hostdash;
char fname[PATH_MAX];
FILE *histlogfd;
MEMDEFINE(fname);
p = hostdash = strdup(hostname); while ((p = strchr(p, '.')) != NULL) *p = '_';
sprintf(fname, "%s/%s", histlogdir, hostdash);
mkdir(fname, S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH);
p = fname + sprintf(fname, "%s/%s/%s", histlogdir, hostdash, testname);
mkdir(fname, S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH);
p += sprintf(p, "/%s", histlogtime(tstamp));
histlogfd = fopen(fname, "w");
if (histlogfd) {
/*
* When a host gets disabled or goes purple, the status
* message data is not changed - so it will include a
* wrong color as the first word of the message.
* Therefore we need to fixup this so it matches the
* newcolor value.
*/
int txtcolor = parse_color(statusdata);
char *origstatus = statusdata;
if (txtcolor != -1) {
fprintf(histlogfd, "%s", colorname(newcolor));
statusdata += strlen(colorname(txtcolor));
}
if (dismsg && *dismsg) nldecode(dismsg);
if (disabletime > 0) {
fprintf(histlogfd, " Disabled until %s\n%s\n\n",
ctime(&disabletime), (dismsg ? dismsg : ""));
fprintf(histlogfd, "Status message when disabled follows:\n\n");
statusdata = origstatus;
}
else if (dismsg && *dismsg) {
fprintf(histlogfd, " Planned downtime: %s\n\n", dismsg);
fprintf(histlogfd, "Original status message follows:\n\n");
statusdata = origstatus;
}
fwrite(statusdata, strlen(statusdata), 1, histlogfd);
fprintf(histlogfd, "Status unchanged in 0.00 minutes\n");
fprintf(histlogfd, "Message received from %s\n", metadata[2]);
if (clienttstamp) fprintf(histlogfd, "Client data ID %d\n", (int) clienttstamp);
fclose(histlogfd);
}
else {
errprintf("Cannot create histlog file '%s' : %s\n", fname, strerror(errno));
}
xfree(hostdash);
MEMUNDEFINE(fname);
}
strncpy(oldcol2, ((oldcolor >= 0) ? colorname(oldcolor) : "-"), 2);
strncpy(newcol2, colorname(newcolor), 2);
newcol2[2] = oldcol2[2] = '\0';
if (oldcolor == -1) trend = -1; /* we don't know how bad it was */
else if (newcolor > oldcolor) trend = 2; /* It's getting worse */
else if (newcolor < oldcolor) trend = 1; /* It's getting better */
else trend = 0; /* Shouldn't happen ... */
if (save_hostevents) {
char hostlogfn[PATH_MAX];
FILE *hostlogfd;
MEMDEFINE(hostlogfn);
sprintf(hostlogfn, "%s/%s", histdir, hostname);
hostlogfd = fopen(hostlogfn, "a");
if (hostlogfd) {
fprintf(hostlogfd, "%s %d %d %d %s %s %d\n",
testname, (int)tstamp, (int)lastchg, (int)(tstamp - lastchg),
newcol2, oldcol2, trend);
fclose(hostlogfd);
}
else {
errprintf("Cannot open host logfile '%s' : %s\n", hostlogfn, strerror(errno));
}
MEMUNDEFINE(hostlogfn);
}
if (save_allevents) {
fprintf(alleventsfd, "%s %s %d %d %d %s %s %d\n",
hostname, testname, (int)tstamp, (int)lastchg, (int)(tstamp - lastchg),
newcol2, oldcol2, trend);
fflush(alleventsfd);
}
xfree(hostnamecommas);
}
else if ((metacount > 3) && ((strncmp(metadata[0], "@@drophost", 10) == 0))) {
/* @@drophost|timestamp|sender|hostname */
hostname = metadata[3];
if (save_histlogs) {
char *hostdash;
char testdir[PATH_MAX];
MEMDEFINE(testdir);
/* Remove all directories below the host-specific histlog dir */
p = hostdash = strdup(hostname); while ((p = strchr(p, '.')) != NULL) *p = '_';
sprintf(testdir, "%s/%s", histlogdir, hostdash);
dropdirectory(testdir, 1);
xfree(hostdash);
MEMUNDEFINE(testdir);
}
if (save_hostevents) {
char hostlogfn[PATH_MAX];
struct stat st;
MEMDEFINE(hostlogfn);
sprintf(hostlogfn, "%s/%s", histdir, hostname);
if ((stat(hostlogfn, &st) == 0) && S_ISREG(st.st_mode)) {
unlink(hostlogfn);
}
MEMUNDEFINE(hostlogfn);
}
if (save_statusevents) {
DIR *dirfd;
struct dirent *de;
char *hostlead;
char statuslogfn[PATH_MAX];
struct stat st;
MEMDEFINE(statuslogfn);
/* Remove bbvar/hist/host,name.* */
p = hostnamecommas = strdup(hostname); while ((p = strchr(p, '.')) != NULL) *p = ',';
hostlead = malloc(strlen(hostname) + 2);
strcpy(hostlead, hostnamecommas); strcat(hostlead, ".");
dirfd = opendir(histdir);
if (dirfd) {
while ((de = readdir(dirfd)) != NULL) {
if (strncmp(de->d_name, hostlead, strlen(hostlead)) == 0) {
sprintf(statuslogfn, "%s/%s", histdir, de->d_name);
if ((stat(statuslogfn, &st) == 0) && S_ISREG(st.st_mode)) {
unlink(statuslogfn);
}
}
}
closedir(dirfd);
}
xfree(hostlead);
xfree(hostnamecommas);
MEMUNDEFINE(statuslogfn);
}
}
else if ((metacount > 4) && ((strncmp(metadata[0], "@@droptest", 10) == 0))) {
/* @@droptest|timestamp|sender|hostname|testname */
hostname = metadata[3];
testname = metadata[4];
if (save_histlogs) {
char *hostdash;
char testdir[PATH_MAX];
MEMDEFINE(testdir);
p = hostdash = strdup(hostname); while ((p = strchr(p, '.')) != NULL) *p = '_';
sprintf(testdir, "%s/%s/%s", histlogdir, hostdash, testname);
dropdirectory(testdir, 1);
xfree(hostdash);
MEMUNDEFINE(testdir);
}
if (save_statusevents) {
char *hostnamecommas;
char statuslogfn[PATH_MAX];
struct stat st;
MEMDEFINE(statuslogfn);
p = hostnamecommas = strdup(hostname); while ((p = strchr(p, '.')) != NULL) *p = ',';
sprintf(statuslogfn, "%s/%s.%s", histdir, hostnamecommas, testname);
if ((stat(statuslogfn, &st) == 0) && S_ISREG(st.st_mode)) unlink(statuslogfn);
xfree(hostnamecommas);
MEMUNDEFINE(statuslogfn);
}
}
else if ((metacount > 4) && ((strncmp(metadata[0], "@@renamehost", 12) == 0))) {
/* @@renamehost|timestamp|sender|hostname|newhostname */
char *newhostname;
hostname = metadata[3];
newhostname = metadata[4];
if (save_histlogs) {
char *hostdash;
char *newhostdash;
char olddir[PATH_MAX];
char newdir[PATH_MAX];
MEMDEFINE(olddir); MEMDEFINE(newdir);
p = hostdash = strdup(hostname); while ((p = strchr(p, '.')) != NULL) *p = '_';
p = newhostdash = strdup(newhostname); while ((p = strchr(p, '.')) != NULL) *p = '_';
sprintf(olddir, "%s/%s", histlogdir, hostdash);
sprintf(newdir, "%s/%s", histlogdir, newhostdash);
rename(olddir, newdir);
xfree(newhostdash);
xfree(hostdash);
MEMUNDEFINE(newdir); MEMUNDEFINE(olddir);
}
if (save_hostevents) {
char hostlogfn[PATH_MAX];
char newhostlogfn[PATH_MAX];
MEMDEFINE(hostlogfn); MEMDEFINE(newhostlogfn);
sprintf(hostlogfn, "%s/%s", histdir, hostname);
sprintf(newhostlogfn, "%s/%s", histdir, newhostname);
rename(hostlogfn, newhostlogfn);
MEMUNDEFINE(hostlogfn); MEMUNDEFINE(newhostlogfn);
}
if (save_statusevents) {
DIR *dirfd;
struct dirent *de;
char *hostlead;
char *newhostnamecommas;
char statuslogfn[PATH_MAX];
char newlogfn[PATH_MAX];
MEMDEFINE(statuslogfn); MEMDEFINE(newlogfn);
p = hostnamecommas = strdup(hostname); while ((p = strchr(p, '.')) != NULL) *p = ',';
hostlead = malloc(strlen(hostname) + 2);
strcpy(hostlead, hostnamecommas); strcat(hostlead, ".");
p = newhostnamecommas = strdup(newhostname); while ((p = strchr(p, '.')) != NULL) *p = ',';
dirfd = opendir(histdir);
if (dirfd) {
while ((de = readdir(dirfd)) != NULL) {
if (strncmp(de->d_name, hostlead, strlen(hostlead)) == 0) {
char *testname = strchr(de->d_name, '.');
sprintf(statuslogfn, "%s/%s", histdir, de->d_name);
sprintf(newlogfn, "%s/%s%s", histdir, newhostnamecommas, testname);
rename(statuslogfn, newlogfn);
}
}
closedir(dirfd);
}
xfree(newhostnamecommas);
xfree(hostlead);
xfree(hostnamecommas);
MEMUNDEFINE(statuslogfn); MEMUNDEFINE(newlogfn);
}
}
else if ((metacount > 5) && (strncmp(metadata[0], "@@renametest", 12) == 0)) {
/* @@renametest|timestamp|sender|hostname|oldtestname|newtestname */
char *newtestname;
hostname = metadata[3];
testname = metadata[4];
newtestname = metadata[5];
if (save_histlogs) {
char *hostdash;
char olddir[PATH_MAX];
char newdir[PATH_MAX];
MEMDEFINE(olddir); MEMDEFINE(newdir);
p = hostdash = strdup(hostname); while ((p = strchr(p, '.')) != NULL) *p = '_';
sprintf(olddir, "%s/%s/%s", histlogdir, hostdash, testname);
sprintf(newdir, "%s/%s/%s", histlogdir, hostdash, newtestname);
rename(olddir, newdir);
xfree(hostdash);
MEMUNDEFINE(newdir); MEMUNDEFINE(olddir);
}
if (save_statusevents) {
char *hostnamecommas;
char statuslogfn[PATH_MAX];
char newstatuslogfn[PATH_MAX];
MEMDEFINE(statuslogfn); MEMDEFINE(newstatuslogfn);
p = hostnamecommas = strdup(hostname); while ((p = strchr(p, '.')) != NULL) *p = ',';
sprintf(statuslogfn, "%s/%s.%s", histdir, hostnamecommas, testname);
sprintf(newstatuslogfn, "%s/%s.%s", histdir, hostnamecommas, newtestname);
rename(statuslogfn, newstatuslogfn);
xfree(hostnamecommas);
MEMUNDEFINE(newstatuslogfn); MEMUNDEFINE(statuslogfn);
}
}
else if (strncmp(metadata[0], "@@shutdown", 10) == 0) {
running = 0;
}
else if (strncmp(metadata[0], "@@logrotate", 11) == 0) {
char *fn = xgetenv("HOBBITCHANNEL_LOGFILENAME");
if (fn && strlen(fn)) {
freopen(fn, "a", stdout);
freopen(fn, "a", stderr);
}
continue;
}
}
MEMUNDEFINE(newcol2);
MEMUNDEFINE(oldcol2);
MEMUNDEFINE(alleventsfn);
MEMUNDEFINE(pidfn);
if (alleventsfd) fclose(alleventsfd);
unlink(pidfn);
return 0;
}
|
342828.c | /*****************************************************************************
*
* colors - Entry points for Win32 to Win16 converter
*
* Date: 7/1/91
* Author: Jeffrey Newman (c-jeffn)
*
* History:
* Sep 1992 -by- Hock San Lee [hockl]
* Complete rewrite.
*
* The following implementation takes into account that all 16-bit metafile
* palette records reference the current palette.
*
* CreatePalette
* Create a private copy of the logical palette in the converter but
* don't emit the 16-bit record.
*
* SelectPalette
* Emit a CreatePalette record followed by a SelectPalette record.
* Then emit a 16-bit DeleteObject record to delete the previous palette.
* The selected logical palette can be queried from the private copy
* maintained by the converter. You need to keep track of the current
* palette so that you can emit ResizePalette or SetPaletteEntries record
* if the palette identifies the current palette. You also need to deal
* with the stock palette correctly here (you don't need to keep a
* private copy of the stock palette). Don't delete the private copy
* of the logical palette here! (see DeleteObject below)
*
* RealizePalette
* Just emit a 16-bit record. This record always references the current
* palette in both 16 and 32-bit metafiles.
*
* ResizePalette
* Update the private copy of the logical palette in the converter.
* Emit a 16-bit record only if the palette identifies the current palette.
*
* SetPaletteEntries
* Update the private copy of the logical palette in the converter.
* Emit a 16-bit record only if the palette identifies the current palette.
*
* DeleteObject
* Don't emit the 16-bit record for palettes since all palettes are
* deleted in SelectPalette above. Similarly, don't emit palette delete
* records at the end of conversion. However, you need to delete the
* private copy of the palette maintained by the converter here and at
* the end of conversion.
*
*
* Copyright 1991 Microsoft Corp
*****************************************************************************/
#include "precomp.h"
#pragma hdrstop
/***************************************************************************
* SelectPalette - Win32 to Win16 Metafile Converter Entry Point
*
* Emit a CreatePalette record followed by a SelectPalette record.
* Then emit a 16-bit DeleteObject record to delete the previous palette.
* The selected logical palette can be queried from the private copy
* maintained by the converter. You need to keep track of the current
* palette so that you can emit ResizePalette or SetPaletteEntries record
* if the palette identifies the current palette. You also need to deal
* with the stock palette correctly here (you don't need to keep a
* private copy of the stock palette). Don't delete the private copy
* of the logical palette here! (see DeleteObject below)
*
**************************************************************************/
BOOL WINAPI DoSelectPalette
(
PLOCALDC pLocalDC,
DWORD ihpal
)
{
BOOL b = FALSE;
WORD cEntries;
LPLOGPALETTE lpLogPal = (LPLOGPALETTE) NULL;
HPALETTE hpalW32;
INT ihW16, ihW32Norm;
// No need to do anything if selecting the same palette.
if (pLocalDC->ihpal32 == ihpal)
return(TRUE);
// Validate the palette index.
if ((ihpal != (ENHMETA_STOCK_OBJECT | DEFAULT_PALETTE))
&& (ihpal >= pLocalDC->cW32hPal || !pLocalDC->pW32hPal[ihpal]))
{
RIPS("MF3216: DoSelectPalette - ihpal invalid");
goto error_exit;
}
// Get the W32 handle.
if (ihpal == (ENHMETA_STOCK_OBJECT | DEFAULT_PALETTE))
hpalW32 = GetStockObject(DEFAULT_PALETTE) ;
else
hpalW32 = pLocalDC->pW32hPal[ihpal];
if(hpalW32 == 0)
{
RIPS("MF3216: DoSelectPalette - hpalW32 == 0\n");
goto error_exit;
}
// Emit a CreatePalette record.
if (!GetObjectA(hpalW32, sizeof(WORD), &cEntries))
{
RIPS("MF3216: DoSelectPalette - GetObjectA failed\n");
goto error_exit;
}
if (!(lpLogPal = (LPLOGPALETTE) LocalAlloc(
LMEM_FIXED,
sizeof(LOGPALETTE) - sizeof(PALETTEENTRY)
+ sizeof(PALETTEENTRY) * cEntries)))
goto error_exit;
lpLogPal->palVersion = 0x300;
lpLogPal->palNumEntries = cEntries;
GetPaletteEntries(hpalW32, 0, cEntries, lpLogPal->palPalEntry);
// Allocate the W16 handle.
ihW16 = iAllocateW16Handle(pLocalDC, ihpal, REALIZED_PALETTE);
if (ihW16 == -1)
goto error_exit;
if (!bEmitWin16CreatePalette(pLocalDC, lpLogPal))
goto error_exit;
// Emit a SelectPalette record.
if (!SelectPalette(pLocalDC->hdcHelper, hpalW32, TRUE))
goto error_exit;
if (!bEmitWin16SelectPalette(pLocalDC, (WORD) ihW16))
goto error_exit;
// Emit a DeleteObject record to delete the previous palette.
if (pLocalDC->ihpal16 != -1)
{
ihW32Norm = iNormalizeHandle(pLocalDC, pLocalDC->ihpal32);
if (ihW32Norm == -1)
goto error_exit;
pLocalDC->pW16ObjHndlSlotStatus[pLocalDC->ihpal16].use
= OPEN_AVAILABLE_SLOT;
pLocalDC->piW32ToW16ObjectMap[ihW32Norm]
= UNMAPPED;
bEmitWin16DeleteObject(pLocalDC, (WORD) pLocalDC->ihpal16);
}
pLocalDC->ihpal32 = ihpal;
pLocalDC->ihpal16 = ihW16;
b = TRUE;
error_exit:
if (lpLogPal)
LocalFree((HANDLE) lpLogPal);
return(b);
}
/***************************************************************************
* ResizePalette - Win32 to Win16 Metafile Converter Entry Point
*
* Update the private copy of the logical palette in the converter.
* Emit a 16-bit record only if the palette identifies the current palette.
*
**************************************************************************/
BOOL WINAPI DoResizePalette
(
PLOCALDC pLocalDC,
DWORD ihpal,
DWORD cEntries
)
{
// Do not modify the default palette.
if (ihpal == (ENHMETA_STOCK_OBJECT | DEFAULT_PALETTE))
return(TRUE);
// Validate the palette index.
if (ihpal >= pLocalDC->cW32hPal || !pLocalDC->pW32hPal[ihpal])
{
RIPS("MF3216: DoResizePalette - ihpal invalid");
return(FALSE);
}
// Do it to the private palette.
if (!ResizePalette(pLocalDC->pW32hPal[ihpal], cEntries))
{
RIPS("MF3216: DoResizePalette - ResizePalette failed");
return(FALSE);
}
// Emit a 16-bit record only if the palette identifies the
// current palette.
if (pLocalDC->ihpal32 == ihpal)
return(bEmitWin16ResizePalette(pLocalDC, (WORD) cEntries));
return(TRUE);
}
/***************************************************************************
* SetPaletteEntries - Win32 to Win16 Metafile Converter Entry Point
*
* Update the private copy of the logical palette in the converter.
* Emit a 16-bit record only if the palette identifies the current palette.
*
**************************************************************************/
BOOL WINAPI DoSetPaletteEntries
(
PLOCALDC pLocalDC,
DWORD ihpal,
DWORD iStart,
DWORD cEntries,
LPPALETTEENTRY pPalEntries
)
{
// Do not modify the default palette.
if (ihpal == (ENHMETA_STOCK_OBJECT | DEFAULT_PALETTE))
return(TRUE);
// Validate the palette index.
if (ihpal >= pLocalDC->cW32hPal || !pLocalDC->pW32hPal[ihpal])
{
RIPS("MF3216: DoSetPaletteEntries - ihpal invalid");
return(FALSE);
}
// Do it to the private palette.
if (!SetPaletteEntries(pLocalDC->pW32hPal[ihpal], iStart, cEntries, pPalEntries))
{
RIPS("MF3216: DoSetPaletteEntries - SetPaletteEntries failed");
return(FALSE);
}
// Emit a 16-bit record only if the palette identifies the
// current palette.
if (pLocalDC->ihpal32 == ihpal)
return(bEmitWin16SetPaletteEntries(pLocalDC, iStart, cEntries, pPalEntries));
return(TRUE);
}
/***************************************************************************
* RealizePalette - Win32 to Win16 Metafile Converter Entry Point
*
* Just emit a 16-bit record. This record always references the current
* palette in both 16 and 32-bit metafiles.
*
**************************************************************************/
BOOL WINAPI DoRealizePalette
(
PLOCALDC pLocalDC
)
{
// Emit the Win16 metafile drawing order.
return(bEmitWin16RealizePalette(pLocalDC));
}
/***************************************************************************
* CreatePalette - Win32 to Win16 Metafile Converter Entry Point
*
* Create a private copy of the logical palette in the converter but
* don't emit the 16-bit record.
*
**************************************************************************/
BOOL WINAPI DoCreatePalette
(
PLOCALDC pLocalDC,
DWORD ihPal,
LPLOGPALETTE lpLogPal
)
{
if (ihPal != (ENHMETA_STOCK_OBJECT | DEFAULT_PALETTE))
{
LOGPALETTE *lpLogPalNew;
// Validate the palette index.
if (ihPal >= pLocalDC->cW32hPal || pLocalDC->pW32hPal[ihPal])
return(FALSE);
// Allocate size of log palette + 2 entries for black and white.
        lpLogPalNew = LocalAlloc(
            LMEM_FIXED,
            lpLogPal->palNumEntries * sizeof(DWORD)
            + (sizeof(LOGPALETTE) + sizeof(DWORD)));
if (lpLogPalNew == NULL)
{
return(FALSE);
}
        RtlMoveMemory(
            lpLogPalNew, lpLogPal,
            lpLogPal->palNumEntries * sizeof(DWORD)
            + (sizeof(LOGPALETTE) - sizeof(DWORD)));
lpLogPalNew->palNumEntries += 2;
lpLogPalNew->palPalEntry[lpLogPal->palNumEntries - 1].peRed = 0;
lpLogPalNew->palPalEntry[lpLogPal->palNumEntries - 1].peGreen = 0;
lpLogPalNew->palPalEntry[lpLogPal->palNumEntries - 1].peBlue = 0;
lpLogPalNew->palPalEntry[lpLogPal->palNumEntries - 1].peFlags = 0;
lpLogPalNew->palPalEntry[lpLogPal->palNumEntries - 2].peRed = 0xff;
lpLogPalNew->palPalEntry[lpLogPal->palNumEntries - 2].peGreen = 0xff;
lpLogPalNew->palPalEntry[lpLogPal->palNumEntries - 2].peBlue = 0xff;
lpLogPalNew->palPalEntry[lpLogPal->palNumEntries - 2].peFlags = 0;
// Create a private copy of the logical palette and keep it
// in the converter palette table.
pLocalDC->pW32hPal[ihPal] = CreatePalette(lpLogPalNew);
LocalFree(lpLogPalNew);
if (!(pLocalDC->pW32hPal[ihPal]))
{
            RIPS("MF3216: DoCreatePalette - CreatePalette failed\n");
return(FALSE);
}
}
return(TRUE);
}
|
977319.c | /*
* Copyright (c) 2018, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#include <immintrin.h> // AVX2
#include "aom_dsp/x86/mem_sse2.h"
#include "aom_dsp/x86/synonyms.h"
#include "aom_dsp/x86/synonyms_avx2.h"
#include "aom_dsp/x86/transpose_sse2.h"
#include "config/av1_rtcd.h"
#include "av1/common/restoration.h"
#include "av1/encoder/pickrst.h"
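// acc_stat_avx2 accumulates one group of 8 H-matrix partial sums: it loads
// 16 bytes of the degraded image, rearranges them with *shuffle so that
// neighbouring samples form pairs, widens to 16 bits and does a single madd
// against *kl, which holds the sample pair (D1, D2) repeated eight times.
// Assuming the shuffle mask pairs src[t] with src[t + 1], this is roughly
// the scalar equivalent:
//
//     for (t = 0; t < 8; ++t) dst[t] += D1 * src[t] + D2 * src[t + 1];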
static INLINE void acc_stat_avx2(int32_t *dst, const uint8_t *src,
const __m128i *shuffle, const __m256i *kl) {
const __m128i s = _mm_shuffle_epi8(xx_loadu_128(src), *shuffle);
const __m256i d0 = _mm256_madd_epi16(*kl, _mm256_cvtepu8_epi16(s));
const __m256i dst0 = yy_load_256(dst);
const __m256i r0 = _mm256_add_epi32(dst0, d0);
yy_store_256(dst, r0);
}
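// acc_stat_win7_one_line_avx2 walks one row of the degraded/source pair two
// columns at a time and accumulates, for the 7x7 Wiener window, the running
// source sum (sumX), the per-tap window sums (sumY), the cross-correlation
// partial sums (M_int) and the auto-correlation partial sums (H_int), the
// last of these via acc_stat_avx2.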
static INLINE void acc_stat_win7_one_line_avx2(
const uint8_t *dgd, const uint8_t *src, int h_start, int h_end,
int dgd_stride, const __m128i *shuffle, int32_t *sumX,
int32_t sumY[WIENER_WIN][WIENER_WIN], int32_t M_int[WIENER_WIN][WIENER_WIN],
int32_t H_int[WIENER_WIN2][WIENER_WIN * 8]) {
int j, k, l;
const int wiener_win = WIENER_WIN;
for (j = h_start; j < h_end; j += 2) {
const uint8_t X1 = src[j];
const uint8_t X2 = src[j + 1];
*sumX += X1 + X2;
const uint8_t *dgd_ij = dgd + j;
for (k = 0; k < wiener_win; k++) {
const uint8_t *dgd_ijk = dgd_ij + k * dgd_stride;
for (l = 0; l < wiener_win; l++) {
int32_t *H_ = &H_int[(l * wiener_win + k)][0];
const uint8_t D1 = dgd_ijk[l];
const uint8_t D2 = dgd_ijk[l + 1];
sumY[k][l] += D1 + D2;
M_int[k][l] += D1 * X1 + D2 * X2;
const __m256i kl =
_mm256_cvtepu8_epi16(_mm_set1_epi16(loadu_uint16(dgd_ijk + l)));
acc_stat_avx2(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle, &kl);
acc_stat_avx2(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle, &kl);
acc_stat_avx2(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle, &kl);
acc_stat_avx2(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle, &kl);
acc_stat_avx2(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle, &kl);
acc_stat_avx2(H_ + 5 * 8, dgd_ij + 5 * dgd_stride, shuffle, &kl);
acc_stat_avx2(H_ + 6 * 8, dgd_ij + 6 * dgd_stride, shuffle, &kl);
}
}
}
}
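// compute_stats_win7_opt_avx2 processes rows in blocks of 64, flushing the
// 32-bit accumulators into M_int64/H_int64 after each block so the
// intermediate sums stay within 32-bit range.  The final M and H subtract
// the average image level using the identity
//
//     sum((d - avg) * (x - avg))
//         = sum(d * x) - avg * (sum(d) + sum(x)) + avg * avg * N,
//
// which is where avg_square_sum and the sumX/sumY terms below come from.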
static INLINE void compute_stats_win7_opt_avx2(
const uint8_t *dgd, const uint8_t *src, int h_start, int h_end, int v_start,
int v_end, int dgd_stride, int src_stride, int64_t *M, int64_t *H,
int use_downsampled_wiener_stats) {
int i, j, k, l, m, n;
const int wiener_win = WIENER_WIN;
const int pixel_count = (h_end - h_start) * (v_end - v_start);
const int wiener_win2 = wiener_win * wiener_win;
const int wiener_halfwin = (wiener_win >> 1);
uint8_t avg = find_average(dgd, h_start, h_end, v_start, v_end, dgd_stride);
int32_t M_int32[WIENER_WIN][WIENER_WIN] = { { 0 } };
int64_t M_int64[WIENER_WIN][WIENER_WIN] = { { 0 } };
int32_t M_int32_row[WIENER_WIN][WIENER_WIN] = { { 0 } };
DECLARE_ALIGNED(32, int32_t,
H_int32[WIENER_WIN2][WIENER_WIN * 8]) = { { 0 } };
DECLARE_ALIGNED(32, int32_t,
H_int32_row[WIENER_WIN2][WIENER_WIN * 8]) = { { 0 } };
int64_t H_int64[WIENER_WIN2][WIENER_WIN * 8] = { { 0 } };
int32_t sumY[WIENER_WIN][WIENER_WIN] = { { 0 } };
int32_t sumX = 0;
const uint8_t *dgd_win = dgd - wiener_halfwin * dgd_stride - wiener_halfwin;
int downsample_factor =
use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;
int32_t sumX_row = 0;
int32_t sumY_row[WIENER_WIN][WIENER_WIN] = { { 0 } };
const __m128i shuffle = xx_loadu_128(g_shuffle_stats_data);
for (j = v_start; j < v_end; j += 64) {
const int vert_end = AOMMIN(64, v_end - j) + j;
for (i = j; i < vert_end; i = i + downsample_factor) {
if (use_downsampled_wiener_stats &&
(vert_end - i < WIENER_STATS_DOWNSAMPLE_FACTOR)) {
downsample_factor = vert_end - i;
}
sumX_row = 0;
memset(sumY_row, 0, sizeof(int32_t) * WIENER_WIN * WIENER_WIN);
memset(M_int32_row, 0, sizeof(int32_t) * WIENER_WIN * WIENER_WIN);
memset(H_int32_row, 0, sizeof(int32_t) * WIENER_WIN2 * (WIENER_WIN * 8));
acc_stat_win7_one_line_avx2(
dgd_win + i * dgd_stride, src + i * src_stride, h_start, h_end,
dgd_stride, &shuffle, &sumX_row, sumY_row, M_int32_row, H_int32_row);
sumX += sumX_row * downsample_factor;
// Scale M matrix based on the downsampling factor
for (k = 0; k < wiener_win; ++k) {
for (l = 0; l < wiener_win; ++l) {
sumY[k][l] += (sumY_row[k][l] * downsample_factor);
M_int32[k][l] += (M_int32_row[k][l] * downsample_factor);
}
}
// Scale H matrix based on the downsampling factor
for (k = 0; k < WIENER_WIN2; ++k) {
for (l = 0; l < WIENER_WIN * 8; ++l) {
H_int32[k][l] += (H_int32_row[k][l] * downsample_factor);
}
}
}
for (k = 0; k < wiener_win; ++k) {
for (l = 0; l < wiener_win; ++l) {
M_int64[k][l] += M_int32[k][l];
M_int32[k][l] = 0;
}
}
for (k = 0; k < WIENER_WIN2; ++k) {
for (l = 0; l < WIENER_WIN * 8; ++l) {
H_int64[k][l] += H_int32[k][l];
H_int32[k][l] = 0;
}
}
}
const int64_t avg_square_sum = (int64_t)avg * (int64_t)avg * pixel_count;
for (k = 0; k < wiener_win; k++) {
for (l = 0; l < wiener_win; l++) {
const int32_t idx0 = l * wiener_win + k;
M[idx0] =
M_int64[k][l] + (avg_square_sum - (int64_t)avg * (sumX + sumY[k][l]));
int64_t *H_ = H + idx0 * wiener_win2;
int64_t *H_int_ = &H_int64[idx0][0];
for (m = 0; m < wiener_win; m++) {
for (n = 0; n < wiener_win; n++) {
H_[m * wiener_win + n] = H_int_[n * 8 + m] + avg_square_sum -
(int64_t)avg * (sumY[k][l] + sumY[n][m]);
}
}
}
}
}
#if CONFIG_AV1_HIGHBITDEPTH
static INLINE void acc_stat_highbd_avx2(int64_t *dst, const uint16_t *dgd,
const __m256i *shuffle,
const __m256i *dgd_ijkl) {
// Load two 128-bit chunks from dgd
const __m256i s0 = _mm256_inserti128_si256(
_mm256_castsi128_si256(_mm_loadu_si128((__m128i *)dgd)),
_mm_loadu_si128((__m128i *)(dgd + 4)), 1);
// s0 = [11 10 9 8 7 6 5 4] [7 6 5 4 3 2 1 0] as u16 (values are dgd indices)
// The weird order is so the shuffle stays within 128-bit lanes
// Shuffle 16x u16 values within lanes according to the mask:
// [0 1 1 2 2 3 3 4] [0 1 1 2 2 3 3 4]
// (Actually we shuffle u8 values as there's no 16-bit shuffle)
const __m256i s1 = _mm256_shuffle_epi8(s0, *shuffle);
// s1 = [8 7 7 6 6 5 5 4] [4 3 3 2 2 1 1 0] as u16 (values are dgd indices)
// Multiply 16x 16-bit integers in dgd_ijkl and s1, resulting in 16x 32-bit
// integers then horizontally add pairs of these integers resulting in 8x
// 32-bit integers
const __m256i d0 = _mm256_madd_epi16(*dgd_ijkl, s1);
// d0 = [a b c d] [e f g h] as u32
// Take the lower-half of d0, extend to u64, add it on to dst (H)
const __m256i d0l = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(d0, 0));
// d0l = [a b] [c d] as u64
const __m256i dst0 = yy_load_256(dst);
yy_store_256(dst, _mm256_add_epi64(d0l, dst0));
// Take the upper-half of d0, extend to u64, add it on to dst (H)
const __m256i d0h = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(d0, 1));
// d0h = [e f] [g h] as u64
const __m256i dst1 = yy_load_256(dst + 4);
yy_store_256(dst + 4, _mm256_add_epi64(d0h, dst1));
}
static INLINE void acc_stat_highbd_win7_one_line_avx2(
const uint16_t *dgd, const uint16_t *src, int h_start, int h_end,
int dgd_stride, const __m256i *shuffle, int32_t *sumX,
int32_t sumY[WIENER_WIN][WIENER_WIN], int64_t M_int[WIENER_WIN][WIENER_WIN],
int64_t H_int[WIENER_WIN2][WIENER_WIN * 8]) {
int j, k, l;
const int wiener_win = WIENER_WIN;
for (j = h_start; j < h_end; j += 2) {
const uint16_t X1 = src[j];
const uint16_t X2 = src[j + 1];
*sumX += X1 + X2;
const uint16_t *dgd_ij = dgd + j;
for (k = 0; k < wiener_win; k++) {
const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
for (l = 0; l < wiener_win; l++) {
int64_t *H_ = &H_int[(l * wiener_win + k)][0];
const uint16_t D1 = dgd_ijk[l];
const uint16_t D2 = dgd_ijk[l + 1];
sumY[k][l] += D1 + D2;
M_int[k][l] += D1 * X1 + D2 * X2;
// Load two u16 values from dgd_ijkl combined as a u32,
// then broadcast to 8x u32 slots of a 256
const __m256i dgd_ijkl = _mm256_set1_epi32(loadu_uint32(dgd_ijk + l));
// dgd_ijkl = [y x y x y x y x] [y x y x y x y x] where each is a u16
acc_stat_highbd_avx2(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
&dgd_ijkl);
acc_stat_highbd_avx2(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
&dgd_ijkl);
acc_stat_highbd_avx2(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
&dgd_ijkl);
acc_stat_highbd_avx2(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
&dgd_ijkl);
acc_stat_highbd_avx2(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
&dgd_ijkl);
acc_stat_highbd_avx2(H_ + 5 * 8, dgd_ij + 5 * dgd_stride, shuffle,
&dgd_ijkl);
acc_stat_highbd_avx2(H_ + 6 * 8, dgd_ij + 6 * dgd_stride, shuffle,
&dgd_ijkl);
}
}
}
}
static INLINE void compute_stats_highbd_win7_opt_avx2(
const uint8_t *dgd8, const uint8_t *src8, int h_start, int h_end,
int v_start, int v_end, int dgd_stride, int src_stride, int64_t *M,
int64_t *H, aom_bit_depth_t bit_depth) {
int i, j, k, l, m, n;
const int wiener_win = WIENER_WIN;
const int pixel_count = (h_end - h_start) * (v_end - v_start);
const int wiener_win2 = wiener_win * wiener_win;
const int wiener_halfwin = (wiener_win >> 1);
const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
const uint16_t *dgd = CONVERT_TO_SHORTPTR(dgd8);
const uint16_t avg =
find_average_highbd(dgd, h_start, h_end, v_start, v_end, dgd_stride);
int64_t M_int[WIENER_WIN][WIENER_WIN] = { { 0 } };
DECLARE_ALIGNED(32, int64_t, H_int[WIENER_WIN2][WIENER_WIN * 8]) = { { 0 } };
int32_t sumY[WIENER_WIN][WIENER_WIN] = { { 0 } };
int32_t sumX = 0;
const uint16_t *dgd_win = dgd - wiener_halfwin * dgd_stride - wiener_halfwin;
const __m256i shuffle = yy_loadu_256(g_shuffle_stats_highbd_data);
for (j = v_start; j < v_end; j += 64) {
const int vert_end = AOMMIN(64, v_end - j) + j;
for (i = j; i < vert_end; i++) {
acc_stat_highbd_win7_one_line_avx2(
dgd_win + i * dgd_stride, src + i * src_stride, h_start, h_end,
dgd_stride, &shuffle, &sumX, sumY, M_int, H_int);
}
}
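  // The statistics above are computed on full-precision samples, so for
  // 10- and 12-bit input they are scaled back down: the divider is
  // 4 (= 2^2) for 10-bit and 16 (= 2^4) for 12-bit, i.e.
  // 1 << (2 * (bit_depth - 8)).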
uint8_t bit_depth_divider = 1;
if (bit_depth == AOM_BITS_12)
bit_depth_divider = 16;
else if (bit_depth == AOM_BITS_10)
bit_depth_divider = 4;
const int64_t avg_square_sum = (int64_t)avg * (int64_t)avg * pixel_count;
for (k = 0; k < wiener_win; k++) {
for (l = 0; l < wiener_win; l++) {
const int32_t idx0 = l * wiener_win + k;
M[idx0] = (M_int[k][l] +
(avg_square_sum - (int64_t)avg * (sumX + sumY[k][l]))) /
bit_depth_divider;
int64_t *H_ = H + idx0 * wiener_win2;
int64_t *H_int_ = &H_int[idx0][0];
for (m = 0; m < wiener_win; m++) {
for (n = 0; n < wiener_win; n++) {
H_[m * wiener_win + n] =
(H_int_[n * 8 + m] +
(avg_square_sum - (int64_t)avg * (sumY[k][l] + sumY[n][m]))) /
bit_depth_divider;
}
}
}
}
}
static INLINE void acc_stat_highbd_win5_one_line_avx2(
const uint16_t *dgd, const uint16_t *src, int h_start, int h_end,
int dgd_stride, const __m256i *shuffle, int32_t *sumX,
int32_t sumY[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA],
int64_t M_int[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA],
int64_t H_int[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8]) {
int j, k, l;
const int wiener_win = WIENER_WIN_CHROMA;
for (j = h_start; j < h_end; j += 2) {
const uint16_t X1 = src[j];
const uint16_t X2 = src[j + 1];
*sumX += X1 + X2;
const uint16_t *dgd_ij = dgd + j;
for (k = 0; k < wiener_win; k++) {
const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
for (l = 0; l < wiener_win; l++) {
int64_t *H_ = &H_int[(l * wiener_win + k)][0];
const uint16_t D1 = dgd_ijk[l];
const uint16_t D2 = dgd_ijk[l + 1];
sumY[k][l] += D1 + D2;
M_int[k][l] += D1 * X1 + D2 * X2;
// Load two u16 values from dgd_ijkl combined as a u32,
// then broadcast to 8x u32 slots of a 256
const __m256i dgd_ijkl = _mm256_set1_epi32(loadu_uint32(dgd_ijk + l));
        // dgd_ijkl = [y x y x y x y x] [y x y x y x y x] where each is a u16
acc_stat_highbd_avx2(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
&dgd_ijkl);
acc_stat_highbd_avx2(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
&dgd_ijkl);
acc_stat_highbd_avx2(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
&dgd_ijkl);
acc_stat_highbd_avx2(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
&dgd_ijkl);
acc_stat_highbd_avx2(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
&dgd_ijkl);
}
}
}
}
static INLINE void compute_stats_highbd_win5_opt_avx2(
const uint8_t *dgd8, const uint8_t *src8, int h_start, int h_end,
int v_start, int v_end, int dgd_stride, int src_stride, int64_t *M,
int64_t *H, aom_bit_depth_t bit_depth) {
int i, j, k, l, m, n;
const int wiener_win = WIENER_WIN_CHROMA;
const int pixel_count = (h_end - h_start) * (v_end - v_start);
const int wiener_win2 = wiener_win * wiener_win;
const int wiener_halfwin = (wiener_win >> 1);
const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
const uint16_t *dgd = CONVERT_TO_SHORTPTR(dgd8);
const uint16_t avg =
find_average_highbd(dgd, h_start, h_end, v_start, v_end, dgd_stride);
int64_t M_int64[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
DECLARE_ALIGNED(
32, int64_t,
H_int64[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8]) = { { 0 } };
int32_t sumY[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
int32_t sumX = 0;
const uint16_t *dgd_win = dgd - wiener_halfwin * dgd_stride - wiener_halfwin;
const __m256i shuffle = yy_loadu_256(g_shuffle_stats_highbd_data);
for (j = v_start; j < v_end; j += 64) {
const int vert_end = AOMMIN(64, v_end - j) + j;
for (i = j; i < vert_end; i++) {
acc_stat_highbd_win5_one_line_avx2(
dgd_win + i * dgd_stride, src + i * src_stride, h_start, h_end,
dgd_stride, &shuffle, &sumX, sumY, M_int64, H_int64);
}
}
uint8_t bit_depth_divider = 1;
if (bit_depth == AOM_BITS_12)
bit_depth_divider = 16;
else if (bit_depth == AOM_BITS_10)
bit_depth_divider = 4;
const int64_t avg_square_sum = (int64_t)avg * (int64_t)avg * pixel_count;
for (k = 0; k < wiener_win; k++) {
for (l = 0; l < wiener_win; l++) {
const int32_t idx0 = l * wiener_win + k;
M[idx0] = (M_int64[k][l] +
(avg_square_sum - (int64_t)avg * (sumX + sumY[k][l]))) /
bit_depth_divider;
int64_t *H_ = H + idx0 * wiener_win2;
int64_t *H_int_ = &H_int64[idx0][0];
for (m = 0; m < wiener_win; m++) {
for (n = 0; n < wiener_win; n++) {
H_[m * wiener_win + n] =
(H_int_[n * 8 + m] +
(avg_square_sum - (int64_t)avg * (sumY[k][l] + sumY[n][m]))) /
bit_depth_divider;
}
}
}
}
}
void av1_compute_stats_highbd_avx2(int wiener_win, const uint8_t *dgd8,
const uint8_t *src8, int h_start, int h_end,
int v_start, int v_end, int dgd_stride,
int src_stride, int64_t *M, int64_t *H,
aom_bit_depth_t bit_depth) {
if (wiener_win == WIENER_WIN) {
compute_stats_highbd_win7_opt_avx2(dgd8, src8, h_start, h_end, v_start,
v_end, dgd_stride, src_stride, M, H,
bit_depth);
} else if (wiener_win == WIENER_WIN_CHROMA) {
compute_stats_highbd_win5_opt_avx2(dgd8, src8, h_start, h_end, v_start,
v_end, dgd_stride, src_stride, M, H,
bit_depth);
} else {
av1_compute_stats_highbd_c(wiener_win, dgd8, src8, h_start, h_end, v_start,
v_end, dgd_stride, src_stride, M, H, bit_depth);
}
}
#endif // CONFIG_AV1_HIGHBITDEPTH
static INLINE void acc_stat_win5_one_line_avx2(
const uint8_t *dgd, const uint8_t *src, int h_start, int h_end,
int dgd_stride, const __m128i *shuffle, int32_t *sumX,
int32_t sumY[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA],
int32_t M_int[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA],
int32_t H_int[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8]) {
int j, k, l;
const int wiener_win = WIENER_WIN_CHROMA;
for (j = h_start; j < h_end; j += 2) {
const uint8_t X1 = src[j];
const uint8_t X2 = src[j + 1];
*sumX += X1 + X2;
const uint8_t *dgd_ij = dgd + j;
for (k = 0; k < wiener_win; k++) {
const uint8_t *dgd_ijk = dgd_ij + k * dgd_stride;
for (l = 0; l < wiener_win; l++) {
int32_t *H_ = &H_int[(l * wiener_win + k)][0];
const uint8_t D1 = dgd_ijk[l];
const uint8_t D2 = dgd_ijk[l + 1];
sumY[k][l] += D1 + D2;
M_int[k][l] += D1 * X1 + D2 * X2;
const __m256i kl =
_mm256_cvtepu8_epi16(_mm_set1_epi16(loadu_uint16(dgd_ijk + l)));
acc_stat_avx2(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle, &kl);
acc_stat_avx2(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle, &kl);
acc_stat_avx2(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle, &kl);
acc_stat_avx2(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle, &kl);
acc_stat_avx2(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle, &kl);
}
}
}
}
static INLINE void compute_stats_win5_opt_avx2(
const uint8_t *dgd, const uint8_t *src, int h_start, int h_end, int v_start,
int v_end, int dgd_stride, int src_stride, int64_t *M, int64_t *H,
int use_downsampled_wiener_stats) {
int i, j, k, l, m, n;
const int wiener_win = WIENER_WIN_CHROMA;
const int pixel_count = (h_end - h_start) * (v_end - v_start);
const int wiener_win2 = wiener_win * wiener_win;
const int wiener_halfwin = (wiener_win >> 1);
uint8_t avg = find_average(dgd, h_start, h_end, v_start, v_end, dgd_stride);
int32_t M_int32[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
int32_t M_int32_row[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
int64_t M_int64[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
DECLARE_ALIGNED(
32, int32_t,
H_int32[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8]) = { { 0 } };
DECLARE_ALIGNED(
32, int32_t,
H_int32_row[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8]) = { { 0 } };
int64_t H_int64[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8] = { { 0 } };
int32_t sumY[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
int32_t sumX = 0;
const uint8_t *dgd_win = dgd - wiener_halfwin * dgd_stride - wiener_halfwin;
int downsample_factor =
use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;
int32_t sumX_row = 0;
int32_t sumY_row[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
const __m128i shuffle = xx_loadu_128(g_shuffle_stats_data);
for (j = v_start; j < v_end; j += 64) {
const int vert_end = AOMMIN(64, v_end - j) + j;
for (i = j; i < vert_end; i = i + downsample_factor) {
if (use_downsampled_wiener_stats &&
(vert_end - i < WIENER_STATS_DOWNSAMPLE_FACTOR)) {
downsample_factor = vert_end - i;
}
sumX_row = 0;
memset(sumY_row, 0,
sizeof(int32_t) * WIENER_WIN_CHROMA * WIENER_WIN_CHROMA);
memset(M_int32_row, 0,
sizeof(int32_t) * WIENER_WIN_CHROMA * WIENER_WIN_CHROMA);
memset(H_int32_row, 0,
sizeof(int32_t) * WIENER_WIN2_CHROMA * (WIENER_WIN_CHROMA * 8));
acc_stat_win5_one_line_avx2(
dgd_win + i * dgd_stride, src + i * src_stride, h_start, h_end,
dgd_stride, &shuffle, &sumX_row, sumY_row, M_int32_row, H_int32_row);
sumX += sumX_row * downsample_factor;
// Scale M matrix based on the downsampling factor
for (k = 0; k < wiener_win; ++k) {
for (l = 0; l < wiener_win; ++l) {
sumY[k][l] += (sumY_row[k][l] * downsample_factor);
M_int32[k][l] += (M_int32_row[k][l] * downsample_factor);
}
}
// Scale H matrix based on the downsampling factor
for (k = 0; k < WIENER_WIN2_CHROMA; ++k) {
for (l = 0; l < WIENER_WIN_CHROMA * 8; ++l) {
H_int32[k][l] += (H_int32_row[k][l] * downsample_factor);
}
}
}
for (k = 0; k < wiener_win; ++k) {
for (l = 0; l < wiener_win; ++l) {
M_int64[k][l] += M_int32[k][l];
M_int32[k][l] = 0;
}
}
for (k = 0; k < WIENER_WIN2_CHROMA; ++k) {
for (l = 0; l < WIENER_WIN_CHROMA * 8; ++l) {
H_int64[k][l] += H_int32[k][l];
H_int32[k][l] = 0;
}
}
}
const int64_t avg_square_sum = (int64_t)avg * (int64_t)avg * pixel_count;
for (k = 0; k < wiener_win; k++) {
for (l = 0; l < wiener_win; l++) {
const int32_t idx0 = l * wiener_win + k;
M[idx0] =
M_int64[k][l] + (avg_square_sum - (int64_t)avg * (sumX + sumY[k][l]));
int64_t *H_ = H + idx0 * wiener_win2;
int64_t *H_int_ = &H_int64[idx0][0];
for (m = 0; m < wiener_win; m++) {
for (n = 0; n < wiener_win; n++) {
H_[m * wiener_win + n] = H_int_[n * 8 + m] + avg_square_sum -
(int64_t)avg * (sumY[k][l] + sumY[n][m]);
}
}
}
}
}
void av1_compute_stats_avx2(int wiener_win, const uint8_t *dgd,
const uint8_t *src, int h_start, int h_end,
int v_start, int v_end, int dgd_stride,
int src_stride, int64_t *M, int64_t *H,
int use_downsampled_wiener_stats) {
if (wiener_win == WIENER_WIN) {
compute_stats_win7_opt_avx2(dgd, src, h_start, h_end, v_start, v_end,
dgd_stride, src_stride, M, H,
use_downsampled_wiener_stats);
} else if (wiener_win == WIENER_WIN_CHROMA) {
compute_stats_win5_opt_avx2(dgd, src, h_start, h_end, v_start, v_end,
dgd_stride, src_stride, M, H,
use_downsampled_wiener_stats);
} else {
av1_compute_stats_c(wiener_win, dgd, src, h_start, h_end, v_start, v_end,
dgd_stride, src_stride, M, H,
use_downsampled_wiener_stats);
}
}
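// pair_set_epi16 packs the 16-bit pair (a, b) into every 32-bit lane of a
// 256-bit register, a in the low half and b in the high half of each lane.
// This is the operand layout expected by _mm256_madd_epi16, which multiplies
// interleaved 16-bit pairs and sums each pair into a 32-bit result.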
static INLINE __m256i pair_set_epi16(int a, int b) {
return _mm256_set1_epi32(
(int32_t)(((uint16_t)(a)) | (((uint32_t)(b)) << 16)));
}
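// av1_lowbd_pixel_proj_error_avx2 returns the sum of squared errors between
// the source and the self-guided projected reconstruction.  Per pixel the
// scalar model (mirrored in the remainder loops below) is
//
//     u = dat << SGRPROJ_RST_BITS
//     v = xq[0] * (flt0 - u) + xq[1] * (flt1 - u)
//     e = ROUND_POWER_OF_TWO(v, shift) + dat - src
//
// and the function accumulates e * e, with reduced forms when only one or
// neither of the two filters is enabled.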
int64_t av1_lowbd_pixel_proj_error_avx2(
const uint8_t *src8, int width, int height, int src_stride,
const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
int32_t *flt1, int flt1_stride, int xq[2], const sgr_params_type *params) {
int i, j, k;
const int32_t shift = SGRPROJ_RST_BITS + SGRPROJ_PRJ_BITS;
const __m256i rounding = _mm256_set1_epi32(1 << (shift - 1));
__m256i sum64 = _mm256_setzero_si256();
const uint8_t *src = src8;
const uint8_t *dat = dat8;
int64_t err = 0;
if (params->r[0] > 0 && params->r[1] > 0) {
__m256i xq_coeff = pair_set_epi16(xq[0], xq[1]);
for (i = 0; i < height; ++i) {
__m256i sum32 = _mm256_setzero_si256();
for (j = 0; j <= width - 16; j += 16) {
const __m256i d0 = _mm256_cvtepu8_epi16(xx_loadu_128(dat + j));
const __m256i s0 = _mm256_cvtepu8_epi16(xx_loadu_128(src + j));
const __m256i flt0_16b = _mm256_permute4x64_epi64(
_mm256_packs_epi32(yy_loadu_256(flt0 + j),
yy_loadu_256(flt0 + j + 8)),
0xd8);
const __m256i flt1_16b = _mm256_permute4x64_epi64(
_mm256_packs_epi32(yy_loadu_256(flt1 + j),
yy_loadu_256(flt1 + j + 8)),
0xd8);
const __m256i u0 = _mm256_slli_epi16(d0, SGRPROJ_RST_BITS);
const __m256i flt0_0_sub_u = _mm256_sub_epi16(flt0_16b, u0);
const __m256i flt1_0_sub_u = _mm256_sub_epi16(flt1_16b, u0);
const __m256i v0 = _mm256_madd_epi16(
xq_coeff, _mm256_unpacklo_epi16(flt0_0_sub_u, flt1_0_sub_u));
const __m256i v1 = _mm256_madd_epi16(
xq_coeff, _mm256_unpackhi_epi16(flt0_0_sub_u, flt1_0_sub_u));
const __m256i vr0 =
_mm256_srai_epi32(_mm256_add_epi32(v0, rounding), shift);
const __m256i vr1 =
_mm256_srai_epi32(_mm256_add_epi32(v1, rounding), shift);
const __m256i e0 = _mm256_sub_epi16(
_mm256_add_epi16(_mm256_packs_epi32(vr0, vr1), d0), s0);
const __m256i err0 = _mm256_madd_epi16(e0, e0);
sum32 = _mm256_add_epi32(sum32, err0);
}
for (k = j; k < width; ++k) {
const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS);
int32_t v = xq[0] * (flt0[k] - u) + xq[1] * (flt1[k] - u);
const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k];
err += ((int64_t)e * e);
}
dat += dat_stride;
src += src_stride;
flt0 += flt0_stride;
flt1 += flt1_stride;
const __m256i sum64_0 =
_mm256_cvtepi32_epi64(_mm256_castsi256_si128(sum32));
const __m256i sum64_1 =
_mm256_cvtepi32_epi64(_mm256_extracti128_si256(sum32, 1));
sum64 = _mm256_add_epi64(sum64, sum64_0);
sum64 = _mm256_add_epi64(sum64, sum64_1);
}
} else if (params->r[0] > 0 || params->r[1] > 0) {
const int xq_active = (params->r[0] > 0) ? xq[0] : xq[1];
const __m256i xq_coeff =
pair_set_epi16(xq_active, (-xq_active * (1 << SGRPROJ_RST_BITS)));
const int32_t *flt = (params->r[0] > 0) ? flt0 : flt1;
const int flt_stride = (params->r[0] > 0) ? flt0_stride : flt1_stride;
for (i = 0; i < height; ++i) {
__m256i sum32 = _mm256_setzero_si256();
for (j = 0; j <= width - 16; j += 16) {
const __m256i d0 = _mm256_cvtepu8_epi16(xx_loadu_128(dat + j));
const __m256i s0 = _mm256_cvtepu8_epi16(xx_loadu_128(src + j));
const __m256i flt_16b = _mm256_permute4x64_epi64(
_mm256_packs_epi32(yy_loadu_256(flt + j),
yy_loadu_256(flt + j + 8)),
0xd8);
const __m256i v0 =
_mm256_madd_epi16(xq_coeff, _mm256_unpacklo_epi16(flt_16b, d0));
const __m256i v1 =
_mm256_madd_epi16(xq_coeff, _mm256_unpackhi_epi16(flt_16b, d0));
const __m256i vr0 =
_mm256_srai_epi32(_mm256_add_epi32(v0, rounding), shift);
const __m256i vr1 =
_mm256_srai_epi32(_mm256_add_epi32(v1, rounding), shift);
const __m256i e0 = _mm256_sub_epi16(
_mm256_add_epi16(_mm256_packs_epi32(vr0, vr1), d0), s0);
const __m256i err0 = _mm256_madd_epi16(e0, e0);
sum32 = _mm256_add_epi32(sum32, err0);
}
for (k = j; k < width; ++k) {
const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS);
int32_t v = xq_active * (flt[k] - u);
const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k];
err += ((int64_t)e * e);
}
dat += dat_stride;
src += src_stride;
flt += flt_stride;
const __m256i sum64_0 =
_mm256_cvtepi32_epi64(_mm256_castsi256_si128(sum32));
const __m256i sum64_1 =
_mm256_cvtepi32_epi64(_mm256_extracti128_si256(sum32, 1));
sum64 = _mm256_add_epi64(sum64, sum64_0);
sum64 = _mm256_add_epi64(sum64, sum64_1);
}
} else {
__m256i sum32 = _mm256_setzero_si256();
for (i = 0; i < height; ++i) {
for (j = 0; j <= width - 16; j += 16) {
const __m256i d0 = _mm256_cvtepu8_epi16(xx_loadu_128(dat + j));
const __m256i s0 = _mm256_cvtepu8_epi16(xx_loadu_128(src + j));
const __m256i diff0 = _mm256_sub_epi16(d0, s0);
const __m256i err0 = _mm256_madd_epi16(diff0, diff0);
sum32 = _mm256_add_epi32(sum32, err0);
}
for (k = j; k < width; ++k) {
const int32_t e = (int32_t)(dat[k]) - src[k];
err += ((int64_t)e * e);
}
dat += dat_stride;
src += src_stride;
}
const __m256i sum64_0 =
_mm256_cvtepi32_epi64(_mm256_castsi256_si128(sum32));
const __m256i sum64_1 =
_mm256_cvtepi32_epi64(_mm256_extracti128_si256(sum32, 1));
sum64 = _mm256_add_epi64(sum64_0, sum64_1);
}
int64_t sum[4];
yy_storeu_256(sum, sum64);
err += sum[0] + sum[1] + sum[2] + sum[3];
return err;
}
// When params->r[0] > 0 and params->r[1] > 0. In this case all elements of
// C and H need to be computed.
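// In terms of the local variables below (f1 = flt0 - d, f2 = flt1 - d and
// s = (src << SGRPROJ_RST_BITS) - d), this accumulates and then normalizes
// by the pixel count:
//
//     H = [ sum(f1*f1)  sum(f1*f2) ]      C = [ sum(f1*s) ]
//         [ sum(f2*f1)  sum(f2*f2) ]          [ sum(f2*s) ]
//
// i.e. the normal-equation terms used downstream to solve for the two
// projection coefficients.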
static AOM_INLINE void calc_proj_params_r0_r1_avx2(
const uint8_t *src8, int width, int height, int src_stride,
const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
int32_t *flt1, int flt1_stride, int64_t H[2][2], int64_t C[2]) {
const int size = width * height;
const uint8_t *src = src8;
const uint8_t *dat = dat8;
__m256i h00, h01, h11, c0, c1;
const __m256i zero = _mm256_setzero_si256();
h01 = h11 = c0 = c1 = h00 = zero;
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; j += 8) {
const __m256i u_load = _mm256_cvtepu8_epi32(
_mm_loadl_epi64((__m128i *)(dat + i * dat_stride + j)));
const __m256i s_load = _mm256_cvtepu8_epi32(
_mm_loadl_epi64((__m128i *)(src + i * src_stride + j)));
__m256i f1 = _mm256_loadu_si256((__m256i *)(flt0 + i * flt0_stride + j));
__m256i f2 = _mm256_loadu_si256((__m256i *)(flt1 + i * flt1_stride + j));
__m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS);
__m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS);
s = _mm256_sub_epi32(s, d);
f1 = _mm256_sub_epi32(f1, d);
f2 = _mm256_sub_epi32(f2, d);
const __m256i h00_even = _mm256_mul_epi32(f1, f1);
const __m256i h00_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32),
_mm256_srli_epi64(f1, 32));
h00 = _mm256_add_epi64(h00, h00_even);
h00 = _mm256_add_epi64(h00, h00_odd);
const __m256i h01_even = _mm256_mul_epi32(f1, f2);
const __m256i h01_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32),
_mm256_srli_epi64(f2, 32));
h01 = _mm256_add_epi64(h01, h01_even);
h01 = _mm256_add_epi64(h01, h01_odd);
const __m256i h11_even = _mm256_mul_epi32(f2, f2);
const __m256i h11_odd = _mm256_mul_epi32(_mm256_srli_epi64(f2, 32),
_mm256_srli_epi64(f2, 32));
h11 = _mm256_add_epi64(h11, h11_even);
h11 = _mm256_add_epi64(h11, h11_odd);
const __m256i c0_even = _mm256_mul_epi32(f1, s);
const __m256i c0_odd =
_mm256_mul_epi32(_mm256_srli_epi64(f1, 32), _mm256_srli_epi64(s, 32));
c0 = _mm256_add_epi64(c0, c0_even);
c0 = _mm256_add_epi64(c0, c0_odd);
const __m256i c1_even = _mm256_mul_epi32(f2, s);
const __m256i c1_odd =
_mm256_mul_epi32(_mm256_srli_epi64(f2, 32), _mm256_srli_epi64(s, 32));
c1 = _mm256_add_epi64(c1, c1_even);
c1 = _mm256_add_epi64(c1, c1_odd);
}
}
__m256i c_low = _mm256_unpacklo_epi64(c0, c1);
const __m256i c_high = _mm256_unpackhi_epi64(c0, c1);
c_low = _mm256_add_epi64(c_low, c_high);
const __m128i c_128bit = _mm_add_epi64(_mm256_extracti128_si256(c_low, 1),
_mm256_castsi256_si128(c_low));
__m256i h0x_low = _mm256_unpacklo_epi64(h00, h01);
const __m256i h0x_high = _mm256_unpackhi_epi64(h00, h01);
h0x_low = _mm256_add_epi64(h0x_low, h0x_high);
const __m128i h0x_128bit = _mm_add_epi64(_mm256_extracti128_si256(h0x_low, 1),
_mm256_castsi256_si128(h0x_low));
// Using the symmetric properties of H, calculations of H[1][0] are not
// needed.
__m256i h1x_low = _mm256_unpacklo_epi64(zero, h11);
const __m256i h1x_high = _mm256_unpackhi_epi64(zero, h11);
h1x_low = _mm256_add_epi64(h1x_low, h1x_high);
const __m128i h1x_128bit = _mm_add_epi64(_mm256_extracti128_si256(h1x_low, 1),
_mm256_castsi256_si128(h1x_low));
xx_storeu_128(C, c_128bit);
xx_storeu_128(H[0], h0x_128bit);
xx_storeu_128(H[1], h1x_128bit);
H[0][0] /= size;
H[0][1] /= size;
H[1][1] /= size;
// Since H is a symmetric matrix
H[1][0] = H[0][1];
C[0] /= size;
C[1] /= size;
}
// When only params->r[0] > 0. In this case only H[0][0] and C[0] are
// non-zero and need to be computed.
static AOM_INLINE void calc_proj_params_r0_avx2(const uint8_t *src8, int width,
int height, int src_stride,
const uint8_t *dat8,
int dat_stride, int32_t *flt0,
int flt0_stride,
int64_t H[2][2], int64_t C[2]) {
const int size = width * height;
const uint8_t *src = src8;
const uint8_t *dat = dat8;
__m256i h00, c0;
const __m256i zero = _mm256_setzero_si256();
c0 = h00 = zero;
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; j += 8) {
const __m256i u_load = _mm256_cvtepu8_epi32(
_mm_loadl_epi64((__m128i *)(dat + i * dat_stride + j)));
const __m256i s_load = _mm256_cvtepu8_epi32(
_mm_loadl_epi64((__m128i *)(src + i * src_stride + j)));
__m256i f1 = _mm256_loadu_si256((__m256i *)(flt0 + i * flt0_stride + j));
__m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS);
__m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS);
s = _mm256_sub_epi32(s, d);
f1 = _mm256_sub_epi32(f1, d);
const __m256i h00_even = _mm256_mul_epi32(f1, f1);
const __m256i h00_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32),
_mm256_srli_epi64(f1, 32));
h00 = _mm256_add_epi64(h00, h00_even);
h00 = _mm256_add_epi64(h00, h00_odd);
const __m256i c0_even = _mm256_mul_epi32(f1, s);
const __m256i c0_odd =
_mm256_mul_epi32(_mm256_srli_epi64(f1, 32), _mm256_srli_epi64(s, 32));
c0 = _mm256_add_epi64(c0, c0_even);
c0 = _mm256_add_epi64(c0, c0_odd);
}
}
const __m128i h00_128bit = _mm_add_epi64(_mm256_extracti128_si256(h00, 1),
_mm256_castsi256_si128(h00));
const __m128i h00_val =
_mm_add_epi64(h00_128bit, _mm_srli_si128(h00_128bit, 8));
const __m128i c0_128bit = _mm_add_epi64(_mm256_extracti128_si256(c0, 1),
_mm256_castsi256_si128(c0));
const __m128i c0_val = _mm_add_epi64(c0_128bit, _mm_srli_si128(c0_128bit, 8));
const __m128i c = _mm_unpacklo_epi64(c0_val, _mm256_castsi256_si128(zero));
const __m128i h0x = _mm_unpacklo_epi64(h00_val, _mm256_castsi256_si128(zero));
xx_storeu_128(C, c);
xx_storeu_128(H[0], h0x);
H[0][0] /= size;
C[0] /= size;
}
// When only params->r[1] > 0. In this case only H[1][1] and C[1] are
// non-zero and need to be computed.
static AOM_INLINE void calc_proj_params_r1_avx2(const uint8_t *src8, int width,
int height, int src_stride,
const uint8_t *dat8,
int dat_stride, int32_t *flt1,
int flt1_stride,
int64_t H[2][2], int64_t C[2]) {
const int size = width * height;
const uint8_t *src = src8;
const uint8_t *dat = dat8;
__m256i h11, c1;
const __m256i zero = _mm256_setzero_si256();
c1 = h11 = zero;
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; j += 8) {
const __m256i u_load = _mm256_cvtepu8_epi32(
_mm_loadl_epi64((__m128i *)(dat + i * dat_stride + j)));
const __m256i s_load = _mm256_cvtepu8_epi32(
_mm_loadl_epi64((__m128i *)(src + i * src_stride + j)));
__m256i f2 = _mm256_loadu_si256((__m256i *)(flt1 + i * flt1_stride + j));
__m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS);
__m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS);
s = _mm256_sub_epi32(s, d);
f2 = _mm256_sub_epi32(f2, d);
const __m256i h11_even = _mm256_mul_epi32(f2, f2);
const __m256i h11_odd = _mm256_mul_epi32(_mm256_srli_epi64(f2, 32),
_mm256_srli_epi64(f2, 32));
h11 = _mm256_add_epi64(h11, h11_even);
h11 = _mm256_add_epi64(h11, h11_odd);
const __m256i c1_even = _mm256_mul_epi32(f2, s);
const __m256i c1_odd =
_mm256_mul_epi32(_mm256_srli_epi64(f2, 32), _mm256_srli_epi64(s, 32));
c1 = _mm256_add_epi64(c1, c1_even);
c1 = _mm256_add_epi64(c1, c1_odd);
}
}
const __m128i h11_128bit = _mm_add_epi64(_mm256_extracti128_si256(h11, 1),
_mm256_castsi256_si128(h11));
const __m128i h11_val =
_mm_add_epi64(h11_128bit, _mm_srli_si128(h11_128bit, 8));
const __m128i c1_128bit = _mm_add_epi64(_mm256_extracti128_si256(c1, 1),
_mm256_castsi256_si128(c1));
const __m128i c1_val = _mm_add_epi64(c1_128bit, _mm_srli_si128(c1_128bit, 8));
const __m128i c = _mm_unpacklo_epi64(_mm256_castsi256_si128(zero), c1_val);
const __m128i h1x = _mm_unpacklo_epi64(_mm256_castsi256_si128(zero), h11_val);
xx_storeu_128(C, c);
xx_storeu_128(H[1], h1x);
H[1][1] /= size;
C[1] /= size;
}
// AVX2 variant of av1_calc_proj_params_c.
void av1_calc_proj_params_avx2(const uint8_t *src8, int width, int height,
int src_stride, const uint8_t *dat8,
int dat_stride, int32_t *flt0, int flt0_stride,
int32_t *flt1, int flt1_stride, int64_t H[2][2],
int64_t C[2], const sgr_params_type *params) {
if ((params->r[0] > 0) && (params->r[1] > 0)) {
calc_proj_params_r0_r1_avx2(src8, width, height, src_stride, dat8,
dat_stride, flt0, flt0_stride, flt1,
flt1_stride, H, C);
} else if (params->r[0] > 0) {
calc_proj_params_r0_avx2(src8, width, height, src_stride, dat8, dat_stride,
flt0, flt0_stride, H, C);
} else if (params->r[1] > 0) {
calc_proj_params_r1_avx2(src8, width, height, src_stride, dat8, dat_stride,
flt1, flt1_stride, H, C);
}
}
static AOM_INLINE void calc_proj_params_r0_r1_high_bd_avx2(
const uint8_t *src8, int width, int height, int src_stride,
const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
int32_t *flt1, int flt1_stride, int64_t H[2][2], int64_t C[2]) {
const int size = width * height;
const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
__m256i h00, h01, h11, c0, c1;
const __m256i zero = _mm256_setzero_si256();
h01 = h11 = c0 = c1 = h00 = zero;
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; j += 8) {
const __m256i u_load = _mm256_cvtepu16_epi32(
_mm_load_si128((__m128i *)(dat + i * dat_stride + j)));
const __m256i s_load = _mm256_cvtepu16_epi32(
_mm_load_si128((__m128i *)(src + i * src_stride + j)));
__m256i f1 = _mm256_loadu_si256((__m256i *)(flt0 + i * flt0_stride + j));
__m256i f2 = _mm256_loadu_si256((__m256i *)(flt1 + i * flt1_stride + j));
__m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS);
__m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS);
s = _mm256_sub_epi32(s, d);
f1 = _mm256_sub_epi32(f1, d);
f2 = _mm256_sub_epi32(f2, d);
const __m256i h00_even = _mm256_mul_epi32(f1, f1);
const __m256i h00_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32),
_mm256_srli_epi64(f1, 32));
h00 = _mm256_add_epi64(h00, h00_even);
h00 = _mm256_add_epi64(h00, h00_odd);
const __m256i h01_even = _mm256_mul_epi32(f1, f2);
const __m256i h01_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32),
_mm256_srli_epi64(f2, 32));
h01 = _mm256_add_epi64(h01, h01_even);
h01 = _mm256_add_epi64(h01, h01_odd);
const __m256i h11_even = _mm256_mul_epi32(f2, f2);
const __m256i h11_odd = _mm256_mul_epi32(_mm256_srli_epi64(f2, 32),
_mm256_srli_epi64(f2, 32));
h11 = _mm256_add_epi64(h11, h11_even);
h11 = _mm256_add_epi64(h11, h11_odd);
const __m256i c0_even = _mm256_mul_epi32(f1, s);
const __m256i c0_odd =
_mm256_mul_epi32(_mm256_srli_epi64(f1, 32), _mm256_srli_epi64(s, 32));
c0 = _mm256_add_epi64(c0, c0_even);
c0 = _mm256_add_epi64(c0, c0_odd);
const __m256i c1_even = _mm256_mul_epi32(f2, s);
const __m256i c1_odd =
_mm256_mul_epi32(_mm256_srli_epi64(f2, 32), _mm256_srli_epi64(s, 32));
c1 = _mm256_add_epi64(c1, c1_even);
c1 = _mm256_add_epi64(c1, c1_odd);
}
}
__m256i c_low = _mm256_unpacklo_epi64(c0, c1);
const __m256i c_high = _mm256_unpackhi_epi64(c0, c1);
c_low = _mm256_add_epi64(c_low, c_high);
const __m128i c_128bit = _mm_add_epi64(_mm256_extracti128_si256(c_low, 1),
_mm256_castsi256_si128(c_low));
__m256i h0x_low = _mm256_unpacklo_epi64(h00, h01);
const __m256i h0x_high = _mm256_unpackhi_epi64(h00, h01);
h0x_low = _mm256_add_epi64(h0x_low, h0x_high);
const __m128i h0x_128bit = _mm_add_epi64(_mm256_extracti128_si256(h0x_low, 1),
_mm256_castsi256_si128(h0x_low));
// Using the symmetric properties of H, calculations of H[1][0] are not
// needed.
__m256i h1x_low = _mm256_unpacklo_epi64(zero, h11);
const __m256i h1x_high = _mm256_unpackhi_epi64(zero, h11);
h1x_low = _mm256_add_epi64(h1x_low, h1x_high);
const __m128i h1x_128bit = _mm_add_epi64(_mm256_extracti128_si256(h1x_low, 1),
_mm256_castsi256_si128(h1x_low));
xx_storeu_128(C, c_128bit);
xx_storeu_128(H[0], h0x_128bit);
xx_storeu_128(H[1], h1x_128bit);
H[0][0] /= size;
H[0][1] /= size;
H[1][1] /= size;
// Since H is a symmetric matrix
H[1][0] = H[0][1];
C[0] /= size;
C[1] /= size;
}
static AOM_INLINE void calc_proj_params_r0_high_bd_avx2(
const uint8_t *src8, int width, int height, int src_stride,
const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
int64_t H[2][2], int64_t C[2]) {
const int size = width * height;
const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
__m256i h00, c0;
const __m256i zero = _mm256_setzero_si256();
c0 = h00 = zero;
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; j += 8) {
const __m256i u_load = _mm256_cvtepu16_epi32(
_mm_load_si128((__m128i *)(dat + i * dat_stride + j)));
const __m256i s_load = _mm256_cvtepu16_epi32(
_mm_load_si128((__m128i *)(src + i * src_stride + j)));
__m256i f1 = _mm256_loadu_si256((__m256i *)(flt0 + i * flt0_stride + j));
__m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS);
__m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS);
s = _mm256_sub_epi32(s, d);
f1 = _mm256_sub_epi32(f1, d);
const __m256i h00_even = _mm256_mul_epi32(f1, f1);
const __m256i h00_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32),
_mm256_srli_epi64(f1, 32));
h00 = _mm256_add_epi64(h00, h00_even);
h00 = _mm256_add_epi64(h00, h00_odd);
const __m256i c0_even = _mm256_mul_epi32(f1, s);
const __m256i c0_odd =
_mm256_mul_epi32(_mm256_srli_epi64(f1, 32), _mm256_srli_epi64(s, 32));
c0 = _mm256_add_epi64(c0, c0_even);
c0 = _mm256_add_epi64(c0, c0_odd);
}
}
const __m128i h00_128bit = _mm_add_epi64(_mm256_extracti128_si256(h00, 1),
_mm256_castsi256_si128(h00));
const __m128i h00_val =
_mm_add_epi64(h00_128bit, _mm_srli_si128(h00_128bit, 8));
const __m128i c0_128bit = _mm_add_epi64(_mm256_extracti128_si256(c0, 1),
_mm256_castsi256_si128(c0));
const __m128i c0_val = _mm_add_epi64(c0_128bit, _mm_srli_si128(c0_128bit, 8));
const __m128i c = _mm_unpacklo_epi64(c0_val, _mm256_castsi256_si128(zero));
const __m128i h0x = _mm_unpacklo_epi64(h00_val, _mm256_castsi256_si128(zero));
xx_storeu_128(C, c);
xx_storeu_128(H[0], h0x);
H[0][0] /= size;
C[0] /= size;
}
static AOM_INLINE void calc_proj_params_r1_high_bd_avx2(
const uint8_t *src8, int width, int height, int src_stride,
const uint8_t *dat8, int dat_stride, int32_t *flt1, int flt1_stride,
int64_t H[2][2], int64_t C[2]) {
const int size = width * height;
const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
__m256i h11, c1;
const __m256i zero = _mm256_setzero_si256();
c1 = h11 = zero;
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; j += 8) {
const __m256i u_load = _mm256_cvtepu16_epi32(
_mm_load_si128((__m128i *)(dat + i * dat_stride + j)));
const __m256i s_load = _mm256_cvtepu16_epi32(
_mm_load_si128((__m128i *)(src + i * src_stride + j)));
__m256i f2 = _mm256_loadu_si256((__m256i *)(flt1 + i * flt1_stride + j));
__m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS);
__m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS);
s = _mm256_sub_epi32(s, d);
f2 = _mm256_sub_epi32(f2, d);
const __m256i h11_even = _mm256_mul_epi32(f2, f2);
const __m256i h11_odd = _mm256_mul_epi32(_mm256_srli_epi64(f2, 32),
_mm256_srli_epi64(f2, 32));
h11 = _mm256_add_epi64(h11, h11_even);
h11 = _mm256_add_epi64(h11, h11_odd);
const __m256i c1_even = _mm256_mul_epi32(f2, s);
const __m256i c1_odd =
_mm256_mul_epi32(_mm256_srli_epi64(f2, 32), _mm256_srli_epi64(s, 32));
c1 = _mm256_add_epi64(c1, c1_even);
c1 = _mm256_add_epi64(c1, c1_odd);
}
}
const __m128i h11_128bit = _mm_add_epi64(_mm256_extracti128_si256(h11, 1),
_mm256_castsi256_si128(h11));
const __m128i h11_val =
_mm_add_epi64(h11_128bit, _mm_srli_si128(h11_128bit, 8));
const __m128i c1_128bit = _mm_add_epi64(_mm256_extracti128_si256(c1, 1),
_mm256_castsi256_si128(c1));
const __m128i c1_val = _mm_add_epi64(c1_128bit, _mm_srli_si128(c1_128bit, 8));
const __m128i c = _mm_unpacklo_epi64(_mm256_castsi256_si128(zero), c1_val);
const __m128i h1x = _mm_unpacklo_epi64(_mm256_castsi256_si128(zero), h11_val);
xx_storeu_128(C, c);
xx_storeu_128(H[1], h1x);
H[1][1] /= size;
C[1] /= size;
}
// AVX2 variant of av1_calc_proj_params_high_bd_c.
void av1_calc_proj_params_high_bd_avx2(const uint8_t *src8, int width,
int height, int src_stride,
const uint8_t *dat8, int dat_stride,
int32_t *flt0, int flt0_stride,
int32_t *flt1, int flt1_stride,
int64_t H[2][2], int64_t C[2],
const sgr_params_type *params) {
if ((params->r[0] > 0) && (params->r[1] > 0)) {
calc_proj_params_r0_r1_high_bd_avx2(src8, width, height, src_stride, dat8,
dat_stride, flt0, flt0_stride, flt1,
flt1_stride, H, C);
} else if (params->r[0] > 0) {
calc_proj_params_r0_high_bd_avx2(src8, width, height, src_stride, dat8,
dat_stride, flt0, flt0_stride, H, C);
} else if (params->r[1] > 0) {
calc_proj_params_r1_high_bd_avx2(src8, width, height, src_stride, dat8,
dat_stride, flt1, flt1_stride, H, C);
}
}
#if CONFIG_AV1_HIGHBITDEPTH
int64_t av1_highbd_pixel_proj_error_avx2(
const uint8_t *src8, int width, int height, int src_stride,
const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
int32_t *flt1, int flt1_stride, int xq[2], const sgr_params_type *params) {
int i, j, k;
const int32_t shift = SGRPROJ_RST_BITS + SGRPROJ_PRJ_BITS;
const __m256i rounding = _mm256_set1_epi32(1 << (shift - 1));
__m256i sum64 = _mm256_setzero_si256();
const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
int64_t err = 0;
if (params->r[0] > 0 && params->r[1] > 0) { // Both filters are enabled
const __m256i xq0 = _mm256_set1_epi32(xq[0]);
const __m256i xq1 = _mm256_set1_epi32(xq[1]);
for (i = 0; i < height; ++i) {
__m256i sum32 = _mm256_setzero_si256();
for (j = 0; j <= width - 16; j += 16) { // Process 16 pixels at a time
// Load 16 pixels each from source image and corrupted image
const __m256i s0 = yy_loadu_256(src + j);
const __m256i d0 = yy_loadu_256(dat + j);
// s0 = [15 14 13 12 11 10 9 8] [7 6 5 4 3 2 1 0] as u16 (indices)
// Shift-up each pixel to match filtered image scaling
const __m256i u0 = _mm256_slli_epi16(d0, SGRPROJ_RST_BITS);
// Split u0 into two halves and pad each from u16 to i32
const __m256i u0l = _mm256_cvtepu16_epi32(_mm256_castsi256_si128(u0));
const __m256i u0h =
_mm256_cvtepu16_epi32(_mm256_extracti128_si256(u0, 1));
// u0h, u0l = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0] as u32
// Load 16 pixels from each filtered image
const __m256i flt0l = yy_loadu_256(flt0 + j);
const __m256i flt0h = yy_loadu_256(flt0 + j + 8);
const __m256i flt1l = yy_loadu_256(flt1 + j);
const __m256i flt1h = yy_loadu_256(flt1 + j + 8);
// flt?l, flt?h = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0] as u32
// Subtract shifted corrupt image from each filtered image
const __m256i flt0l_subu = _mm256_sub_epi32(flt0l, u0l);
const __m256i flt0h_subu = _mm256_sub_epi32(flt0h, u0h);
const __m256i flt1l_subu = _mm256_sub_epi32(flt1l, u0l);
const __m256i flt1h_subu = _mm256_sub_epi32(flt1h, u0h);
// Multiply basis vectors by appropriate coefficients
const __m256i v0l = _mm256_mullo_epi32(flt0l_subu, xq0);
const __m256i v0h = _mm256_mullo_epi32(flt0h_subu, xq0);
const __m256i v1l = _mm256_mullo_epi32(flt1l_subu, xq1);
const __m256i v1h = _mm256_mullo_epi32(flt1h_subu, xq1);
// Add together the contributions from the two basis vectors
const __m256i vl = _mm256_add_epi32(v0l, v1l);
const __m256i vh = _mm256_add_epi32(v0h, v1h);
// Right-shift v with appropriate rounding
const __m256i vrl =
_mm256_srai_epi32(_mm256_add_epi32(vl, rounding), shift);
const __m256i vrh =
_mm256_srai_epi32(_mm256_add_epi32(vh, rounding), shift);
// vrh, vrl = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0]
// Saturate each i32 to an i16 then combine both halves
// The permute (control=[3 1 2 0]) fixes weird ordering from AVX lanes
const __m256i vr =
_mm256_permute4x64_epi64(_mm256_packs_epi32(vrl, vrh), 0xd8);
// intermediate = [15 14 13 12 7 6 5 4] [11 10 9 8 3 2 1 0]
// vr = [15 14 13 12 11 10 9 8] [7 6 5 4 3 2 1 0]
// Add twin-subspace-sgr-filter to corrupt image then subtract source
const __m256i e0 = _mm256_sub_epi16(_mm256_add_epi16(vr, d0), s0);
// Calculate squared error and add adjacent values
const __m256i err0 = _mm256_madd_epi16(e0, e0);
sum32 = _mm256_add_epi32(sum32, err0);
}
const __m256i sum32l =
_mm256_cvtepu32_epi64(_mm256_castsi256_si128(sum32));
sum64 = _mm256_add_epi64(sum64, sum32l);
const __m256i sum32h =
_mm256_cvtepu32_epi64(_mm256_extracti128_si256(sum32, 1));
sum64 = _mm256_add_epi64(sum64, sum32h);
// Process remaining pixels in this row (modulo 16)
for (k = j; k < width; ++k) {
const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS);
int32_t v = xq[0] * (flt0[k] - u) + xq[1] * (flt1[k] - u);
const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k];
err += ((int64_t)e * e);
}
dat += dat_stride;
src += src_stride;
flt0 += flt0_stride;
flt1 += flt1_stride;
}
} else if (params->r[0] > 0 || params->r[1] > 0) { // Only one filter enabled
const int32_t xq_on = (params->r[0] > 0) ? xq[0] : xq[1];
const __m256i xq_active = _mm256_set1_epi32(xq_on);
const __m256i xq_inactive =
_mm256_set1_epi32(-xq_on * (1 << SGRPROJ_RST_BITS));
const int32_t *flt = (params->r[0] > 0) ? flt0 : flt1;
const int flt_stride = (params->r[0] > 0) ? flt0_stride : flt1_stride;
for (i = 0; i < height; ++i) {
__m256i sum32 = _mm256_setzero_si256();
for (j = 0; j <= width - 16; j += 16) {
// Load 16 pixels from source image
const __m256i s0 = yy_loadu_256(src + j);
// s0 = [15 14 13 12 11 10 9 8] [7 6 5 4 3 2 1 0] as u16
// Load 16 pixels from corrupted image and pad each u16 to i32
const __m256i d0 = yy_loadu_256(dat + j);
const __m256i d0h =
_mm256_cvtepu16_epi32(_mm256_extracti128_si256(d0, 1));
const __m256i d0l = _mm256_cvtepu16_epi32(_mm256_castsi256_si128(d0));
// d0 = [15 14 13 12 11 10 9 8] [7 6 5 4 3 2 1 0] as u16
// d0h, d0l = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0] as i32
// Load 16 pixels from the filtered image
const __m256i flth = yy_loadu_256(flt + j + 8);
const __m256i fltl = yy_loadu_256(flt + j);
// flth, fltl = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0] as i32
const __m256i flth_xq = _mm256_mullo_epi32(flth, xq_active);
const __m256i fltl_xq = _mm256_mullo_epi32(fltl, xq_active);
const __m256i d0h_xq = _mm256_mullo_epi32(d0h, xq_inactive);
const __m256i d0l_xq = _mm256_mullo_epi32(d0l, xq_inactive);
const __m256i vh = _mm256_add_epi32(flth_xq, d0h_xq);
const __m256i vl = _mm256_add_epi32(fltl_xq, d0l_xq);
// Shift this down with appropriate rounding
const __m256i vrh =
_mm256_srai_epi32(_mm256_add_epi32(vh, rounding), shift);
const __m256i vrl =
_mm256_srai_epi32(_mm256_add_epi32(vl, rounding), shift);
// vrh, vrl = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0] as i32
// Saturate each i32 to an i16 then combine both halves
// The permute (control=[3 1 2 0]) fixes weird ordering from AVX lanes
const __m256i vr =
_mm256_permute4x64_epi64(_mm256_packs_epi32(vrl, vrh), 0xd8);
// intermediate = [15 14 13 12 7 6 5 4] [11 10 9 8 3 2 1 0] as u16
// vr = [15 14 13 12 11 10 9 8] [7 6 5 4 3 2 1 0] as u16
// Subtract twin-subspace-sgr filtered from source image to get error
const __m256i e0 = _mm256_sub_epi16(_mm256_add_epi16(vr, d0), s0);
// Calculate squared error and add adjacent values
const __m256i err0 = _mm256_madd_epi16(e0, e0);
sum32 = _mm256_add_epi32(sum32, err0);
}
const __m256i sum32l =
_mm256_cvtepu32_epi64(_mm256_castsi256_si128(sum32));
sum64 = _mm256_add_epi64(sum64, sum32l);
const __m256i sum32h =
_mm256_cvtepu32_epi64(_mm256_extracti128_si256(sum32, 1));
sum64 = _mm256_add_epi64(sum64, sum32h);
// Process remaining pixels in this row (modulo 16)
for (k = j; k < width; ++k) {
const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS);
int32_t v = xq_on * (flt[k] - u);
const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k];
err += ((int64_t)e * e);
}
dat += dat_stride;
src += src_stride;
flt += flt_stride;
}
} else { // Neither filter is enabled
for (i = 0; i < height; ++i) {
__m256i sum32 = _mm256_setzero_si256();
for (j = 0; j <= width - 32; j += 32) {
// Load 2x16 u16 from source image
const __m256i s0l = yy_loadu_256(src + j);
const __m256i s0h = yy_loadu_256(src + j + 16);
// Load 2x16 u16 from corrupted image
const __m256i d0l = yy_loadu_256(dat + j);
const __m256i d0h = yy_loadu_256(dat + j + 16);
// Subtract corrupted image from source image
const __m256i diffl = _mm256_sub_epi16(d0l, s0l);
const __m256i diffh = _mm256_sub_epi16(d0h, s0h);
// Square error and add adjacent values
const __m256i err0l = _mm256_madd_epi16(diffl, diffl);
const __m256i err0h = _mm256_madd_epi16(diffh, diffh);
sum32 = _mm256_add_epi32(sum32, err0l);
sum32 = _mm256_add_epi32(sum32, err0h);
}
const __m256i sum32l =
_mm256_cvtepu32_epi64(_mm256_castsi256_si128(sum32));
sum64 = _mm256_add_epi64(sum64, sum32l);
const __m256i sum32h =
_mm256_cvtepu32_epi64(_mm256_extracti128_si256(sum32, 1));
sum64 = _mm256_add_epi64(sum64, sum32h);
      // Process remaining pixels in this row (modulo 32)
for (k = j; k < width; ++k) {
const int32_t e = (int32_t)(dat[k]) - src[k];
err += ((int64_t)e * e);
}
dat += dat_stride;
src += src_stride;
}
}
// Sum 4 values from sum64l and sum64h into err
int64_t sum[4];
yy_storeu_256(sum, sum64);
err += sum[0] + sum[1] + sum[2] + sum[3];
return err;
}
#endif // CONFIG_AV1_HIGHBITDEPTH
|
427832.c | /*
* Class READABLE_INDEXABLE_ITERATION_CURSOR [NATURAL_16]
*/
#include "eif_macros.h"
#ifdef __cplusplus
extern "C" {
#endif
static const EIF_TYPE_INDEX egt_0_327 [] = {0xFF01,232,0xFFFF};
static const EIF_TYPE_INDEX egt_1_327 [] = {0xFF01,245,326,215,0xFFFF};
static const EIF_TYPE_INDEX egt_2_327 [] = {0xFF01,326,215,0xFFFF};
static const EIF_TYPE_INDEX egt_3_327 [] = {0,0xFFFF};
static const EIF_TYPE_INDEX egt_4_327 [] = {0,0xFFFF};
static const EIF_TYPE_INDEX egt_5_327 [] = {0xFF01,326,215,0xFFFF};
static const EIF_TYPE_INDEX egt_6_327 [] = {0xFF01,326,215,0xFFFF};
static const EIF_TYPE_INDEX egt_7_327 [] = {0,0xFFFF};
static const EIF_TYPE_INDEX egt_8_327 [] = {0xFF01,14,0xFFFF};
static const EIF_TYPE_INDEX egt_9_327 [] = {0xFF01,232,0xFFFF};
static const EIF_TYPE_INDEX egt_10_327 [] = {0xFF01,232,0xFFFF};
static const EIF_TYPE_INDEX egt_11_327 [] = {0xFF01,15,0xFFFF};
static const EIF_TYPE_INDEX egt_12_327 [] = {326,215,0xFFFF};
static const EIF_TYPE_INDEX egt_13_327 [] = {0xFF01,326,215,0xFFFF};
static const EIF_TYPE_INDEX egt_14_327 [] = {0xFFF8,1,0xFFFF};
static const EIF_TYPE_INDEX egt_15_327 [] = {0xFFF8,1,0xFFFF};
static const EIF_TYPE_INDEX egt_16_327 [] = {0xFF01,322,0xFFF8,1,0xFFFF};
static const EIF_TYPE_INDEX egt_17_327 [] = {0xFF01,326,215,0xFFFF};
static const EIF_TYPE_INDEX egt_18_327 [] = {0xFF01,326,215,0xFFFF};
static const EIF_TYPE_INDEX egt_19_327 [] = {0xFF01,326,215,0xFFFF};
static const EIF_TYPE_INDEX egt_20_327 [] = {0xFF01,326,215,0xFFFF};
static const EIF_TYPE_INDEX egt_21_327 [] = {0xFF01,326,215,0xFFFF};
static const EIF_TYPE_INDEX egt_22_327 [] = {0xFFF8,1,0xFFFF};
static const EIF_TYPE_INDEX egt_23_327 [] = {0xFF01,322,0xFFF8,1,0xFFFF};
static const EIF_TYPE_INDEX egt_24_327 [] = {0xFF01,322,0xFFF8,1,0xFFFF};
static const struct desc_info desc_327[] = {
{EIF_GENERIC(NULL), 0xFFFFFFFF, 0xFFFFFFFF},
{EIF_GENERIC(egt_0_327), 0, 0xFFFFFFFF},
{EIF_GENERIC(egt_1_327), 1, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 2, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 3, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 4, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 5, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 6, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 7, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 8, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 9, 0xFFFFFFFF},
{EIF_GENERIC(egt_2_327), 10, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 11, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 12, 0xFFFFFFFF},
{EIF_GENERIC(egt_3_327), 13, 0xFFFFFFFF},
{EIF_GENERIC(egt_4_327), 14, 0xFFFFFFFF},
{EIF_GENERIC(egt_5_327), 15, 0xFFFFFFFF},
{EIF_GENERIC(egt_6_327), 16, 0xFFFFFFFF},
{EIF_GENERIC(egt_7_327), 17, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 18, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 19, 0xFFFFFFFF},
{EIF_GENERIC(egt_8_327), 20, 0xFFFFFFFF},
{EIF_GENERIC(egt_9_327), 21, 0xFFFFFFFF},
{EIF_GENERIC(egt_10_327), 22, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 23, 0xFFFFFFFF},
{EIF_GENERIC(egt_11_327), 24, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 25, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 26, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 27, 0xFFFFFFFF},
{EIF_GENERIC(egt_12_327), 28, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01C7 /*227*/), 29, 0xFFFFFFFF},
{EIF_GENERIC(egt_13_327), 30, 0xFFFFFFFF},
{EIF_GENERIC(egt_14_327), 8389, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 8448, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 8456, 0xFFFFFFFF},
{EIF_GENERIC(egt_15_327), 0x00, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 8458, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01A9 /*212*/), 8447, 8},
{EIF_GENERIC(NULL), 8453, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 8454, 0xFFFFFFFF},
{EIF_GENERIC(egt_16_327), 8457, 0},
{EIF_NON_GENERIC(0x0197 /*203*/), 8449, 4},
{EIF_NON_GENERIC(0x01B5 /*218*/), 8463, 12},
{EIF_NON_GENERIC(0x01B5 /*218*/), 8462, 16},
{EIF_NON_GENERIC(0x01B5 /*218*/), 8461, 20},
{EIF_NON_GENERIC(0x01B5 /*218*/), 8460, 24},
{EIF_NON_GENERIC(0x01B5 /*218*/), 8459, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x01B5 /*218*/), 8460, 24},
{EIF_NON_GENERIC(0x01B5 /*218*/), 8461, 20},
{EIF_NON_GENERIC(0x01B5 /*218*/), 8462, 16},
{EIF_NON_GENERIC(0x01B5 /*218*/), 8463, 12},
{EIF_GENERIC(egt_17_327), 8465, 0xFFFFFFFF},
{EIF_GENERIC(egt_18_327), 8466, 0xFFFFFFFF},
{EIF_GENERIC(egt_19_327), 8467, 0xFFFFFFFF},
{EIF_GENERIC(egt_20_327), 8446, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 8449, 4},
{EIF_NON_GENERIC(0x0197 /*203*/), 8450, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 8451, 0xFFFFFFFF},
{EIF_NON_GENERIC(0x0197 /*203*/), 8452, 0xFFFFFFFF},
{EIF_GENERIC(NULL), 8455, 0xFFFFFFFF},
{EIF_GENERIC(egt_21_327), 8464, 0xFFFFFFFF},
{EIF_GENERIC(egt_22_327), 0x00, 0xFFFFFFFF},
{EIF_GENERIC(egt_23_327), 8457, 0},
{EIF_GENERIC(egt_24_327), 0x00, 0xFFFFFFFF},
};
void Init327(void)
{
IDSC(desc_327, 0, 326);
IDSC(desc_327 + 1, 1, 326);
IDSC(desc_327 + 32, 257, 326);
IDSC(desc_327 + 36, 319, 326);
IDSC(desc_327 + 46, 204, 326);
IDSC(desc_327 + 60, 102, 326);
IDSC(desc_327 + 62, 250, 326);
}
#ifdef __cplusplus
}
#endif
|
488070.c |
/* The ASM version of lcd-as-memframe.c isn't 24bit capable */
#include "lcd-as-memframe.c"
|
248686.c | /*
Copyright (C) 2000, 2001 Silicon Graphics, Inc. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of version 2.1 of the GNU Lesser General Public License
as published by the Free Software Foundation.
This program is distributed in the hope that it would be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
Further, this software is distributed without any warranty that it is
free of the rightful claim of any third person regarding infringement
or the like. Any license provided herein, whether implied or
otherwise, applies only to this software file. Patent licenses, if
any, provided herein do not apply to combinations of this program with
other software, or any other product whatsoever.
You should have received a copy of the GNU Lesser General Public
License along with this program; if not, write the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
USA.
Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pky,
Mountain View, CA 94043, or:
http://www.sgi.com
For further information regarding this notice, see:
http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
#pragma ident "@(#) libf/pxf/pxfgetgroups.c 92.2 09/15/99 10:41:12"
#include <fortran.h>
#include <errno.h>
#include <liberrno.h>
#include <unistd.h>
#ifdef _UNICOS
#include <sys/types.h>
#include <sys/param.h>
#elif defined(_LITTLE_ENDIAN)
#include <sys/types.h>
#include <sys/param.h>
#else
#include <limits.h>
#endif
/*
* PXFGETGROUPS -- get supplementary group IDs
* (section 4.2.3 of Posix 1003.9-1992)
*
* Call from Fortran:
*
* SUBROUTINE PXFGETGROUPS(IGIDSETSIZE,IGROUPLIST,NGROUPS,IERROR)
* INTEGER IGIDSETSIZE,IGROUPLIST(IGIDSETSIZE),NGROUPS,IERROR
*
* Where:
*
* IGIDSETSIZE is an input integer variable containing the size of
* the IGROUPLIST integer array.
*
* IGROUPLIST is an output integer variable or array element that
* will contain a set of supplemental group IDs for the
* calling process.
*
* NGROUPS is an output integer variable that will contain the
* number of supplemental group IDs for the calling process.
*
* IERROR is an output integer variable that will contain
* the status:
*
* zero - variable was changed.
*
* nonzero - PXFGETGROUPS was not successful.
*
* PXFGETGROUPS may return any of the following error
* values:
*
* EINVAL If the IGIDSETSIZE is not equal to zero and
* is less than the number of supplementary group
* IDs.
*
*/
#ifdef _UNICOS
void
PXFGETGROUPS(
#else
void
_PXFGETGROUPS(
#endif
_f_int *IGIDSETSIZE,
_f_int *IGROUPLIST,
_f_int *ngroups,
_f_int *IERROR
)
{
gid_t grplistbuf[NGROUPS_MAX];
int i, groups, gidsetsize;
gidsetsize = *IGIDSETSIZE;
/* get groups using a group list buffer of size NGROUPS_MAX */
if ((groups = getgroups(NGROUPS_MAX, grplistbuf)) == -1) {
*IERROR = errno;
} else {
/* check if user array IGROUPLIST is too small to hold all of the group IDs */
if ((gidsetsize < groups) && (gidsetsize != 0)) {
*IERROR = EINVAL;
} else {
/* If gidsetsize is zero the user just wants the number of
* supplemental group IDs; otherwise the group IDs need
* to be copied into the IGROUPLIST array and the number of
* groups assigned to the user's ngroups variable.
*/
if (gidsetsize != 0) {
/* copy the group ID elements into IGROUPLIST */
for (i=0; i < groups; i++) {
IGROUPLIST[i] = grplistbuf[i];
}
}
*IERROR = 0;
*ngroups = groups;
}
}
}
#ifndef _UNICOS
void
pxfgetgroups_(
_f_int *IGIDSETSIZE,
_f_int *IGROUPLIST,
_f_int *ngroups,
_f_int *IERROR
)
{
_PXFGETGROUPS(IGIDSETSIZE, IGROUPLIST, ngroups, IERROR);
}
#endif
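/* --- Illustrative sketch (not part of the original file) ---
 * A minimal C-side driver for the wrapper above, assuming a non-UNICOS build
 * where pxfgetgroups_() is defined and NGROUPS_MAX comes from the headers
 * already included. It simply follows the Fortran calling sequence that the
 * header comment documents.
 */
#ifndef _UNICOS
static void
pxfgetgroups_demo(void)
{
	_f_int gidsetsize = NGROUPS_MAX;	/* size of the grouplist array */
	_f_int grouplist[NGROUPS_MAX];		/* receives the group IDs */
	_f_int ngroups = 0;			/* receives the count */
	_f_int ierror = 1;			/* receives the status */

	pxfgetgroups_(&gidsetsize, grouplist, &ngroups, &ierror);
	/* on success ierror is zero and the first ngroups entries of
	 * grouplist hold the supplementary group IDs */
}
#endif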
|
581005.c | // Room: /d/shaolin/hantan1.c
// Date: YZC 96/02/06
#include <room.h>
inherit ROOM;
void create()
{
set("short", "万古寒潭");
set("long", @LONG
这是寒潭之中。四周水声哗哗,似乎有一股暗流在极力牵扯
着你往下沉。透过潭水隐约可见潭周的岩壁,被数万年水流冲激
磨削得十分光滑。虽然口中含了辟水珠,身上却仍然感觉到丝丝
寒意侵入肌肤,而且越是往下,水底的吸力越强,寒意更甚。
LONG );
set("no_clean_up", 0);
setup();
}
void init()
{
call_out("down", 10, this_player());
}
void down(object me)
{
tell_object(me, "The water swirls beneath your feet as a powerful suction drags you downward...\n");
me->move(__DIR__"hantan2");
}
|
59837.c | /*!
*************************************************************************************
* \file img_luma_uint8.c
*
* \brief
* Luma interpolation functions
*
* \author
* Main contributors (see contributors.h for copyright, address and affiliation details)
* - Alexis Michael Tourapis <[email protected]>
* - Athanasios Leontaris <[email protected]>
* - Yuwen He <[email protected]>
*
*************************************************************************************
*/
#include "contributors.h"
#include <limits.h>
#include "global.h"
#include "image.h"
#include "img_luma.h"
#include "memalloc.h"
/*!
************************************************************************
* \brief
* Copy Integer Samples to image [0][0]
*
* \param s
* pointer to StorablePicture structure
* \param dstImg
* destination image
* \param srcImg
* source image
************************************************************************
*/
static void getSubImageInteger( StorablePicture *s, imgpel **dstImg, imgpel **srcImg)
{
int i, j;
int size_x_minus1 = s->size_x - 1;
imgpel *wBufSrc, *wBufDst;
// Copy top line
wBufDst = &( dstImg[-IMG_PAD_SIZE_Y][-IMG_PAD_SIZE_X] );
wBufSrc = srcImg[0];
// left IMG_PAD_SIZE
for (i = 0; i < IMG_PAD_SIZE_X; i++)
*(wBufDst++) = wBufSrc[0];
// center 0-(s->size_x)
memcpy(wBufDst, wBufSrc, s->size_x * sizeof(imgpel));
wBufDst += s->size_x;
// right IMG_PAD_SIZE
for (i = 0; i < IMG_PAD_SIZE_X; i++)
*(wBufDst++) = wBufSrc[size_x_minus1];
// Now copy remaining pad lines
for (j = -IMG_PAD_SIZE_Y+1; j < 1; ++j)
{
memcpy(dstImg[j]-IMG_PAD_SIZE_X, dstImg[j - 1]-IMG_PAD_SIZE_X, s->size_x_padded * sizeof(imgpel));
}
for (j = 1; j < s->size_y; j++)
{
wBufDst = &( dstImg[j][-IMG_PAD_SIZE_X] );
wBufSrc = srcImg[j];
// left IMG_PAD_SIZE
for (i = 0; i < IMG_PAD_SIZE_X; i++)
*(wBufDst++) = wBufSrc[0];
// center 0-(s->size_x)
memcpy(wBufDst, wBufSrc, s->size_x * sizeof(imgpel));
wBufDst += s->size_x;
// right IMG_PAD_SIZE
for (i = 0; i < IMG_PAD_SIZE_X; i++)
*(wBufDst++) = wBufSrc[size_x_minus1];
}
// Replicate bottom pad lines
for (j = s->size_y; j < s->size_y+IMG_PAD_SIZE_Y; j++)
{
memcpy(dstImg[j]-IMG_PAD_SIZE_X, dstImg[j - 1]-IMG_PAD_SIZE_X, s->size_x_padded * sizeof(imgpel));
}
}
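/* --- Illustrative sketch (not part of the original file) ---
 * The padding rule above, reduced to one dimension: every sample outside
 * [0, size) is replaced by the nearest edge sample. The helper is a
 * hypothetical plain-int version added only to show the replication scheme
 * that getSubImageInteger applies to each row and to the top/bottom pads.
 */
static void pad_row_replicate(int *dst, const int *src, int size, int pad)
{
  int i;
  for (i = 0; i < pad; i++)
    dst[i] = src[0];                        /* left pad: repeat first sample */
  for (i = 0; i < size; i++)
    dst[pad + i] = src[i];                  /* center: straight copy */
  for (i = 0; i < pad; i++)
    dst[pad + size + i] = src[size - 1];    /* right pad: repeat last sample */
}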
static void getSubImageInteger_s( StorablePicture *s, imgpel **dstImg, imgpel **srcImg)
{
int i, j;
int size_x_minus1 = s->size_x - 1;
imgpel *wBufSrc, *wBufDst;
// Copy top line
wBufDst = &( dstImg[-IMG_PAD_SIZE_Y][-IMG_PAD_SIZE_X] );
wBufSrc = srcImg[0];
// left IMG_PAD_SIZE
for (i = 0; i < IMG_PAD_SIZE_X; ++i)
*(wBufDst++) = wBufSrc[0];
// center 0-(s->size_x)
memcpy(wBufDst, wBufSrc, s->size_x * sizeof(imgpel));
wBufDst += s->size_x;
// right IMG_PAD_SIZE
for (i = 0; i < IMG_PAD_SIZE_X; ++i)
*(wBufDst++) = wBufSrc[size_x_minus1];
// Now copy remaining pad lines
for (j = -IMG_PAD_SIZE_Y+1; j < 1; ++j)
{
memcpy(dstImg[j]-IMG_PAD_SIZE_X, dstImg[j - 1]-IMG_PAD_SIZE_X, s->size_x_padded * sizeof(imgpel));
}
for (j = 1; j < s->size_y; ++j)
{
wBufDst = &( dstImg[j][-IMG_PAD_SIZE_X] );
wBufSrc = srcImg[j];
// left IMG_PAD_SIZE
for (i = 0; i < IMG_PAD_SIZE_X; ++i)
*(wBufDst++) = wBufSrc[0];
// center 0-(s->size_x)
//memcpy(wBufDst, wBufSrc, s->size_x * sizeof(imgpel));
wBufDst += s->size_x;
// right IMG_PAD_SIZE
for (i = 0; i < IMG_PAD_SIZE_X; ++i)
*(wBufDst++) = wBufSrc[size_x_minus1];
}
// Replicate bottom pad lines
for (j = s->size_y; j < s->size_y+IMG_PAD_SIZE_Y; ++j)
{
memcpy(dstImg[j]-IMG_PAD_SIZE_X, dstImg[j - 1]-IMG_PAD_SIZE_X, s->size_x_padded * sizeof(imgpel));
}
}
/*!
************************************************************************
* \brief
* Does _horizontal_ interpolation using the SIX TAP filters
*
* \param p_Vid
* pointer to VideoParameters structure
* \param s
* pointer to StorablePicture structure
* \param dstImg
* destination image
* \param srcImg
* source image
************************************************************************
*/
static void getHorSubImageSixTap( VideoParameters *p_Vid, StorablePicture *s, imgpel **dstImg, imgpel **srcImg)
{
int is, jpad, ipad;
int ypadded_size = s->size_y_padded;
int xpadded_size = s->size_x_padded;
imgpel *wBufSrc, *wBufDst;
imgpel *srcImgA, *srcImgB, *srcImgC, *srcImgD, *srcImgE, *srcImgF;
int *iBufDst;
const int tap0 = ONE_FOURTH_TAP[0][0];
const int tap1 = ONE_FOURTH_TAP[0][1];
const int tap2 = ONE_FOURTH_TAP[0][2];
for (jpad = -IMG_PAD_SIZE_Y; jpad < ypadded_size-IMG_PAD_SIZE_Y; jpad++)
{
wBufSrc = srcImg[jpad]-IMG_PAD_SIZE_X;
wBufDst = dstImg[jpad]-IMG_PAD_SIZE_X;
iBufDst = p_Vid->imgY_sub_tmp[jpad]-IMG_PAD_SIZE_X;
srcImgA = &wBufSrc[0];
srcImgB = &wBufSrc[0];
srcImgC = &wBufSrc[0];
srcImgD = &wBufSrc[1];
srcImgE = &wBufSrc[2];
srcImgF = &wBufSrc[3];
// left padded area
is =
(tap0 * (*srcImgA++ + *srcImgD++) +
tap1 * (*srcImgB + *srcImgE++) +
tap2 * (*srcImgC + *srcImgF++));
*iBufDst++ = is;
*wBufDst++ = (imgpel) iClip1 ( p_Vid->max_imgpel_value, rshift_rnd_sf( is, 5 ) );
is =
(tap0 * (*srcImgA++ + *srcImgD++) +
tap1 * (*srcImgB++ + *srcImgE++) +
tap2 * (*srcImgC + *srcImgF++));
*iBufDst++ = is;
*wBufDst++ = (imgpel) iClip1 ( p_Vid->max_imgpel_value, rshift_rnd_sf( is, 5 ) );
// center
for (ipad = 2; ipad < xpadded_size - 4; ipad++)
{
is =
(tap0 * (*srcImgA++ + *srcImgD++) +
tap1 * (*srcImgB++ + *srcImgE++) +
tap2 * (*srcImgC++ + *srcImgF++));
*iBufDst++ = is;
*wBufDst++ = (imgpel) iClip1 ( p_Vid->max_imgpel_value, rshift_rnd_sf( is, 5 ) );
}
is = (
tap0 * (*srcImgA++ + *srcImgD++) +
tap1 * (*srcImgB++ + *srcImgE++) +
tap2 * (*srcImgC++ + *srcImgF ));
*iBufDst++ = is;
*wBufDst++ = (imgpel) iClip1 ( p_Vid->max_imgpel_value, rshift_rnd_sf( is, 5 ) );
// right padded area
is = (
tap0 * (*srcImgA++ + *srcImgD++) +
tap1 * (*srcImgB++ + *srcImgE) +
tap2 * (*srcImgC++ + *srcImgF));
*iBufDst++ = is;
*wBufDst++ = (imgpel) iClip1 ( p_Vid->max_imgpel_value, rshift_rnd_sf( is, 5 ) );
is = (
tap0 * (*srcImgA++ + *srcImgD) +
tap1 * (*srcImgB++ + *srcImgE) +
tap2 * (*srcImgC++ + *srcImgF));
*iBufDst++ = is;
*wBufDst++ = (imgpel) iClip1 ( p_Vid->max_imgpel_value, rshift_rnd_sf( is, 5 ) );
is = (
tap0 * (*srcImgA + *srcImgD) +
tap1 * (*srcImgB + *srcImgE) +
tap2 * (*srcImgC + *srcImgF));
*iBufDst = is;
*wBufDst = (imgpel) iClip1 ( p_Vid->max_imgpel_value, rshift_rnd_sf( is, 5 ) );
}
}
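/* --- Illustrative sketch (not part of the original file) ---
 * One half-pel sample of the six-tap filter used above, written out in scalar
 * form. The symmetric structure tap0*(A+D) + tap1*(B+E) + tap2*(C+F) mirrors
 * the loop body; "(is + 16) >> 5" assumes the usual add-half-then-shift
 * definition of rshift_rnd_sf(is, 5), and the final clamp plays the role of
 * iClip1 with max_value standing in for p_Vid->max_imgpel_value.
 */
static int sixtap_halfpel_sample(const int *p, int tap0, int tap1, int tap2,
                                 int max_value)
{
  /* window layout around the half-pel position, as in the steady-state loop:
   * C = p[-2], B = p[-1], A = p[0], D = p[1], E = p[2], F = p[3] */
  int is = tap0 * (p[0] + p[1]) + tap1 * (p[-1] + p[2]) + tap2 * (p[-2] + p[3]);
  int v = (is + 16) >> 5;
  return (v < 0) ? 0 : ((v > max_value) ? max_value : v);
}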
/*!
************************************************************************
* \brief
* Does _vertical_ interpolation using the SIX TAP filters
*
* \param p_Vid
* pointer to VideoParameters structure
* \param s
* pointer to StorablePicture structure
* \param dstImg
* pointer to target image
* \param srcImg
* pointer to source image
************************************************************************
*/
static void getVerSubImageSixTap( VideoParameters *p_Vid, StorablePicture *s, imgpel **dstImg, imgpel **srcImg)
{
int is, jpad, ipad;
int ypadded_size = s->size_y_padded;
int xpadded_size = s->size_x_padded;
int maxy = ypadded_size - 1-IMG_PAD_SIZE_Y;
imgpel *wxLineDst;
imgpel *srcImgA, *srcImgB, *srcImgC, *srcImgD, *srcImgE, *srcImgF;
const int tap0 = ONE_FOURTH_TAP[0][0];
const int tap1 = ONE_FOURTH_TAP[0][1];
const int tap2 = ONE_FOURTH_TAP[0][2];
// branches within the j loop
// top
for (jpad = -IMG_PAD_SIZE_Y; jpad < 2-IMG_PAD_SIZE_Y; jpad++)
{
wxLineDst = dstImg[jpad]-IMG_PAD_SIZE_X;
srcImgA = srcImg[jpad ]-IMG_PAD_SIZE_X;
srcImgB = srcImg[0-IMG_PAD_SIZE_Y]-IMG_PAD_SIZE_X;
srcImgC = srcImg[0-IMG_PAD_SIZE_Y]-IMG_PAD_SIZE_X;
srcImgD = srcImg[jpad + 1]-IMG_PAD_SIZE_X;
srcImgE = srcImg[jpad + 2]-IMG_PAD_SIZE_X;
srcImgF = srcImg[jpad + 3]-IMG_PAD_SIZE_X;
for (ipad = 0; ipad < xpadded_size; ipad++)
{
is =
(tap0 * (*srcImgA++ + *srcImgD++) +
tap1 * (*srcImgB++ + *srcImgE++) +
tap2 * (*srcImgC++ + *srcImgF++));
wxLineDst[ipad] = (imgpel) iClip1 (p_Vid->max_imgpel_value, rshift_rnd_sf( is, 5 ) );
}
}
// center
for (jpad = 2-IMG_PAD_SIZE_Y; jpad < ypadded_size - 3-IMG_PAD_SIZE_Y; jpad++)
{
wxLineDst = dstImg[jpad]-IMG_PAD_SIZE_X;
srcImgA = srcImg[jpad ]-IMG_PAD_SIZE_X;
srcImgB = srcImg[jpad - 1]-IMG_PAD_SIZE_X;
srcImgC = srcImg[jpad - 2]-IMG_PAD_SIZE_X;
srcImgD = srcImg[jpad + 1]-IMG_PAD_SIZE_X;
srcImgE = srcImg[jpad + 2]-IMG_PAD_SIZE_X;
srcImgF = srcImg[jpad + 3]-IMG_PAD_SIZE_X;
for (ipad = 0; ipad < xpadded_size; ipad++)
{
is =
(tap0 * (*srcImgA++ + *srcImgD++) +
tap1 * (*srcImgB++ + *srcImgE++) +
tap2 * (*srcImgC++ + *srcImgF++));
wxLineDst[ipad] = (imgpel) iClip1 ( p_Vid->max_imgpel_value, rshift_rnd_sf( is, 5 ) );
}
}
// bottom
for (jpad = ypadded_size - 3-IMG_PAD_SIZE_Y; jpad < ypadded_size-IMG_PAD_SIZE_Y; jpad++)
{
wxLineDst = dstImg[jpad]-IMG_PAD_SIZE_X;
srcImgA = srcImg[jpad ]-IMG_PAD_SIZE_X;
srcImgB = srcImg[jpad - 1]-IMG_PAD_SIZE_X;
srcImgC = srcImg[jpad - 2]-IMG_PAD_SIZE_X;
srcImgD = srcImg[imin (maxy, jpad + 1)]-IMG_PAD_SIZE_X;
srcImgE = srcImg[maxy]-IMG_PAD_SIZE_X;
srcImgF = srcImg[maxy]-IMG_PAD_SIZE_X;
for (ipad = 0; ipad < xpadded_size; ipad++)
{
is =
(tap0 * (*srcImgA++ + *srcImgD++) +
tap1 * (*srcImgB++ + *srcImgE++) +
tap2 * (*srcImgC++ + *srcImgF++));
wxLineDst[ipad] = (imgpel) iClip1 ( p_Vid->max_imgpel_value, rshift_rnd_sf( is, 5 ) );
}
}
}
/*!
************************************************************************
* \brief
* Does _vertical_ interpolation using the SIX TAP filters
*
* \param p_Vid
* pointer to VideoParameters structure
* \param s
* pointer to StorablePicture structure
* \param dstImg
* pointer to destination image
************************************************************************
*/
static void getVerSubImageSixTapTmp( VideoParameters *p_Vid, StorablePicture *s, imgpel **dstImg)
{
int is, jpad, ipad;
int ypadded_size = s->size_y_padded;
int xpadded_size = s->size_x_padded;
int maxy = ypadded_size - 1-IMG_PAD_SIZE_Y;
imgpel *wxLineDst;
int *srcImgA, *srcImgB, *srcImgC, *srcImgD, *srcImgE, *srcImgF;
const int tap0 = ONE_FOURTH_TAP[0][0];
const int tap1 = ONE_FOURTH_TAP[0][1];
const int tap2 = ONE_FOURTH_TAP[0][2];
// top
for (jpad = -IMG_PAD_SIZE_Y; jpad < 2-IMG_PAD_SIZE_Y; jpad++)
{
wxLineDst = dstImg[jpad]-IMG_PAD_SIZE_X;
srcImgA = p_Vid->imgY_sub_tmp[jpad ]-IMG_PAD_SIZE_X;
srcImgB = p_Vid->imgY_sub_tmp[0-IMG_PAD_SIZE_Y]-IMG_PAD_SIZE_X;
srcImgC = p_Vid->imgY_sub_tmp[0-IMG_PAD_SIZE_Y]-IMG_PAD_SIZE_X;
srcImgD = p_Vid->imgY_sub_tmp[jpad + 1]-IMG_PAD_SIZE_X;
srcImgE = p_Vid->imgY_sub_tmp[jpad + 2]-IMG_PAD_SIZE_X;
srcImgF = p_Vid->imgY_sub_tmp[jpad + 3]-IMG_PAD_SIZE_X;
for (ipad = 0; ipad < xpadded_size; ipad++)
{
is =
(tap0 * (*srcImgA++ + *srcImgD++) +
tap1 * (*srcImgB++ + *srcImgE++) +
tap2 * (*srcImgC++ + *srcImgF++));
wxLineDst[ipad] = (imgpel) iClip1 ( p_Vid->max_imgpel_value, rshift_rnd_sf( is, 10 ) );
}
}
// center
for (jpad = 2-IMG_PAD_SIZE_Y; jpad < ypadded_size - 3-IMG_PAD_SIZE_Y; jpad++)
{
wxLineDst = dstImg[jpad]-IMG_PAD_SIZE_X;
srcImgA = p_Vid->imgY_sub_tmp[jpad ]-IMG_PAD_SIZE_X;
srcImgB = p_Vid->imgY_sub_tmp[jpad - 1]-IMG_PAD_SIZE_X;
srcImgC = p_Vid->imgY_sub_tmp[jpad - 2]-IMG_PAD_SIZE_X;
srcImgD = p_Vid->imgY_sub_tmp[jpad + 1]-IMG_PAD_SIZE_X;
srcImgE = p_Vid->imgY_sub_tmp[jpad + 2]-IMG_PAD_SIZE_X;
srcImgF = p_Vid->imgY_sub_tmp[jpad + 3]-IMG_PAD_SIZE_X;
for (ipad = 0; ipad < xpadded_size; ipad++)
{
is =
(tap0 * (*srcImgA++ + *srcImgD++) +
tap1 * (*srcImgB++ + *srcImgE++) +
tap2 * (*srcImgC++ + *srcImgF++));
wxLineDst[ipad] = (imgpel) iClip1 ( p_Vid->max_imgpel_value, rshift_rnd_sf( is, 10 ) );
}
}
// bottom
for (jpad = ypadded_size - 3-IMG_PAD_SIZE_Y; jpad < ypadded_size-IMG_PAD_SIZE_Y; jpad++)
{
wxLineDst = dstImg[jpad]-IMG_PAD_SIZE_X;
srcImgA = p_Vid->imgY_sub_tmp[jpad ]-IMG_PAD_SIZE_X;
srcImgB = p_Vid->imgY_sub_tmp[jpad - 1]-IMG_PAD_SIZE_X;
srcImgC = p_Vid->imgY_sub_tmp[jpad - 2]-IMG_PAD_SIZE_X;
srcImgD = p_Vid->imgY_sub_tmp[imin (maxy, jpad + 1)]-IMG_PAD_SIZE_X;
srcImgE = p_Vid->imgY_sub_tmp[maxy]-IMG_PAD_SIZE_X;
srcImgF = p_Vid->imgY_sub_tmp[maxy]-IMG_PAD_SIZE_X;
for (ipad = 0; ipad < xpadded_size; ipad++)
{
is =
(tap0 * (*srcImgA++ + *srcImgD++) +
tap1 * (*srcImgB++ + *srcImgE++) +
tap2 * (*srcImgC++ + *srcImgF++));
wxLineDst[ipad] = (imgpel) iClip1 ( p_Vid->max_imgpel_value, rshift_rnd_sf( is, 10 ) );
}
}
}
/*!
************************************************************************
* \brief
* Does _horizontal_ interpolation using the BiLinear filter
*
* \param s
* pointer to StorablePicture structure
* \param dstImg
* destination Image
* \param srcImgL
* source left image
* \param srcImgR
* source right image
************************************************************************
*/
static void getSubImageBiLinear( StorablePicture *s, imgpel **dstImg, imgpel **srcImgL, imgpel **srcImgR)
{
int jpad, ipad;
int ypadded_size = s->size_y_padded;
int xpadded_size = s->size_x_padded;
imgpel *wBufSrcL, *wBufSrcR, *wBufDst;
for (jpad = -IMG_PAD_SIZE_Y; jpad < ypadded_size-IMG_PAD_SIZE_Y; jpad++)
{
wBufSrcL = srcImgL[jpad]-IMG_PAD_SIZE_X;
wBufSrcR = srcImgR[jpad]-IMG_PAD_SIZE_X;
wBufDst = dstImg[jpad]-IMG_PAD_SIZE_X;
for (ipad = 0; ipad < xpadded_size; ipad++)
{
*wBufDst++ = (imgpel) rshift_rnd_sf( *wBufSrcL++ + *wBufSrcR++, 1 );
}
}
}
/*!
************************************************************************
* \brief
* Does _horizontal_ interpolation using the BiLinear filter
*
* \param s
* pointer to StorablePicture structure
* \param dstImg
* destination Image
* \param srcImgL
* source left image
* \param srcImgR
* source right image
************************************************************************
*/
static void getHorSubImageBiLinear( StorablePicture *s, imgpel **dstImg, imgpel **srcImgL, imgpel **srcImgR)
{
int jpad, ipad;
int ypadded_size = s->size_y_padded;
int xpadded_size = s->size_x_padded - 1;
imgpel *wBufSrcL, *wBufSrcR, *wBufDst;
for (jpad = -IMG_PAD_SIZE_Y; jpad < ypadded_size-IMG_PAD_SIZE_Y; jpad++)
{
wBufSrcL = srcImgL[jpad]-IMG_PAD_SIZE_X;
wBufSrcR = &srcImgR[jpad][1-IMG_PAD_SIZE_X];
wBufDst = dstImg[jpad]-IMG_PAD_SIZE_X;
// left padded area + center
for (ipad = 0; ipad < xpadded_size; ipad++)
{
*wBufDst++ = (imgpel) rshift_rnd_sf( *wBufSrcL++ + *wBufSrcR++, 1 );
}
// right padded area
*wBufDst++ = (imgpel) rshift_rnd_sf( *wBufSrcL++ + wBufSrcR[-1], 1 );
}
}
/*!
************************************************************************
* \brief
* Does _vertical_ interpolation using the BiLinear filter
*
* \param s
* pointer to StorablePicture structure
* \param dstImg
* destination Image
* \param srcImgT
* source top image
* \param srcImgB
* source bottom image
************************************************************************
*/
static void getVerSubImageBiLinear( StorablePicture *s, imgpel **dstImg, imgpel **srcImgT, imgpel **srcImgB)
{
int jpad, ipad;
int ypadded_size = s->size_y_padded - 1;
int xpadded_size = s->size_x_padded;
imgpel *wBufSrcT, *wBufSrcB, *wBufDst;
// top
for (jpad = -IMG_PAD_SIZE_Y; jpad < ypadded_size-IMG_PAD_SIZE_Y; jpad++)
{
wBufSrcT = srcImgT[jpad]-IMG_PAD_SIZE_X;
wBufDst = dstImg[jpad]-IMG_PAD_SIZE_X;
wBufSrcB = srcImgB[jpad + 1]-IMG_PAD_SIZE_X;
for (ipad = 0; ipad < xpadded_size; ipad++)
{
*wBufDst++ = (imgpel) rshift_rnd_sf(*wBufSrcT++ + *wBufSrcB++, 1);
}
}
// bottom
wBufSrcT = srcImgT[ypadded_size-IMG_PAD_SIZE_Y]-IMG_PAD_SIZE_X;
wBufDst = dstImg[ypadded_size-IMG_PAD_SIZE_Y]-IMG_PAD_SIZE_X;
wBufSrcB = srcImgB[ypadded_size-IMG_PAD_SIZE_Y]-IMG_PAD_SIZE_X;
for (ipad = 0; ipad < xpadded_size; ipad++)
{
*wBufDst++ = (imgpel) rshift_rnd_sf(*wBufSrcT++ + *wBufSrcB++, 1);
}
}
/*!
************************************************************************
* \brief
* Does _diagonal_ interpolation using the BiLinear filter
*
* \param s
* pointer to StorablePicture structure
* \param dstImg
* destination Image
* \param srcImgT
* source top/left image
* \param srcImgB
* source bottom/right image
************************************************************************
*/
static void getDiagSubImageBiLinear( StorablePicture *s, imgpel **dstImg, imgpel **srcImgT, imgpel **srcImgB )
{
int jpad, ipad;
int maxx = s->size_x_padded - 1-IMG_PAD_SIZE_X;
int maxy = s->size_y_padded - 1-IMG_PAD_SIZE_Y;
imgpel *wBufSrcL, *wBufSrcR, *wBufDst;
for (jpad = -IMG_PAD_SIZE_Y; jpad < maxy; jpad++)
{
wBufSrcL = srcImgT[jpad + 1]-IMG_PAD_SIZE_X;
wBufSrcR = &srcImgB[jpad][1-IMG_PAD_SIZE_X];
wBufDst = dstImg[jpad]-IMG_PAD_SIZE_X;
for (ipad = -IMG_PAD_SIZE_X; ipad < maxx; ipad++)
{
*wBufDst++ = (imgpel) rshift_rnd_sf(*wBufSrcL++ + *wBufSrcR++, 1);
}
*wBufDst++ = (imgpel) rshift_rnd_sf(*wBufSrcL++ + wBufSrcR[-1], 1);
}
wBufSrcL = srcImgT[maxy]-IMG_PAD_SIZE_X;
wBufSrcR = &srcImgB[maxy][1-IMG_PAD_SIZE_X];
wBufDst = dstImg[maxy]-IMG_PAD_SIZE_X;
for (ipad = -IMG_PAD_SIZE_X; ipad < maxx; ipad++)
{
*wBufDst++ = (imgpel) rshift_rnd_sf(*wBufSrcL++ + *wBufSrcR++, 1);
}
*wBufDst++ = (imgpel) rshift_rnd_sf(*wBufSrcL++ + wBufSrcR[-1], 1);
}
/*!
************************************************************************
* \brief
* Creates the 4x4 = 16 images that contain quarter-pel samples
* sub-sampled at different spatial orientations;
* enables more efficient implementation
*
* \param p_Vid
* pointer to VideoParameters structure
* \param s
* pointer to StorablePicture structure
************************************************************************
*/
void getSubImagesLuma( VideoParameters *p_Vid, StorablePicture *s )
{
imgpel ****cImgSub = s->p_curr_img_sub;
int otf_shift = ( p_Vid->p_Inp->OnTheFlyFractMCP == OTF_L1 ) ? (1) : (0) ;
// 0 1 2 3
// 4 5 6 7
// 8 9 10 11
// 12 13 14 15
//// INTEGER PEL POSITIONS ////
// sub-image 0 [0][0]
// simply copy the integer pels
if (cImgSub[0][0][0] != s->p_curr_img[0])
{
getSubImageInteger( s, cImgSub[0][0], s->p_curr_img);
}
else
{
getSubImageInteger_s( s, cImgSub[0][0], s->p_curr_img);
}
//// HALF-PEL POSITIONS: SIX-TAP FILTER ////
// sub-image 2 [0][2]
// HOR interpolate (six-tap) sub-image [0][0]
getHorSubImageSixTap( p_Vid, s, cImgSub[0][2>>otf_shift], cImgSub[0][0] );
// sub-image 8 [2][0]
// VER interpolate (six-tap) sub-image [0][0]
getVerSubImageSixTap( p_Vid, s, cImgSub[2>>otf_shift][0], cImgSub[0][0]);
// sub-image 10 [2][2]
// VER interpolate (six-tap) sub-image [0][2]
getVerSubImageSixTapTmp( p_Vid, s, cImgSub[2>>otf_shift][2>>otf_shift]);
if( !p_Vid->p_Inp->OnTheFlyFractMCP )
{
//// QUARTER-PEL POSITIONS: BI-LINEAR INTERPOLATION ////
// sub-image 1 [0][1]
getSubImageBiLinear ( s, cImgSub[0][1], cImgSub[0][0], cImgSub[0][2]);
// sub-image 4 [1][0]
getSubImageBiLinear ( s, cImgSub[1][0], cImgSub[0][0], cImgSub[2][0]);
// sub-image 5 [1][1]
getSubImageBiLinear ( s, cImgSub[1][1], cImgSub[0][2], cImgSub[2][0]);
// sub-image 6 [1][2]
getSubImageBiLinear ( s, cImgSub[1][2], cImgSub[0][2], cImgSub[2][2]);
// sub-image 9 [2][1]
getSubImageBiLinear ( s, cImgSub[2][1], cImgSub[2][0], cImgSub[2][2]);
// sub-image 3 [0][3]
getHorSubImageBiLinear ( s, cImgSub[0][3], cImgSub[0][2], cImgSub[0][0]);
// sub-image 7 [1][3]
getHorSubImageBiLinear ( s, cImgSub[1][3], cImgSub[0][2], cImgSub[2][0]);
// sub-image 11 [2][3]
getHorSubImageBiLinear ( s, cImgSub[2][3], cImgSub[2][2], cImgSub[2][0]);
// sub-image 12 [3][0]
getVerSubImageBiLinear ( s, cImgSub[3][0], cImgSub[2][0], cImgSub[0][0]);
// sub-image 13 [3][1]
getVerSubImageBiLinear ( s, cImgSub[3][1], cImgSub[2][0], cImgSub[0][2]);
// sub-image 14 [3][2]
getVerSubImageBiLinear ( s, cImgSub[3][2], cImgSub[2][2], cImgSub[0][2]);
// sub-image 15 [3][3]
getDiagSubImageBiLinear( s, cImgSub[3][3], cImgSub[0][2], cImgSub[2][0]);
}
}
|
720499.c | /* cci_fDll.c Chapter 5 */
/* cci_f.c, from Chapter 2, modified to build as a DLL */
#include "Everything.h"
#define BUF_SIZE 256
/* The following line [__declspec (dllexport)] is the only change from cci_f.c */
/* In general, you should maintain a single copy of the function and use a */
/* preprocessor symbol to determine whether or not it is exported */
__declspec (dllexport)
BOOL __cdecl cci_f (LPCTSTR fIn, LPCTSTR fOut, DWORD shift)
/* Caesar cipher function - Simple implementation
* fIn: Source file pathname
* fOut: Destination file pathname
* shift: Numerical shift
* Behavior is modeled after CopyFile */
{
HANDLE hIn, hOut;
DWORD nIn, nOut, iCopy;
CHAR aBuffer [BUF_SIZE], ccBuffer [BUF_SIZE];
BOOL WriteOK = TRUE;
hIn = CreateFile (fIn, GENERIC_READ, 0, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
if (hIn == INVALID_HANDLE_VALUE) return FALSE;
hOut = CreateFile (fOut, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
if (hOut == INVALID_HANDLE_VALUE) { CloseHandle (hIn); return FALSE; }
__try
{
while (WriteOK && ReadFile (hIn, aBuffer, BUF_SIZE, &nIn, NULL) && nIn > 0) {
for (iCopy = 0; iCopy < nIn; iCopy++)
ccBuffer[iCopy] = (BYTE)(aBuffer[iCopy] + shift) % 256;
WriteOK = WriteFile (hOut, ccBuffer, nIn, &nOut, NULL);
}
}
__except(GetExceptionCode() == EXCEPTION_IN_PAGE_ERROR ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH)
{
WriteOK = FALSE;
}
CloseHandle (hIn);
CloseHandle (hOut);
return WriteOK;
}
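/* --- Illustrative sketch (not part of the original file) ---
 * One way a client might load this DLL explicitly and call cci_f through
 * GetProcAddress. The DLL file name "cci_fDll.dll" and the undecorated export
 * name "cci_f" are assumptions for illustration; the actual names depend on
 * how the project builds and exports the function.
 */
typedef BOOL (__cdecl *CCI_F_PROC) (LPCTSTR, LPCTSTR, DWORD);

static BOOL CallCciFromDll (LPCTSTR fIn, LPCTSTR fOut, DWORD shift)
{
	HMODULE hDll;
	CCI_F_PROC pCci;
	BOOL ok = FALSE;

	hDll = LoadLibrary (TEXT("cci_fDll.dll"));
	if (hDll == NULL) return FALSE;
	pCci = (CCI_F_PROC) GetProcAddress (hDll, "cci_f");
	if (pCci != NULL)
		ok = pCci (fIn, fOut, shift);
	FreeLibrary (hDll);
	return ok;
}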
|
468536.c | #include <substrate/substrate.h>
void (*OSScreenPutFontEx)(int buffer, int x, int y, char* msg);
void callback(COSSubstrate_FunctionContext* ctx) {
ctx->args[3] = (unsigned int)("Hello From the Substrate!");
}
void _start() {
unsigned int coreinit_handle;
COSS_SPECIFICS->OSDynLoad_Acquire("coreinit.rpl", &coreinit_handle);
COSS_SPECIFICS->OSDynLoad_FindExport(coreinit_handle, 0, "OSScreenPutFontEx", &OSScreenPutFontEx);
unsigned int substrate_handle;
COSS_SPECIFICS->COSSDynLoad_Acquire("substrate.cosm", &substrate_handle);
COSS_SPECIFICS->COSSDynLoad_FindExport(substrate_handle, "COSSubstrate_PatchFunc", &COSSubstrate_PatchFunc);
COSSubstrate_PatchFunc(OSScreenPutFontEx, &callback);
}
|
994884.c | /*
* Copyright (c) 2004-2010 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2020 Cisco Systems, Inc. All rights reserved
* Copyright (c) 2011-2012 Los Alamos National Security, LLC.
* All rights reserved.
* Copyright (c) 2014-2020 Intel, Inc. All rights reserved.
* Copyright (c) 2016-2021 IBM Corporation. All rights reserved.
* Copyright (c) 2021 Nanook Consulting All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "prte_config.h"
#include "constants.h"
#include <sys/types.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <string.h>
#include "src/hwloc/hwloc-internal.h"
#include "src/mca/base/base.h"
#include "src/mca/mca.h"
#include "src/threads/tsd.h"
#include "src/util/argv.h"
#include "src/util/if.h"
#include "src/util/output.h"
#include "src/mca/errmgr/errmgr.h"
#include "src/mca/ess/ess.h"
#include "src/runtime/prte_globals.h"
#include "src/util/dash_host/dash_host.h"
#include "src/util/hostfile/hostfile.h"
#include "src/util/name_fns.h"
#include "src/util/show_help.h"
#include "types.h"
#include "src/mca/rmaps/base/base.h"
#include "src/mca/rmaps/base/rmaps_private.h"
int prte_rmaps_base_filter_nodes(prte_app_context_t *app, prte_list_t *nodes, bool remove)
{
int rc = PRTE_ERR_TAKE_NEXT_OPTION;
char *hosts;
/* did the app_context contain a hostfile? */
hosts = NULL;
if (prte_get_attribute(&app->attributes, PRTE_APP_HOSTFILE, (void **) &hosts, PMIX_STRING) &&
NULL != hosts) {
/* yes - filter the node list through the file, removing
* any nodes not found in the file
*/
if (PRTE_SUCCESS != (rc = prte_util_filter_hostfile_nodes(nodes, hosts, remove))) {
PRTE_ERROR_LOG(rc);
free(hosts);
return rc;
}
/** check that anything is here */
if (0 == prte_list_get_size(nodes)) {
prte_show_help("help-prte-rmaps-base.txt", "prte-rmaps-base:no-mapped-node", true,
app->app, "-hostfile", hosts);
free(hosts);
return PRTE_ERR_SILENT;
}
free(hosts);
}
/* did the app_context contain an add-hostfile? */
hosts = NULL;
if (prte_get_attribute(&app->attributes, PRTE_APP_ADD_HOSTFILE, (void **) &hosts, PMIX_STRING) &&
NULL != hosts) {
/* yes - filter the node list through the file, removing
* any nodes not found in the file
*/
if (PRTE_SUCCESS != (rc = prte_util_filter_hostfile_nodes(nodes, hosts, remove))) {
free(hosts);
PRTE_ERROR_LOG(rc);
return rc;
}
/** check that anything is here */
if (0 == prte_list_get_size(nodes)) {
prte_show_help("help-prte-rmaps-base.txt", "prte-rmaps-base:no-mapped-node", true,
app->app, "-add-hostfile", hosts);
free(hosts);
return PRTE_ERR_SILENT;
}
free(hosts);
}
/* now filter the list through any -host specification */
hosts = NULL;
if (prte_get_attribute(&app->attributes, PRTE_APP_DASH_HOST, (void **) &hosts, PMIX_STRING) &&
NULL != hosts) {
if (PRTE_SUCCESS != (rc = prte_util_filter_dash_host_nodes(nodes, hosts, remove))) {
PRTE_ERROR_LOG(rc);
free(hosts);
return rc;
}
/** check that anything is left! */
if (0 == prte_list_get_size(nodes)) {
prte_show_help("help-prte-rmaps-base.txt", "prte-rmaps-base:no-mapped-node", true,
app->app, "-host", hosts);
free(hosts);
return PRTE_ERR_SILENT;
}
free(hosts);
}
/* now filter the list through any add-host specification */
hosts = NULL;
if (prte_get_attribute(&app->attributes, PRTE_APP_ADD_HOST, (void **) &hosts, PMIX_STRING) &&
NULL != hosts) {
if (PRTE_SUCCESS != (rc = prte_util_filter_dash_host_nodes(nodes, hosts, remove))) {
PRTE_ERROR_LOG(rc);
free(hosts);
return rc;
}
/** check that anything is left! */
if (0 == prte_list_get_size(nodes)) {
prte_show_help("help-prte-rmaps-base.txt", "prte-rmaps-base:no-mapped-node", true,
app->app, "-add-host", hosts);
free(hosts);
return PRTE_ERR_SILENT;
}
free(hosts);
}
return rc;
}
/*
* Query the registry for all nodes allocated to a specified app_context
*/
int prte_rmaps_base_get_target_nodes(prte_list_t *allocated_nodes, int32_t *total_num_slots,
prte_app_context_t *app, prte_mapping_policy_t policy,
bool initial_map, bool silent)
{
prte_list_item_t *item;
prte_node_t *node, *nd, *nptr, *next;
int32_t num_slots;
int32_t i;
int rc;
prte_job_t *daemons;
bool novm;
prte_list_t nodes;
char *hosts = NULL;
/** set default answer */
*total_num_slots = 0;
/* get the daemon job object */
daemons = prte_get_job_data_object(PRTE_PROC_MY_NAME->nspace);
/* see if we have a vm or not */
novm = prte_get_attribute(&daemons->attributes, PRTE_JOB_NO_VM, NULL, PMIX_BOOL);
/* if this is NOT a managed allocation, then we use the nodes
* that were specified for this app - there is no need to collect
* all available nodes and "filter" them.
*
* However, if it is a managed allocation AND the hostfile or the hostlist was
* provided, those take precedence, so process them and filter as we normally do.
*/
if (!prte_managed_allocation
|| (prte_managed_allocation
&& (prte_get_attribute(&app->attributes, PRTE_APP_DASH_HOST, (void **) &hosts,
PMIX_STRING)
|| prte_get_attribute(&app->attributes, PRTE_APP_HOSTFILE, (void **) &hosts,
PMIX_STRING)))) {
PRTE_CONSTRUCT(&nodes, prte_list_t);
/* if the app provided a dash-host, then use those nodes */
hosts = NULL;
if (prte_get_attribute(&app->attributes, PRTE_APP_DASH_HOST, (void **) &hosts,
PMIX_STRING)) {
PRTE_OUTPUT_VERBOSE((5, prte_rmaps_base_framework.framework_output,
"%s using dash_host %s", PRTE_NAME_PRINT(PRTE_PROC_MY_NAME),
hosts));
if (PRTE_SUCCESS != (rc = prte_util_add_dash_host_nodes(&nodes, hosts, false))) {
PRTE_ERROR_LOG(rc);
free(hosts);
return rc;
}
free(hosts);
} else if (prte_get_attribute(&app->attributes, PRTE_APP_HOSTFILE, (void **) &hosts,
PMIX_STRING)) {
/* otherwise, if the app provided a hostfile, then use that */
PRTE_OUTPUT_VERBOSE((5, prte_rmaps_base_framework.framework_output,
"%s using hostfile %s", PRTE_NAME_PRINT(PRTE_PROC_MY_NAME),
hosts));
if (PRTE_SUCCESS != (rc = prte_util_add_hostfile_nodes(&nodes, hosts))) {
free(hosts);
PRTE_ERROR_LOG(rc);
return rc;
}
free(hosts);
} else {
/* if nothing else was specified by the app, then use all known nodes, which
* will include ourselves
*/
PRTE_OUTPUT_VERBOSE((5, prte_rmaps_base_framework.framework_output,
"%s using known nodes", PRTE_NAME_PRINT(PRTE_PROC_MY_NAME)));
goto addknown;
}
/** if we still don't have anything */
if (0 == prte_list_get_size(&nodes)) {
if (!silent) {
prte_show_help("help-prte-rmaps-base.txt", "prte-rmaps-base:no-available-resources",
true);
}
PRTE_DESTRUCT(&nodes);
return PRTE_ERR_SILENT;
}
/* find the nodes in our node array and assemble them
* in list order as that is what the user specified. Note
* that the prte_node_t objects on the nodes list are not
* fully filled in - they only contain the user-provided
* name of the node as a temp object. Thus, we cannot just
* check to see if the node pointer matches that of a node
* in the node_pool.
*/
PRTE_LIST_FOREACH_SAFE(nptr, next, &nodes, prte_node_t)
{
for (i = 0; i < prte_node_pool->size; i++) {
if (NULL
== (node = (prte_node_t *) prte_pointer_array_get_item(prte_node_pool, i))) {
continue;
}
/* ignore nodes that are non-usable */
if (PRTE_FLAG_TEST(node, PRTE_NODE_NON_USABLE)) {
continue;
}
/* ignore nodes that are marked as do-not-use for this mapping */
if (PRTE_NODE_STATE_DO_NOT_USE == node->state) {
PRTE_OUTPUT_VERBOSE((10, prte_rmaps_base_framework.framework_output,
"NODE %s IS MARKED NO_USE", node->name));
/* reset the state so it can be used another time */
node->state = PRTE_NODE_STATE_UP;
continue;
}
if (PRTE_NODE_STATE_DOWN == node->state) {
PRTE_OUTPUT_VERBOSE((10, prte_rmaps_base_framework.framework_output,
"NODE %s IS DOWN", node->name));
continue;
}
if (PRTE_NODE_STATE_NOT_INCLUDED == node->state) {
PRTE_OUTPUT_VERBOSE((10, prte_rmaps_base_framework.framework_output,
"NODE %s IS MARKED NO_INCLUDE", node->name));
/* not to be used */
continue;
}
/* if this node wasn't included in the vm (e.g., by -host), ignore it,
* unless we are mapping prior to launching the vm
*/
if (NULL == node->daemon && !novm) {
PRTE_OUTPUT_VERBOSE((10, prte_rmaps_base_framework.framework_output,
"NODE %s HAS NO DAEMON", node->name));
continue;
}
if (!prte_nptr_match(node, nptr)) {
PRTE_OUTPUT_VERBOSE((10, prte_rmaps_base_framework.framework_output,
"NODE %s DOESNT MATCH NODE %s", node->name, nptr->name));
continue;
}
/* retain a copy for our use in case the item gets
* destructed along the way
*/
PRTE_RETAIN(node);
if (initial_map) {
/* if this is the first app_context we
* are getting for an initial map of a job,
* then mark all nodes as unmapped
*/
PRTE_FLAG_UNSET(node, PRTE_NODE_FLAG_MAPPED);
}
/* the list is ordered as per user direction using -host
* or the listing in -hostfile - preserve that ordering */
prte_list_append(allocated_nodes, &node->super);
break;
}
/* remove the item from the list as we have allocated it */
prte_list_remove_item(&nodes, (prte_list_item_t *) nptr);
PRTE_RELEASE(nptr);
}
PRTE_DESTRUCT(&nodes);
/* now prune for usage and compute total slots */
goto complete;
}
addknown:
/* add everything in the node pool that can be used - add them
* in daemon order, which may be different than the order in the
* node pool. Since an empty list is passed into us, the list at
* this point either has the HNP node or nothing, and the HNP
* node obviously has a daemon on it (us!)
*/
if (0 == prte_list_get_size(allocated_nodes)) {
/* the list is empty - if the HNP is allocated, then add it */
if (prte_hnp_is_allocated) {
nd = (prte_node_t *) prte_pointer_array_get_item(prte_node_pool, 0);
if (!PRTE_FLAG_TEST(nd, PRTE_NODE_NON_USABLE)) {
PRTE_RETAIN(nd);
prte_list_append(allocated_nodes, &nd->super);
} else {
nd = NULL;
}
} else {
nd = NULL;
}
} else {
nd = (prte_node_t *) prte_list_get_last(allocated_nodes);
}
for (i = 1; i < prte_node_pool->size; i++) {
if (NULL != (node = (prte_node_t *) prte_pointer_array_get_item(prte_node_pool, i))) {
/* ignore nodes that are non-usable */
if (PRTE_FLAG_TEST(node, PRTE_NODE_NON_USABLE)) {
continue;
}
/* ignore nodes that are marked as do-not-use for this mapping */
if (PRTE_NODE_STATE_DO_NOT_USE == node->state) {
PRTE_OUTPUT_VERBOSE((10, prte_rmaps_base_framework.framework_output,
"NODE %s IS MARKED NO_USE", node->name));
/* reset the state so it can be used another time */
node->state = PRTE_NODE_STATE_UP;
continue;
}
if (PRTE_NODE_STATE_DOWN == node->state) {
PRTE_OUTPUT_VERBOSE((10, prte_rmaps_base_framework.framework_output,
"NODE %s IS MARKED DOWN", node->name));
continue;
}
if (PRTE_NODE_STATE_NOT_INCLUDED == node->state) {
PRTE_OUTPUT_VERBOSE((10, prte_rmaps_base_framework.framework_output,
"NODE %s IS MARKED NO_INCLUDE", node->name));
/* not to be used */
continue;
}
/* if this node wasn't included in the vm (e.g., by -host), ignore it,
* unless we are mapping prior to launching the vm
*/
if (NULL == node->daemon && !novm) {
PRTE_OUTPUT_VERBOSE((10, prte_rmaps_base_framework.framework_output,
"NODE %s HAS NO DAEMON", node->name));
continue;
}
/* retain a copy for our use in case the item gets
* destructed along the way
*/
PRTE_RETAIN(node);
if (initial_map) {
/* if this is the first app_context we
* are getting for an initial map of a job,
* then mark all nodes as unmapped
*/
PRTE_FLAG_UNSET(node, PRTE_NODE_FLAG_MAPPED);
}
if (NULL == nd || NULL == nd->daemon || NULL == node->daemon
|| nd->daemon->name.rank < node->daemon->name.rank) {
/* just append to end */
prte_list_append(allocated_nodes, &node->super);
nd = node;
} else {
/* starting from end, put this node in daemon-vpid order */
while (node->daemon->name.rank < nd->daemon->name.rank) {
if (prte_list_get_begin(allocated_nodes) == prte_list_get_prev(&nd->super)) {
/* insert at beginning */
prte_list_prepend(allocated_nodes, &node->super);
goto moveon;
}
nd = (prte_node_t *) prte_list_get_prev(&nd->super);
}
item = prte_list_get_next(&nd->super);
if (item == prte_list_get_end(allocated_nodes)) {
/* we are at the end - just append */
prte_list_append(allocated_nodes, &node->super);
} else {
nd = (prte_node_t *) item;
prte_list_insert_pos(allocated_nodes, item, &node->super);
}
moveon:
/* reset us back to the end for the next node */
nd = (prte_node_t *) prte_list_get_last(allocated_nodes);
}
}
}
PRTE_OUTPUT_VERBOSE((5, prte_rmaps_base_framework.framework_output,
"%s Starting with %d nodes in list", PRTE_NAME_PRINT(PRTE_PROC_MY_NAME),
(int) prte_list_get_size(allocated_nodes)));
/** check that anything is here */
if (0 == prte_list_get_size(allocated_nodes)) {
if (!silent) {
prte_show_help("help-prte-rmaps-base.txt", "prte-rmaps-base:no-available-resources",
true);
}
return PRTE_ERR_SILENT;
}
/* filter the nodes thru any hostfile and dash-host options */
PRTE_OUTPUT_VERBOSE((5, prte_rmaps_base_framework.framework_output, "%s Filtering thru apps",
PRTE_NAME_PRINT(PRTE_PROC_MY_NAME)));
if (PRTE_SUCCESS != (rc = prte_rmaps_base_filter_nodes(app, allocated_nodes, true))
&& PRTE_ERR_TAKE_NEXT_OPTION != rc) {
PRTE_ERROR_LOG(rc);
return rc;
}
PRTE_OUTPUT_VERBOSE((5, prte_rmaps_base_framework.framework_output,
"%s Retained %d nodes in list", PRTE_NAME_PRINT(PRTE_PROC_MY_NAME),
(int) prte_list_get_size(allocated_nodes)));
complete:
/* if we are mapping debuggers, then they don't count against
* the allocation */
if (PRTE_FLAG_TEST(app, PRTE_APP_FLAG_TOOL)) {
num_slots = INT32_MAX;
if (!prte_hnp_is_allocated
|| (PRTE_GET_MAPPING_DIRECTIVE(policy) & PRTE_MAPPING_NO_USE_LOCAL)) {
PRTE_LIST_FOREACH_SAFE(node, next, allocated_nodes, prte_node_t)
{
if (0 == node->index) {
prte_list_remove_item(allocated_nodes, &node->super);
PRTE_RELEASE(node); /* "un-retain" it */
break;
}
}
}
} else {
num_slots = 0;
PRTE_LIST_FOREACH_SAFE(node, next, allocated_nodes, prte_node_t)
{
/* if the hnp was not allocated, or flagged not to be used,
* then remove it here */
if (!prte_hnp_is_allocated
|| (PRTE_GET_MAPPING_DIRECTIVE(policy) & PRTE_MAPPING_NO_USE_LOCAL)) {
if (0 == node->index) {
prte_list_remove_item(allocated_nodes, &node->super);
PRTE_RELEASE(node); /* "un-retain" it */
continue;
}
}
/** check to see if this node is fully used - remove if so */
if (0 != node->slots_max && node->slots_inuse >= node->slots_max) {
PRTE_OUTPUT_VERBOSE((5, prte_rmaps_base_framework.framework_output,
"%s Removing node %s: max %d inuse %d",
PRTE_NAME_PRINT(PRTE_PROC_MY_NAME), node->name,
node->slots_max, node->slots_inuse));
prte_list_remove_item(allocated_nodes, &node->super);
PRTE_RELEASE(node); /* "un-retain" it */
continue;
}
if (node->slots <= node->slots_inuse
&& (PRTE_MAPPING_NO_OVERSUBSCRIBE & PRTE_GET_MAPPING_DIRECTIVE(policy))) {
/* remove the node as fully used */
PRTE_OUTPUT_VERBOSE((5, prte_rmaps_base_framework.framework_output,
"%s Removing node %s slots %d inuse %d",
PRTE_NAME_PRINT(PRTE_PROC_MY_NAME), node->name, node->slots,
node->slots_inuse));
prte_list_remove_item(allocated_nodes, &node->super);
PRTE_RELEASE(node); /* "un-retain" it */
continue;
}
if (node->slots > node->slots_inuse) {
int32_t s;
/* check for any -host allocations */
if (prte_get_attribute(&app->attributes, PRTE_APP_DASH_HOST, (void **) &hosts,
PMIX_STRING)) {
s = prte_util_dash_host_compute_slots(node, hosts);
} else {
s = node->slots - node->slots_inuse;
}
node->slots_available = s;
/* add the available slots */
PRTE_OUTPUT_VERBOSE((5, prte_rmaps_base_framework.framework_output,
"%s node %s has %d slots available",
PRTE_NAME_PRINT(PRTE_PROC_MY_NAME), node->name, s));
num_slots += s;
continue;
}
if (!(PRTE_MAPPING_NO_OVERSUBSCRIBE & PRTE_GET_MAPPING_DIRECTIVE(policy))) {
/* nothing needs to be done here - we don't add slots to the
* count as we don't have any available. Just let the mapper
* do what it needs to do to meet the request
*/
PRTE_OUTPUT_VERBOSE((5, prte_rmaps_base_framework.framework_output,
"%s node %s is fully used, but available for oversubscription",
PRTE_NAME_PRINT(PRTE_PROC_MY_NAME), node->name));
} else {
/* if we cannot use it, remove it from list */
prte_list_remove_item(allocated_nodes, &node->super);
PRTE_RELEASE(node); /* "un-retain" it */
}
}
}
/* Sanity check to make sure we have resources available */
if (0 == prte_list_get_size(allocated_nodes)) {
if (silent) {
/* let the caller know that the resources exist,
* but are currently busy
*/
return PRTE_ERR_RESOURCE_BUSY;
} else {
prte_show_help("help-prte-rmaps-base.txt",
"prte-rmaps-base:all-available-resources-used", true);
return PRTE_ERR_SILENT;
}
}
/* pass back the total number of available slots */
*total_num_slots = num_slots;
if (4 < prte_output_get_verbosity(prte_rmaps_base_framework.framework_output)) {
prte_output(0, "AVAILABLE NODES FOR MAPPING:");
for (item = prte_list_get_first(allocated_nodes);
item != prte_list_get_end(allocated_nodes); item = prte_list_get_next(item)) {
node = (prte_node_t *) item;
prte_output(0, " node: %s daemon: %s slots_available: %d", node->name,
(NULL == node->daemon) ? "NULL" : PRTE_VPID_PRINT(node->daemon->name.rank),
node->slots_available);
}
}
return PRTE_SUCCESS;
}
prte_proc_t *prte_rmaps_base_setup_proc(prte_job_t *jdata, prte_node_t *node, prte_app_idx_t idx)
{
prte_proc_t *proc;
int rc;
prte_app_context_t *app;
proc = PRTE_NEW(prte_proc_t);
/* set the jobid */
PMIX_LOAD_NSPACE(proc->name.nspace, jdata->nspace);
proc->job = jdata;
/* flag the proc as ready for launch */
proc->state = PRTE_PROC_STATE_INIT;
proc->app_idx = idx;
app = (prte_app_context_t*)prte_pointer_array_get_item(jdata->apps, idx);
if (NULL == app) {
PRTE_ERROR_LOG(PRTE_ERR_NOT_FOUND);
PRTE_RELEASE(proc);
return NULL;
}
/* mark the proc as UPDATED so it will be included in the launch */
PRTE_FLAG_SET(proc, PRTE_PROC_FLAG_UPDATED);
if (NULL == node->daemon) {
proc->parent = PMIX_RANK_INVALID;
} else {
proc->parent = node->daemon->name.rank;
}
PRTE_RETAIN(node); /* maintain accounting on object */
proc->node = node;
/* if this is a debugger job, then it doesn't count against
* available slots - otherwise, it does */
if (!PRTE_FLAG_TEST(app, PRTE_APP_FLAG_TOOL)) {
node->num_procs++;
++node->slots_inuse;
}
if (0 > (rc = prte_pointer_array_add(node->procs, (void *) proc))) {
PRTE_ERROR_LOG(rc);
PRTE_RELEASE(proc);
return NULL;
}
/* retain the proc struct so that we correctly track its release */
PRTE_RETAIN(proc);
return proc;
}
/*
* determine the proper starting point for the next mapping operation
*/
prte_node_t *prte_rmaps_base_get_starting_point(prte_list_t *node_list, prte_job_t *jdata)
{
prte_list_item_t *item, *cur_node_item;
prte_node_t *node, *nd1, *ndmin;
int overload;
/* if a bookmark exists from some prior mapping, set us to start there */
if (NULL != jdata->bookmark) {
cur_node_item = NULL;
/* find this node on the list */
for (item = prte_list_get_first(node_list); item != prte_list_get_end(node_list);
item = prte_list_get_next(item)) {
node = (prte_node_t *) item;
if (node->index == jdata->bookmark->index) {
cur_node_item = item;
break;
}
}
/* see if we found it - if not, just start at the beginning */
if (NULL == cur_node_item) {
cur_node_item = prte_list_get_first(node_list);
}
} else {
/* if no bookmark, then just start at the beginning of the list */
cur_node_item = prte_list_get_first(node_list);
}
PRTE_OUTPUT_VERBOSE((5, prte_rmaps_base_framework.framework_output,
"%s Starting bookmark at node %s", PRTE_NAME_PRINT(PRTE_PROC_MY_NAME),
((prte_node_t *) cur_node_item)->name));
/* is this node fully subscribed? If so, then the first
* proc we assign will oversubscribe it, so let's look
* for another candidate
*/
node = (prte_node_t *) cur_node_item;
ndmin = node;
overload = ndmin->slots_inuse - ndmin->slots;
if (node->slots_inuse >= node->slots) {
/* work down the list - is there another node that
* would not be oversubscribed?
*/
if (cur_node_item != prte_list_get_last(node_list)) {
item = prte_list_get_next(cur_node_item);
} else {
item = prte_list_get_first(node_list);
}
nd1 = NULL;
while (item != cur_node_item) {
nd1 = (prte_node_t *) item;
if (nd1->slots_inuse < nd1->slots) {
/* this node is not oversubscribed! use it! */
cur_node_item = item;
goto process;
}
/* this one was also oversubscribed, keep track of the
* node that has the least usage - if we can't
* find anyone who isn't fully utilized, we will
* start with the least used node
*/
if (overload >= (nd1->slots_inuse - nd1->slots)) {
ndmin = nd1;
overload = ndmin->slots_inuse - ndmin->slots;
}
if (item == prte_list_get_last(node_list)) {
item = prte_list_get_first(node_list);
} else {
item = prte_list_get_next(item);
}
}
/* if we get here, then we cycled all the way around the
* list without finding a better answer - just use the node
* that is minimally overloaded if it is better than
* what we already have
*/
if (NULL != nd1 && (nd1->slots_inuse - nd1->slots) < (node->slots_inuse - node->slots)) {
cur_node_item = (prte_list_item_t *) ndmin;
}
}
process:
PRTE_OUTPUT_VERBOSE((5, prte_rmaps_base_framework.framework_output, "%s Starting at node %s",
PRTE_NAME_PRINT(PRTE_PROC_MY_NAME),
((prte_node_t *) cur_node_item)->name));
return (prte_node_t *) cur_node_item;
}
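/* --- Illustrative sketch (not part of the original file) ---
 * The bookmark-advance logic above prefers any node that still has free slots
 * and otherwise falls back to the least-overloaded one, where "overload" is
 * slots_inuse - slots exactly as computed in
 * prte_rmaps_base_get_starting_point. The hypothetical helper below isolates
 * that comparison.
 */
static bool node_is_better_start(prte_node_t *candidate, prte_node_t *current)
{
    /* a node with free slots always beats an oversubscribed one */
    if (candidate->slots_inuse < candidate->slots) {
        return true;
    }
    /* otherwise prefer the smaller overload */
    return (candidate->slots_inuse - candidate->slots)
           < (current->slots_inuse - current->slots);
}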
|
100729.c | /**
* FreeRDP: A Remote Desktop Protocol Implementation
* ZGFX (RDP8) Bulk Data Compression
*
* Copyright 2014 Marc-Andre Moreau <[email protected]>
* Copyright 2017 Armin Novak <[email protected]>
* Copyright 2017 Thincast Technologies GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <winpr/crt.h>
#include <winpr/print.h>
#include <winpr/bitstream.h>
#include <freerdp/log.h>
#include <freerdp/codec/zgfx.h>
#define TAG FREERDP_TAG("codec")
/**
* RDP8 Compressor Limits:
*
* Maximum number of uncompressed bytes in a single segment: 65535
* Maximum match distance / minimum history size: 2500000 bytes.
* Maximum number of segments: 65535
* Maximum expansion of a segment (when compressed size exceeds uncompressed): 1000 bytes
* Minimum match length: 3 bytes
*/
struct _ZGFX_TOKEN
{
UINT32 prefixLength;
UINT32 prefixCode;
UINT32 valueBits;
UINT32 tokenType;
UINT32 valueBase;
};
typedef struct _ZGFX_TOKEN ZGFX_TOKEN;
struct _ZGFX_CONTEXT
{
BOOL Compressor;
const BYTE* pbInputCurrent;
const BYTE* pbInputEnd;
UINT32 bits;
UINT32 cBitsRemaining;
UINT32 BitsCurrent;
UINT32 cBitsCurrent;
BYTE OutputBuffer[65536];
UINT32 OutputCount;
BYTE HistoryBuffer[2500000];
UINT32 HistoryIndex;
UINT32 HistoryBufferSize;
};
static const ZGFX_TOKEN ZGFX_TOKEN_TABLE[] =
{
// len code vbits type vbase
{ 1, 0, 8, 0, 0 }, // 0
{ 5, 17, 5, 1, 0 }, // 10001
{ 5, 18, 7, 1, 32 }, // 10010
{ 5, 19, 9, 1, 160 }, // 10011
{ 5, 20, 10, 1, 672 }, // 10100
{ 5, 21, 12, 1, 1696 }, // 10101
{ 5, 24, 0, 0, 0x00 }, // 11000
{ 5, 25, 0, 0, 0x01 }, // 11001
{ 6, 44, 14, 1, 5792 }, // 101100
{ 6, 45, 15, 1, 22176 }, // 101101
{ 6, 52, 0, 0, 0x02 }, // 110100
{ 6, 53, 0, 0, 0x03 }, // 110101
{ 6, 54, 0, 0, 0xFF }, // 110110
{ 7, 92, 18, 1, 54944 }, // 1011100
{ 7, 93, 20, 1, 317088 }, // 1011101
{ 7, 110, 0, 0, 0x04 }, // 1101110
{ 7, 111, 0, 0, 0x05 }, // 1101111
{ 7, 112, 0, 0, 0x06 }, // 1110000
{ 7, 113, 0, 0, 0x07 }, // 1110001
{ 7, 114, 0, 0, 0x08 }, // 1110010
{ 7, 115, 0, 0, 0x09 }, // 1110011
{ 7, 116, 0, 0, 0x0A }, // 1110100
{ 7, 117, 0, 0, 0x0B }, // 1110101
{ 7, 118, 0, 0, 0x3A }, // 1110110
{ 7, 119, 0, 0, 0x3B }, // 1110111
{ 7, 120, 0, 0, 0x3C }, // 1111000
{ 7, 121, 0, 0, 0x3D }, // 1111001
{ 7, 122, 0, 0, 0x3E }, // 1111010
{ 7, 123, 0, 0, 0x3F }, // 1111011
{ 7, 124, 0, 0, 0x40 }, // 1111100
{ 7, 125, 0, 0, 0x80 }, // 1111101
{ 8, 188, 20, 1, 1365664 }, // 10111100
{ 8, 189, 21, 1, 2414240 }, // 10111101
{ 8, 252, 0, 0, 0x0C }, // 11111100
{ 8, 253, 0, 0, 0x38 }, // 11111101
{ 8, 254, 0, 0, 0x39 }, // 11111110
{ 8, 255, 0, 0, 0x66 }, // 11111111
{ 9, 380, 22, 1, 4511392 }, // 101111100
{ 9, 381, 23, 1, 8705696 }, // 101111101
{ 9, 382, 24, 1, 17094304 }, // 101111110
{ 0 }
};
static INLINE BOOL zgfx_GetBits(ZGFX_CONTEXT* _zgfx, UINT32 _nbits)
{
if (!_zgfx)
return FALSE;
while (_zgfx->cBitsCurrent < _nbits)
{
_zgfx->BitsCurrent <<= 8;
if (_zgfx->pbInputCurrent < _zgfx->pbInputEnd)
_zgfx->BitsCurrent += *(_zgfx->pbInputCurrent)++;
_zgfx->cBitsCurrent += 8;
}
_zgfx->cBitsRemaining -= _nbits;
_zgfx->cBitsCurrent -= _nbits;
_zgfx->bits = _zgfx->BitsCurrent >> _zgfx->cBitsCurrent;
_zgfx->BitsCurrent &= ((1 << _zgfx->cBitsCurrent) - 1);
return TRUE;
}
static void zgfx_history_buffer_ring_write(ZGFX_CONTEXT* zgfx, const BYTE* src, size_t count)
{
UINT32 front;
if (count <= 0)
return;
if (count > zgfx->HistoryBufferSize)
{
const size_t residue = count - zgfx->HistoryBufferSize;
count = zgfx->HistoryBufferSize;
src += residue;
zgfx->HistoryIndex = (zgfx->HistoryIndex + residue) % zgfx->HistoryBufferSize;
}
if (zgfx->HistoryIndex + count <= zgfx->HistoryBufferSize)
{
CopyMemory(&(zgfx->HistoryBuffer[zgfx->HistoryIndex]), src, count);
if ((zgfx->HistoryIndex += count) == zgfx->HistoryBufferSize)
zgfx->HistoryIndex = 0;
}
else
{
front = zgfx->HistoryBufferSize - zgfx->HistoryIndex;
CopyMemory(&(zgfx->HistoryBuffer[zgfx->HistoryIndex]), src, front);
CopyMemory(zgfx->HistoryBuffer, &src[front], count - front);
zgfx->HistoryIndex = count - front;
}
}
static void zgfx_history_buffer_ring_read(ZGFX_CONTEXT* zgfx, int offset, BYTE* dst, UINT32 count)
{
UINT32 front;
UINT32 index;
UINT32 bytes;
UINT32 valid;
UINT32 bytesLeft;
BYTE* dptr = dst;
BYTE* origDst = dst;
if (count <= 0)
return;
bytesLeft = count;
index = (zgfx->HistoryIndex + zgfx->HistoryBufferSize - offset) % zgfx->HistoryBufferSize;
bytes = MIN(bytesLeft, offset);
if ((index + bytes) <= zgfx->HistoryBufferSize)
{
CopyMemory(dptr, &(zgfx->HistoryBuffer[index]), bytes);
}
else
{
front = zgfx->HistoryBufferSize - index;
CopyMemory(dptr, &(zgfx->HistoryBuffer[index]), front);
CopyMemory(&dptr[front], zgfx->HistoryBuffer, bytes - front);
}
if ((bytesLeft -= bytes) == 0)
return;
dptr += bytes;
valid = bytes;
do
{
bytes = valid;
if (bytes > bytesLeft)
bytes = bytesLeft;
CopyMemory(dptr, origDst, bytes);
dptr += bytes;
valid <<= 1;
}
while ((bytesLeft -= bytes) > 0);
}
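/* --- Illustrative sketch (not part of the original file) ---
 * A byte-at-a-time reference for the ring read above: the first "offset"
 * bytes come from the history ring, and any bytes beyond that replicate what
 * was just written to dst, which is the net effect of the block copy plus the
 * pattern-doubling loop for overlapping matches (count > offset).
 */
static void zgfx_history_buffer_ring_read_ref(ZGFX_CONTEXT* zgfx, int offset,
                                              BYTE* dst, UINT32 count)
{
	UINT32 i;
	UINT32 index = (zgfx->HistoryIndex + zgfx->HistoryBufferSize - offset) %
	               zgfx->HistoryBufferSize;

	for (i = 0; i < count; i++)
	{
		if (i < (UINT32) offset)
			dst[i] = zgfx->HistoryBuffer[(index + i) % zgfx->HistoryBufferSize];
		else
			dst[i] = dst[i - offset];
	}
}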
static BOOL zgfx_decompress_segment(ZGFX_CONTEXT* zgfx, wStream* stream, size_t segmentSize)
{
BYTE c;
BYTE flags;
UINT32 extra = 0;
int opIndex;
int haveBits;
int inPrefix;
UINT32 count;
UINT32 distance;
BYTE* pbSegment;
size_t cbSegment;
if (!zgfx || !stream)
return FALSE;
cbSegment = segmentSize - 1;
if ((Stream_GetRemainingLength(stream) < segmentSize) || (segmentSize < 1) ||
(segmentSize > UINT32_MAX))
return FALSE;
Stream_Read_UINT8(stream, flags); /* header (1 byte) */
zgfx->OutputCount = 0;
pbSegment = Stream_Pointer(stream);
Stream_Seek(stream, cbSegment);
if (!(flags & PACKET_COMPRESSED))
{
zgfx_history_buffer_ring_write(zgfx, pbSegment, cbSegment);
if (cbSegment > sizeof(zgfx->OutputBuffer))
return FALSE;
CopyMemory(zgfx->OutputBuffer, pbSegment, cbSegment);
zgfx->OutputCount = cbSegment;
return TRUE;
}
zgfx->pbInputCurrent = pbSegment;
zgfx->pbInputEnd = &pbSegment[cbSegment - 1];
/* NumberOfBitsToDecode = ((NumberOfBytesToDecode - 1) * 8) - ValueOfLastByte */
zgfx->cBitsRemaining = 8 * (cbSegment - 1) - *zgfx->pbInputEnd;
zgfx->cBitsCurrent = 0;
zgfx->BitsCurrent = 0;
while (zgfx->cBitsRemaining)
{
haveBits = 0;
inPrefix = 0;
for (opIndex = 0; ZGFX_TOKEN_TABLE[opIndex].prefixLength != 0; opIndex++)
{
while (haveBits < ZGFX_TOKEN_TABLE[opIndex].prefixLength)
{
zgfx_GetBits(zgfx, 1);
inPrefix = (inPrefix << 1) + zgfx->bits;
haveBits++;
}
if (inPrefix == ZGFX_TOKEN_TABLE[opIndex].prefixCode)
{
if (ZGFX_TOKEN_TABLE[opIndex].tokenType == 0)
{
/* Literal */
zgfx_GetBits(zgfx, ZGFX_TOKEN_TABLE[opIndex].valueBits);
c = (BYTE)(ZGFX_TOKEN_TABLE[opIndex].valueBase + zgfx->bits);
zgfx->HistoryBuffer[zgfx->HistoryIndex] = c;
if (++zgfx->HistoryIndex == zgfx->HistoryBufferSize)
zgfx->HistoryIndex = 0;
if (zgfx->OutputCount >= sizeof(zgfx->OutputBuffer))
return FALSE;
zgfx->OutputBuffer[zgfx->OutputCount++] = c;
}
else
{
zgfx_GetBits(zgfx, ZGFX_TOKEN_TABLE[opIndex].valueBits);
distance = ZGFX_TOKEN_TABLE[opIndex].valueBase + zgfx->bits;
if (distance != 0)
{
/* Match */
zgfx_GetBits(zgfx, 1);
if (zgfx->bits == 0)
{
count = 3;
}
else
{
count = 4;
extra = 2;
zgfx_GetBits(zgfx, 1);
while (zgfx->bits == 1)
{
count *= 2;
extra++;
zgfx_GetBits(zgfx, 1);
}
zgfx_GetBits(zgfx, extra);
count += zgfx->bits;
}
if (count > sizeof(zgfx->OutputBuffer) - zgfx->OutputCount)
return FALSE;
zgfx_history_buffer_ring_read(zgfx, distance, &(zgfx->OutputBuffer[zgfx->OutputCount]), count);
zgfx_history_buffer_ring_write(zgfx, &(zgfx->OutputBuffer[zgfx->OutputCount]), count);
zgfx->OutputCount += count;
}
else
{
/* Unencoded */
zgfx_GetBits(zgfx, 15);
count = zgfx->bits;
zgfx->cBitsRemaining -= zgfx->cBitsCurrent;
zgfx->cBitsCurrent = 0;
zgfx->BitsCurrent = 0;
if (count > sizeof(zgfx->OutputBuffer) - zgfx->OutputCount)
return FALSE;
CopyMemory(&(zgfx->OutputBuffer[zgfx->OutputCount]), zgfx->pbInputCurrent, count);
zgfx_history_buffer_ring_write(zgfx, zgfx->pbInputCurrent, count);
zgfx->pbInputCurrent += count;
zgfx->cBitsRemaining -= (8 * count);
zgfx->OutputCount += count;
}
}
break;
}
}
}
return TRUE;
}
int zgfx_decompress(ZGFX_CONTEXT* zgfx, const BYTE* pSrcData, UINT32 SrcSize, BYTE** ppDstData,
UINT32* pDstSize, UINT32 flags)
{
int status = -1;
BYTE descriptor;
wStream* stream = Stream_New((BYTE*)pSrcData, SrcSize);
if (!stream)
return -1;
if (Stream_GetRemainingLength(stream) < 1)
goto fail;
Stream_Read_UINT8(stream, descriptor); /* descriptor (1 byte) */
if (descriptor == ZGFX_SEGMENTED_SINGLE)
{
if (!zgfx_decompress_segment(zgfx, stream, Stream_GetRemainingLength(stream)))
goto fail;
*ppDstData = NULL;
if (zgfx->OutputCount > 0)
*ppDstData = (BYTE*) malloc(zgfx->OutputCount);
if (!*ppDstData)
goto fail;
*pDstSize = zgfx->OutputCount;
CopyMemory(*ppDstData, zgfx->OutputBuffer, zgfx->OutputCount);
}
else if (descriptor == ZGFX_SEGMENTED_MULTIPART)
{
UINT32 segmentSize;
UINT16 segmentNumber;
UINT16 segmentCount;
UINT32 uncompressedSize;
BYTE* pConcatenated;
size_t used = 0;
if (Stream_GetRemainingLength(stream) < 6)
goto fail;
Stream_Read_UINT16(stream, segmentCount); /* segmentCount (2 bytes) */
Stream_Read_UINT32(stream, uncompressedSize); /* uncompressedSize (4 bytes) */
if (Stream_GetRemainingLength(stream) < segmentCount * sizeof(UINT32))
goto fail;
pConcatenated = (BYTE*) malloc(uncompressedSize);
if (!pConcatenated)
goto fail;
*ppDstData = pConcatenated;
*pDstSize = uncompressedSize;
for (segmentNumber = 0; segmentNumber < segmentCount; segmentNumber++)
{
if (Stream_GetRemainingLength(stream) < sizeof(UINT32))
goto fail;
Stream_Read_UINT32(stream, segmentSize); /* segmentSize (4 bytes) */
if (!zgfx_decompress_segment(zgfx, stream, segmentSize))
goto fail;
if (zgfx->OutputCount > UINT32_MAX - used)
goto fail;
if (used + zgfx->OutputCount > uncompressedSize)
goto fail;
CopyMemory(pConcatenated, zgfx->OutputBuffer, zgfx->OutputCount);
pConcatenated += zgfx->OutputCount;
used += zgfx->OutputCount;
}
}
else
{
goto fail;
}
status = 1;
fail:
Stream_Free(stream, FALSE);
return status;
}
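/*
 * Minimal usage sketch (illustrative only, not from the original source):
 * how a caller might decompress a received buffer with zgfx_decompress().
 * The surrounding error handling and the unused flags argument (0) are
 * assumptions.
 */
#if 0
static int zgfx_decompress_example(ZGFX_CONTEXT* zgfx, const BYTE* data, UINT32 size)
{
	BYTE* pDstData = NULL;
	UINT32 DstSize = 0;

	if (zgfx_decompress(zgfx, data, size, &pDstData, &DstSize, 0) < 0)
		return -1; /* decompression failed */

	/* ... consume DstSize bytes at pDstData ... */
	free(pDstData); /* the caller owns the output buffer */
	return 0;
}
#endif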
static BOOL zgfx_compress_segment(ZGFX_CONTEXT* zgfx, wStream* s, const BYTE* pSrcData,
UINT32 SrcSize, UINT32* pFlags)
{
	/* FIXME: Compression is currently not implemented; just copy the raw source */
if (!Stream_EnsureRemainingCapacity(s, SrcSize + 1))
{
WLog_ERR(TAG, "Stream_EnsureRemainingCapacity failed!");
return FALSE;
}
(*pFlags) |= ZGFX_PACKET_COMPR_TYPE_RDP8; /* RDP 8.0 compression format */
Stream_Write_UINT8(s, (*pFlags)); /* header (1 byte) */
Stream_Write(s, pSrcData, SrcSize);
return TRUE;
}
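/*
 * Write pUncompressed as one or more ZGFX segments.  A single-segment
 * descriptor is used when everything fits into one ZGFX_SEGMENTED_MAXSIZE
 * fragment; otherwise a multipart descriptor is written, followed by
 * segmentCount, uncompressedSize and a 4-byte size prefix per segment.  The
 * segment count and sizes are patched in after the data has been written.
 */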
int zgfx_compress_to_stream(ZGFX_CONTEXT* zgfx, wStream* sDst, const BYTE* pUncompressed,
UINT32 uncompressedSize, UINT32* pFlags)
{
int fragment;
UINT16 maxLength;
UINT32 totalLength;
size_t posSegmentCount = 0;
const BYTE* pSrcData;
int status = 0;
maxLength = ZGFX_SEGMENTED_MAXSIZE;
totalLength = uncompressedSize;
pSrcData = pUncompressed;
for (fragment = 0; (totalLength > 0) || (fragment == 0); fragment++)
{
UINT32 SrcSize;
size_t posDstSize;
size_t posDataStart;
UINT32 DstSize;
SrcSize = (totalLength > maxLength) ? maxLength : totalLength;
posDstSize = 0;
totalLength -= SrcSize;
/* Ensure we have enough space for headers */
if (!Stream_EnsureRemainingCapacity(sDst, 12))
{
WLog_ERR(TAG, "Stream_EnsureRemainingCapacity failed!");
return -1;
}
if (fragment == 0)
{
/* First fragment */
/* descriptor (1 byte) */
Stream_Write_UINT8(sDst, (totalLength == 0) ?
ZGFX_SEGMENTED_SINGLE : ZGFX_SEGMENTED_MULTIPART);
if (totalLength > 0)
{
posSegmentCount = Stream_GetPosition(sDst); /* segmentCount (2 bytes) */
Stream_Seek(sDst, 2);
Stream_Write_UINT32(sDst, uncompressedSize); /* uncompressedSize (4 bytes) */
}
}
if (fragment > 0 || totalLength > 0)
{
/* Multipart */
posDstSize = Stream_GetPosition(sDst); /* size (4 bytes) */
Stream_Seek(sDst, 4);
}
posDataStart = Stream_GetPosition(sDst);
if (!zgfx_compress_segment(zgfx, sDst, pSrcData, SrcSize, pFlags))
return -1;
if (posDstSize)
{
/* Fill segment data size */
DstSize = Stream_GetPosition(sDst) - posDataStart;
Stream_SetPosition(sDst, posDstSize);
Stream_Write_UINT32(sDst, DstSize);
Stream_SetPosition(sDst, posDataStart + DstSize);
}
pSrcData += SrcSize;
}
Stream_SealLength(sDst);
/* fill back segmentCount */
if (posSegmentCount)
{
Stream_SetPosition(sDst, posSegmentCount);
Stream_Write_UINT16(sDst, fragment);
Stream_SetPosition(sDst, Stream_Length(sDst));
}
return status;
}
int zgfx_compress(ZGFX_CONTEXT* zgfx, const BYTE* pSrcData, UINT32 SrcSize, BYTE** ppDstData,
UINT32* pDstSize, UINT32* pFlags)
{
int status;
	wStream* s = Stream_New(NULL, SrcSize);
	if (!s)
		return -1;
	status = zgfx_compress_to_stream(zgfx, s, pSrcData, SrcSize, pFlags);
(*ppDstData) = Stream_Buffer(s);
(*pDstSize) = Stream_GetPosition(s);
Stream_Free(s, FALSE);
return status;
}
void zgfx_context_reset(ZGFX_CONTEXT* zgfx, BOOL flush)
{
zgfx->HistoryIndex = 0;
}
ZGFX_CONTEXT* zgfx_context_new(BOOL Compressor)
{
ZGFX_CONTEXT* zgfx;
zgfx = (ZGFX_CONTEXT*) calloc(1, sizeof(ZGFX_CONTEXT));
if (zgfx)
{
zgfx->Compressor = Compressor;
zgfx->HistoryBufferSize = sizeof(zgfx->HistoryBuffer);
zgfx_context_reset(zgfx, FALSE);
}
return zgfx;
}
void zgfx_context_free(ZGFX_CONTEXT* zgfx)
{
free(zgfx);
}
|
628764.c | /* vi:set ts=8 sts=4 sw=4 noet:
*
* VIM - Vi IMproved by Bram Moolenaar
*
* Do ":help uganda" in Vim to read copying and usage conditions.
* Do ":help credits" in Vim to see a list of people who contributed.
* See README.txt for an overview of the Vim source code.
*/
/*
* Tcl extensions by Ingo Wilken <[email protected]>
* Last modification: Wed May 10 21:28:44 CEST 2000
* Requires Tcl 8.0 or higher.
*
* Variables:
* ::vim::current(buffer) # Name of buffer command for current buffer.
* ::vim::current(window) # Name of window command for current window.
* ::vim::range(start) # Start of current range (line number).
* ::vim::range(end) # End of current range (line number).
* ::vim::lbase # Start of line/column numbers (1 or 0).
*
* Commands:
* ::vim::command {cmd} # Execute ex command {cmd}.
* ::vim::option {opt} [val] # Get/Set option {opt}.
* ::vim::expr {expr} # Evaluate {expr} using vim's evaluator.
* ::vim::beep # Guess.
*
* set buf [::vim::buffer {n}] # Create Tcl command for buffer N.
* set bl [::vim::buffer list] # Get list of Tcl commands of all buffers.
* ::vim::buffer exists {n} # True if buffer {n} exists.
*
* set wl [::vim::window list] # Get list of Tcl commands of all windows.
*
* set n [$win height] # Report window height.
* $win height {n} # Set window height to {n}.
* array set pos [$win cursor] # Get cursor position.
* $win cursor {row} {col} # Set cursor position.
* $win cursor pos # Set cursor position from array var "pos"
* $win delcmd {cmd} # Register callback command for closed window.
* $win option {opt} [val] # Get/Set vim option in context of $win.
* $win command {cmd} # Execute ex command in context of $win.
* $win expr {expr} # Evaluate vim expression in context of $win.
* set buf [$win buffer] # Create Tcl command for window's buffer.
*
* $buf name # Reports file name in buffer.
* $buf number # Reports buffer number.
* set l [$buf get {n}] # Get buffer line {n} as a string.
* set L [$buf get {n} {m}] # Get lines {n} through {m} as a list.
* $buf count # Reports number of lines in buffer.
* $buf last # Reports number of last line in buffer.
* $buf delete {n} # Delete line {n}.
* $buf delete {n} {m} # Delete lines {n} through {m}.
* $buf set {n} {l} # Set line {n} to string {l}.
* $buf set {n} {m} {L} # Set lines {n} through {m} from list {L}.
* # Delete/inserts lines as appropriate.
* $buf option {opt} [val] # Get/Set vim option in context of $buf.
* $buf command {cmd} # Execute ex command in context of $buf
* $buf expr {cmd} # Evaluate vim expression in context of $buf.
* array set pos [$buf mark {m}] # Get position of mark.
 * $buf append {n} {str} # Append string {str} to buffer, after line {n}.
* $buf insert {n} {str} # Insert string {str} in buffer as line {n}.
* $buf delcmd {cmd} # Register callback command for deleted buffer.
* set wl [$buf windows] # Get list of Tcl commands for all windows of
* # this buffer.
TODO:
* ::vim::buffer new # create new buffer + Tcl command
*/
#include "vim.h"
#undef EXTERN /* tcl.h defines it too */
#ifdef DYNAMIC_TCL
# define USE_TCL_STUBS /* use tcl's stubs mechanism */
#endif
#include <tcl.h>
#include <string.h>
typedef struct
{
Tcl_Interp *interp;
int exitvalue;
int range_start, range_end;
int lbase;
char *curbuf, *curwin;
} tcl_info;
static tcl_info tclinfo = { NULL, 0, 0, 0, 0, NULL, NULL };
#define VAR_RANGE1 "::vim::range(start)"
#define VAR_RANGE2 "::vim::range(begin)"
#define VAR_RANGE3 "::vim::range(end)"
#define VAR_CURBUF "::vim::current(buffer)"
#define VAR_CURWIN "::vim::current(window)"
#define VAR_LBASE "::vim::lbase"
#define VAR_CURLINE "line"
#define VAR_CURLNUM "lnum"
#define VARNAME_SIZE 64
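/*
 * Conversion between Vim and Tcl line/column numbers.  Vim lines are
 * 1-based and columns 0-based; the Tcl side follows ::vim::lbase, so rows
 * only need adjusting when lbase is 0 and columns only when it is 1.
 */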
#define row2tcl(x) ((x) - (tclinfo.lbase==0))
#define row2vim(x) ((x) + (tclinfo.lbase==0))
#define col2tcl(x) ((x) + (tclinfo.lbase!=0))
#define col2vim(x) ((x) - (tclinfo.lbase!=0))
#define VIMOUT ((ClientData)1)
#define VIMERR ((ClientData)2)
/* This appears to be new in Tcl 8.4. */
#ifndef CONST84
# define CONST84
#endif
/*
 * List of Tcl interpreters that reference a vim window or buffer.
 * Each buffer and window has its own list in the w_tcl_ref or b_tcl_ref
* struct member. We need this because Tcl can create sub-interpreters with
* the "interp" command, and each interpreter can reference all windows and
* buffers.
*/
struct ref
{
struct ref *next;
Tcl_Interp *interp;
Tcl_Command cmd; /* Tcl command that represents this object */
Tcl_Obj *delcmd; /* Tcl command to call when object is being del. */
void *vimobj; /* Vim window or buffer (win_T* or buf_T*) */
};
static char * tclgetbuffer _ANSI_ARGS_((Tcl_Interp *interp, buf_T *buf));
static char * tclgetwindow _ANSI_ARGS_((Tcl_Interp *interp, win_T *win));
static int tclsetdelcmd _ANSI_ARGS_((Tcl_Interp *interp, struct ref *reflist, void *vimobj, Tcl_Obj *delcmd));
static int tclgetlinenum _ANSI_ARGS_ ((Tcl_Interp *interp, Tcl_Obj *obj, int *valueP, buf_T *buf));
static win_T *tclfindwin _ANSI_ARGS_ ((buf_T *buf));
static int tcldoexcommand _ANSI_ARGS_ ((Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[], int objn));
static int tclsetoption _ANSI_ARGS_ ((Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[], int objn));
static int tclvimexpr _ANSI_ARGS_ ((Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[], int objn));
static void tcldelthisinterp _ANSI_ARGS_ ((void));
static int vimerror _ANSI_ARGS_((Tcl_Interp *interp));
static void tclmsg _ANSI_ARGS_((char *text));
static void tclerrmsg _ANSI_ARGS_((char *text));
static void tclupdatevars _ANSI_ARGS_((void));
static struct ref refsdeleted; /* dummy object for deleted ref list */
/*****************************************************************************
* TCL interface manager
****************************************************************************/
#if defined(DYNAMIC_TCL) || defined(PROTO)
# ifndef DYNAMIC_TCL_DLL
# define DYNAMIC_TCL_DLL "tcl83.dll"
# endif
# ifndef DYNAMIC_TCL_VER
# define DYNAMIC_TCL_VER "8.3"
# endif
# ifndef DYNAMIC_TCL /* Just generating prototypes */
typedef int HANDLE;
# endif
# ifndef WIN3264
# include <dlfcn.h>
# define HANDLE void*
# define TCL_PROC void*
# define load_dll(n) dlopen((n), RTLD_LAZY|RTLD_GLOBAL)
# define symbol_from_dll dlsym
# define close_dll dlclose
# else
# define TCL_PROC FARPROC
# define load_dll vimLoadLib
# define symbol_from_dll GetProcAddress
# define close_dll FreeLibrary
# endif
/*
* Declare HANDLE for tcl.dll and function pointers.
*/
static HANDLE hTclLib = NULL;
Tcl_Interp* (*dll_Tcl_CreateInterp)();
void (*dll_Tcl_FindExecutable)(const void *);
/*
* Table of name to function pointer of tcl.
*/
static struct {
char* name;
TCL_PROC* ptr;
} tcl_funcname_table[] = {
{"Tcl_CreateInterp", (TCL_PROC*)&dll_Tcl_CreateInterp},
{"Tcl_FindExecutable", (TCL_PROC*)&dll_Tcl_FindExecutable},
{NULL, NULL},
};
/*
* Make all runtime-links of tcl.
*
* 1. Get module handle using LoadLibraryEx.
* 2. Get pointer to tcl function by GetProcAddress.
 * 3. Repeat 2 until all functions that will be used have been resolved.
*
* Parameter 'libname' provides name of DLL.
* Return OK or FAIL.
*/
static int
tcl_runtime_link_init(char *libname, int verbose)
{
int i;
if (hTclLib)
return OK;
if (!(hTclLib = load_dll(libname)))
{
if (verbose)
EMSG2(_(e_loadlib), libname);
return FAIL;
}
for (i = 0; tcl_funcname_table[i].ptr; ++i)
{
if (!(*tcl_funcname_table[i].ptr = symbol_from_dll(hTclLib,
tcl_funcname_table[i].name)))
{
close_dll(hTclLib);
hTclLib = NULL;
if (verbose)
EMSG2(_(e_loadfunc), tcl_funcname_table[i].name);
return FAIL;
}
}
return OK;
}
#endif /* defined(DYNAMIC_TCL) || defined(PROTO) */
#ifdef DYNAMIC_TCL
static char *find_executable_arg = NULL;
#endif
void
vim_tcl_init(char *arg)
{
#ifndef DYNAMIC_TCL
Tcl_FindExecutable(arg);
#else
find_executable_arg = arg;
#endif
}
#if defined(DYNAMIC_TCL) || defined(PROTO)
static int stubs_initialized = FALSE;
/*
* Return TRUE if the TCL interface can be used.
*/
int
tcl_enabled(int verbose)
{
if (!stubs_initialized && find_executable_arg != NULL
&& tcl_runtime_link_init((char *)p_tcldll, verbose) == OK)
{
Tcl_Interp *interp;
dll_Tcl_FindExecutable(find_executable_arg);
if ((interp = dll_Tcl_CreateInterp()) != NULL)
{
if (Tcl_InitStubs(interp, DYNAMIC_TCL_VER, 0))
{
Tcl_DeleteInterp(interp);
stubs_initialized = TRUE;
}
	    /* FIXME: When Tcl_InitStubs() fails, how should interp be deleted? */
}
}
return stubs_initialized;
}
#endif
void
tcl_end(void)
{
#ifdef DYNAMIC_TCL
if (hTclLib)
{
close_dll(hTclLib);
hTclLib = NULL;
}
#endif
}
/****************************************************************************
Tcl commands
****************************************************************************/
/*
* Replace standard "exit" command.
*
 * Delete the Tcl interpreter; a new one will be created with the next
 * :tcl command. The exit code is saved (and retrieved in tclexit()).
 * Since Tcl's exit is never expected to return, but this replacement
 * does, additional Tcl commands may (except in a trivial case) still be
 * run. Since the interpreter is now marked as deleted, an error
* will be returned -- typically "attempt to call eval in deleted
* interpreter". Hopefully, at this point, checks for TCL_ERROR take
* place and control percolates back up to Vim -- but with this new error
* string in the interpreter's result value. Therefore it would be
* useless for this routine to return the exit code via Tcl_SetResult().
*/
static int
exitcmd(
ClientData dummy UNUSED,
Tcl_Interp *interp,
int objc,
Tcl_Obj *CONST objv[])
{
int value = 0;
switch (objc)
{
case 2:
if (Tcl_GetIntFromObj(interp, objv[1], &value) != TCL_OK)
break;
/* FALLTHROUGH */
case 1:
tclinfo.exitvalue = value;
Tcl_DeleteInterp(interp);
break;
default:
Tcl_WrongNumArgs(interp, 1, objv, "?returnCode?");
}
return TCL_ERROR;
}
/*
* "::vim::beep" - what Vi[m] does best :-)
*/
static int
beepcmd(
ClientData dummy UNUSED,
Tcl_Interp *interp,
int objc,
Tcl_Obj *CONST objv[])
{
if (objc != 1)
{
Tcl_WrongNumArgs(interp, 1, objv, NULL);
return TCL_ERROR;
}
vim_beep(BO_LANG);
return TCL_OK;
}
/*
* "::vim::buffer list" - create a list of buffer commands.
* "::vim::buffer {N}" - create buffer command for buffer N.
* "::vim::buffer exists {N}" - test if buffer N exists.
* "::vim::buffer new" - create a new buffer (not implemented)
*/
static int
buffercmd(
ClientData dummy UNUSED,
Tcl_Interp *interp,
int objc,
Tcl_Obj *CONST objv[])
{
char *name;
buf_T *buf;
Tcl_Obj *resobj;
int err, n, idx;
enum {BCMD_EXISTS, BCMD_LIST};
static CONST84 char *bcmdoptions[] =
{
"exists", "list", (char *)0
};
if (objc < 2)
{
Tcl_WrongNumArgs(interp, 1, objv, "option");
return TCL_ERROR;
}
err = Tcl_GetIntFromObj(interp, objv[1], &n);
if (err == TCL_OK)
{
if (objc != 2)
{
Tcl_WrongNumArgs(interp, 1, objv, "bufNumber");
return TCL_ERROR;
}
FOR_ALL_BUFFERS(buf)
{
if (buf->b_fnum == n)
{
name = tclgetbuffer(interp, buf);
if (name == NULL)
return TCL_ERROR;
Tcl_SetResult(interp, name, TCL_VOLATILE);
return TCL_OK;
}
}
Tcl_SetResult(interp, _("invalid buffer number"), TCL_STATIC);
return TCL_ERROR;
}
Tcl_ResetResult(interp); /* clear error from Tcl_GetIntFromObj */
err = Tcl_GetIndexFromObj(interp, objv[1], bcmdoptions, "option", 0, &idx);
if (err != TCL_OK)
return err;
switch (idx)
{
case BCMD_LIST:
if (objc != 2)
{
Tcl_WrongNumArgs(interp, 2, objv, "");
err = TCL_ERROR;
break;
}
FOR_ALL_BUFFERS(buf)
{
name = tclgetbuffer(interp, buf);
if (name == NULL)
{
err = TCL_ERROR;
break;
}
Tcl_AppendElement(interp, name);
}
break;
case BCMD_EXISTS:
if (objc != 3)
{
Tcl_WrongNumArgs(interp, 2, objv, "bufNumber");
err = TCL_ERROR;
break;
}
err = Tcl_GetIntFromObj(interp, objv[2], &n);
if (err == TCL_OK)
{
buf = buflist_findnr(n);
resobj = Tcl_NewIntObj(buf != NULL);
Tcl_SetObjResult(interp, resobj);
}
break;
default:
Tcl_SetResult(interp, _("not implemented yet"), TCL_STATIC);
err = TCL_ERROR;
}
return err;
}
/*
* "::vim::window list" - create list of window commands.
*/
static int
windowcmd(
ClientData dummy UNUSED,
Tcl_Interp *interp,
int objc,
Tcl_Obj *CONST objv[])
{
char *what, *string;
win_T *win;
if (objc != 2)
{
Tcl_WrongNumArgs(interp, 1, objv, "option");
return TCL_ERROR;
}
what = Tcl_GetStringFromObj(objv[1], NULL);
if (strcmp(what, "list") == 0)
{
FOR_ALL_WINDOWS(win)
{
string = tclgetwindow(interp, win);
if (string == NULL)
return TCL_ERROR;
Tcl_AppendElement(interp, string);
}
return TCL_OK;
}
Tcl_SetResult(interp, _("unknown option"), TCL_STATIC);
return TCL_ERROR;
}
/*
* flags for bufselfcmd and winselfcmd to indicate outstanding actions.
*/
#define FL_UPDATE_SCREEN (1<<0)
#define FL_UPDATE_CURBUF (1<<1)
#define FL_ADJUST_CURSOR (1<<2)
/*
* This function implements the buffer commands.
*/
static int
bufselfcmd(
ClientData ref,
Tcl_Interp *interp,
int objc,
Tcl_Obj *CONST objv[])
{
int opt, err, idx, flags;
int val1, val2, n, i;
buf_T *buf, *savebuf;
win_T *win, *savewin;
Tcl_Obj *resobj;
pos_T *pos;
char *line;
enum
{
BUF_APPEND, BUF_COMMAND, BUF_COUNT, BUF_DELCMD, BUF_DELETE, BUF_EXPR,
BUF_GET, BUF_INSERT, BUF_LAST, BUF_MARK, BUF_NAME, BUF_NUMBER,
BUF_OPTION, BUF_SET, BUF_WINDOWS
};
static CONST84 char *bufoptions[] =
{
"append", "command", "count", "delcmd", "delete", "expr",
"get", "insert", "last", "mark", "name", "number",
"option", "set", "windows", (char *)0
};
if (objc < 2)
{
Tcl_WrongNumArgs(interp, 1, objv, "option ?arg ...?");
return TCL_ERROR;
}
err = Tcl_GetIndexFromObj(interp, objv[1], bufoptions, "option", 0, &idx);
if (err != TCL_OK)
return err;
buf = (buf_T *)((struct ref *)ref)->vimobj;
savebuf = curbuf; curbuf = buf;
savewin = curwin; curwin = tclfindwin(buf);
flags = 0;
opt = 0;
switch (idx)
{
case BUF_COMMAND:
err = tcldoexcommand(interp, objc, objv, 2);
flags |= FL_UPDATE_SCREEN;
break;
case BUF_OPTION:
err = tclsetoption(interp, objc, objv, 2);
flags |= FL_UPDATE_SCREEN;
break;
case BUF_EXPR:
err = tclvimexpr(interp, objc, objv, 2);
break;
case BUF_NAME:
/*
* Get filename of buffer.
*/
if (objc != 2)
{
Tcl_WrongNumArgs(interp, 2, objv, NULL);
err = TCL_ERROR;
break;
}
if (buf->b_ffname)
Tcl_SetResult(interp, (char *)buf->b_ffname, TCL_VOLATILE);
else
Tcl_SetResult(interp, "", TCL_STATIC);
break;
case BUF_LAST:
/*
* Get line number of last line.
*/
opt = 1;
/* fallthrough */
case BUF_COUNT:
/*
* Get number of lines in buffer.
*/
if (objc != 2)
{
Tcl_WrongNumArgs(interp, 2, objv, NULL);
err = TCL_ERROR;
break;
}
val1 = (int)buf->b_ml.ml_line_count;
if (opt)
val1 = row2tcl(val1);
resobj = Tcl_NewIntObj(val1);
Tcl_SetObjResult(interp, resobj);
break;
case BUF_NUMBER:
/*
* Get buffer's number.
*/
if (objc != 2)
{
Tcl_WrongNumArgs(interp, 2, objv, NULL);
err = TCL_ERROR;
break;
}
resobj = Tcl_NewIntObj((int)buf->b_fnum);
Tcl_SetObjResult(interp, resobj);
break;
case BUF_GET:
if (objc != 3 && objc != 4)
{
Tcl_WrongNumArgs(interp, 2, objv, "lineNumber ?lineNumber?");
err = TCL_ERROR;
break;
}
err = tclgetlinenum(interp, objv[2], &val1, buf);
if (err != TCL_OK)
break;
if (objc == 4)
{
err = tclgetlinenum(interp, objv[3], &val2, buf);
if (err != TCL_OK)
break;
if (val1 > val2)
{
n = val1; val1 = val2; val2 = n;
}
Tcl_ResetResult(interp);
for (n = val1; n <= val2 && err == TCL_OK; n++)
{
line = (char *)ml_get_buf(buf, (linenr_T)n, FALSE);
if (line)
Tcl_AppendElement(interp, line);
else
err = TCL_ERROR;
}
}
else { /* objc == 3 */
line = (char *)ml_get_buf(buf, (linenr_T)val1, FALSE);
Tcl_SetResult(interp, line, TCL_VOLATILE);
}
break;
case BUF_SET:
if (objc != 4 && objc != 5)
{
Tcl_WrongNumArgs(interp, 3, objv, "lineNumber ?lineNumber? stringOrList");
err = TCL_ERROR;
break;
}
err = tclgetlinenum(interp, objv[2], &val1, buf);
if (err != TCL_OK)
return TCL_ERROR;
if (objc == 4)
{
/*
* Replace one line with a string.
* $buf set {n} {string}
*/
line = Tcl_GetStringFromObj(objv[3], NULL);
if (u_savesub((linenr_T)val1) != OK)
{
Tcl_SetResult(interp, _("cannot save undo information"), TCL_STATIC);
err = TCL_ERROR;
}
else
if (ml_replace((linenr_T)val1, (char_u *)line, TRUE) != OK)
{
Tcl_SetResult(interp, _("cannot replace line"), TCL_STATIC);
err = TCL_ERROR;
}
else
{
changed_bytes((linenr_T)val1, 0);
flags |= FL_UPDATE_CURBUF;
}
break;
}
else
{
/*
* Replace several lines with the elements of a Tcl list.
* $buf set {n} {m} {list}
* If the list contains more than {m}-{n}+1 elements, they
		 * are inserted after line {m}. If the list contains fewer
		 * elements, the lines from {n}+length({list}) through {m}
* are deleted.
*/
int lc;
Tcl_Obj **lv;
err = tclgetlinenum(interp, objv[3], &val2, buf);
if (err != TCL_OK)
break;
err = Tcl_ListObjGetElements(interp, objv[4], &lc, &lv);
if (err != TCL_OK)
break;
if (val1 > val2)
{
n = val1;
val1 = val2;
val2 = n;
}
n = val1;
if (u_save((linenr_T)(val1 - 1), (linenr_T)(val2 + 1)) != OK)
{
Tcl_SetResult(interp, _("cannot save undo information"),
TCL_STATIC);
err = TCL_ERROR;
break;
}
flags |= FL_UPDATE_CURBUF;
for (i = 0; i < lc && n <= val2; i++)
{
line = Tcl_GetStringFromObj(lv[i], NULL);
if (ml_replace((linenr_T)n, (char_u *)line, TRUE) != OK)
goto setListError;
++n;
}
if (i < lc)
{
/* append lines */
do
{
line = Tcl_GetStringFromObj(lv[i], NULL);
if (ml_append((linenr_T)(n - 1),
(char_u *)line, 0, FALSE) != OK)
goto setListError;
++n;
++i;
} while (i < lc);
}
else if (n <= val2)
{
/* did not replace all lines, delete */
i = n;
do
{
if (ml_delete((linenr_T)i, FALSE) != OK)
goto setListError;
++n;
} while (n <= val2);
}
lc -= val2 - val1 + 1; /* number of lines to be replaced */
mark_adjust((linenr_T)val1, (linenr_T)val2, (long)MAXLNUM,
(long)lc);
changed_lines((linenr_T)val1, 0, (linenr_T)val2 + 1, (long)lc);
break;
setListError:
u_undo(1); /* ??? */
Tcl_SetResult(interp, _("cannot set line(s)"), TCL_STATIC);
err = TCL_ERROR;
}
break;
case BUF_DELETE:
if (objc != 3 && objc != 4)
{
Tcl_WrongNumArgs(interp, 3, objv, "lineNumber ?lineNumber?");
err = TCL_ERROR;
break;
}
err = tclgetlinenum(interp, objv[2], &val1, buf);
if (err != TCL_OK)
break;
val2 = val1;
if (objc == 4)
{
err = tclgetlinenum(interp, objv[3], &val2, buf);
if (err != TCL_OK)
return err;
if (val1 > val2)
{
i = val1; val1 = val2; val2 = i;
}
}
n = val2 - val1 + 1;
if (u_savedel((linenr_T)val1, (long)n) != OK)
{
Tcl_SetResult(interp, _("cannot save undo information"),
TCL_STATIC);
err = TCL_ERROR;
break;
}
for (i = 0; i < n; i++)
{
ml_delete((linenr_T)val1, FALSE);
err = vimerror(interp);
if (err != TCL_OK)
break;
}
if (i > 0)
deleted_lines_mark((linenr_T)val1, (long)i);
flags |= FL_ADJUST_CURSOR|FL_UPDATE_SCREEN;
break;
case BUF_MARK:
if (objc != 3)
{
Tcl_WrongNumArgs(interp, 2, objv, "markName");
err = TCL_ERROR;
break;
}
line = Tcl_GetStringFromObj(objv[2], NULL);
pos = NULL;
if (line[0] != '\0' && line[1] == '\0')
{
pos = getmark(line[0], FALSE);
}
if (pos == NULL)
{
Tcl_SetResult(interp, _("invalid mark name"), TCL_STATIC);
err = TCL_ERROR;
break;
}
err = vimerror(interp);
if (err != TCL_OK)
break;
if (pos->lnum <= 0)
{
Tcl_SetResult(interp, _("mark not set"), TCL_STATIC);
err = TCL_ERROR;
}
else
{
char rbuf[64];
sprintf(rbuf, _("row %d column %d"),
(int)row2tcl(pos->lnum), (int)col2tcl(pos->col));
Tcl_SetResult(interp, rbuf, TCL_VOLATILE);
}
break;
case BUF_INSERT:
opt = 1;
/* fallthrough */
case BUF_APPEND:
if (objc != 4)
{
Tcl_WrongNumArgs(interp, 2, objv, "lineNum text");
err = TCL_ERROR;
break;
}
err = tclgetlinenum(interp, objv[2], &val1, buf);
if (err != TCL_OK)
break;
if (opt)
--val1;
if (u_save((linenr_T)val1, (linenr_T)(val1+1)) != OK)
{
Tcl_SetResult(interp, _("cannot save undo information"),
TCL_STATIC);
err = TCL_ERROR;
break;
}
line = Tcl_GetStringFromObj(objv[3], NULL);
if (ml_append((linenr_T)val1, (char_u *)line, 0, FALSE) != OK)
{
Tcl_SetResult(interp, _("cannot insert/append line"),
TCL_STATIC);
err = TCL_ERROR;
break;
}
appended_lines_mark((linenr_T)val1, 1L);
flags |= FL_UPDATE_SCREEN;
break;
case BUF_WINDOWS:
/*
* Return list of window commands.
*/
if (objc != 2)
{
Tcl_WrongNumArgs(interp, 2, objv, NULL);
err = TCL_ERROR;
break;
}
Tcl_ResetResult(interp);
FOR_ALL_WINDOWS(win)
{
if (win->w_buffer == buf)
{
line = tclgetwindow(interp, win);
if (line != NULL)
Tcl_AppendElement(interp, line);
else
{
err = TCL_ERROR;
break;
}
}
}
break;
case BUF_DELCMD:
/*
* Register deletion callback.
* TODO: Should be able to register multiple callbacks
*/
if (objc != 3)
{
Tcl_WrongNumArgs(interp, 2, objv, "command");
err = TCL_ERROR;
break;
}
err = tclsetdelcmd(interp, buf->b_tcl_ref, (void *)buf, objv[2]);
break;
default:
Tcl_SetResult(interp, _("not implemented yet"), TCL_STATIC);
err = TCL_ERROR;
}
if (flags & FL_UPDATE_CURBUF)
redraw_curbuf_later(NOT_VALID);
curbuf = savebuf;
curwin = savewin;
if (flags & FL_ADJUST_CURSOR)
check_cursor();
if (flags & (FL_UPDATE_SCREEN | FL_UPDATE_CURBUF))
update_screen(NOT_VALID);
return err;
}
/*
* This function implements the window commands.
*/
static int
winselfcmd(
ClientData ref,
Tcl_Interp *interp,
int objc,
Tcl_Obj *CONST objv[])
{
int err, idx, flags;
int val1, val2;
Tcl_Obj *resobj;
win_T *savewin, *win;
buf_T *savebuf;
char *str;
enum
{
WIN_BUFFER, WIN_COMMAND, WIN_CURSOR, WIN_DELCMD, WIN_EXPR,
WIN_HEIGHT, WIN_OPTION
};
static CONST84 char *winoptions[] =
{
"buffer", "command", "cursor", "delcmd", "expr",
"height", "option", (char *)0
};
if (objc < 2)
{
Tcl_WrongNumArgs(interp, 1, objv, "option ?arg ...?");
return TCL_ERROR;
}
err = Tcl_GetIndexFromObj(interp, objv[1], winoptions, "option", 0, &idx);
if (err != TCL_OK)
return TCL_ERROR;
win = (win_T *)((struct ref *)ref)->vimobj;
savewin = curwin; curwin = win;
savebuf = curbuf; curbuf = win->w_buffer;
flags = 0;
switch (idx)
{
case WIN_OPTION:
err = tclsetoption(interp, objc, objv, 2);
flags |= FL_UPDATE_SCREEN;
break;
case WIN_COMMAND:
err = tcldoexcommand(interp, objc, objv, 2);
flags |= FL_UPDATE_SCREEN;
break;
case WIN_EXPR:
err = tclvimexpr(interp, objc, objv, 2);
break;
case WIN_HEIGHT:
if (objc == 3)
{
err = Tcl_GetIntFromObj(interp, objv[2], &val1);
if (err != TCL_OK)
break;
#ifdef FEAT_GUI
need_mouse_correct = TRUE;
#endif
win_setheight(val1);
err = vimerror(interp);
if (err != TCL_OK)
break;
}
else
if (objc != 2)
{
Tcl_WrongNumArgs(interp, 2, objv, "?value?");
err = TCL_ERROR;
break;
}
resobj = Tcl_NewIntObj((int)(win->w_height));
Tcl_SetObjResult(interp, resobj);
break;
case WIN_BUFFER:
if (objc != 2)
{
Tcl_WrongNumArgs(interp, 2, objv, NULL);
err = TCL_ERROR;
break;
}
str = tclgetbuffer(interp, win->w_buffer);
if (str)
Tcl_SetResult(interp, str, TCL_VOLATILE);
else
err = TCL_ERROR;
break;
case WIN_DELCMD:
if (objc != 3)
{
Tcl_WrongNumArgs(interp, 2, objv, "command");
err = TCL_ERROR;
break;
}
err = tclsetdelcmd(interp, win->w_tcl_ref, (void *)win, objv[2]);
break;
case WIN_CURSOR:
if (objc > 4)
{
Tcl_WrongNumArgs(interp, 2, objv, "?arg1 ?arg2??");
err = TCL_ERROR;
break;
}
if (objc == 2)
{
char buf[64];
sprintf(buf, _("row %d column %d"), (int)row2tcl(win->w_cursor.lnum), (int)col2tcl(win->w_cursor.col));
Tcl_SetResult(interp, buf, TCL_VOLATILE);
break;
}
else if (objc == 3)
{
Tcl_Obj *part, *var;
part = Tcl_NewStringObj("row", -1);
var = Tcl_ObjGetVar2(interp, objv[2], part, TCL_LEAVE_ERR_MSG);
if (var == NULL)
{
err = TCL_ERROR;
break;
}
err = tclgetlinenum(interp, var, &val1, win->w_buffer);
if (err != TCL_OK)
break;
part = Tcl_NewStringObj("column", -1);
var = Tcl_ObjGetVar2(interp, objv[2], part, TCL_LEAVE_ERR_MSG);
if (var == NULL)
{
err = TCL_ERROR;
break;
}
err = Tcl_GetIntFromObj(interp, var, &val2);
if (err != TCL_OK)
break;
}
else { /* objc == 4 */
err = tclgetlinenum(interp, objv[2], &val1, win->w_buffer);
if (err != TCL_OK)
break;
err = Tcl_GetIntFromObj(interp, objv[3], &val2);
if (err != TCL_OK)
break;
}
/* TODO: should check column */
win->w_cursor.lnum = val1;
win->w_cursor.col = col2vim(val2);
flags |= FL_UPDATE_SCREEN;
break;
default:
Tcl_SetResult(interp, _("not implemented yet"), TCL_STATIC);
break;
}
curwin = savewin;
curbuf = savebuf;
if (flags & FL_UPDATE_SCREEN)
update_screen(NOT_VALID);
return err;
}
static int
commandcmd(
ClientData dummy UNUSED,
Tcl_Interp *interp,
int objc,
Tcl_Obj *CONST objv[])
{
int err;
err = tcldoexcommand(interp, objc, objv, 1);
update_screen(VALID);
return err;
}
static int
optioncmd(
ClientData dummy UNUSED,
Tcl_Interp *interp,
int objc,
Tcl_Obj *CONST objv[])
{
int err;
err = tclsetoption(interp, objc, objv, 1);
update_screen(VALID);
return err;
}
static int
exprcmd(
ClientData dummy UNUSED,
Tcl_Interp *interp,
int objc,
Tcl_Obj *CONST objv[])
{
return tclvimexpr(interp, objc, objv, 1);
}
/****************************************************************************
Support functions for Tcl commands
****************************************************************************/
/*
* Get a line number from 'obj' and convert it to vim's range.
*/
static int
tclgetlinenum(
Tcl_Interp *interp,
Tcl_Obj *obj,
int *valueP,
buf_T *buf)
{
int err, i;
enum { LN_BEGIN, LN_BOTTOM, LN_END, LN_FIRST, LN_LAST, LN_START, LN_TOP };
static CONST84 char *keyw[] =
{
"begin", "bottom", "end", "first", "last", "start", "top", (char *)0
};
err = Tcl_GetIndexFromObj(interp, obj, keyw, "", 0, &i);
if (err == TCL_OK)
{
switch (i)
{
case LN_BEGIN:
case LN_FIRST:
case LN_START:
case LN_TOP:
*valueP = 1;
break;
case LN_BOTTOM:
case LN_END:
case LN_LAST:
*valueP = buf->b_ml.ml_line_count;
break;
}
return TCL_OK;
}
Tcl_ResetResult(interp);
err = Tcl_GetIntFromObj(interp, obj, &i);
if (err != TCL_OK)
return err;
i = row2vim(i);
if (i < 1 || i > buf->b_ml.ml_line_count)
{
Tcl_SetResult(interp, _("line number out of range"), TCL_STATIC);
return TCL_ERROR;
}
*valueP = i;
return TCL_OK;
}
/*
* Find the first window in the window list that displays the buffer.
*/
static win_T *
tclfindwin(buf_T *buf)
{
win_T *win;
FOR_ALL_WINDOWS(win)
{
if (win->w_buffer == buf)
return win;
}
return curwin; /* keep current window context */
}
/*
* Do-it-all function for "::vim::command", "$buf command" and "$win command".
*/
static int
tcldoexcommand(
Tcl_Interp *interp,
int objc,
Tcl_Obj *CONST objv[],
int objn)
{
tcl_info saveinfo;
int err, flag, nobjs;
char *arg;
nobjs = objc - objn;
if (nobjs < 1 || nobjs > 2)
{
Tcl_WrongNumArgs(interp, objn, objv, "?-quiet? exCommand");
return TCL_ERROR;
}
flag = 0;
if (nobjs == 2)
{
arg = Tcl_GetStringFromObj(objv[objn], NULL);
if (strcmp(arg, "-quiet") == 0)
flag = 1;
else
{
Tcl_ResetResult(interp);
Tcl_AppendResult(interp, _("unknown flag: "), arg, (char *)0);
return TCL_ERROR;
}
++objn;
}
memcpy(&saveinfo, &tclinfo, sizeof(tcl_info));
tclinfo.interp = NULL;
tclinfo.curwin = NULL;
tclinfo.curbuf = NULL;
arg = Tcl_GetStringFromObj(objv[objn], NULL);
if (flag)
++emsg_off;
do_cmdline_cmd((char_u *)arg);
if (flag)
--emsg_off;
err = vimerror(interp);
/* If the ex command created a new Tcl interpreter, remove it */
if (tclinfo.interp)
tcldelthisinterp();
memcpy(&tclinfo, &saveinfo, sizeof(tcl_info));
tclupdatevars();
return err;
}
/*
* Do-it-all function for "::vim::option", "$buf option" and "$win option".
*/
static int
tclsetoption(
Tcl_Interp *interp,
int objc,
Tcl_Obj *CONST objv[],
int objn)
{
int err, nobjs, idx;
char_u *option;
int isnum;
long lval;
char_u *sval;
Tcl_Obj *resobj;
enum { OPT_OFF, OPT_ON, OPT_TOGGLE };
static CONST84 char *optkw[] = { "off", "on", "toggle", (char *)0 };
nobjs = objc - objn;
if (nobjs != 1 && nobjs != 2)
{
Tcl_WrongNumArgs(interp, objn, objv, "vimOption ?value?");
return TCL_ERROR;
}
option = (char_u *)Tcl_GetStringFromObj(objv[objn], NULL);
++objn;
isnum = get_option_value(option, &lval, &sval, 0);
err = TCL_OK;
switch (isnum)
{
case 0:
Tcl_SetResult(interp, (char *)sval, TCL_VOLATILE);
vim_free(sval);
break;
case 1:
resobj = Tcl_NewLongObj(lval);
Tcl_SetObjResult(interp, resobj);
break;
default:
Tcl_SetResult(interp, _("unknown vimOption"), TCL_STATIC);
return TCL_ERROR;
}
if (nobjs == 2)
{
if (isnum)
{
sval = NULL; /* avoid compiler warning */
err = Tcl_GetIndexFromObj(interp, objv[objn], optkw, "", 0, &idx);
if (err != TCL_OK)
{
Tcl_ResetResult(interp);
err = Tcl_GetLongFromObj(interp, objv[objn], &lval);
}
else
switch (idx)
{
case OPT_ON:
lval = 1;
break;
case OPT_OFF:
lval = 0;
break;
case OPT_TOGGLE:
lval = !lval;
break;
}
}
else
sval = (char_u *)Tcl_GetStringFromObj(objv[objn], NULL);
if (err == TCL_OK)
{
set_option_value(option, lval, sval, OPT_LOCAL);
err = vimerror(interp);
}
}
return err;
}
/*
* Do-it-all function for "::vim::expr", "$buf expr" and "$win expr".
*/
static int
tclvimexpr(
Tcl_Interp *interp,
int objc,
Tcl_Obj *CONST objv[],
int objn)
{
#ifdef FEAT_EVAL
char *expr, *str;
#endif
int err;
if (objc - objn != 1)
{
Tcl_WrongNumArgs(interp, objn, objv, "vimExpr");
return TCL_ERROR;
}
#ifdef FEAT_EVAL
expr = Tcl_GetStringFromObj(objv[objn], NULL);
str = (char *)eval_to_string((char_u *)expr, NULL, TRUE);
if (str == NULL)
Tcl_SetResult(interp, _("invalid expression"), TCL_STATIC);
else
Tcl_SetResult(interp, str, TCL_VOLATILE);
err = vimerror(interp);
#else
Tcl_SetResult(interp, _("expressions disabled at compile time"), TCL_STATIC);
err = TCL_ERROR;
#endif
return err;
}
/*
* Check for internal vim errors.
*/
static int
vimerror(Tcl_Interp *interp)
{
if (got_int)
{
Tcl_SetResult(interp, _("keyboard interrupt"), TCL_STATIC);
return TCL_ERROR;
}
else if (did_emsg)
{
Tcl_SetResult(interp, _("vim error"), TCL_STATIC);
return TCL_ERROR;
}
return TCL_OK;
}
/*
* Functions that handle the reference lists:
* delref() - callback for Tcl's DeleteCommand
* tclgetref() - find/create Tcl command for a win_T* or buf_T* object
* tclgetwindow() - window frontend for tclgetref()
* tclgetbuffer() - buffer frontend for tclgetref()
* tclsetdelcmd() - add Tcl callback command to a vim object
*/
static void
delref(ClientData cref)
{
struct ref *ref = (struct ref *)cref;
if (ref->delcmd)
{
Tcl_DecrRefCount(ref->delcmd);
ref->delcmd = NULL;
}
ref->interp = NULL;
}
static char *
tclgetref(
Tcl_Interp *interp,
    void **refstartP, /* ptr to w_tcl_ref/b_tcl_ref member of
win_T/buf_T struct */
char *prefix, /* "win" or "buf" */
void *vimobj, /* win_T* or buf_T* */
Tcl_ObjCmdProc *proc) /* winselfcmd or bufselfcmd */
{
struct ref *ref, *unused = NULL;
static char name[VARNAME_SIZE];
Tcl_Command cmd;
ref = (struct ref *)(*refstartP);
if (ref == &refsdeleted)
{
Tcl_SetResult(interp, _("cannot create buffer/window command: object is being deleted"), TCL_STATIC);
return NULL;
}
while (ref != NULL)
{
if (ref->interp == interp)
break;
if (ref->interp == NULL)
unused = ref;
ref = ref->next;
}
if (ref)
vim_snprintf(name, sizeof(name), "::vim::%s",
Tcl_GetCommandName(interp, ref->cmd));
else
{
if (unused)
ref = unused;
else
{
ref = (struct ref *)Tcl_Alloc(sizeof(struct ref));
ref->interp = NULL;
ref->next = (struct ref *)(*refstartP);
(*refstartP) = (void *)ref;
}
/* This might break on some exotic systems... */
vim_snprintf(name, sizeof(name), "::vim::%s_%lx",
prefix, (unsigned long)vimobj);
cmd = Tcl_CreateObjCommand(interp, name, proc,
(ClientData)ref, (Tcl_CmdDeleteProc *)delref);
if (!cmd)
return NULL;
ref->interp = interp;
ref->cmd = cmd;
ref->delcmd = NULL;
ref->vimobj = vimobj;
}
return name;
}
static char *
tclgetwindow(Tcl_Interp *interp, win_T *win)
{
return tclgetref(interp, &(win->w_tcl_ref), "win", (void *)win, winselfcmd);
}
static char *
tclgetbuffer(Tcl_Interp *interp, buf_T *buf)
{
return tclgetref(interp, &(buf->b_tcl_ref), "buf", (void *)buf, bufselfcmd);
}
static int
tclsetdelcmd(
Tcl_Interp *interp,
struct ref *reflist,
void *vimobj,
Tcl_Obj *delcmd)
{
if (reflist == &refsdeleted)
{
Tcl_SetResult(interp, _("cannot register callback command: buffer/window is already being deleted"), TCL_STATIC);
return TCL_ERROR;
}
while (reflist != NULL)
{
if (reflist->interp == interp && reflist->vimobj == vimobj)
{
if (reflist->delcmd)
{
Tcl_DecrRefCount(reflist->delcmd);
}
Tcl_IncrRefCount(delcmd);
reflist->delcmd = delcmd;
return TCL_OK;
}
reflist = reflist->next;
}
/* This should never happen. Famous last word? */
EMSG(_("E280: TCL FATAL ERROR: reflist corrupt!? Please report this to [email protected]"));
Tcl_SetResult(interp, _("cannot register callback command: buffer/window reference not found"), TCL_STATIC);
return TCL_ERROR;
}
/*******************************************
I/O Channel
********************************************/
static int
tcl_channel_close(ClientData instance, Tcl_Interp *interp UNUSED)
{
int err = 0;
/* currently does nothing */
if (instance != VIMOUT && instance != VIMERR)
{
Tcl_SetErrno(EBADF);
err = EBADF;
}
return err;
}
static int
tcl_channel_input(
ClientData instance UNUSED,
char *buf UNUSED,
int bufsiz UNUSED,
int *errptr)
{
/* input is currently not supported */
Tcl_SetErrno(EINVAL);
if (errptr)
*errptr = EINVAL;
return -1;
}
static int
tcl_channel_output(
ClientData instance,
const char *buf,
int bufsiz,
int *errptr)
{
char_u *str;
int result;
    /* The buffer is not guaranteed to be 0-terminated, and we don't know if
* there is enough room to add a '\0'. So we have to create a copy
* of the buffer...
*/
str = vim_strnsave((char_u *)buf, bufsiz);
if (!str)
{
Tcl_SetErrno(ENOMEM);
if (errptr)
*errptr = ENOMEM;
return -1;
}
result = bufsiz;
if (instance == VIMOUT)
tclmsg((char *)str);
else
if (instance == VIMERR)
tclerrmsg((char *)str);
else
{
Tcl_SetErrno(EBADF);
if (errptr)
*errptr = EBADF;
result = -1;
}
vim_free(str);
return result;
}
static void
tcl_channel_watch(ClientData instance UNUSED, int mask UNUSED)
{
Tcl_SetErrno(EINVAL);
}
static int
tcl_channel_gethandle(
ClientData instance UNUSED,
int direction UNUSED,
ClientData *handleptr UNUSED)
{
Tcl_SetErrno(EINVAL);
return EINVAL;
}
static Tcl_ChannelType tcl_channel_type =
{
"vimmessage", /* typeName */
TCL_CHANNEL_VERSION_2, /* version */
tcl_channel_close, /* closeProc */
tcl_channel_input, /* inputProc */
tcl_channel_output, /* outputProc */
NULL, /* seekProc */
NULL, /* setOptionProc */
NULL, /* getOptionProc */
tcl_channel_watch, /* watchProc */
tcl_channel_gethandle, /* getHandleProc */
NULL, /* close2Proc */
NULL, /* blockModeProc */
#ifdef TCL_CHANNEL_VERSION_2
NULL, /* flushProc */
NULL, /* handlerProc */
#endif
/* The following should not be necessary since TCL_CHANNEL_VERSION_2 was
* set above */
#ifdef TCL_CHANNEL_VERSION_3
NULL, /* wideSeekProc */
#endif
#ifdef TCL_CHANNEL_VERSION_4
NULL, /* threadActionProc */
#endif
#ifdef TCL_CHANNEL_VERSION_5
NULL /* truncateProc */
#endif
};
/**********************************
Interface to vim
**********************************/
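/*
 * Tell Tcl that the linked ::vim:: variables (range, lbase, current buffer
 * and window) have changed on the Vim side so their Tcl values are
 * refreshed.
 */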
static void
tclupdatevars(void)
{
char varname[VARNAME_SIZE]; /* must be writeable */
char *name;
strcpy(varname, VAR_RANGE1);
Tcl_UpdateLinkedVar(tclinfo.interp, varname);
strcpy(varname, VAR_RANGE2);
Tcl_UpdateLinkedVar(tclinfo.interp, varname);
strcpy(varname, VAR_RANGE3);
Tcl_UpdateLinkedVar(tclinfo.interp, varname);
strcpy(varname, VAR_LBASE);
Tcl_UpdateLinkedVar(tclinfo.interp, varname);
name = tclgetbuffer(tclinfo.interp, curbuf);
strcpy(tclinfo.curbuf, name);
strcpy(varname, VAR_CURBUF);
Tcl_UpdateLinkedVar(tclinfo.interp, varname);
name = tclgetwindow(tclinfo.interp, curwin);
strcpy(tclinfo.curwin, name);
strcpy(varname, VAR_CURWIN);
Tcl_UpdateLinkedVar(tclinfo.interp, varname);
}
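/*
 * Create the Tcl interpreter on first use: set up the replacement stdout/
 * stderr channels, register the ::vim:: commands and link the ::vim::
 * variables.  On subsequent calls only the range variables are refreshed.
 */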
static int
tclinit(exarg_T *eap)
{
char varname[VARNAME_SIZE]; /* Tcl_LinkVar requires writeable varname */
char *name;
#ifdef DYNAMIC_TCL
if (!tcl_enabled(TRUE))
{
EMSG(_("E571: Sorry, this command is disabled: the Tcl library could not be loaded."));
return FAIL;
}
#endif
if (!tclinfo.interp)
{
Tcl_Interp *interp;
static Tcl_Channel ch1, ch2;
/* Create replacement channels for stdout and stderr; this has to be
* done each time an interpreter is created since the channels are closed
* when the interpreter is deleted */
ch1 = Tcl_CreateChannel(&tcl_channel_type, "vimout", VIMOUT, TCL_WRITABLE);
ch2 = Tcl_CreateChannel(&tcl_channel_type, "vimerr", VIMERR, TCL_WRITABLE);
Tcl_SetStdChannel(ch1, TCL_STDOUT);
Tcl_SetStdChannel(ch2, TCL_STDERR);
interp = Tcl_CreateInterp();
Tcl_Preserve(interp);
if (Tcl_Init(interp) == TCL_ERROR)
{
Tcl_Release(interp);
Tcl_DeleteInterp(interp);
return FAIL;
}
#if 0
/* VIM sure is interactive */
Tcl_SetVar(interp, "tcl_interactive", "1", TCL_GLOBAL_ONLY);
#endif
Tcl_SetChannelOption(interp, ch1, "-buffering", "line");
#ifdef WIN3264
Tcl_SetChannelOption(interp, ch1, "-translation", "lf");
#endif
Tcl_SetChannelOption(interp, ch2, "-buffering", "line");
#ifdef WIN3264
Tcl_SetChannelOption(interp, ch2, "-translation", "lf");
#endif
/* replace standard Tcl exit command */
Tcl_DeleteCommand(interp, "exit");
Tcl_CreateObjCommand(interp, "exit", exitcmd,
(ClientData)NULL, (Tcl_CmdDeleteProc *)NULL);
/* new commands, in ::vim namespace */
Tcl_CreateObjCommand(interp, "::vim::buffer", buffercmd,
(ClientData)NULL, (Tcl_CmdDeleteProc *)NULL);
Tcl_CreateObjCommand(interp, "::vim::window", windowcmd,
(ClientData)NULL, (Tcl_CmdDeleteProc *)NULL);
Tcl_CreateObjCommand(interp, "::vim::command", commandcmd,
(ClientData)NULL, (Tcl_CmdDeleteProc *)NULL);
Tcl_CreateObjCommand(interp, "::vim::beep", beepcmd,
(ClientData)NULL, (Tcl_CmdDeleteProc *)NULL);
Tcl_CreateObjCommand(interp, "::vim::option", optioncmd,
(ClientData)NULL, (Tcl_CmdDeleteProc *)NULL);
Tcl_CreateObjCommand(interp, "::vim::expr", exprcmd,
(ClientData)NULL, (Tcl_CmdDeleteProc *)NULL);
/* "lbase" variable */
tclinfo.lbase = 1;
strcpy(varname, VAR_LBASE);
Tcl_LinkVar(interp, varname, (char *)&tclinfo.lbase, TCL_LINK_INT);
/* "range" variable */
tclinfo.range_start = eap->line1;
strcpy(varname, VAR_RANGE1);
Tcl_LinkVar(interp, varname, (char *)&tclinfo.range_start, TCL_LINK_INT|TCL_LINK_READ_ONLY);
strcpy(varname, VAR_RANGE2);
Tcl_LinkVar(interp, varname, (char *)&tclinfo.range_start, TCL_LINK_INT|TCL_LINK_READ_ONLY);
tclinfo.range_end = eap->line2;
strcpy(varname, VAR_RANGE3);
Tcl_LinkVar(interp, varname, (char *)&tclinfo.range_end, TCL_LINK_INT|TCL_LINK_READ_ONLY);
/* "current" variable */
tclinfo.curbuf = Tcl_Alloc(VARNAME_SIZE);
tclinfo.curwin = Tcl_Alloc(VARNAME_SIZE);
name = tclgetbuffer(interp, curbuf);
strcpy(tclinfo.curbuf, name);
strcpy(varname, VAR_CURBUF);
Tcl_LinkVar(interp, varname, (char *)&tclinfo.curbuf, TCL_LINK_STRING|TCL_LINK_READ_ONLY);
name = tclgetwindow(interp, curwin);
strcpy(tclinfo.curwin, name);
strcpy(varname, VAR_CURWIN);
Tcl_LinkVar(interp, varname, (char *)&tclinfo.curwin, TCL_LINK_STRING|TCL_LINK_READ_ONLY);
tclinfo.interp = interp;
}
else
{
/* Interpreter already exists, just update variables */
tclinfo.range_start = row2tcl(eap->line1);
tclinfo.range_end = row2tcl(eap->line2);
tclupdatevars();
}
tclinfo.exitvalue = 0;
return OK;
}
static void
tclerrmsg(char *text)
{
char *next;
while ((next=strchr(text, '\n')))
{
*next++ = '\0';
EMSG(text);
text = next;
}
if (*text)
EMSG(text);
}
static void
tclmsg(char *text)
{
char *next;
while ((next=strchr(text, '\n')))
{
*next++ = '\0';
MSG(text);
text = next;
}
if (*text)
MSG(text);
}
static void
tcldelthisinterp(void)
{
if (!Tcl_InterpDeleted(tclinfo.interp))
Tcl_DeleteInterp(tclinfo.interp);
Tcl_Release(tclinfo.interp);
    /* The interpreter now gets deleted. All registered commands (esp.
* window and buffer commands) are deleted, triggering their deletion
* callback, which deletes all refs pointing to this interpreter.
* We could garbage-collect the unused ref structs in all windows and
* buffers, but unless the user creates hundreds of sub-interpreters
* all referring to lots of windows and buffers, this is hardly worth
* the effort. Unused refs are recycled by other interpreters, and
* all refs are free'd when the window/buffer gets closed by vim.
*/
tclinfo.interp = NULL;
Tcl_Free(tclinfo.curbuf);
Tcl_Free(tclinfo.curwin);
tclinfo.curbuf = tclinfo.curwin = NULL;
}
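/*
 * Translate the result of a Tcl evaluation into OK/FAIL for Vim.  If the
 * interpreter was deleted (our "exit" replacement) or exceeded a resource
 * limit, report the saved exit code and dispose of the interpreter;
 * otherwise display the Tcl result as a normal or error message.
 */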
static int
tclexit(int error)
{
int newerr = OK;
if (Tcl_InterpDeleted(tclinfo.interp) /* True if we intercepted Tcl's exit command */
#if (TCL_MAJOR_VERSION == 8 && TCL_MINOR_VERSION >= 5) || TCL_MAJOR_VERSION > 8
|| Tcl_LimitExceeded(tclinfo.interp) /* True if the interpreter cannot continue */
#endif
)
{
char buf[50];
sprintf(buf, _("E572: exit code %d"), tclinfo.exitvalue);
tclerrmsg(buf);
if (tclinfo.exitvalue == 0)
{
did_emsg = 0;
newerr = OK;
}
else
newerr = FAIL;
tcldelthisinterp();
}
else
{
char *result;
result = (char *)Tcl_GetStringResult(tclinfo.interp);
if (error == TCL_OK)
{
tclmsg(result);
newerr = OK;
}
else
{
tclerrmsg(result);
newerr = FAIL;
}
}
return newerr;
}
/*
* ":tcl"
*/
void
ex_tcl(exarg_T *eap)
{
char_u *script;
int err;
script = script_get(eap, eap->arg);
if (!eap->skip)
{
err = tclinit(eap);
if (err == OK)
{
Tcl_AllowExceptions(tclinfo.interp);
if (script == NULL)
err = Tcl_Eval(tclinfo.interp, (char *)eap->arg);
else
err = Tcl_Eval(tclinfo.interp, (char *)script);
err = tclexit(err);
}
}
vim_free(script);
}
/*
* ":tclfile"
*/
void
ex_tclfile(exarg_T *eap)
{
char *file = (char *)eap->arg;
int err;
err = tclinit(eap);
if (err == OK)
{
Tcl_AllowExceptions(tclinfo.interp);
err = Tcl_EvalFile(tclinfo.interp, file);
err = tclexit(err);
}
}
/*
* ":tcldo"
*/
void
ex_tcldo(exarg_T *eap)
{
char *script, *line;
int err, rs, re, lnum;
char var_lnum[VARNAME_SIZE]; /* must be writeable memory */
char var_line[VARNAME_SIZE];
linenr_T first_line = 0;
linenr_T last_line = 0;
rs = eap->line1;
re = eap->line2;
script = (char *)eap->arg;
strcpy(var_lnum, VAR_CURLNUM);
strcpy(var_line, VAR_CURLINE);
err = tclinit(eap);
if (err != OK)
return;
lnum = row2tcl(rs);
Tcl_LinkVar(tclinfo.interp, var_lnum, (char *)&lnum, TCL_LINK_INT|TCL_LINK_READ_ONLY);
err = TCL_OK;
if (u_save((linenr_T)(rs-1), (linenr_T)(re+1)) != OK)
{
Tcl_SetResult(tclinfo.interp, _("cannot save undo information"), TCL_STATIC);
err = TCL_ERROR;
}
while (err == TCL_OK && rs <= re)
{
line = (char *)ml_get_buf(curbuf, (linenr_T)rs, FALSE);
if (!line)
{
Tcl_SetResult(tclinfo.interp, _("cannot get line"), TCL_STATIC);
err = TCL_ERROR;
break;
}
Tcl_SetVar(tclinfo.interp, var_line, line, 0);
Tcl_AllowExceptions(tclinfo.interp);
err = Tcl_Eval(tclinfo.interp, script);
if (err != TCL_OK
|| Tcl_InterpDeleted(tclinfo.interp)
#if (TCL_MAJOR_VERSION == 8 && TCL_MINOR_VERSION >= 5) || TCL_MAJOR_VERSION > 8
|| Tcl_LimitExceeded(tclinfo.interp)
#endif
)
break;
line = (char *)Tcl_GetVar(tclinfo.interp, var_line, 0);
if (line)
{
if (ml_replace((linenr_T)rs, (char_u *)line, TRUE) != OK)
{
Tcl_SetResult(tclinfo.interp, _("cannot replace line"), TCL_STATIC);
err = TCL_ERROR;
break;
}
if (first_line == 0)
first_line = rs;
last_line = rs;
}
++rs;
++lnum;
Tcl_UpdateLinkedVar(tclinfo.interp, var_lnum);
}
if (first_line)
changed_lines(first_line, 0, last_line + 1, (long)0);
Tcl_UnsetVar(tclinfo.interp, var_line, 0);
Tcl_UnlinkVar(tclinfo.interp, var_lnum);
if (err == TCL_OK)
Tcl_ResetResult(tclinfo.interp);
(void)tclexit(err);
}
static void
tcldelallrefs(struct ref *ref)
{
struct ref *next;
int err;
char *result;
#ifdef DYNAMIC_TCL
/* TODO: this code currently crashes Vim on exit */
if (exiting)
return;
#endif
while (ref != NULL)
{
next = ref->next;
if (ref->interp)
{
if (ref->delcmd)
{
err = Tcl_GlobalEvalObj(ref->interp, ref->delcmd);
if (err != TCL_OK)
{
result = (char *)Tcl_GetStringResult(ref->interp);
if (result)
tclerrmsg(result);
}
Tcl_DecrRefCount(ref->delcmd);
ref->delcmd = NULL;
}
Tcl_DeleteCommandFromToken(ref->interp, ref->cmd);
}
Tcl_Free((char *)ref);
ref = next;
}
}
void
tcl_buffer_free(buf_T *buf)
{
struct ref *reflist;
#ifdef DYNAMIC_TCL
if (!stubs_initialized) /* Not using Tcl, nothing to do. */
return;
#endif
reflist = (struct ref *)(buf->b_tcl_ref);
if (reflist != &refsdeleted)
{
buf->b_tcl_ref = (void *)&refsdeleted;
tcldelallrefs(reflist);
buf->b_tcl_ref = NULL;
}
}
#if defined(FEAT_WINDOWS) || defined(PROTO)
void
tcl_window_free(win_T *win)
{
struct ref *reflist;
#ifdef DYNAMIC_TCL
if (!stubs_initialized) /* Not using Tcl, nothing to do. */
return;
#endif
reflist = (struct ref*)(win->w_tcl_ref);
if (reflist != &refsdeleted)
{
win->w_tcl_ref = (void *)&refsdeleted;
tcldelallrefs(reflist);
win->w_tcl_ref = NULL;
}
}
#endif
/* The End */
|
706057.c | /***************************************************************************//**
* @file
* @brief Stub version of the Certificate Based Key Exchange library. See non-stub
* file for more information.
*******************************************************************************
* # License
* <b>Copyright 2018 Silicon Laboratories Inc. www.silabs.com</b>
*******************************************************************************
*
* The licensor of this software is Silicon Laboratories Inc. Your use of this
* software is governed by the terms of Silicon Labs Master Software License
* Agreement (MSLA) available at
* www.silabs.com/about-us/legal/master-software-license-agreement. This
* software is distributed to you in Source Code format and is governed by the
* sections of the MSLA applicable to Source Code.
*
******************************************************************************/
#include PLATFORM_HEADER
#include "stack/include/ember-types.h"
#include "stack/include/library.h"
#include "stack/include/ember-types-internal.h"
CryptoOperation emNextCryptoOperation = NULL_OPERATION;
uint8_t* partnerEuiBigEndian = NULL;
// Normally these booleans never change in the stub library. However, for testing
// it is easier to use the stub version of the library and allow them to be
// changed.
EMBER_TEST_EXTERNAL_CONST bool emKeysAuthorizedByDefault = true;
EMBER_TEST_EXTERNAL_CONST bool emAppKeyRequestsAreAllowed = true;
bool emUseStaticEmpheralKeys = false;
const EmberLibraryStatus emCbkeCoreLibraryStatus = EMBER_LIBRARY_IS_STUB;
//------------------------------------------------------------------------------
bool emIsCbkeEnabled(void)
{
return false;
}
bool emCbkeIsIdle(void)
{
return true;
}
void emCbkeTick(void)
{
}
void emPrepareForCbkeOperation(void)
{
}
void emPrepForEccOperation(uint8_t index, bool start)
{
(void)index;
(void)start;
}
void startupRadio(void)
{
}
int emWatchdogTickle(void)
{
return EMBER_ERR_FATAL;
}
bool emAreKeysAuthorizedByDefault(void)
{
// By default without the CBKE library we assume that all keys added
// (via emberSetInitialSecurityState() or emberAddOrUpdateKeyTableEntry())
// are authorized for APS data messages.
return emKeysAuthorizedByDefault; // true
}
bool emDoesSecurityPolicyAllowAppKeyRequests(EmberEUI64 partner1,
EmberEUI64 partner2)
{
(void)partner1;
(void)partner2;
return emAppKeyRequestsAreAllowed; // true
}
EmberStatus emValidatePartnerLinkKeyRequest(EmberEUI64 partner)
{
(void)partner;
return EMBER_LIBRARY_NOT_PRESENT;
}
int emRandomDataGenerator(void *buffer, uint32_t size)
{
(void)buffer;
(void)size;
return EMBER_ERR_FATAL;
}
int emHashFunction(uint8_t* digest, uint32_t size, uint8_t* data)
{
(void)digest;
(void)size;
(void)data;
return EMBER_ERR_FATAL;
}
void copyEui64BigEndian(bool mine, uint8_t* eui64)
{
(void)mine;
(void)eui64;
}
|
721711.c | /*
* Copyright (C) 2009 Martin Willi
* Copyright (C) 2008 Tobias Brunner
* Hochschule fuer Technik Rapperswil
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <openssl/opensslconf.h>
#ifndef OPENSSL_NO_RSA
#include "openssl_rsa_public_key.h"
#include "openssl_util.h"
#include <utils/debug.h>
#include <openssl/evp.h>
#include <openssl/rsa.h>
#include <openssl/x509.h>
typedef struct private_openssl_rsa_public_key_t private_openssl_rsa_public_key_t;
/**
* Private data structure with signing context.
*/
struct private_openssl_rsa_public_key_t {
/**
* Public interface for this signer.
*/
openssl_rsa_public_key_t public;
/**
* RSA object from OpenSSL
*/
RSA *rsa;
/**
* reference counter
*/
refcount_t ref;
};
/**
 * Verification of an EMSA PKCS1 signature described in PKCS#1
*/
static bool verify_emsa_pkcs1_signature(private_openssl_rsa_public_key_t *this,
int type, chunk_t data, chunk_t signature)
{
bool valid = FALSE;
int rsa_size = RSA_size(this->rsa);
/* OpenSSL expects a signature of exactly RSA size (no leading 0x00) */
if (signature.len > rsa_size)
{
signature = chunk_skip(signature, signature.len - rsa_size);
}
if (type == NID_undef)
{
char *buf;
int len;
buf = malloc(rsa_size);
len = RSA_public_decrypt(signature.len, signature.ptr, buf, this->rsa,
RSA_PKCS1_PADDING);
if (len != -1)
{
valid = chunk_equals(data, chunk_create(buf, len));
}
free(buf);
}
else
{
EVP_MD_CTX *ctx;
EVP_PKEY *key;
const EVP_MD *hasher;
hasher = EVP_get_digestbynid(type);
if (!hasher)
{
return FALSE;
}
ctx = EVP_MD_CTX_create();
key = EVP_PKEY_new();
if (!ctx || !key)
{
goto error;
}
if (!EVP_PKEY_set1_RSA(key, this->rsa))
{
goto error;
}
if (!EVP_VerifyInit_ex(ctx, hasher, NULL))
{
goto error;
}
if (!EVP_VerifyUpdate(ctx, data.ptr, data.len))
{
goto error;
}
valid = (EVP_VerifyFinal(ctx, signature.ptr, signature.len, key) == 1);
error:
if (key)
{
EVP_PKEY_free(key);
}
if (ctx)
{
EVP_MD_CTX_destroy(ctx);
}
}
return valid;
}
METHOD(public_key_t, get_type, key_type_t,
private_openssl_rsa_public_key_t *this)
{
return KEY_RSA;
}
METHOD(public_key_t, verify, bool,
private_openssl_rsa_public_key_t *this, signature_scheme_t scheme,
chunk_t data, chunk_t signature)
{
switch (scheme)
{
case SIGN_RSA_EMSA_PKCS1_NULL:
return verify_emsa_pkcs1_signature(this, NID_undef, data, signature);
case SIGN_RSA_EMSA_PKCS1_SHA1:
return verify_emsa_pkcs1_signature(this, NID_sha1, data, signature);
case SIGN_RSA_EMSA_PKCS1_SHA224:
return verify_emsa_pkcs1_signature(this, NID_sha224, data, signature);
case SIGN_RSA_EMSA_PKCS1_SHA256:
return verify_emsa_pkcs1_signature(this, NID_sha256, data, signature);
case SIGN_RSA_EMSA_PKCS1_SHA384:
return verify_emsa_pkcs1_signature(this, NID_sha384, data, signature);
case SIGN_RSA_EMSA_PKCS1_SHA512:
return verify_emsa_pkcs1_signature(this, NID_sha512, data, signature);
case SIGN_RSA_EMSA_PKCS1_MD5:
return verify_emsa_pkcs1_signature(this, NID_md5, data, signature);
default:
DBG1(DBG_LIB, "signature scheme %N not supported in RSA",
signature_scheme_names, scheme);
return FALSE;
}
}
METHOD(public_key_t, encrypt, bool,
private_openssl_rsa_public_key_t *this, encryption_scheme_t scheme,
chunk_t plain, chunk_t *crypto)
{
int padding, len;
char *encrypted;
switch (scheme)
{
case ENCRYPT_RSA_PKCS1:
padding = RSA_PKCS1_PADDING;
break;
case ENCRYPT_RSA_OAEP_SHA1:
padding = RSA_PKCS1_OAEP_PADDING;
break;
default:
DBG1(DBG_LIB, "decryption scheme %N not supported via openssl",
encryption_scheme_names, scheme);
return FALSE;
}
encrypted = malloc(RSA_size(this->rsa));
len = RSA_public_encrypt(plain.len, plain.ptr, encrypted,
this->rsa, padding);
if (len < 0)
{
DBG1(DBG_LIB, "RSA decryption failed");
free(encrypted);
return FALSE;
}
*crypto = chunk_create(encrypted, len);
return TRUE;
}
METHOD(public_key_t, get_keysize, int,
private_openssl_rsa_public_key_t *this)
{
return RSA_size(this->rsa) * 8;
}
/**
* Calculate fingerprint from a RSA key, also used in rsa private key.
*/
bool openssl_rsa_fingerprint(RSA *rsa, cred_encoding_type_t type, chunk_t *fp)
{
hasher_t *hasher;
chunk_t key;
u_char *p;
if (lib->encoding->get_cache(lib->encoding, type, rsa, fp))
{
return TRUE;
}
switch (type)
{
case KEYID_PUBKEY_SHA1:
key = chunk_alloc(i2d_RSAPublicKey(rsa, NULL));
p = key.ptr;
i2d_RSAPublicKey(rsa, &p);
break;
case KEYID_PUBKEY_INFO_SHA1:
key = chunk_alloc(i2d_RSA_PUBKEY(rsa, NULL));
p = key.ptr;
i2d_RSA_PUBKEY(rsa, &p);
break;
default:
{
chunk_t n = chunk_empty, e = chunk_empty;
bool success = FALSE;
if (openssl_bn2chunk(rsa->n, &n) &&
openssl_bn2chunk(rsa->e, &e))
{
success = lib->encoding->encode(lib->encoding, type, rsa, fp,
CRED_PART_RSA_MODULUS, n,
CRED_PART_RSA_PUB_EXP, e, CRED_PART_END);
}
chunk_free(&n);
chunk_free(&e);
return success;
}
}
hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
if (!hasher || !hasher->allocate_hash(hasher, key, fp))
{
DBG1(DBG_LIB, "SHA1 hash algorithm not supported, fingerprinting failed");
DESTROY_IF(hasher);
free(key.ptr);
return FALSE;
}
free(key.ptr);
hasher->destroy(hasher);
lib->encoding->cache(lib->encoding, type, rsa, *fp);
return TRUE;
}
METHOD(public_key_t, get_fingerprint, bool,
private_openssl_rsa_public_key_t *this, cred_encoding_type_t type,
chunk_t *fingerprint)
{
return openssl_rsa_fingerprint(this->rsa, type, fingerprint);
}
METHOD(public_key_t, get_encoding, bool,
private_openssl_rsa_public_key_t *this, cred_encoding_type_t type,
chunk_t *encoding)
{
bool success = FALSE;
u_char *p;
switch (type)
{
case PUBKEY_SPKI_ASN1_DER:
case PUBKEY_PEM:
{
*encoding = chunk_alloc(i2d_RSA_PUBKEY(this->rsa, NULL));
p = encoding->ptr;
i2d_RSA_PUBKEY(this->rsa, &p);
success = TRUE;
if (type == PUBKEY_PEM)
{
chunk_t asn1_encoding = *encoding;
success = lib->encoding->encode(lib->encoding, PUBKEY_PEM,
NULL, encoding, CRED_PART_RSA_PUB_ASN1_DER,
asn1_encoding, CRED_PART_END);
chunk_clear(&asn1_encoding);
}
return success;
}
case PUBKEY_ASN1_DER:
{
*encoding = chunk_alloc(i2d_RSAPublicKey(this->rsa, NULL));
p = encoding->ptr;
i2d_RSAPublicKey(this->rsa, &p);
return TRUE;
}
default:
{
chunk_t n = chunk_empty, e = chunk_empty;
if (openssl_bn2chunk(this->rsa->n, &n) &&
openssl_bn2chunk(this->rsa->e, &e))
{
success = lib->encoding->encode(lib->encoding, type, NULL,
encoding, CRED_PART_RSA_MODULUS, n,
CRED_PART_RSA_PUB_EXP, e, CRED_PART_END);
}
chunk_free(&n);
chunk_free(&e);
return success;
}
}
}
METHOD(public_key_t, get_ref, public_key_t*,
private_openssl_rsa_public_key_t *this)
{
ref_get(&this->ref);
return &this->public.key;
}
METHOD(public_key_t, destroy, void,
private_openssl_rsa_public_key_t *this)
{
if (ref_put(&this->ref))
{
if (this->rsa)
{
lib->encoding->clear_cache(lib->encoding, this->rsa);
RSA_free(this->rsa);
}
free(this);
}
}
/**
* Generic private constructor
*/
static private_openssl_rsa_public_key_t *create_empty()
{
private_openssl_rsa_public_key_t *this;
INIT(this,
.public = {
.key = {
.get_type = _get_type,
.verify = _verify,
.encrypt = _encrypt,
.equals = public_key_equals,
.get_keysize = _get_keysize,
.get_fingerprint = _get_fingerprint,
.has_fingerprint = public_key_has_fingerprint,
.get_encoding = _get_encoding,
.get_ref = _get_ref,
.destroy = _destroy,
},
},
.ref = 1,
);
return this;
}
/**
* See header.
*/
openssl_rsa_public_key_t *openssl_rsa_public_key_load(key_type_t type,
va_list args)
{
private_openssl_rsa_public_key_t *this;
chunk_t blob, n, e;
n = e = blob = chunk_empty;
while (TRUE)
{
switch (va_arg(args, builder_part_t))
{
case BUILD_BLOB_ASN1_DER:
blob = va_arg(args, chunk_t);
continue;
case BUILD_RSA_MODULUS:
n = va_arg(args, chunk_t);
continue;
case BUILD_RSA_PUB_EXP:
e = va_arg(args, chunk_t);
continue;
case BUILD_END:
break;
default:
return NULL;
}
break;
}
this = create_empty();
if (blob.ptr)
{
switch (type)
{
case KEY_ANY:
this->rsa = d2i_RSA_PUBKEY(NULL, (const u_char**)&blob.ptr,
blob.len);
break;
case KEY_RSA:
this->rsa = d2i_RSAPublicKey(NULL, (const u_char**)&blob.ptr,
blob.len);
break;
default:
break;
}
if (this->rsa)
{
return &this->public;
}
}
else if (n.ptr && e.ptr && type == KEY_RSA)
{
this->rsa = RSA_new();
this->rsa->n = BN_bin2bn((const u_char*)n.ptr, n.len, NULL);
this->rsa->e = BN_bin2bn((const u_char*)e.ptr, e.len, NULL);
return &this->public;
}
destroy(this);
return NULL;
}
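/*
 * Hypothetical usage sketch (the helper name and the way the key and the
 * chunks are obtained are assumptions, not part of this file): once a key has
 * been built, signature checking goes through the generic public_key_t
 * interface and ends up in verify_emsa_pkcs1_signature() above.
 */
static bool example_verify_sha256(public_key_t *key, chunk_t tbs, chunk_t sig)
{
	return key->verify(key, SIGN_RSA_EMSA_PKCS1_SHA256, tbs, sig);
}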
#endif /* OPENSSL_NO_RSA */
|
16533.c | /*
* VirtioBus
*
* Copyright (C) 2012 : GreenSocs Ltd
* http://www.greensocs.com/ , email: [email protected]
*
* Developed by :
* Frederic Konrad <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "hw/qdev.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio.h"
/* #define DEBUG_VIRTIO_BUS */
#ifdef DEBUG_VIRTIO_BUS
#define DPRINTF(fmt, ...) \
do { printf("virtio_bus: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do { } while (0)
#endif
/* A VirtIODevice is being plugged */
int virtio_bus_device_plugged(VirtIODevice *vdev)
{
DeviceState *qdev = DEVICE(vdev);
BusState *qbus = BUS(qdev_get_parent_bus(qdev));
VirtioBusState *bus = VIRTIO_BUS(qbus);
VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(bus);
DPRINTF("%s: plug device.\n", qbus->name);
if (klass->device_plugged != NULL) {
klass->device_plugged(qbus->parent);
}
return 0;
}
/* Reset the virtio_bus */
void virtio_bus_reset(VirtioBusState *bus)
{
VirtIODevice *vdev = virtio_bus_get_device(bus);
DPRINTF("%s: reset device.\n", qbus->name);
if (vdev != NULL) {
virtio_reset(vdev);
}
}
/* A VirtIODevice is being unplugged */
void virtio_bus_device_unplugged(VirtIODevice *vdev)
{
DeviceState *qdev = DEVICE(vdev);
BusState *qbus = BUS(qdev_get_parent_bus(qdev));
VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(qbus);
DPRINTF("%s: remove device.\n", qbus->name);
if (vdev != NULL) {
if (klass->device_unplugged != NULL) {
klass->device_unplugged(qbus->parent);
}
}
}
/* Get the device id of the plugged device. */
uint16_t virtio_bus_get_vdev_id(VirtioBusState *bus)
{
VirtIODevice *vdev = virtio_bus_get_device(bus);
assert(vdev != NULL);
return vdev->device_id;
}
/* Get the config_len field of the plugged device. */
size_t virtio_bus_get_vdev_config_len(VirtioBusState *bus)
{
VirtIODevice *vdev = virtio_bus_get_device(bus);
assert(vdev != NULL);
return vdev->config_len;
}
/* Get the features of the plugged device. */
uint32_t virtio_bus_get_vdev_features(VirtioBusState *bus,
uint32_t requested_features)
{
VirtIODevice *vdev = virtio_bus_get_device(bus);
VirtioDeviceClass *k;
assert(vdev != NULL);
k = VIRTIO_DEVICE_GET_CLASS(vdev);
assert(k->get_features != NULL);
return k->get_features(vdev, requested_features);
}
/* Set the features of the plugged device. */
void virtio_bus_set_vdev_features(VirtioBusState *bus,
uint32_t requested_features)
{
VirtIODevice *vdev = virtio_bus_get_device(bus);
VirtioDeviceClass *k;
assert(vdev != NULL);
k = VIRTIO_DEVICE_GET_CLASS(vdev);
if (k->set_features != NULL) {
k->set_features(vdev, requested_features);
}
}
/* Get bad features of the plugged device. */
uint32_t virtio_bus_get_vdev_bad_features(VirtioBusState *bus)
{
VirtIODevice *vdev = virtio_bus_get_device(bus);
VirtioDeviceClass *k;
assert(vdev != NULL);
k = VIRTIO_DEVICE_GET_CLASS(vdev);
if (k->bad_features != NULL) {
return k->bad_features(vdev);
} else {
return 0;
}
}
/* Get config of the plugged device. */
void virtio_bus_get_vdev_config(VirtioBusState *bus, uint8_t *config)
{
VirtIODevice *vdev = virtio_bus_get_device(bus);
VirtioDeviceClass *k;
assert(vdev != NULL);
k = VIRTIO_DEVICE_GET_CLASS(vdev);
if (k->get_config != NULL) {
k->get_config(vdev, config);
}
}
/* Set config of the plugged device. */
void virtio_bus_set_vdev_config(VirtioBusState *bus, uint8_t *config)
{
VirtIODevice *vdev = virtio_bus_get_device(bus);
VirtioDeviceClass *k;
assert(vdev != NULL);
k = VIRTIO_DEVICE_GET_CLASS(vdev);
if (k->set_config != NULL) {
k->set_config(vdev, config);
}
}
static char *virtio_bus_get_dev_path(DeviceState *dev)
{
BusState *bus = qdev_get_parent_bus(dev);
DeviceState *proxy = DEVICE(bus->parent);
return qdev_get_dev_path(proxy);
}
static char *virtio_bus_get_fw_dev_path(DeviceState *dev)
{
return NULL;
}
static void virtio_bus_class_init(ObjectClass *klass, void *data)
{
BusClass *bus_class = BUS_CLASS(klass);
bus_class->get_dev_path = virtio_bus_get_dev_path;
bus_class->get_fw_dev_path = virtio_bus_get_fw_dev_path;
}
static const TypeInfo virtio_bus_info = {
.name = TYPE_VIRTIO_BUS,
.parent = TYPE_BUS,
.instance_size = sizeof(VirtioBusState),
.abstract = true,
.class_size = sizeof(VirtioBusClass),
.class_init = virtio_bus_class_init
};
static void virtio_register_types(void)
{
type_register_static(&virtio_bus_info);
}
type_init(virtio_register_types)
|
561543.c | /* ************************************************************************** */
/* */
/* ::: :::::::: */
/* uvector_copy.c :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: akharrou <[email protected]> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2019/05/24 15:37:15 by akharrou #+# #+# */
/* Updated: 2019/05/25 11:37:09 by akharrou ### ########.fr */
/* */
/* ************************************************************************** */
/*
** NAME
** uvector_copy -- create a shallow copy of a uvector
**
** SYNOPSIS
** #include <libft.h>
**
** struct s_uvector
** uvector_copy(struct s_uvector instance);
**
** PARAMETERS
**
**    struct s_uvector instance      A uvector instance.
**
** DESCRIPTION
**    Makes a duplicate of a uvector and returns
**    it. The uvector is copied only insofar as the
**    pointers to each element are copied; the data
**    to which the pointers point is not copied (it
**    is a shallow copy, not a deep copy).
**
** RETURN VALUES
** If successful returns a duplicate instance of the
** passed in instance; otherwise the passed in instance
** unchanged is returned.
*/
#include "../Includes/stdlib_42.h"
#include "../Includes/uvector.h"
struct s_uvector uvector_copy(struct s_uvector instance)
{
struct s_uvector duplicate;
duplicate = uvector.constructor(instance.capacity);
duplicate.uvector = ft_memdup(
instance.uvector, (instance.capacity) * sizeof(intmax_t));
if (!duplicate.uvector)
return (instance);
duplicate.length = instance.length;
return (duplicate);
}
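/*
** Usage sketch (the helper name is hypothetical): only uvector.constructor(),
** already relied upon above, and uvector_copy() are used; releasing the two
** vectors is left to whatever destructor the uvector interface provides.
*/

static struct s_uvector	example_duplicate_empty_vector(void)
{
	struct s_uvector	original;

	original = uvector.constructor(8);
	return (uvector_copy(original));
}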
|
546205.c | /***************************************************************************
video.c
Functions to emulate the video hardware of the machine.
***************************************************************************/
#include "emu.h"
UINT8 *tankbatt_bulletsram;
size_t tankbatt_bulletsram_size;
static tilemap_t *bg_tilemap;
/***************************************************************************
Convert the color PROMs into a more useable format.
***************************************************************************/
PALETTE_INIT( tankbatt )
{
int i;
#define RES_1 0xc0 /* this is a guess */
#define RES_2 0x3f /* this is a guess */
/* allocate the colortable */
machine->colortable = colortable_alloc(machine, 0x100);
/* create a lookup table for the palette */
for (i = 0; i < 0x100; i++)
{
int bit0, bit1, bit2, bit3;
int r, g, b;
bit0 = (color_prom[i] >> 0) & 0x01; /* intensity */
bit1 = (color_prom[i] >> 1) & 0x01; /* red */
bit2 = (color_prom[i] >> 2) & 0x01; /* green */
bit3 = (color_prom[i] >> 3) & 0x01; /* blue */
/* red component */
r = RES_1 * bit1;
if (bit1) r += RES_2 * bit0;
/* green component */
g = RES_1 * bit2;
if (bit2) g += RES_2 * bit0;
/* blue component */
b = RES_1 * bit3;
if (bit3) b += RES_2 * bit0;
colortable_palette_set_color(machine->colortable, i, MAKE_RGB(r, g, b));
}
for (i = 0; i < 0x200; i += 2)
{
colortable_entry_set_value(machine->colortable, i + 0, 0);
colortable_entry_set_value(machine->colortable, i + 1, i >> 1);
}
}
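/*
 * Worked example of the component math above (using the guessed RES_1/RES_2
 * values): a PROM entry of 0x0b has intensity=1, red=1, green=0, blue=1, so
 * r = 0xc0 + 0x3f = 0xff, g = 0x00, b = 0xc0 + 0x3f = 0xff, i.e. a
 * full-intensity magenta.
 */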
WRITE8_HANDLER( tankbatt_videoram_w )
{
space->machine->generic.videoram.u8[offset] = data;
tilemap_mark_tile_dirty(bg_tilemap, offset);
}
static TILE_GET_INFO( get_bg_tile_info )
{
int code = machine->generic.videoram.u8[tile_index];
int color = machine->generic.videoram.u8[tile_index] | 0x01;
SET_TILE_INFO(0, code, color, 0);
}
VIDEO_START( tankbatt )
{
bg_tilemap = tilemap_create(machine, get_bg_tile_info, tilemap_scan_rows, 8, 8, 32, 32);
}
static void draw_bullets(running_machine *machine, bitmap_t *bitmap, const rectangle *cliprect)
{
int offs;
for (offs = 0;offs < tankbatt_bulletsram_size;offs += 2)
{
int color = 0xff; /* cyan, same color as the tanks */
int x = tankbatt_bulletsram[offs + 1];
int y = 255 - tankbatt_bulletsram[offs] - 2;
drawgfx_opaque(bitmap,cliprect,machine->gfx[1],
0, /* this is just a square, generated by the hardware */
color,
0,0,
x,y);
}
}
VIDEO_UPDATE( tankbatt )
{
tilemap_draw(bitmap, cliprect, bg_tilemap, 0, 0);
draw_bullets(screen->machine, bitmap, cliprect);
return 0;
}
|
31275.c | /*
* Copyright 1996-2020 Cyberbotics Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Implements a simple PSO evolution for an ANN
*/
#include "pso.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <webots/robot.h>
#define PSO_W 1.0 // parameter w in PSO
#define PSO_NW 2.0 // parameter nw in PSO
#define PSO_PW 2.0 // parameter pw in PSO
/***************************************************************
* LOCAL FUNCTIONS
**************************************************************/
double sigmoid(double x) {
return 1.0 / (1.0 + exp(-x)); // the sigmoid function
}
float rand01() {
return (((float)rand()) / RAND_MAX);
}
/************************************
* Layer Manipulation functions
************************************/
void RandomizeLayer(layer_t *l) {
int i;
float *W;
float *vW;
W = l->W;
vW = l->vW;
for (i = 0; i < (l->depth) * (l->width); i++) {
*W = (float)(2.0 * (((double)rand()) / RAND_MAX) - 1.0);
*vW = (float)(2.0 * (((double)rand()) / RAND_MAX) - 1.0);
W++;
vW++;
}
}
void InputToLayer(layer_t *l, float *values) {
int i;
float *x;
x = l->x;
for (i = 0; i < l->width; i++)
x[i] = values[i];
}
void ActivateLayer(layer_t *l) {
int i, j;
float *W;
// for each neuron in layer
W = l->W;
for (i = 0; i < l->depth; i++) {
// calculate weighted input
float sum = 0;
for (j = 0; j < l->width; j++) {
sum += (*W) * l->x[j];
W++;
}
// compute activation function and save output of layer
l->y[i] = (float)sigmoid(sum);
}
}
/*void WeightedGradient(layer_t* l, float* Wg)
{
int i,j;
for(i=0;i<l->width;i++)
{
Wg[i]=0;
for(j=0;j<l->depth;j++)
{
Wg[i] += (*(l->W+j*l->width+i))*(l->g[j]);
}
}
}*/
void EvolveLayer(layer_t *l, layer_t *pbest, layer_t *gbest) {
int i, j;
float *lW = l->W;
float *lvW = l->vW;
float *gbW = gbest->W;
float *pbW = pbest->W;
for (i = 0; i < l->width; i++) {
for (j = 0; j < l->depth; j++) {
float v = PSO_W * (*lvW + PSO_PW * rand01() * (*pbW - *lW) + PSO_NW * rand01() * (*gbW - *lW));
*lvW = v;
float x = *lW + v;
x = x > 100.0 ? 100.0 : x;
x = x < -100.0 ? -100.0 : x;
      *lW = x;
      /* advance to the next weight of this layer and of the best particles */
      lW++;
      lvW++;
      pbW++;
      gbW++;
    }
}
}
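/*
 * The update applied above is the usual particle-swarm rule: for each weight w
 * with velocity vw, personal best pw and global best gw,
 *
 *   vw <- PSO_W * ( vw + PSO_PW*r1*(pw - w) + PSO_NW*r2*(gw - w) )
 *   w  <- clamp(w + vw, -100, 100)
 *
 * with r1, r2 drawn uniformly from [0,1]. In this variant PSO_W scales the
 * whole bracket rather than only the old velocity (with PSO_W == 1.0 the two
 * formulations coincide).
 */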
void PrintLayerOutput(layer_t *l) {
int i;
char *main_buffer = (char *)malloc(sizeof(char) * (10 * l->depth + 1));
strcpy(main_buffer, "");
char buffer[10];
printf("%f\n", l->y[0]);
for (i = 0; i < l->depth; i++) {
sprintf(buffer, "%4.3f ", l->y[i]);
strcat(main_buffer, buffer);
}
strcat(main_buffer, "\n");
printf("%s", main_buffer);
free(main_buffer);
}
void SaveLayerWeights(layer_t *l, FILE *fp) {
if (fwrite(l->W, (sizeof(float) * (l->depth) * (l->width)), 1, fp) != 1)
printf("error writing to file\n");
}
void SaveLayerWeightsHDT(layer_t *l, FILE *fp) {
int i, j;
float *W;
W = l->W;
fprintf(fp, "{\n");
for (i = 0; i < l->depth; i++) {
fprintf(fp, "{");
for (j = 0; j < l->width; j++) {
fprintf(fp, "%f", *W);
if (j < l->width - 1)
fprintf(fp, ", ");
W++;
}
if (i < l->depth - 1)
fprintf(fp, "},\n");
else
fprintf(fp, "}\n");
}
fprintf(fp, "}");
}
void LoadLayerWeights(layer_t *l, FILE *fp) {
if (fread(l->W, (sizeof(float) * (l->depth) * (l->width)), 1, fp) != 1)
printf("error reading from file\n");
}
/************************************
* Network Manipulation functions
************************************/
void InputToNetwork(network_t *n, float *values) {
InputToLayer(&n->layers[0], values);
}
void OutputFromNetwork(network_t *n, float *values) {
int i;
layer_t *pl;
pl = &n->layers[n->size - 1];
for (i = 0; i < pl->depth; i++)
values[i] = pl->y[i];
}
void ActivateNetwork(network_t *n) {
int i;
for (i = 0; i < n->size - 1; i++) {
ActivateLayer(&n->layers[i]);
InputToLayer(&n->layers[i + 1], n->layers[i].y);
}
ActivateLayer(&n->layers[n->size - 1]);
}
void RandomizeNetwork(network_t *n) {
int i;
for (i = 0; i < n->size; i++)
RandomizeLayer(&n->layers[i]);
}
void EvolveNetwork(network_t *n, network_t *pbest, network_t *gbest) {
int i;
// printf("START EVOLVING\n");
// printf("===============\n");
for (i = 0; i < n->size; i++)
EvolveLayer(&n->layers[i], &pbest->layers[i], &gbest->layers[i]);
// printf("===============\n");
}
void PrintNetwork(network_t *n) {
int i;
for (i = 0; i < n->size; i++)
PrintLayerOutput(&n->layers[i]);
}
void PrintNetworkOutput(network_t *n) {
PrintLayerOutput(&n->layers[n->size - 1]);
}
void SaveNetworkWeights(network_t *n, const char *filename) {
FILE *fp;
int i;
if ((fp = fopen(filename, "w+b")) == NULL) {
fprintf(stderr, "Cannot open %s\n", filename);
return;
}
for (i = 0; i < n->size; i++)
SaveLayerWeights(&n->layers[i], fp);
fclose(fp);
}
void SaveNetworkWeightsHDT(network_t *n, const char *filename) {
FILE *fp;
int i;
if ((fp = fopen(filename, "w+b")) == NULL) {
fprintf(stderr, "Cannot open %s\n", filename);
return;
}
for (i = 0; i < n->size; i++)
SaveLayerWeightsHDT(&n->layers[i], fp);
fclose(fp);
}
void LoadNetworkWeights(network_t *n, const char *filename) {
FILE *fp;
int i;
if ((fp = fopen(filename, "rb")) == NULL) {
fprintf(stderr, "Cannot open %s\n", filename);
return;
}
for (i = 0; i < n->size; i++)
LoadLayerWeights(&n->layers[i], fp);
fclose(fp);
}
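/*
 * Hypothetical usage sketch: one feed-forward pass through an already
 * constructed network. How the layers and their W/vW/x/y buffers are
 * allocated is outside this file, so `net` is assumed to be fully set up
 * (the function name is illustrative only).
 */
static void ExampleForwardPass(network_t *net, float *inputs, float *outputs) {
  InputToNetwork(net, inputs);
  ActivateNetwork(net);
  OutputFromNetwork(net, outputs);
}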
|
902544.c | /* ************************************************************************** */
/** Descriptive File Name
@Company
Digilent
@File Name
swt.c
@Description
This file groups the functions that implement the SWT library.
The functions implement basic digital input functionality.
Include the file in the project, together with config.h, when this library is needed.
@Author
Cristian Fatu
[email protected]
*/
/* ************************************************************************** */
/* ************************************************************************** */
/* ************************************************************************** */
/* Section: Included Files */
/* ************************************************************************** */
#include <xc.h>
#include <sys/attribs.h>
#include "config.h"
#include "swt.h"
/* ************************************************************************** */
/*** SWT_Init
**
** Parameters:
**
**
** Return Value:
**
**
** Description:
** This function initializes the hardware involved in the SWT module:
** the pins corresponding to switches are initialized as digital inputs.
**
*/
void SWT_Init()
{
SWT_ConfigurePins();
}
/*** SWT_ConfigurePins
**
** Parameters:
**
**
** Return Value:
**
**
** Description:
** This function configures the IO pins involved in the SWT module as digital input pins.
** The function uses pin related definitions from config.h file.
** This is a low-level function called by SWT_Init(), so user should avoid calling it directly.
**
*/
void SWT_ConfigurePins()
{
// Configure SWTs as digital inputs.
tris_SWT_SWT0 = 1;
tris_SWT_SWT1 = 1;
tris_SWT_SWT2 = 1;
tris_SWT_SWT3 = 1;
tris_SWT_SWT4 = 1;
tris_SWT_SWT5 = 1;
tris_SWT_SWT6 = 1;
tris_SWT_SWT7 = 1;
// disable analog (set pins as digital))
ansel_SWT_SWT5 = 0;
ansel_SWT_SWT6 = 0;
ansel_SWT_SWT7 = 0;
}
/*** SWT_GetValue
**
** Parameters:
**      unsigned char bNo - the number of the switch whose value will be read. The value must be between 0 and 7.
**
** Return Value:
** unsigned char - the value corresponding to the specified switch:
** 0 when SW<bNo> is turned off
** 1 when SW<bNo> is turned on
** - 0xFF if bNo is not within 0 - 7.
**
**
** Description:
**      This function gets the value (0 or 1) of the switch specified by bNo.
** If the value provided for bNo is not between 0 and 7, 0xFF is returned.
**
*/
unsigned char SWT_GetValue(unsigned char bNo)
{
    unsigned char bResult = 0xFF;
switch (bNo)
{
case 0:
bResult = prt_SWT_SWT0;
break;
case 1:
bResult = prt_SWT_SWT1;
break;
case 2:
bResult = prt_SWT_SWT2;
break;
case 3:
bResult = prt_SWT_SWT3;
break;
case 4:
bResult = prt_SWT_SWT4;
break;
case 5:
bResult = prt_SWT_SWT5;
break;
case 6:
bResult = prt_SWT_SWT6;
break;
case 7:
bResult = prt_SWT_SWT7;
break;
}
return bResult;
}
/*** SWT_GetGroupValue
**
** Parameters:
**
** Return Value:
**      unsigned char - the 8 bit value B7 B6 B5 B4 B3 B2 B1 B0 where each bit Bi corresponds to SW<i>:
**                          0 if SW<i> is turned off
**                          1 if SW<i> is turned on
**
**
** Description:
** This function gets the value of the all 8 switches as a single value on 8 bits.
** Each bit from returned value corresponds to a switch: Bit 0 (LSB) corresponds to SW0, bit 7 (MSB) corresponds to SW7.
**
*/
unsigned char SWT_GetGroupValue()
{
int i;
unsigned char bResult = 0;
for(i = 0; i < 8; i++)
{
if(SWT_GetValue(i))
{
bResult |= (1 << i);
}
}
return bResult;
}
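/***	SWT_ExampleIsOn
**
**	Illustrative helper (hypothetical, not part of the Digilent API): shows
**	how the group value relates to individual switches by testing bit bNo of
**	SWT_GetGroupValue(). Uses the same out-of-range convention as SWT_GetValue.
*/
unsigned char SWT_ExampleIsOn(unsigned char bNo)
{
    unsigned char bGroup = SWT_GetGroupValue();
    if (bNo > 7)
    {
        return 0xFF;    // out of range, same convention as SWT_GetValue
    }
    return (bGroup >> bNo) & 0x01;
}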
/* *****************************************************************************
End of File
*/
|
398637.c | #include "expr.h"
#if 0
/* This can be useful for debugging */
#include "expr_debug.h"
#endif
#include <assert.h>
#include <stdio.h>
#ifndef _WIN32
#include <sys/time.h>
#else
#include "gettimeofday.h"
#endif
int status = 0;
/*
* VECTOR TESTS
*/
typedef vec(int) test_vec_int_t;
typedef vec(char *) test_vec_str_t;
static void test_vector() {
test_vec_int_t ints = vec_init();
test_vec_str_t strings = vec_init();
vec_push(&ints, 3);
assert(vec_len(&ints) == 1);
assert(vec_peek(&ints) == 3);
assert(vec_pop(&ints) == 3);
assert(vec_len(&ints) == 0);
vec_free(&ints);
vec_push(&strings, "hello");
vec_push(&strings, "world");
vec_push(&strings, "foo");
assert(vec_len(&strings) == 3);
{
int i;
char *el;
vec_foreach(&strings, el, i) { printf("%s %d\n", el, i); }
}
vec_free(&strings);
}
/*
* VARIABLES VECTOR TEST
*/
static void test_vars() {
struct expr_var_list vars = {0};
struct expr_var *a = expr_var(&vars, "a", 1);
a->value = 4;
expr_var(&vars, "b", 1);
expr_var(&vars, "ab", 2);
{
struct expr_var *again = expr_var(&vars, "a", 1);
assert(again == a);
assert(again->value == 4);
}
expr_destroy(NULL, &vars);
}
/*
* LEXER TESTS
*/
static int assert_tokens(char *s, char **expected) {
int len = strlen(s);
int flags = EXPR_TDEFAULT;
char *test = s;
for (;;) {
int n = expr_next_token(s, len, &flags);
if (n == 0) {
if (*expected == NULL) {
printf("OK '%s'\n", test);
return 0;
} else {
printf("FAIL '%s': not enough tokens\n", test);
status = 1;
}
} else if (n < 0) {
printf("FAIL '%s': error %d\n", test, n);
status = 1;
return 0;
}
if (strncmp(*expected, s, n) != 0) {
printf("FAIL '%s': token mismatch %.*s %s\n", test, n, s, *expected);
status = 1;
return 0;
}
expected++;
s = s + n;
len = len - n;
}
}
static void test_tokizer() {
unsigned int i;
#ifdef _MSC_VER
char *T1[] = {"", NULL};
char *T2[] = {"1", "1", NULL};
char *T3[] = {"1+11", "1", "+", "11", NULL};
char *T4[] = {"1*11", "1", "*", "11", NULL};
char *T5[] = {"1**11", "1", "**", "11", NULL};
char *T6[] = {"1**-11", "1", "**", "-", "11", NULL};
char **TESTS[] = {
T1, T2, T3, T4, T5, T6
};
#else
char **TESTS[] = {
(char *[]){"", NULL},
(char *[]){"1", "1", NULL},
(char *[]){"1+11", "1", "+", "11", NULL},
(char *[]){"1*11", "1", "*", "11", NULL},
(char *[]){"1**11", "1", "**", "11", NULL},
(char *[]){"1**-11", "1", "**", "-", "11", NULL},
};
#endif
for (i = 0; i < sizeof(TESTS) / sizeof(TESTS[0]); i++) {
assert_tokens(TESTS[i][0], TESTS[i] + 1);
}
}
/*
* PARSER TESTS
*/
struct nop_context {
void *p;
};
static void user_func_nop_cleanup(struct expr_func *f, void *c) {
struct nop_context *nop = (struct nop_context *)c;
(void)f;
free(nop->p);
}
static double user_func_nop(struct expr_func *f, vec_expr_t *args, void *c) {
struct nop_context *nop = (struct nop_context *)c;
(void)args;
if (f->ctxsz == 0) {
free(nop->p);
return 0;
}
if (nop->p == NULL) {
nop->p = malloc(10000);
}
return 0;
}
static double user_func_add(struct expr_func *f, vec_expr_t *args, void *c) {
double a = expr_eval(&vec_nth(args, 0));
double b = expr_eval(&vec_nth(args, 1));
(void)f, (void)c;
return a + b;
}
static double user_func_next(struct expr_func *f, vec_expr_t *args, void *c) {
double a = expr_eval(&vec_nth(args, 0));
(void)f, (void)c;
return a + 1;
}
static double user_func_print(struct expr_func *f, vec_expr_t *args, void *c) {
int i;
struct expr e;
(void)f, (void)c;
fprintf(stderr, ">> ");
vec_foreach(args, e, i) { fprintf(stderr, "%f ", expr_eval(&e)); }
fprintf(stderr, "\n");
return 0;
}
static struct expr_func user_funcs[] = {
{"nop", user_func_nop, user_func_nop_cleanup, sizeof(struct nop_context)},
{"add", user_func_add, NULL, 0},
{"next", user_func_next, NULL, 0},
{"print", user_func_print, NULL, 0},
{NULL, NULL, NULL, 0},
};
static void test_expr(char *s, double expected) {
struct expr_var_list vars = {0};
struct expr *e = expr_create(s, strlen(s), &vars, user_funcs);
if (e == NULL) {
printf("FAIL: %s returned NULL\n", s);
status = 1;
return;
}
{
double result = expr_eval(e);
char *p = (char *)malloc(strlen(s) + 1);
char *it;
strncpy(p, s, strlen(s) + 1);
for (it = p; *it; it++) {
if (*it == '\n') {
*it = '\\';
}
}
if ((isnan(result) && !isnan(expected)) ||
fabs(result - expected) > 0.00001f) {
printf("FAIL: %s: %f != %f\n", p, result, expected);
status = 1;
} else {
printf("OK: %s == %f\n", p, expected);
}
expr_destroy(e, &vars);
free(p);
}
}
static void test_expr_error(char *s) {
struct expr_var_list vars = {0};
struct expr *e = expr_create(s, strlen(s), &vars, user_funcs);
if (e != NULL) {
printf("FAIL: %s should return error\n", s);
status = 1;
}
expr_destroy(e, &vars);
}
static void test_empty() {
test_expr("", 0);
test_expr(" ", 0);
test_expr(" \t \n ", 0);
}
static void test_const() {
test_expr("1", 1);
test_expr(" 1 ", 1);
test_expr("12", 12);
test_expr("12.3", 12.3);
}
static void test_unary() {
test_expr("-1", -1);
test_expr("--1", -(-1));
test_expr("!0 ", !0);
test_expr("!2 ", !2);
test_expr("^3", ~3);
}
static void test_binary() {
test_expr("1+2", 1 + 2);
test_expr("10-2", 10 - 2);
test_expr("2*3", 2 * 3);
test_expr("2+3*4", 2 + 3 * 4);
test_expr("2*3+4", 2 * 3 + 4);
test_expr("2+3/2", 2 + 3.0 / 2.0);
test_expr("1/3*6/4*2", 1.0 / 3 * 6 / 4.0 * 2);
test_expr("1*3/6*4/2", 1.0 * 3 / 6 * 4.0 / 2.0);
test_expr("6/2+8*4/2", 19);
test_expr("3/2", 3.0 / 2.0);
test_expr("(3/2)|0", 3 / 2);
test_expr("(3/0)", INFINITY);
test_expr("(3/0)|0", INT_MAX);
test_expr("(3%0)", NAN);
test_expr("(3%0)|0", 0);
test_expr("2**3", 8);
test_expr("9**(1/2)", 3);
test_expr("1+2<<3", (1 + 2) << 3);
test_expr("2<<3", 2 << 3);
test_expr("12>>2", 12 >> 2);
test_expr("1<2", 1 < 2);
test_expr("2<2", 2 < 2);
test_expr("3<2", 3 < 2);
test_expr("1>2", 1 > 2);
test_expr("2>2", 2 > 2);
test_expr("3>2", 3 > 2);
test_expr("1==2", 1 == 2);
test_expr("2==2", 2 == 2);
test_expr("3==2", 3 == 2);
test_expr("3.2==3.1", 3.2f == 3.1f);
test_expr("1<=2", 1 <= 2);
test_expr("2<=2", 2 <= 2);
test_expr("3<=2", 3 <= 2);
test_expr("1>=2", 1 >= 2);
test_expr("2>=2", 2 >= 2);
test_expr("3>=2", 3 >= 2);
test_expr("123&42", 123 & 42);
test_expr("123^42", 123 ^ 42);
test_expr("1-1+1+1", 1 - 1 + 1 + 1);
test_expr("2**2**3", 256); /* 2^(2^3), not (2^2)^3 */
}
static void test_logical() {
test_expr("2&&3", 3);
test_expr("0&&3", 0);
test_expr("3&&0", 0);
test_expr("2||3", 2);
test_expr("0||3", 3);
test_expr("2||0", 2);
test_expr("0||0", 0);
test_expr("1&&(3%0)", NAN);
test_expr("(3%0)&&1", NAN);
test_expr("1||(3%0)", 1);
test_expr("(3%0)||1", 1);
}
static void test_parens() {
test_expr("(1+2)*3", (1 + 2) * 3);
test_expr("(1)", 1);
test_expr("(2.4)", 2.4);
test_expr("((2))", 2);
test_expr("(((3)))", 3);
test_expr("(((3)))*(1+(2))", 9);
}
static void test_assign() {
test_expr("x=5", 5);
test_expr("x=y=3", 3);
}
static void test_comma() {
test_expr("2,3,4", 4);
test_expr("2+3,4*5", 4 * 5);
test_expr("x=5, x", 5);
test_expr("x=5, y = 3, x+y", 8);
test_expr("x=5, x=(x!=0)", 1);
test_expr("x=5, x = x+1", 6);
}
static void test_funcs() {
test_expr("add(1,2) + next(3)", 7);
test_expr("add(1,next(2))", 4);
test_expr("add(1,1+1) + add(2*2+1,2)", 10);
test_expr("nop()", 0);
test_expr("x=2,add(1, next(x))", 4);
test_expr("$(zero), zero()", 0);
test_expr("$(zero), zero(1, 2, 3)", 0);
test_expr("$(one, 1), one()+one(1)+one(1, 2, 4)", 3);
test_expr("$(number, 1), $(number, 2+3), number()", 5);
test_expr("$(triw, ($1 * 256) & 255), triw(0.5, 2)", 128);
test_expr("$(triw, ($1 * 256) & 255), triw(0.1)+triw(0.7)+triw(0.2)", 255);
}
static void test_name_collision() {
test_expr("next=5", 5);
test_expr("next=2,next(5)+next", 8);
}
static void test_fancy_variable_names() {
test_expr("one=1", 1);
#ifndef _MSC_VER
test_expr("один=1", 1);
#endif
test_expr("six=6, seven=7, six*seven", 42);
#ifndef _MSC_VER
test_expr("шість=6, сім=7, шість*сім", 42);
test_expr("六=6, 七=7, 六*七", 42);
test_expr("ταῦ=1.618, 3*ταῦ", 3 * 1.618);
test_expr("$(ταῦ, 1.618), 3*ταῦ()", 3 * 1.618);
#endif
test_expr("x#4=12, x#3=3, x#4+x#3", 15);
}
static void test_auto_comma() {
test_expr("a=3\na+2\n", 5);
test_expr("a=3\n\n\na+2\n", 5);
test_expr("\n\na=\n3\n\n\na+2\n", 5);
test_expr("\n\n3\n\n", 3);
test_expr("\n\n\n\n", 0);
test_expr("3\n\n\n\n", 3);
test_expr("a=3\nb=4\na", 3);
test_expr("(\n2+3\n)\n", 5);
test_expr("a=\n3*\n(4+\n3)\na+\na\n", 42);
}
static void test_benchmark(const char *s) {
struct timeval t;
gettimeofday(&t, NULL);
{
double start = t.tv_sec + t.tv_usec * 1e-6;
double end, ns;
long N, i;
struct expr_var_list vars = {0};
struct expr *e = expr_create(s, strlen(s), &vars, user_funcs);
if (e == NULL) {
printf("FAIL: %s can't be compiled\n", s);
status = 1;
return;
}
N = 1000000L;
for (i = 0; i < N; i++) {
expr_eval(e);
}
gettimeofday(&t, NULL);
end = t.tv_sec + t.tv_usec * 1e-6;
expr_destroy(e, &vars);
ns = 1000000000 * (end - start) / N;
printf("BENCH %40s:\t%f ns/op (%dM op/sec)\n", s, ns, (int)(1000 / ns));
}
}
static void test_bad_syntax() {
test_expr_error("(");
test_expr_error(")");
test_expr_error("()3");
test_expr_error("()x");
test_expr_error("0^+1");
test_expr_error("()\\");
test_expr_error("().");
test_expr_error("4ever");
test_expr_error("(2+3");
test_expr_error("(-2");
test_expr_error("*2");
test_expr_error("nop=");
test_expr_error("nop(");
test_expr_error("unknownfunc()");
test_expr_error("$(recurse, recurse()), recurse()");
test_expr_error("),");
test_expr_error("+(");
test_expr_error("2=3");
test_expr_error("2.3.4");
test_expr_error("1()");
test_expr_error("x()");
test_expr_error(",");
test_expr_error("1,,2");
test_expr_error("nop(,x)");
test_expr_error("nop(x=)>1");
test_expr_error("1 x");
test_expr_error("1++");
test_expr_error("foo((x))");
test_expr_error("nop(x))");
test_expr_error("nop((x)");
test_expr_error("$($())");
test_expr_error("$(1)");
test_expr_error("$()");
}
int main() {
test_vector();
test_vars();
test_tokizer();
test_empty();
test_const();
test_unary();
test_binary();
test_logical();
test_parens();
test_assign();
test_comma();
test_funcs();
test_name_collision();
test_fancy_variable_names();
test_auto_comma();
test_bad_syntax();
test_benchmark("5");
test_benchmark("5+5+5+5+5+5+5+5+5+5");
test_benchmark("5*5*5*5*5*5*5*5*5*5");
test_benchmark("5,5,5,5,5,5,5,5,5,5");
test_benchmark("((5+5)+(5+5))+((5+5)+(5+5))+(5+5)");
test_benchmark("x=5");
test_benchmark("x=5,x+x+x+x+x+x+x+x+x+x");
test_benchmark("x=5,((x+x)+(x+x))+((x+x)+(x+x))+(x+x)");
test_benchmark("a=1,b=2,c=3,d=4,e=5,f=6,g=7,h=8,i=9,j=10");
test_benchmark("a=1,a=2,a=3,a=4,a=5,a=6,a=7,a=8,a=9,a=10");
test_benchmark("$(sqr,$1*$1),5*5");
test_benchmark("$(sqr,$1*$1),sqr(5)");
test_benchmark("x=2+3*(x/(42+next(x))),x");
test_benchmark("add(next(x), next(next(x)))");
test_benchmark("a,b,c,d,e,d,e,f,g,h,i,j,k");
test_benchmark("$(a,1),$(b,2),$(c,3),$(d,4),5");
return status;
}
|
925202.c | #include "miinclude.h"
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
if(nlhs >3)
mexErrMsgTxt("too many output...from estpab");
long i,j;
double * vec1 = mxGetPr(prhs[0]);
long len1 = mxGetNumberOfElements(prhs[0]);
double * vec2 = mxGetPr(prhs[1]);
long len2 = mxGetNumberOfElements(prhs[1]);
if (len1!=len2)
mexErrMsgTxt("The two vectors should have the same length.");
 int nstate1 = (int)mxGetScalar(prhs[2]), nstate2 = (int)mxGetScalar(prhs[3]);
 /* The joint histogram is returned as the first output (nstate1 x nstate2). */
 plhs[0] = mxCreateDoubleMatrix(nstate1, nstate2, mxREAL);
 double *hab = (double *) mxGetPr(plhs[0]);
 /* hab2d[j] points at column j of the (column-major) output matrix. */
 double **hab2d = (double **) mxMalloc(nstate2 * sizeof(double *));
 for (j=0; j<nstate2;j++)
 {
    hab2d[j] = hab + j * nstate1;
 }
 for (i=0; i<nstate1;i++)
   for (j=0; j<nstate2;j++)
   {
      hab2d[j][i] = 0;
   }
 int n1,n2;
 /* Accumulate the joint counts. Assumption (not recoverable from the original
    snippet): the input vectors hold integer state labels in the range
    1..nstate; samples outside that range are ignored. */
 for (i=0; i<len1;i++)
 {
    n1 = (int)vec1[i] - 1;
    n2 = (int)vec2[i] - 1;
    if (n1 >= 0 && n1 < nstate1 && n2 >= 0 && n2 < nstate2)
       hab2d[n2][n1] += 1;
 }
 for (i=0; i<nstate1;i++)
   for (j=0; j<nstate2;j++)
   {
      hab2d[j][i] /= len1;
   }
 if (nlhs>=2)
 {
    plhs[1] = mxCreateDoubleMatrix(nstate1,1,mxREAL);
    double *ha = (double *)mxGetPr(plhs[1]);
    for (i=0;i<nstate1;i++) {ha[i] = 0;}
    for (i=0;i<nstate1;i++)
      for (j=0;j<nstate2;j++)
      {
         ha[i] += hab2d[j][i];
      }
 }
 if (nlhs>=3)
 {
    /* Marginal of the second variable, by analogy with the ha block above. */
    plhs[2] = mxCreateDoubleMatrix(nstate2,1,mxREAL);
    double *hb = (double *)mxGetPr(plhs[2]);
    for (j=0;j<nstate2;j++) {hb[j] = 0;}
    for (i=0;i<nstate1;i++)
      for (j=0;j<nstate2;j++)
      {
         hb[j] += hab2d[j][i];
      }
 }
 mxFree(hab2d);
 return;
}
|
310933.c | /*------------------------------------------------------------------------------
*
* Copyright (c) 2011-2022, EURid vzw. All rights reserved.
* The YADIFA TM software product is provided under the BSD 3-clause license:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of EURid nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*------------------------------------------------------------------------------
*
*/
/** @defgroup streaming Streams
* @ingroup dnscore
* @brief
*
*
*
* @{
*
*----------------------------------------------------------------------------*/
#if _FILE_OFFSET_BITS != 64
#define _LARGEFILE64_SOURCE
#endif
#include "dnscore/dnscore-config.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <limits.h>
#include "dnscore/file_input_stream.h"
#include "dnscore/fdtools.h"
#include "dnscore/timems.h"
typedef struct file_input_stream file_input_stream;
struct file_input_stream
{
union
{
void* _voidp;
int fd;
} data;
const input_stream_vtbl* vtbl;
};
#if DEBUG_BENCH_FD
static debug_bench_s debug_read;
static bool file_input_stream_debug_bench_register_done = FALSE;
static inline void file_input_stream_debug_bench_register()
{
if(!file_input_stream_debug_bench_register_done)
{
file_input_stream_debug_bench_register_done = TRUE;
debug_bench_register(&debug_read, "read");
}
}
#endif
/*
* Maybe I should not do a "read-fully" here ...
*/
static ya_result
file_input_stream_read(input_stream* stream_, void* buffer_, u32 len)
{
#if DEBUG_BENCH_FD
file_input_stream_debug_bench_register();
u64 bench = debug_bench_start(&debug_read);
#endif
u8 *buffer = (u8*)buffer_;
file_input_stream* stream = (file_input_stream*)stream_;
u8* start = buffer;
while(len > 0)
{
#if defined(SSIZE_MAX) && (SSIZE_MAX < 0xffffffffU)
ssize_t ret = read(stream->data.fd, buffer, MIN(len, SSIZE_MAX));
#else
ssize_t ret = read(stream->data.fd, buffer, len);
#endif
if(ret < 0)
{
int err = errno;
if(err == EINTR)
{
continue;
}
#if DEBUG
if(err == EBADF)
{
fprintf(stderr, "bad file descriptor %i", stream->data.fd);
}
#endif
if(buffer - start > 0)
{
return buffer - start;
}
/* error */
return MAKE_ERRNO_ERROR(err);
}
if(ret == 0) /* EOF */
{
break;
}
buffer += ret;
len -= ret;
}
#if DEBUG_BENCH_FD
debug_bench_stop(&debug_read, bench);
#endif
return buffer - start;
}
static void
file_input_stream_close(input_stream* stream_)
{
file_input_stream* stream = (file_input_stream*)stream_;
assert((stream->data.fd < 0)||(stream->data.fd >2));
if(stream->data.fd != -1)
{
close_ex(stream->data.fd);
stream->data.fd = -1;
}
input_stream_set_void(stream_);
}
static void
file_input_stream_noclose(input_stream* stream_)
{
file_input_stream* stream = (file_input_stream*)stream_;
assert((stream->data.fd < 0)||(stream->data.fd >2));
stream->data.fd = -1;
input_stream_set_void(stream_);
}
static ya_result
file_input_stream_skip(input_stream* stream_, u32 len)
{
file_input_stream* stream = (file_input_stream*)stream_;
if(lseek(stream->data.fd, len, SEEK_CUR) >= 0)
{
return len;
}
return ERRNO_ERROR;
}
static const input_stream_vtbl file_input_stream_vtbl =
{
file_input_stream_read,
file_input_stream_skip,
file_input_stream_close,
"file_input_stream"
};
static const input_stream_vtbl file_input_stream_noclose_vtbl =
{
file_input_stream_read,
file_input_stream_skip,
file_input_stream_noclose,
"file_input_stream-noclose"
};
ya_result
fd_input_stream_attach(input_stream *stream_, int fd)
{
file_input_stream* stream = (file_input_stream*)stream_;
if(fd < 0)
{
return ERRNO_ERROR;
}
stream->data.fd = fd;
stream->vtbl = &file_input_stream_vtbl;
return SUCCESS;
}
ya_result
fd_input_stream_attach_noclose(input_stream *stream_, int fd)
{
file_input_stream* stream = (file_input_stream*)stream_;
if(fd < 0)
{
return ERRNO_ERROR;
}
stream->data.fd = fd;
stream->vtbl = &file_input_stream_noclose_vtbl;
return SUCCESS;
}
void
fd_input_stream_detach(input_stream *stream_)
{
file_input_stream* stream = (file_input_stream*)stream_;
stream->data.fd = -1;
}
ya_result
file_input_stream_open(input_stream *stream_, const char *filename)
{
int fd = open_ex(filename, O_RDONLY|O_CLOEXEC);
#if (_XOPEN_SOURCE >= 600 || _POSIX_C_SOURCE >= 200112L) && !defined(__gnu_hurd__)
if(fd >= 0)
{
posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL);
}
#endif
return fd_input_stream_attach(stream_, fd);
}
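/*
 * Hypothetical usage sketch (the helper name is illustrative): opens a file
 * as an input stream and positions it at an offset, using only functions from
 * this translation unit. A real caller would normally close the stream
 * through the generic input_stream close helper rather than the static
 * function below.
 */
static ya_result
file_input_stream_example_open_at(const char *filename, u64 offset)
{
    input_stream is;
    ya_result ret = file_input_stream_open(&is, filename);

    if(ret < 0)
    {
        return ret;
    }

    ret = fd_input_stream_seek(&is, offset);

    file_input_stream_close(&is);

    return ret;
}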
ya_result
file_input_stream_open_ex(input_stream *stream_, const char *filename, int flags)
{
int fd = open_ex(filename, O_RDONLY|O_CLOEXEC| flags);
#if (_XOPEN_SOURCE >= 600 || _POSIX_C_SOURCE >= 200112L) && !defined(__gnu_hurd__)
if(fd >= 0)
{
posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL);
}
#endif
return fd_input_stream_attach(stream_, fd);
}
ya_result
fd_input_stream_get_filedescriptor(input_stream* stream_)
{
file_input_stream* stream = (file_input_stream*)stream_;
return stream->data.fd ;
}
ya_result
fd_input_stream_seek(input_stream* stream_, u64 offset)
{
if(is_fd_input_stream(stream_))
{
file_input_stream* stream = (file_input_stream*)stream_;
int ret;
#if _FILE_OFFSET_BITS == 64
ret = lseek(stream->data.fd, offset, SEEK_SET);
#else
ret = lseek64(stream->data.fd, offset, SEEK_SET);
#endif
if(ret >= 0)
{
return SUCCESS;
}
else
{
return ERRNO_ERROR;
}
}
else
{
return INCORRECT_RDATA;
}
}
bool
is_fd_input_stream(input_stream* stream_)
{
file_input_stream* stream = (file_input_stream*)stream_;
return (stream != NULL) && (stream->vtbl->read == file_input_stream_read);
}
void
file_input_steam_advise_sequential(input_stream* stream_)
{
file_input_stream* stream = (file_input_stream*)stream_;
#if (_XOPEN_SOURCE >= 600 || _POSIX_C_SOURCE >= 200112L) && !defined(__gnu_hurd__)
if(stream->data.fd >= 0)
{
posix_fadvise(stream->data.fd, 0, 0, POSIX_FADV_SEQUENTIAL);
}
#endif
}
/** @} */
|
291642.c | /** \file dnx/swstate/auto_generated/diagnostic/algo_tunnel_commandline.c
*
* sw state functions definitions
*
* DO NOT EDIT THIS FILE!
* This file is auto-generated.
* Edits to this file will be lost when it is regenerated.
*/
/*
* This license is set out in https://raw.githubusercontent.com/Broadcom-Network-Switching-Software/OpenBCM/master/Legal/LICENSE file.
*
* Copyright 2007-2020 Broadcom Inc. All rights reserved.
*/
#ifdef BSL_LOG_MODULE
#error "BSL_LOG_MODULE redefined"
#endif
#define BSL_LOG_MODULE BSL_LS_SWSTATEDNX_GENERAL
#include <shared/error.h>
#include <shared/bsl.h>
#include <appl/diag/sand/diag_sand_framework.h>
#include <appl/diag/sand/diag_sand_utils.h>
#include <soc/dnxc/swstate/dnxc_sw_state_c_includes.h>
#include <bcm_int/dnx/algo/swstate/auto_generated/diagnostic/algo_tunnel_diagnostic.h>
#include "algo_tunnel_commandline.h"
#include <bcm_int/dnx/algo/swstate/auto_generated/access/algo_tunnel_access.h>
#if defined(DNX_SW_STATE_DIAGNOSTIC)
sh_sand_cmd_t sh_dnx_swstate_algo_tunnel_cmds[] = {
{"tunnel_ipv6_sip_profile", NULL, sh_dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_cmds, NULL, &dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_man},
{"udp_ports_profile", NULL, sh_dnx_swstate_algo_tunnel_udp_ports_profile_cmds, NULL, &dnx_swstate_algo_tunnel_udp_ports_profile_man},
{"dump", sh_dnx_swstate_algo_tunnel_dump_cmd, NULL, dnx_swstate_algo_tunnel_dump_options, &dnx_swstate_algo_tunnel_dump_man},
{"size_get", sh_dnx_swstate_algo_tunnel_size_get_cmd, NULL, dnx_swstate_algo_tunnel_size_get_options, &dnx_swstate_algo_tunnel_size_get_man},
{"diagnostic_operation_counters", sh_dnx_swstate_algo_tunnel_diagnostic_operation_counters_cmd, NULL, dnx_swstate_algo_tunnel_diagnostic_operation_counters_options, &dnx_swstate_algo_tunnel_diagnostic_operation_counters_man},
{NULL}
};
shr_error_e sh_dnx_swstate_algo_tunnel_dump_cmd(int unit, args_t *args, sh_sand_control_t *sand_control)
{
uint8 is_init;
dnx_sw_state_dump_filters_t filters;
SHR_FUNC_INIT_VARS(unit);
SH_SAND_GET_BOOL("nocontent", filters.nocontent);
SH_SAND_GET_STR("typefilter", filters.typefilter);
SH_SAND_GET_STR("namefilter", filters.namefilter);
algo_tunnel_db.is_init(unit, &is_init);
if (is_init) {
SHR_IF_ERR_EXIT(algo_tunnel_db_dump(unit, filters));
if (!filters.nocontent)
{
dnx_sw_state_dump_detach_file(unit);
}
}
SHR_EXIT();
exit:
SHR_FUNC_EXIT;
}
sh_sand_option_t dnx_swstate_algo_tunnel_dump_options[] = {
{"nocontent", SAL_FIELD_TYPE_BOOL, "Decision if to perform content dump or to print the access tree.", "false"},
{"typefilter", SAL_FIELD_TYPE_STR, "Filter for the variable type.", ""},
{"namefilter", SAL_FIELD_TYPE_STR, "Filter for the variable name.", ""},
{NULL}
};
sh_sand_man_t dnx_swstate_algo_tunnel_dump_man = {
"swstate algo_tunnel dump",
"print the module's content",
"swstate algo_tunnel dump",
"swstate algo_tunnel dump nocontent=false typefilter="" namefilter="" ",
};
shr_error_e sh_dnx_swstate_algo_tunnel_size_get_cmd(int unit, args_t *args, sh_sand_control_t *sand_control)
{
uint32 size = 0;
SHR_FUNC_INIT_VARS(unit);
size = dnx_sw_state_info_size_get(algo_tunnel_db_info[unit], algo_tunnel_db_layout_str, ALGO_TUNNEL_DB_INFO_NOF_ENTRIES, "ALGO_TUNNEL_DB~", 0);
dnx_sw_state_size_print(size);
SHR_EXIT();
exit:
SHR_FUNC_EXIT;
}
sh_sand_option_t dnx_swstate_algo_tunnel_size_get_options[] = {
{NULL}
};
sh_sand_man_t dnx_swstate_algo_tunnel_size_get_man = {
"swstate algo_tunnel size_get",
"print the module's size",
"swstate algo_tunnel size_get",
"swstate algo_tunnel size_get",
};
shr_error_e sh_dnx_swstate_algo_tunnel_diagnostic_operation_counters_cmd(int unit, args_t *args, sh_sand_control_t *sand_control)
{
SHR_FUNC_INIT_VARS(unit);
dnx_sw_state_diagnostic_operation_counters_print(algo_tunnel_db_info[unit], algo_tunnel_db_layout_str, ALGO_TUNNEL_DB_INFO_NOF_ENTRIES, "ALGO_TUNNEL_DB~");
SHR_EXIT();
exit:
SHR_FUNC_EXIT;
}
sh_sand_option_t dnx_swstate_algo_tunnel_diagnostic_operation_counters_options[] = {
{NULL}
};
sh_sand_man_t dnx_swstate_algo_tunnel_diagnostic_operation_counters_man = {
"swstate algo_tunnel diagnostic_operation_counters",
"print the module's diagnostic operation counters",
"swstate algo_tunnel diagnostic_operation_counters",
"swstate algo_tunnel diagnostic_operation_counters",
};
sh_sand_man_t dnx_swstate_algo_tunnel_man = {
cmd_dnx_swstate_algo_tunnel_desc,
NULL,
NULL,
NULL,
};
const char cmd_dnx_swstate_algo_tunnel_desc[] = "swstate algo_tunnel commands";
sh_sand_cmd_t sh_dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_cmds[] = {
{"dump", sh_dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_dump_cmd, NULL, dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_dump_options, &dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_dump_man},
{"size_get", sh_dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_size_get_cmd, NULL, dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_size_get_options, &dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_size_get_man},
{"diagnostic_operation_counters", sh_dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_diagnostic_operation_counters_cmd, NULL, dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_diagnostic_operation_counters_options, &dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_diagnostic_operation_counters_man},
{NULL}
};
sh_sand_man_t dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_man = {
cmd_dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_desc,
NULL,
NULL,
NULL,
};
const char cmd_dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_desc[] = "algo_tunnel_db tunnel_ipv6_sip_profile commands";
shr_error_e sh_dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_dump_cmd(int unit, args_t *args, sh_sand_control_t *sand_control)
{
dnx_sw_state_dump_filters_t filters;
SHR_FUNC_INIT_VARS(unit);
SH_SAND_GET_BOOL("nocontent", filters.nocontent);
SH_SAND_GET_STR("typefilter", filters.typefilter);
SH_SAND_GET_STR("namefilter", filters.namefilter);
SHR_IF_ERR_EXIT(algo_tunnel_db_tunnel_ipv6_sip_profile_dump(unit, filters));
if (!filters.nocontent)
{
dnx_sw_state_dump_detach_file(unit);
}
SHR_EXIT();
exit:
SHR_FUNC_EXIT;
}
sh_sand_option_t dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_dump_options[] = {
{"nocontent", SAL_FIELD_TYPE_BOOL, "Decision if to perform content dump or to print the access tree.", "false"},
{"typefilter", SAL_FIELD_TYPE_STR, "Filter for the variable type.", ""},
{"namefilter", SAL_FIELD_TYPE_STR, "Filter for the variable name.", ""},
{NULL}
};
sh_sand_man_t dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_dump_man = {
"swstate dump",
"print the variable value",
"algo_tunnel_db tunnel_ipv6_sip_profile dump []\n",
"algo_tunnel_db tunnel_ipv6_sip_profile dump nocontent=false typefilter="" namefilter="" ",
};
shr_error_e sh_dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_size_get_cmd(int unit, args_t *args, sh_sand_control_t *sand_control)
{
uint32 size = 0;
SHR_FUNC_INIT_VARS(unit);
size = dnx_sw_state_info_size_get(algo_tunnel_db_info[unit], algo_tunnel_db_layout_str, ALGO_TUNNEL_DB_INFO_NOF_ENTRIES, "ALGO_TUNNEL_DB~TUNNEL_IPV6_SIP_PROFILE~", 0);
dnx_sw_state_size_print(size);
SHR_EXIT();
exit:
SHR_FUNC_EXIT;
}
sh_sand_option_t dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_size_get_options[] = {
{NULL}
};
sh_sand_man_t dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_size_get_man = {
"swstate size_get",
"print the variable size",
"algo_tunnel_db tunnel_ipv6_sip_profile size_get",
"algo_tunnel_db tunnel_ipv6_sip_profile size_get",
};
shr_error_e sh_dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_diagnostic_operation_counters_cmd(int unit, args_t *args, sh_sand_control_t *sand_control)
{
SHR_FUNC_INIT_VARS(unit);
dnx_sw_state_diagnostic_operation_counters_print(algo_tunnel_db_info[unit], algo_tunnel_db_layout_str, ALGO_TUNNEL_DB_INFO_NOF_ENTRIES, "ALGO_TUNNEL_DB~TUNNEL_IPV6_SIP_PROFILE~");
SHR_EXIT();
exit:
SHR_FUNC_EXIT;
}
sh_sand_option_t dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_diagnostic_operation_counters_options[] = {
{NULL}
};
sh_sand_man_t dnx_swstate_algo_tunnel_tunnel_ipv6_sip_profile_diagnostic_operation_counters_man = {
"swstate diagnostic_operation_counters",
"print the diagnostic operation counters",
"algo_tunnel_db tunnel_ipv6_sip_profile diagnostic_operation_counters",
"algo_tunnel_db tunnel_ipv6_sip_profile diagnostic_operation_counters",
};
sh_sand_cmd_t sh_dnx_swstate_algo_tunnel_udp_ports_profile_cmds[] = {
{"dump", sh_dnx_swstate_algo_tunnel_udp_ports_profile_dump_cmd, NULL, dnx_swstate_algo_tunnel_udp_ports_profile_dump_options, &dnx_swstate_algo_tunnel_udp_ports_profile_dump_man},
{"size_get", sh_dnx_swstate_algo_tunnel_udp_ports_profile_size_get_cmd, NULL, dnx_swstate_algo_tunnel_udp_ports_profile_size_get_options, &dnx_swstate_algo_tunnel_udp_ports_profile_size_get_man},
{"diagnostic_operation_counters", sh_dnx_swstate_algo_tunnel_udp_ports_profile_diagnostic_operation_counters_cmd, NULL, dnx_swstate_algo_tunnel_udp_ports_profile_diagnostic_operation_counters_options, &dnx_swstate_algo_tunnel_udp_ports_profile_diagnostic_operation_counters_man},
{NULL}
};
sh_sand_man_t dnx_swstate_algo_tunnel_udp_ports_profile_man = {
cmd_dnx_swstate_algo_tunnel_udp_ports_profile_desc,
NULL,
NULL,
NULL,
};
const char cmd_dnx_swstate_algo_tunnel_udp_ports_profile_desc[] = "algo_tunnel_db udp_ports_profile commands";
shr_error_e sh_dnx_swstate_algo_tunnel_udp_ports_profile_dump_cmd(int unit, args_t *args, sh_sand_control_t *sand_control)
{
dnx_sw_state_dump_filters_t filters;
SHR_FUNC_INIT_VARS(unit);
SH_SAND_GET_BOOL("nocontent", filters.nocontent);
SH_SAND_GET_STR("typefilter", filters.typefilter);
SH_SAND_GET_STR("namefilter", filters.namefilter);
SHR_IF_ERR_EXIT(algo_tunnel_db_udp_ports_profile_dump(unit, filters));
if (!filters.nocontent)
{
dnx_sw_state_dump_detach_file(unit);
}
SHR_EXIT();
exit:
SHR_FUNC_EXIT;
}
sh_sand_option_t dnx_swstate_algo_tunnel_udp_ports_profile_dump_options[] = {
{"nocontent", SAL_FIELD_TYPE_BOOL, "Decision if to perform content dump or to print the access tree.", "false"},
{"typefilter", SAL_FIELD_TYPE_STR, "Filter for the variable type.", ""},
{"namefilter", SAL_FIELD_TYPE_STR, "Filter for the variable name.", ""},
{NULL}
};
sh_sand_man_t dnx_swstate_algo_tunnel_udp_ports_profile_dump_man = {
"swstate dump",
"print the variable value",
"algo_tunnel_db udp_ports_profile dump []\n",
"algo_tunnel_db udp_ports_profile dump nocontent=false typefilter="" namefilter="" ",
};
shr_error_e sh_dnx_swstate_algo_tunnel_udp_ports_profile_size_get_cmd(int unit, args_t *args, sh_sand_control_t *sand_control)
{
uint32 size = 0;
SHR_FUNC_INIT_VARS(unit);
size = dnx_sw_state_info_size_get(algo_tunnel_db_info[unit], algo_tunnel_db_layout_str, ALGO_TUNNEL_DB_INFO_NOF_ENTRIES, "ALGO_TUNNEL_DB~UDP_PORTS_PROFILE~", 0);
dnx_sw_state_size_print(size);
SHR_EXIT();
exit:
SHR_FUNC_EXIT;
}
sh_sand_option_t dnx_swstate_algo_tunnel_udp_ports_profile_size_get_options[] = {
{NULL}
};
sh_sand_man_t dnx_swstate_algo_tunnel_udp_ports_profile_size_get_man = {
"swstate size_get",
"print the variable size",
"algo_tunnel_db udp_ports_profile size_get",
"algo_tunnel_db udp_ports_profile size_get",
};
shr_error_e sh_dnx_swstate_algo_tunnel_udp_ports_profile_diagnostic_operation_counters_cmd(int unit, args_t *args, sh_sand_control_t *sand_control)
{
SHR_FUNC_INIT_VARS(unit);
dnx_sw_state_diagnostic_operation_counters_print(algo_tunnel_db_info[unit], algo_tunnel_db_layout_str, ALGO_TUNNEL_DB_INFO_NOF_ENTRIES, "ALGO_TUNNEL_DB~UDP_PORTS_PROFILE~");
SHR_EXIT();
exit:
SHR_FUNC_EXIT;
}
sh_sand_option_t dnx_swstate_algo_tunnel_udp_ports_profile_diagnostic_operation_counters_options[] = {
{NULL}
};
sh_sand_man_t dnx_swstate_algo_tunnel_udp_ports_profile_diagnostic_operation_counters_man = {
"swstate diagnostic_operation_counters",
"print the diagnostic operation counters",
"algo_tunnel_db udp_ports_profile diagnostic_operation_counters",
"algo_tunnel_db udp_ports_profile diagnostic_operation_counters",
};
#endif /* DNX_SW_STATE_DIAGNOSTIC */
#undef BSL_LOG_MODULE
|
718293.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*-------------------------------------------------------------------------
*
* asetdirect.c
* A specialized implementation of the abstract MemoryContext type,
* which allocates directly from malloc() and does not support pfree().
*
* Portions Copyright (c) 2007-2008, Greenplum inc
* Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "utils/memutils.h"
#include "cdb/cdbptrbuf.h" /* CdbPtrBuf */
/* Define this to detail debug alloc information */
/* #define HAVE_ALLOCINFO */
#ifdef CDB_PALLOC_CALLER_ID
#define CDB_MCXT_WHERE(context) (context)->callerFile, (context)->callerLine
#else
#define CDB_MCXT_WHERE(context) __FILE__, __LINE__
#endif
/*
* AsetDirectContext
*/
typedef struct AsetDirectContext
{
MemoryContextData header; /* standard memory-context fields */
Size size_total; /* total size of all allocated areas */
unsigned narea_total; /* number of allocated areas */
CdbPtrBuf areas; /* collection of allocated area ptrs */
/* variably-sized array, must be last */
CdbPtrBuf_Ptr areaspace[10];
} AsetDirectContext;
#define ASETDIRECTCONTEXT_BYTES(nareaspace) \
(MAXALIGN(SIZEOF_VARSTRUCT(nareaspace, AsetDirectContext, areaspace)))
/*
* These functions implement the MemoryContext API for AsetDirect contexts.
*/
static void *AsetDirectAlloc(MemoryContext context, Size size);
static void AsetDirectInit(MemoryContext context);
static void AsetDirectReset(MemoryContext context);
static void AsetDirectDelete(MemoryContext context);
static bool AsetDirectIsEmpty(MemoryContext context);
static void AsetDirectStats(MemoryContext context, uint64 *nBlocks, uint64 *nChunks,
uint64 *currentAvailable, uint64 *allAllocated, uint64 *allFreed, uint64 *maxHeld);
#ifdef MEMORY_CONTEXT_CHECKING
static void AsetDirectCheck(MemoryContext context);
#endif
/*
* This is the virtual function table for AsetDirect contexts.
*/
static MemoryContextMethods AsetDirectMethods = {
AsetDirectAlloc,
NULL, /* pfree */
NULL, /* repalloc */
AsetDirectInit,
AsetDirectReset,
AsetDirectDelete,
NULL, /* GetChunkSpace */
AsetDirectIsEmpty,
AsetDirectStats,
#ifdef MEMORY_CONTEXT_CHECKING
AsetDirectCheck
#endif
};
/* ----------
* Debug macros
* ----------
*/
#ifdef HAVE_ALLOCINFO
#define AllocAllocInfo(_cxt, _chunk) \
fprintf(stderr, "AsetDirectAlloc: %s: %p, %d\n", \
(_cxt)->header.name, (_chunk), (_cxt)->chunksize)
#else
#define AllocAllocInfo(_cxt, _chunk)
#endif
/*
* Public routines
*/
/*
* AsetDirectContextCreate
* Create a new AsetDirect context.
*
* parent: parent context, or NULL if top-level context
* name: name of context (for debugging --- string will be copied)
*/
MemoryContext
AsetDirectContextCreate(MemoryContext parent, const char *name)
{
AsetDirectContext *set;
Size namesize = MAXALIGN(strlen(name) + 1);
Size allocsize;
Size setsize; /* #bytes to request for new context */
int nareaspace; /* num of slots in areaspace array */
/*
* Determine amount of memory to request for the AsetDirectContext struct.
*
* Assume the total allocation will be rounded up to a power of 2, and
* will include the AsetDirectContext with variably sized 'areaspace' array
* and the context 'name' string. Size the 'areaspace' array to use up any
* extra space in the expected allocation.
*/
allocsize = 1 << ceil_log2_Size(MAXALIGN(sizeof(AsetDirectContext)) + namesize);
nareaspace = VARELEMENTS_TO_FIT(allocsize - namesize, AsetDirectContext, areaspace);
setsize = ASETDIRECTCONTEXT_BYTES(nareaspace);
/*
* Create the new memory context and hook up to parent context.
*/
set = (AsetDirectContext *)MemoryContextCreate(T_AsetDirectContext,
setsize,
&AsetDirectMethods,
parent,
name);
/*
* Initialize empty collection of ptrs to allocated areas.
*/
CdbPtrBuf_Init(&set->areas,
set->areaspace,
nareaspace,
50, /* num_cells_expand */
set->header.parent);
return (MemoryContext)set;
} /* AsetDirectContextCreate */
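/*
 * Illustrative usage sketch (not part of this file; names are the standard
 * PostgreSQL MemoryContext API). A caller might do something like:
 *
 *   MemoryContext cxt = AsetDirectContextCreate(TopMemoryContext, "scratch");
 *   char *buf = MemoryContextAlloc(cxt, 1024);   // served by AsetDirectAlloc
 *   ...                                          // note: pfree(buf) is NOT supported here
 *   MemoryContextDelete(cxt);                    // releases every area at once
 *
 * This is only a sketch of the intended calling pattern; the choice of parent
 * context and allocation entry points depends on the surrounding server code.
 */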
/*
* AsetDirectInit
* Context-type-specific initialization routine.
*
* This is called by MemoryContextCreate() after setting up the
* generic MemoryContext fields and before linking the new context
* into the context tree. We must do whatever is needed to make the
* new context minimally valid for deletion. We must *not* risk
* failure --- thus, for example, allocating more memory is not cool.
* (AsetDirectContextCreate can allocate memory when it gets control
* back, however.)
*/
static void
AsetDirectInit(MemoryContext context)
{
/*
* Since MemoryContextCreate already zeroed the context node, we don't
* have to do anything here: it's already OK.
*/
} /* AsetDirectInit */
/*
* AsetDirectReset
* Frees all memory which is allocated in the given set.
*/
static void
AsetDirectReset(MemoryContext context)
{
AsetDirectContext *set = (AsetDirectContext *)context;
CdbPtrBuf_Iterator it;
CdbPtrBuf_Ptr *pp;
CdbPtrBuf_Ptr p;
Assert(set && IsA(set, AsetDirectContext));
Assert(CdbPtrBuf_IsOk(&set->areas));
/* Free allocated areas. */
CdbPtrBuf_Iterator_Init(&it, &set->areas);
while (NULL != (pp = CdbPtrBuf_Iterator_NextCell(&it)))
{
p = *pp;
*pp = NULL;
if (p)
{
#ifdef CLOBBER_FREED_MEMORY
/* Wipe first few bytes of freed memory for debugging purposes */
memset(p, 0x7F, MAXALIGN(1)); /* don't know actual size of area */
#endif
free(p);
}
}
/* Empty the 'areas' collection. */
CdbPtrBuf_Reset(&set->areas);
/* Update statistics. */
MemoryContextNoteFree(&set->header, set->size_total);
set->narea_total = 0;
set->size_total = 0;
} /* AsetDirectReset */
/*
* AsetDirectDelete
* Frees all memory which is allocated in the given set,
* in preparation for deletion of the set.
*
* Unlike AsetDirectReset, this *must* free all resources of the set.
* But note we are not responsible for deleting the context node itself.
*/
static void
AsetDirectDelete(MemoryContext context)
{
AsetDirectReset(context);
} /* AsetDirectDelete */
/*
* AsetDirectAlloc
* Returns pointer to allocated memory of given size; memory is added
* to the set.
*/
static void *
AsetDirectAlloc(MemoryContext context, Size size)
{
AsetDirectContext *set = (AsetDirectContext *)context;
CdbPtrBuf_Ptr *pp;
Assert(set && IsA(set, AsetDirectContext));
if (size < MAXALIGN(1))
size = MAXALIGN(1);
/* Obtain a slot in 'areas' collection to point to the new allocation. */
pp = CdbPtrBuf_Append(&set->areas, NULL);
/* Allocate the memory. */
*pp = malloc(size);
if (!*pp)
MemoryContextError(ERRCODE_OUT_OF_MEMORY,
&set->header, CDB_MCXT_WHERE(&set->header),
"Out of memory. Failed on request of size %lu bytes.",
(unsigned long)size);
/* Update statistics. */
set->size_total += size;
set->narea_total++;
MemoryContextNoteAlloc(&set->header, size);
AllocAllocInfo(set, chunk);
return *pp;
} /* AsetDirectAlloc */
/*
* AsetDirectIsEmpty
* Is an allocset empty of any allocated space?
*/
static bool
AsetDirectIsEmpty(MemoryContext context)
{
AsetDirectContext *set = (AsetDirectContext *)context;
return set->narea_total == 0;
} /* AsetDirectIsEmpty */
/*
* AsetDirectStats
* Returns stats about memory consumption of an AsetDirectContext.
*
* Input parameters:
* context: the context of interest
*
* Output parameters:
* nBlocks: number of blocks in the context
* nChunks: number of chunks in the context
*
* currentAvailable: free space across all blocks
*
* allAllocated: total bytes allocated during lifetime (including
* blocks that were dropped later on, e.g., freeing a large chunk
* in an exclusive block would drop the block)
*
* allFreed: total bytes that were freed during lifetime
* maxHeld: maximum bytes held during lifetime
*/
static void
AsetDirectStats(MemoryContext context, uint64 *nBlocks, uint64 *nChunks,
uint64 *currentAvailable, uint64 *allAllocated, uint64 *allFreed, uint64 *maxHeld)
{
AsetDirectContext *set = (AsetDirectContext *)context;
Assert(set && IsA(set, AsetDirectContext));
*nBlocks = 0;
*nChunks = set->narea_total;
*currentAvailable = 0;
*allAllocated = set->header.allBytesAlloc;
*allFreed = set->header.allBytesFreed;
*maxHeld = set->header.maxBytesHeld;
}
#ifdef MEMORY_CONTEXT_CHECKING
/*
* AsetDirectCheck
* Walk through chunks and check consistency of memory.
*
* NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
* find yourself in an infinite loop when trouble occurs, because this
* routine will be entered again when elog cleanup tries to release memory!
*/
static void
AsetDirectCheck(MemoryContext context)
{
AsetDirectContext *set = (AsetDirectContext *)context;
const char *name = set->header.name;
if (!IsA(set, AsetDirectContext))
elog(WARNING, "problem in alloc set %s: type=%d",
name, set->header.type);
else if (!CdbPtrBuf_IsOk(&set->areas))
elog(WARNING, "problem in alloc set %s: CdbPtrBuf error",
name);
else if (set->narea_total > CdbPtrBuf_Length(&set->areas))
elog(WARNING, "problem in alloc set %s: narea=%u",
name, set->narea_total);
} /* AsetDirectCheck */
#endif /* MEMORY_CONTEXT_CHECKING */
|
219946.c | /****************************/
/* THIS IS OPEN SOURCE CODE */
/****************************/
/**
* @file linux-lustre.c
* @author Haihang You (in collaboration with Michael Kluge, TU Dresden)
* [email protected]
* @author Heike Jagode
* [email protected]
* @author Vince Weaver
* [email protected]
* @brief A component for the Lustre filesystem.
*/
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <dirent.h>
#include <stdint.h>
#include <ctype.h>
#include "papi.h"
#include "papi_internal.h"
#include "papi_vector.h"
#include "papi_memory.h"
/** describes a single counter with its properties */
typedef struct counter_info_struct
{
int idx;
char *name;
char *description;
char *unit;
unsigned long long value;
} counter_info;
typedef struct
{
int count;
char **data;
} string_list;
/** describes the infos collected from a mounted Lustre filesystem */
typedef struct lustre_fs_struct
{
char *proc_file;
char *proc_file_readahead;
counter_info *write_cntr;
counter_info *read_cntr;
counter_info *readahead_cntr;
struct lustre_fs_struct *next;
} lustre_fs;
#define LUSTRE_MAX_COUNTERS 100
#define LUSTRE_MAX_COUNTER_TERMS LUSTRE_MAX_COUNTERS
typedef counter_info LUSTRE_register_t;
typedef counter_info LUSTRE_native_event_entry_t;
typedef counter_info LUSTRE_reg_alloc_t;
typedef struct LUSTRE_control_state
{
long long start_count[LUSTRE_MAX_COUNTERS];
long long current_count[LUSTRE_MAX_COUNTERS];
long long difference[LUSTRE_MAX_COUNTERS];
int which_counter[LUSTRE_MAX_COUNTERS];
int num_events;
} LUSTRE_control_state_t;
typedef struct LUSTRE_context
{
LUSTRE_control_state_t state;
} LUSTRE_context_t;
/* Default path to lustre stats */
#ifdef FAKE_LUSTRE
const char proc_base_path[] = "./components/lustre/fake_proc/fs/lustre/";
#else
const char proc_base_path[] = "/proc/fs/lustre/";
#endif
static counter_info **lustre_native_table = NULL;
static int num_events = 0;
static int table_size = 32;
/* mount Lustre fs are kept in a list */
static lustre_fs *root_lustre_fs = NULL;
papi_vector_t _lustre_vector;
/******************************************************************************
******** BEGIN FUNCTIONS USED INTERNALLY SPECIFIC TO THIS COMPONENT ********
*****************************************************************************/
static int resize_native_table() {
SUBDBG("ENTER:\n");
counter_info** new_table;
int new_size = table_size*2;
new_table = (counter_info**)papi_calloc(new_size, sizeof(counter_info*));
if (NULL==new_table) {
SUBDBG("EXIT: PAPI_ENOMEM\n");
return PAPI_ENOMEM;
}
if ( lustre_native_table) {
memcpy(new_table, lustre_native_table, sizeof(counter_info*) * table_size );
papi_free(lustre_native_table);
}
lustre_native_table = new_table;
table_size*=2;
SUBDBG("EXIT: PAPI_OK\n");
return PAPI_OK;
}
/**
* add a counter to the list of available counters
* @param name the short name of the counter
* @param desc a longer description
* @param unit the unit for this counter
*/
static counter_info *
addCounter( const char *name, const char *desc, const char *unit )
{
SUBDBG("ENTER: name: %s, desc: %s, unit: %s\n", name, desc, unit);
counter_info *cntr;
if ( num_events >= table_size )
if (PAPI_OK != resize_native_table()) {
SUBDBG("EXIT: can not resize native table\n" );
return NULL;
}
cntr = malloc( sizeof ( counter_info ) );
if ( cntr == NULL ) {
SUBDBG("EXIT: can not allocate memory for new counter\n" );
return NULL;
}
cntr->idx=num_events;
cntr->name = strdup( name );
cntr->description = strdup( desc );
cntr->unit = strdup( unit );
cntr->value = 0;
lustre_native_table[num_events]=cntr;
num_events++;
SUBDBG("EXIT: cntr: %p\n", cntr);
return cntr;
}
/**
* adds a Lustre fs to the fs list and creates the counters for it
* @param name fs name
* @param procpath_general path to the 'stats' file in /proc/fs/lustre/... for this fs
* @param procpath_readahead path to the 'readahead' file in /proc/fs/lustre/... for this fs
*/
static int
addLustreFS( const char *name,
const char *procpath_general,
const char *procpath_readahead )
{
lustre_fs *fs, *last;
char counter_name[512];
FILE *fff;
SUBDBG("Adding lustre fs\n");
fs = malloc( sizeof ( lustre_fs ) );
if ( fs == NULL ) {
SUBDBG("can not allocate memory for new Lustre FS description\n" );
return PAPI_ENOMEM;
}
fs->proc_file=strdup(procpath_general);
fff = fopen( procpath_general, "r" );
if ( fff == NULL ) {
SUBDBG("can not open '%s'\n", procpath_general );
free(fs);
return PAPI_ESYS;
}
fclose(fff);
fs->proc_file_readahead = strdup(procpath_readahead);
fff = fopen( procpath_readahead, "r" );
if ( fff == NULL ) {
SUBDBG("can not open '%s'\n", procpath_readahead );
free(fs);
return PAPI_ESYS;
}
fclose(fff);
sprintf( counter_name, "%s_llread", name );
if (NULL == (fs->read_cntr = addCounter( counter_name,
"bytes read on this lustre client",
"bytes" ))) {
free(fs);
return PAPI_ENOMEM;
}
sprintf( counter_name, "%s_llwrite", name );
if ( NULL == (fs->write_cntr = addCounter( counter_name,
"bytes written on this lustre client",
"bytes" ))) {
free(fs->read_cntr);
free(fs);
return PAPI_ENOMEM;
}
sprintf( counter_name, "%s_wrong_readahead", name );
if ( NULL == (fs->readahead_cntr = addCounter( counter_name,
"bytes read but discarded due to readahead",
"bytes" ))) {
free(fs->read_cntr);
free(fs->write_cntr);
free(fs);
return PAPI_ENOMEM;
}
fs->next = NULL;
/* Insert into the linked list */
/* Does this need locking? */
if ( root_lustre_fs == NULL ) {
root_lustre_fs = fs;
} else {
last = root_lustre_fs;
while ( last->next != NULL )
last = last->next;
last->next = fs;
}
return PAPI_OK;
}
/**
* goes through proc and tries to discover all mounted Lustre fs
*/
static int
init_lustre_counters( void )
{
SUBDBG("ENTER:\n");
char lustre_dir[PATH_MAX];
char path[PATH_MAX];
char path_readahead[PATH_MAX],path_stats[PATH_MAX];
char *ptr;
char fs_name[100];
int found_lustre_fs = 0;
int idx = 0;
int tmp_fd;
DIR *proc_dir;
struct dirent *entry;
sprintf(lustre_dir,"%s/llite",proc_base_path);
proc_dir = opendir( lustre_dir );
if ( proc_dir == NULL ) {
SUBDBG("EXIT: PAPI_ESYS (Cannot open %s)\n",lustre_dir);
return PAPI_ESYS;
}
while ( (entry = readdir( proc_dir )) != NULL ) {
memset( path, 0, PATH_MAX );
snprintf( path, PATH_MAX - 1, "%s/%s/stats", lustre_dir,
entry->d_name );
SUBDBG("checking for file %s\n", path);
if ( ( tmp_fd = open( path, O_RDONLY ) ) == -1 ) {
SUBDBG("Path: %s, can not be opened.\n", path);
continue;
}
close( tmp_fd );
/* erase \r and \n at the end of path */
/* why is this necessary? */
idx = strlen( path );
idx--;
while ( path[idx] == '\r' || path[idx] == '\n' )
path[idx--] = 0;
/* Lustre paths are of type server-UUID */
idx = 0;
ptr = strstr(path,"llite/");
if (ptr == NULL) {
SUBDBG("Path: %s, missing llite directory, performance event not created.\n", path);
continue;
}
ptr += 6; /* advance past "llite/" to the filesystem name */
strncpy(fs_name, ptr, sizeof(fs_name)-1);
fs_name[sizeof(fs_name)-1] = '\0';
SUBDBG("found Lustre FS: %s\n", fs_name);
snprintf( path_stats, PATH_MAX - 1,
"%s/%s/stats",
lustre_dir,
entry->d_name );
SUBDBG("Found file %s\n", path_stats);
snprintf( path_readahead, PATH_MAX - 1,
"%s/%s/read_ahead_stats",
lustre_dir,
entry->d_name );
SUBDBG("Now checking for file %s\n", path_readahead);
strcpy( ptr, "read_ahead_stats" );
addLustreFS( fs_name, path_stats, path_readahead );
found_lustre_fs++;
}
closedir( proc_dir );
if (found_lustre_fs == 0) {
SUBDBG("EXIT: PAPI_ESYS (No lustre file systems found)\n");
return PAPI_ESYS;
}
SUBDBG("EXIT: PAPI_OK\n");
return PAPI_OK;
}
/**
* updates all Lustre related counters
*/
static void
read_lustre_counter( )
{
lustre_fs *fs = root_lustre_fs;
FILE *fff;
char buffer[BUFSIZ];
while ( fs != NULL ) {
/* read values from stats file */
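/* Each data line in the llite 'stats' file is assumed to look roughly like
 * "write_bytes <count> samples [bytes] <min> <max> <sum>"; the sscanf calls
 * for write_bytes/read_bytes below skip the first six fields and keep the
 * final cumulative sum. (Illustrative description only; the exact layout
 * depends on the Lustre version.)
 */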
fff=fopen(fs->proc_file,"r" );
if (fff != NULL) {
while(1) {
if (fgets(buffer,BUFSIZ,fff)==NULL) break;
if (strstr( buffer, "write_bytes" )) {
sscanf(buffer,"%*s %*d %*s %*s %*d %*d %llu",&fs->write_cntr->value);
SUBDBG("Read %llu write_bytes\n",fs->write_cntr->value);
}
if (strstr( buffer, "read_bytes" )) {
sscanf(buffer,"%*s %*d %*s %*s %*d %*d %llu",&fs->read_cntr->value);
SUBDBG("Read %llu read_bytes\n",fs->read_cntr->value);
}
}
fclose(fff);
}
fff=fopen(fs->proc_file_readahead,"r");
if (fff != NULL) {
while(1) {
if (fgets(buffer,BUFSIZ,fff)==NULL) break;
if (strstr( buffer, "read but discarded")) {
sscanf(buffer,"%*s %*s %*s %llu",&fs->readahead_cntr->value);
SUBDBG("Read %llu discared\n",fs->readahead_cntr->value);
break;
}
}
fclose(fff);
}
fs = fs->next;
}
}
/**
* frees all allocated resources
*/
static void
host_finalize( void )
{
int i;
lustre_fs *fs, *next_fs;
counter_info *cntr;
for(i=0;i<num_events;i++) {
cntr=lustre_native_table[i];
if ( cntr != NULL ) {
free( cntr->name );
free( cntr->description );
free( cntr->unit );
free( cntr );
}
lustre_native_table[i]=NULL;
}
fs = root_lustre_fs;
while ( fs != NULL ) {
next_fs = fs->next;
free(fs->proc_file);
free(fs->proc_file_readahead);
free( fs );
fs = next_fs;
}
root_lustre_fs = NULL;
}
/*****************************************************************************
******************* BEGIN PAPI's COMPONENT REQUIRED FUNCTIONS *************
*****************************************************************************/
/*
* Component setup and shutdown
*/
static int
_lustre_init_component( int cidx )
{
SUBDBG("ENTER:\n");
int ret = PAPI_OK;
resize_native_table();
ret=init_lustre_counters();
if (ret!=PAPI_OK) {
strncpy(_lustre_vector.cmp_info.disabled_reason,
"No lustre filesystems found",PAPI_MAX_STR_LEN);
SUBDBG("EXIT: ret: %d\n", ret);
return ret;
}
_lustre_vector.cmp_info.num_native_events=num_events;
_lustre_vector.cmp_info.CmpIdx = cidx;
SUBDBG("EXIT: ret: %d\n", ret);
return ret;
}
/*
* This is called whenever a thread is initialized
*/
static int
_lustre_init_thread( hwd_context_t * ctx )
{
(void) ctx;
return PAPI_OK;
}
/*
*
*/
static int
_lustre_shutdown_component( void )
{
SUBDBG("ENTER:\n");
host_finalize( );
papi_free( lustre_native_table );
lustre_native_table = NULL;
num_events = 0;
table_size = 32;
SUBDBG("EXIT:\n");
return PAPI_OK;
}
/*
*
*/
static int
_lustre_shutdown_thread( hwd_context_t * ctx )
{
( void ) ctx;
return PAPI_OK;
}
/*
* Control of counters (Reading/Writing/Starting/Stopping/Setup) functions
*/
static int
_lustre_init_control_state( hwd_control_state_t *ctl )
{
LUSTRE_control_state_t *lustre_ctl = (LUSTRE_control_state_t *)ctl;
memset(lustre_ctl->start_count,0,sizeof(long long)*LUSTRE_MAX_COUNTERS);
memset(lustre_ctl->current_count,0,sizeof(long long)*LUSTRE_MAX_COUNTERS);
return PAPI_OK;
}
/*
*
*/
static int
_lustre_update_control_state( hwd_control_state_t *ctl,
NativeInfo_t *native,
int count,
hwd_context_t *ctx )
{
SUBDBG("ENTER: ctl: %p, native: %p, count: %d, ctx: %p\n", ctl, native, count, ctx);
LUSTRE_control_state_t *lustre_ctl = (LUSTRE_control_state_t *)ctl;
( void ) ctx;
int i, index;
for ( i = 0; i < count; i++ ) {
index = native[i].ni_event;
lustre_ctl->which_counter[i]=index;
native[i].ni_position = i;
}
lustre_ctl->num_events=count;
SUBDBG("EXIT: PAPI_OK\n");
return PAPI_OK;
}
/*
*
*/
static int
_lustre_start( hwd_context_t *ctx, hwd_control_state_t *ctl )
{
( void ) ctx;
LUSTRE_control_state_t *lustre_ctl = (LUSTRE_control_state_t *)ctl;
int i;
read_lustre_counter( );
for(i=0;i<lustre_ctl->num_events;i++) {
lustre_ctl->current_count[i]=
lustre_native_table[lustre_ctl->which_counter[i]]->value;
}
memcpy( lustre_ctl->start_count,
lustre_ctl->current_count,
LUSTRE_MAX_COUNTERS * sizeof ( long long ) );
return PAPI_OK;
}
/*
*
*/
static int
_lustre_stop( hwd_context_t *ctx, hwd_control_state_t *ctl )
{
(void) ctx;
LUSTRE_control_state_t *lustre_ctl = (LUSTRE_control_state_t *)ctl;
int i;
read_lustre_counter( );
for(i=0;i<lustre_ctl->num_events;i++) {
lustre_ctl->current_count[i]=
lustre_native_table[lustre_ctl->which_counter[i]]->value;
}
return PAPI_OK;
}
/*
*
*/
static int
_lustre_read( hwd_context_t *ctx, hwd_control_state_t *ctl,
long long **events, int flags )
{
(void) ctx;
( void ) flags;
LUSTRE_control_state_t *lustre_ctl = (LUSTRE_control_state_t *)ctl;
int i;
read_lustre_counter( );
for(i=0;i<lustre_ctl->num_events;i++) {
lustre_ctl->current_count[i]=
lustre_native_table[lustre_ctl->which_counter[i]]->value;
lustre_ctl->difference[i]=lustre_ctl->current_count[i]-
lustre_ctl->start_count[i];
}
*events = lustre_ctl->difference;
return PAPI_OK;
}
/*
*
*/
static int
_lustre_reset( hwd_context_t * ctx, hwd_control_state_t * ctrl )
{
/* re-initializes counter_start values to current */
_lustre_start(ctx,ctrl);
return PAPI_OK;
}
/*
* Unused lustre write function
*/
/* static int */
/* _lustre_write( hwd_context_t * ctx, hwd_control_state_t * ctrl, long long *from ) */
/* { */
/* ( void ) ctx; */
/* ( void ) ctrl; */
/* ( void ) from; */
/* return PAPI_OK; */
/* } */
/*
* Functions for setting up various options
*/
/* This function sets various options in the component
* The valid codes being passed in are PAPI_SET_DEFDOM,
* PAPI_SET_DOMAIN, PAPI_SETDEFGRN, PAPI_SET_GRANUL * and PAPI_SET_INHERIT
*/
static int
_lustre_ctl( hwd_context_t * ctx, int code, _papi_int_option_t * option )
{
( void ) ctx;
( void ) code;
( void ) option;
return PAPI_OK;
}
/*
* This function can be used to set the event set level domains
* where the events should be counted. In particular: PAPI_DOM_USER,
* PAPI_DOM_KERNEL PAPI_DOM_OTHER. But the lustre component does not
* provide a field in its control_state (LUSTRE_control_state_t) to
* save this information. It would also need some way to control when
* the counts get updated in order to support domain filters for
* event counting.
*
* So we just ignore this call.
*/
static int
_lustre_set_domain( hwd_control_state_t * cntrl, int domain )
{
( void ) cntrl;
( void ) domain;
SUBDBG("ENTER: \n");
// this component does not allow limiting which domains will increment event counts
SUBDBG("EXIT: PAPI_OK\n");
return PAPI_OK;
}
/*
*
*/
static int
_lustre_ntv_code_to_name( unsigned int EventCode, char *name, int len )
{
SUBDBG("ENTER: EventCode: %#x, name: %p, len: %d\n", EventCode, name, len);
int event=EventCode;
if (event >=0 && event < num_events) {
strncpy( name, lustre_native_table[event]->name, len-1 );
name[len-1] = '\0';
SUBDBG("EXIT: event name: %s\n", name);
return PAPI_OK;
}
SUBDBG("EXIT: PAPI_ENOEVNT\n");
return PAPI_ENOEVNT;
}
/*
*
*/
static int
_lustre_ntv_code_to_descr( unsigned int EventCode, char *name, int len )
{
SUBDBG("ENTER: EventCode: %#x, name: %p, len: %d\n", EventCode, name, len);
int event=EventCode;
if (event >=0 && event < num_events) {
strncpy( name, lustre_native_table[event]->description, len-1 );
name[len-1] = '\0';
SUBDBG("EXIT: description: %s\n", name);
return PAPI_OK;
}
SUBDBG("EXIT: PAPI_ENOEVNT\n");
return PAPI_ENOEVNT;
}
/*
*
*/
static int
_lustre_ntv_enum_events( unsigned int *EventCode, int modifier )
{
SUBDBG("ENTER: EventCode: %p, modifier: %d\n", EventCode, modifier);
if ( modifier == PAPI_ENUM_FIRST ) {
if (num_events==0) return PAPI_ENOEVNT;
*EventCode = 0;
SUBDBG("EXIT: *EventCode: %#x\n", *EventCode);
return PAPI_OK;
}
if ( modifier == PAPI_ENUM_EVENTS ) {
int index = *EventCode;
if ((index+1 < num_events) && lustre_native_table[index + 1]) {
*EventCode = *EventCode + 1;
SUBDBG("EXIT: *EventCode: %#x\n", *EventCode);
return PAPI_OK;
} else {
SUBDBG("EXIT: PAPI_ENOEVNT\n");
return PAPI_ENOEVNT;
}
}
SUBDBG("EXIT: PAPI_EINVAL\n");
return PAPI_EINVAL;
}
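/*
 * Illustrative sketch of how a PAPI user would walk these native events
 * (standard PAPI user-level API, not defined in this file):
 *
 *   int code = PAPI_NATIVE_MASK;
 *   int ret = PAPI_enum_cmp_event(&code, PAPI_ENUM_FIRST, cidx);
 *   while (ret == PAPI_OK) {
 *       char name[PAPI_MAX_STR_LEN];
 *       PAPI_event_code_to_name(code, name);
 *       ret = PAPI_enum_cmp_event(&code, PAPI_ENUM_EVENTS, cidx);
 *   }
 *
 * 'cidx' stands for this component's index as returned by the framework;
 * treat the snippet as a sketch rather than exact framework behavior.
 */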
/*
*
*/
papi_vector_t _lustre_vector = {
.cmp_info = {
/* component information (unspecified values initialized to 0) */
.name = "lustre",
.short_name = "lustre",
.version = "1.9",
.description = "Lustre filesystem statistics",
.num_mpx_cntrs = LUSTRE_MAX_COUNTERS,
.num_cntrs = LUSTRE_MAX_COUNTERS,
.default_domain = PAPI_DOM_ALL,
.default_granularity = PAPI_GRN_SYS,
.available_granularities = PAPI_GRN_SYS,
.hardware_intr_sig = PAPI_INT_SIGNAL,
/* component specific cmp_info initializations */
.fast_real_timer = 0,
.fast_virtual_timer = 0,
.attach = 0,
.attach_must_ptrace = 0,
.available_domains = PAPI_DOM_ALL,
},
/* sizes of framework-opaque component-private structures */
.size = {
.context = sizeof ( LUSTRE_context_t ),
.control_state = sizeof ( LUSTRE_control_state_t ),
.reg_value = sizeof ( LUSTRE_register_t ),
.reg_alloc = sizeof ( LUSTRE_reg_alloc_t ),
},
/* function pointers in this component */
.init_thread = _lustre_init_thread,
.init_component = _lustre_init_component,
.init_control_state = _lustre_init_control_state,
.start = _lustre_start,
.stop = _lustre_stop,
.read = _lustre_read,
.shutdown_thread = _lustre_shutdown_thread,
.shutdown_component = _lustre_shutdown_component,
.ctl = _lustre_ctl,
.update_control_state = _lustre_update_control_state,
.set_domain = _lustre_set_domain,
.reset = _lustre_reset,
.ntv_enum_events = _lustre_ntv_enum_events,
.ntv_code_to_name = _lustre_ntv_code_to_name,
.ntv_code_to_descr = _lustre_ntv_code_to_descr,
};
|
378371.c | #include "params.h"
#include "reduce.h"
#include "ntt.h"
#ifndef BENCH_WARM
#define BENCH_WARM 100
#endif
#ifndef BENCH_LOOPS
#define BENCH_LOOPS 1
#endif
#ifndef BENCH_CYCLES
#define BENCH_CYCLES 100000
#endif
#include <inttypes.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <assert.h>
#include <string.h>
typedef struct
{
uint64_t *list;
size_t size;
size_t i;
uint64_t p25;
uint64_t median;
uint64_t p75;
}bench;
static inline uint64_t bench_get_time(void){
unsigned int h, l;
__asm__ __volatile__ ("rdtsc" : "=a" (l), "=d" (h));
return ((uint64_t) h << 32) | l;
}
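/* Note: plain rdtsc does not wait for earlier instructions to finish, so
 * out-of-order execution can blur very short measurements. A hedged
 * alternative (sketch, assuming the CPU supports rdtscp) would be:
 *
 *   __asm__ __volatile__ ("rdtscp" : "=a" (l), "=d" (h) : : "ecx");
 *
 * which waits for prior instructions to complete and clobbers ECX with the
 * processor ID.
 */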
static int bench_u64_cmp(const void *left, const void *right)
{
if (*(const uint64_t *)left > *(const uint64_t *)right) return 1;
if (*(const uint64_t *)left < *(const uint64_t *)right) return -1;
return 0;
}
static int bench_init(bench **m, size_t n)
{
assert(m);
*m = (bench*) calloc(1, sizeof(bench));
assert(*m);
(*m)->list = (uint64_t*) calloc(n, sizeof(uint64_t));
assert((*m)->list);
(*m)->size = n;
(*m)->i = 0;
return 0;
}
static int bench_destroy(bench **m)
{
assert(m && *m);
free((*m)->list);
free((*m));
*m = NULL;
return 0;
}
static int bench_add(bench *m, uint64_t start, uint64_t end)
{
assert(m);
if(m->i >= m->size)
return -1;
m->list[m->i] = (end - start);
m->i++;
return 0;
}
static int bench_set_median(bench *m)
{
assert(m && m->list);
qsort(m->list, m->i, sizeof(uint64_t), bench_u64_cmp);
m->p25 = m->list[m->i >> 2];
m->median = m->list[m->i >> 1];
m->p75 = m->list[m->i - (m->i >> 2)];
return 0;
}
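/* Worked example: with m->i == 100 sorted samples, the indices used above are
 * list[25] for p25, list[50] for the median and list[100 - 25] == list[75] for
 * p75 (simple rank-based percentiles, no interpolation). */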
static int bench_reset(bench *m)
{
assert(m);
memset(m->list, 0, sizeof(uint64_t) * m->size);
m->i = 0;
m->p25 = 0;
m->median = 0;
m->p75 = 0;
return 0;
}
static int bench_print_header(FILE *f)
{
fprintf(f, "run,percentil25,percentil75,median\n");
fflush(f);
return 0;
}
static int bench_print(bench *m, FILE *f, size_t n)
{
fprintf(f, "%zu,%" PRIu64 ",%" PRIu64 ",%" PRIu64 "\n", n, m->p25, m->p75, m->median);
fflush(f);
return 0;
}
#define KYBER_N 256
#define KYBER_Q 7681
const uint64_t zetas_mil[KYBER_N] = {
990, 7427, 2634, 6819, 578, 3281, 2143, 1095, 484, 6362, 3336, 5382, 6086, 3823, 877, 5656,
3583, 7010, 6414, 263, 1285, 291, 7143, 7338, 1581, 5134, 5184, 5932, 4042, 5775, 2468, 3,
606, 729, 5383, 962, 3240, 7548, 5129, 7653, 5929, 4965, 2461, 641, 1584, 2666, 1142, 157,
7407, 5222, 5602, 5142, 6140, 5485, 4931, 1559, 2085, 5284, 2056, 3538, 7269, 3535, 7190, 1957,
3465, 6792, 1538, 4664, 2023, 7643, 3660, 7673, 1694, 6905, 3995, 3475, 5939, 1859, 6910, 4434,
1019, 1492, 7087, 4761, 657, 4859, 5798, 2640, 1693, 2607, 2782, 5400, 6466, 1010, 957, 3851,
2121, 6392, 7319, 3367, 3659, 3375, 6430, 7583, 1549, 5856, 4773, 6084, 5544, 1650, 3997, 4390,
6722, 2915, 4245, 2635, 6128, 7676, 5737, 1616, 3457, 3132, 7196, 4702, 6239, 851, 2122, 3009,
7613, 7295, 2007, 323, 5112, 3716, 2289, 6442, 6965, 2713, 7126, 3401, 963, 6596, 607, 5027,
7078, 4484, 5937, 944, 2860, 2680, 5049, 1777, 5850, 3387, 6487, 6777, 4812, 4724, 7077, 186,
6848, 6793, 3463, 5877, 1174, 7116, 3077, 5945, 6591, 590, 6643, 1337, 6036, 3991, 1675, 2053,
6055, 1162, 1679, 3883, 4311, 2106, 6163, 4486, 6374, 5006, 4576, 4288, 5180, 4102, 282, 6119,
7443, 6330, 3184, 4971, 2530, 5325, 4171, 7185, 5175, 5655, 1898, 382, 7211, 43, 5965, 6073,
1730, 332, 1577, 3304, 2329, 1699, 6150, 2379, 5113, 333, 3502, 4517, 1480, 1172, 5567, 651,
925, 4573, 599, 1367, 4109, 1863, 6929, 1605, 3866, 2065, 4048, 839, 5764, 2447, 2022, 3345,
1990, 4067, 2036, 2069, 3567, 7371, 2368, 339, 6947, 2159, 654, 7327, 2768, 6676, 987, 2214};
extern void ntt_mil(uint64_t *p, const uint64_t *zetas);
uint16_t a[256] __attribute__((aligned(32)));
uint64_t b[256];
int main()
{
size_t nread;
FILE* urandom;
/* Initialize a and b with random coefficients */
urandom = fopen("/dev/urandom", "r");
if (urandom == NULL) {
fprintf(stderr, "could not open /dev/urandom\n");
return 1;
}
nread = fread(a, 2, KYBER_N, urandom);
(void)nread; /* a short read only means less random input; the benchmark still runs */
for(int i=0;i<KYBER_N;i++)
{
a[i] %= KYBER_Q;
b[i] = a[i];
}
fclose(urandom);
uint64_t __c__, __l__, __start__, __end__;
bench *__b__;
bench_init(&__b__, BENCH_CYCLES);
for(__c__=0; __c__<BENCH_WARM; __c__++)
{ ntt_mil(b,zetas_mil); }
bench_print_header(stdout);
for(__l__=0; __l__<BENCH_LOOPS; __l__++)
{ bench_reset(__b__);
for(__c__=0; __c__<BENCH_CYCLES; __c__++)
{ __start__ = bench_get_time();
ntt_mil(b,zetas_mil);
__end__ = bench_get_time();
bench_add(__b__,__start__,__end__);
}
bench_set_median(__b__);
bench_print(__b__,stdout,__l__);
}
bench_destroy(&__b__); /* release the buffer from the first benchmark before reusing */
bench_init(&__b__, BENCH_CYCLES);
for(__c__=0; __c__<BENCH_WARM; __c__++)
{ ntt(a,zetas_exp); }
bench_print_header(stdout);
for(__l__=0; __l__<BENCH_LOOPS; __l__++)
{ bench_reset(__b__);
for(__c__=0; __c__<BENCH_CYCLES; __c__++)
{ __start__ = bench_get_time();
ntt(a,zetas_exp);
__end__ = bench_get_time();
bench_add(__b__,__start__,__end__);
}
bench_set_median(__b__);
bench_print(__b__,stdout,__l__);
}
bench_destroy(&__b__);
return 0;
}
|
637628.c | /*
* TP-LINK TL-WR741ND v4/TL-MR3220 v2 board support
*
* Copyright (C) 2011-2012 Gabor Juhos <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/gpio.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include "common.h"
#include "dev-eth.h"
#include "dev-gpio-buttons.h"
#include "dev-leds-gpio.h"
#include "dev-m25p80.h"
#include "dev-usb.h"
#include "dev-wmac.h"
#include "machtypes.h"
#define TL_WR741NDV4_GPIO_BTN_RESET 11
#define TL_WR741NDV4_GPIO_BTN_WPS 26
#define TL_WR741NDV4_GPIO_LED_WLAN 0
#define TL_WR741NDV4_GPIO_LED_QSS 1
#define TL_WR741NDV4_GPIO_LED_WAN 13
#define TL_WR741NDV4_GPIO_LED_LAN1 14
#define TL_WR741NDV4_GPIO_LED_LAN2 15
#define TL_WR741NDV4_GPIO_LED_LAN3 16
#define TL_WR741NDV4_GPIO_LED_LAN4 17
#define TL_WR741NDV4_GPIO_LED_SYSTEM 27
#define TL_MR3220V2_GPIO_BTN_WPS 11
#define TL_MR3220V2_GPIO_BTN_WIFI 24
#define TL_MR3220V2_GPIO_LED_3G 26
#define TL_MR3220V2_GPIO_USB_POWER 8
#define TL_WR741NDV4_KEYS_POLL_INTERVAL 20 /* msecs */
#define TL_WR741NDV4_KEYS_DEBOUNCE_INTERVAL (3 * TL_WR741NDV4_KEYS_POLL_INTERVAL)
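/* i.e. 3 * 20 ms = 60 ms of debounce */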
static const char *tl_wr741ndv4_part_probes[] = {
"tp-link",
NULL,
};
static struct flash_platform_data tl_wr741ndv4_flash_data = {
.part_probes = tl_wr741ndv4_part_probes,
};
static struct gpio_led tl_wr741ndv4_leds_gpio[] __initdata = {
{
.name = "tp-link:green:lan1",
.gpio = TL_WR741NDV4_GPIO_LED_LAN1,
.active_low = 0,
}, {
.name = "tp-link:green:lan2",
.gpio = TL_WR741NDV4_GPIO_LED_LAN2,
.active_low = 0,
}, {
.name = "tp-link:green:lan3",
.gpio = TL_WR741NDV4_GPIO_LED_LAN3,
.active_low = 0,
}, {
.name = "tp-link:green:lan4",
.gpio = TL_WR741NDV4_GPIO_LED_LAN4,
.active_low = 1,
}, {
.name = "tp-link:green:qss",
.gpio = TL_WR741NDV4_GPIO_LED_QSS,
.active_low = 0,
}, {
.name = "tp-link:green:system",
.gpio = TL_WR741NDV4_GPIO_LED_SYSTEM,
.active_low = 1,
}, {
.name = "tp-link:green:wan",
.gpio = TL_WR741NDV4_GPIO_LED_WAN,
.active_low = 0,
}, {
.name = "tp-link:green:wlan",
.gpio = TL_WR741NDV4_GPIO_LED_WLAN,
.active_low = 0,
}, {
/* the 3G LED is only present on the MR3220 v2 */
.name = "tp-link:green:3g",
.gpio = TL_MR3220V2_GPIO_LED_3G,
.active_low = 0,
},
};
static struct gpio_keys_button tl_wr741ndv4_gpio_keys[] __initdata = {
{
.desc = "reset",
.type = EV_KEY,
.code = KEY_RESTART,
.debounce_interval = TL_WR741NDV4_KEYS_DEBOUNCE_INTERVAL,
.gpio = TL_WR741NDV4_GPIO_BTN_RESET,
.active_low = 0,
}, {
.desc = "WPS",
.type = EV_KEY,
.code = KEY_WPS_BUTTON,
.debounce_interval = TL_WR741NDV4_KEYS_DEBOUNCE_INTERVAL,
.gpio = TL_WR741NDV4_GPIO_BTN_WPS,
.active_low = 0,
}
};
static struct gpio_keys_button tl_mr3220v2_gpio_keys[] __initdata = {
{
.desc = "WPS",
.type = EV_KEY,
.code = KEY_WPS_BUTTON,
.debounce_interval = TL_WR741NDV4_KEYS_DEBOUNCE_INTERVAL,
.gpio = TL_MR3220V2_GPIO_BTN_WPS,
.active_low = 0,
}, {
.desc = "WIFI button",
.type = EV_KEY,
.code = KEY_RFKILL,
.debounce_interval = TL_WR741NDV4_KEYS_DEBOUNCE_INTERVAL,
.gpio = TL_MR3220V2_GPIO_BTN_WIFI,
.active_low = 0,
}
};
static void __init tl_ap121_setup(void)
{
u8 *mac = (u8 *) KSEG1ADDR(0x1f01fc00);
u8 *ee = (u8 *) KSEG1ADDR(0x1fff1000);
ath79_setup_ar933x_phy4_switch(true, true);
ath79_gpio_function_disable(AR933X_GPIO_FUNC_ETH_SWITCH_LED0_EN |
AR933X_GPIO_FUNC_ETH_SWITCH_LED1_EN |
AR933X_GPIO_FUNC_ETH_SWITCH_LED2_EN |
AR933X_GPIO_FUNC_ETH_SWITCH_LED3_EN |
AR933X_GPIO_FUNC_ETH_SWITCH_LED4_EN);
ath79_register_m25p80(&tl_wr741ndv4_flash_data);
ath79_init_mac(ath79_eth0_data.mac_addr, mac, 1);
ath79_init_mac(ath79_eth1_data.mac_addr, mac, -1);
ath79_register_mdio(0, 0x0);
ath79_register_eth(1);
ath79_register_eth(0);
ath79_register_wmac(ee, mac);
}
static void __init tl_wr741ndv4_setup(void)
{
tl_ap121_setup();
ath79_register_leds_gpio(-1, ARRAY_SIZE(tl_wr741ndv4_leds_gpio) - 1,
tl_wr741ndv4_leds_gpio);
ath79_register_gpio_keys_polled(1, TL_WR741NDV4_KEYS_POLL_INTERVAL,
ARRAY_SIZE(tl_wr741ndv4_gpio_keys),
tl_wr741ndv4_gpio_keys);
}
MIPS_MACHINE(ATH79_MACH_TL_WR741ND_V4, "TL-WR741ND-v4",
"TP-LINK TL-WR741ND v4", tl_wr741ndv4_setup);
static void __init tl_mr3220v2_setup(void)
{
tl_ap121_setup();
gpio_request_one(TL_MR3220V2_GPIO_USB_POWER,
GPIOF_OUT_INIT_HIGH | GPIOF_EXPORT_DIR_FIXED,
"USB power");
ath79_register_usb();
ath79_register_leds_gpio(-1, ARRAY_SIZE(tl_wr741ndv4_leds_gpio),
tl_wr741ndv4_leds_gpio);
ath79_register_gpio_keys_polled(1, TL_WR741NDV4_KEYS_POLL_INTERVAL,
ARRAY_SIZE(tl_mr3220v2_gpio_keys),
tl_mr3220v2_gpio_keys);
}
MIPS_MACHINE(ATH79_MACH_TL_MR3220_V2, "TL-MR3220-v2",
"TP-LINK TL-MR3220 v2", tl_mr3220v2_setup);
|
138123.c | /* manage endpoint /getData/ functions */
#include "dataApi.h"
static unsigned wigTableDataOutput(struct jsonWrite *jw, char *database,
char *table, char *chrom, int start, int end, unsigned itemsDone)
/* output wiggle data from the given table and specified chrom:start-end */
{
struct wiggleDataStream *wds = wiggleDataStreamNew();
wds->setMaxOutput(wds, maxItemsOutput);
wds->setChromConstraint(wds, chrom);
wds->setPositionConstraint(wds, start, end);
int operations = wigFetchAscii;
(void) wds->getData(wds, database, table, operations);
struct wigAsciiData *el;
unsigned itemCount = 0;
for (el = wds->ascii; (itemCount + itemsDone) < maxItemsOutput && el; el = el->next)
{
unsigned span = el->span;
unsigned count = el->count;
unsigned i = 0;
struct asciiDatum *data = el->data;
for ( ; (i < count) && ((itemCount + itemsDone) < maxItemsOutput); i++,data++)
{
int s = data->chromStart;
int e = s + span;
double val = data->value;
if (jsonOutputArrays)
{
jsonWriteListStart(jw, NULL);
jsonWriteNumber(jw, NULL, (long long)s);
jsonWriteNumber(jw, NULL, (long long)e);
jsonWriteDouble(jw, NULL, val);
jsonWriteListEnd(jw);
}
else
{
jsonWriteObjectStart(jw, NULL);
jsonWriteNumber(jw, "start", (long long)s);
jsonWriteNumber(jw, "end", (long long)e);
jsonWriteDouble(jw, "value", val);
jsonWriteObjectEnd(jw);
}
++itemCount;
}
}
return itemCount;
} /* static unsigned wigTableDataOutput(struct jsonWrite *jw, ...) */
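/* For reference, each item emitted above is either a bare array
 * [start, end, value] when jsonOutputArrays is set, or an object of the form
 * {"start": s, "end": e, "value": v} otherwise (illustrative shape, matching
 * the jsonWrite calls in the loop). */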
static void jsonDatumOut(struct jsonWrite *jw, char *name, char *val,
int jsonType)
/* output a json item, determine type, appropriate output, name can be NULL */
{
if (JSON_DOUBLE == jsonType)
jsonWriteDouble(jw, name, sqlDouble(val));
else if (JSON_NUMBER == jsonType)
jsonWriteNumber(jw, name, sqlLongLong(val));
else
jsonWriteString(jw, name, val);
}
static void wigColumnTypes(struct jsonWrite *jw)
/* output column headers for a wiggle data output schema */
{
jsonWriteListStart(jw, "columnTypes");
jsonWriteObjectStart(jw, NULL);
jsonWriteString(jw, "name", "start");
jsonWriteString(jw, "sqlType", "int");
jsonWriteString(jw, "jsonType", "number");
jsonWriteString(jw, "description", "chromStart: 0-based chromosome start position");
jsonWriteObjectEnd(jw);
jsonWriteObjectStart(jw, NULL);
jsonWriteString(jw, "name", "end");
jsonWriteString(jw, "sqlType", "int");
jsonWriteString(jw, "jsonType", "number");
jsonWriteString(jw, "description", "chromEnd: 1-based chromosome end position");
jsonWriteObjectEnd(jw);
jsonWriteObjectStart(jw, NULL);
jsonWriteString(jw, "name", "value");
jsonWriteString(jw, "sqlType", "float");
jsonWriteString(jw, "jsonType", "number");
jsonWriteString(jw, "description", "numerical data value for this location:start-end");
jsonWriteObjectEnd(jw);
jsonWriteListEnd(jw);
} /* static void wigColumnTypes(struct jsonWrite jw) */
static unsigned sqlQueryJsonOutput(struct sqlConnection *conn,
struct jsonWrite *jw, char *query, int columnCount, char **columnNames,
int *jsonTypes, unsigned itemsDone)
/* with the SQL query set up, run through those selected items */
{
struct sqlResult *sr = sqlGetResult(conn, query);
char **row = NULL;
unsigned itemCount = 0;
while ((itemCount+itemsDone) < maxItemsOutput && (row = sqlNextRow(sr)) != NULL)
{
int i = 0;
if (jsonOutputArrays)
{
jsonWriteListStart(jw, NULL);
for (i = 0; i < columnCount; ++i)
jsonDatumOut(jw, NULL, row[i], jsonTypes[i]);
jsonWriteListEnd(jw);
}
else
{
jsonWriteObjectStart(jw, NULL);
for (i = 0; i < columnCount; ++i)
jsonDatumOut(jw, columnNames[i], row[i], jsonTypes[i]);
jsonWriteObjectEnd(jw);
}
++itemCount;
}
sqlFreeResult(&sr);
return itemCount;
}
static void tableDataOutput(char *db, struct trackDb *tdb,
struct sqlConnection *conn, struct jsonWrite *jw, char *track,
char *chrom, unsigned start, unsigned end)
/* output the SQL table data for given track */
{
/* for MySQL select statements, name for 'chrom' 'start' 'end' to use
* for a table which has different names than that
*/
char chromName[256];
char startName[256];
char endName[256];
/* defaults, normal stuff */
safef(chromName, sizeof(chromName), "chrom");
safef(startName, sizeof(startName), "chromStart");
safef(endName, sizeof(endName), "chromEnd");
/* XXX - need to add wiggle data table processing here to output the
* data points instead of what it does now: the wig SQL table
*/
/* 'track' name in trackDb often refers to a SQL 'table' */
char *sqlTable = cloneString(track);
/* might have a specific table defined instead of the track name */
char *tableName = trackDbSetting(tdb, "table");
if (isNotEmpty(tableName))
{
freeMem(sqlTable);
sqlTable = cloneString(tableName);
jsonWriteString(jw, "sqlTable", sqlTable);
}
/* to be determined if this name is used or changes */
char *splitSqlTable = cloneString(sqlTable);
/* this function knows how to deal with split chromosomes, the NULL
* here for the chrom name means to use the first chrom name in chromInfo
*/
struct hTableInfo *hti = hFindTableInfoWithConn(conn, NULL, sqlTable);
if (debug && hti)
{
jsonWriteBoolean(jw, "isPos", hti->isPos);
jsonWriteBoolean(jw, "isSplit", hti->isSplit);
jsonWriteBoolean(jw, "hasBin", hti->hasBin);
}
/* check if table name needs to be modified */
if (hti && hti->isSplit)
{
if (isNotEmpty(chrom))
{
char fullTableName[256];
safef(fullTableName, sizeof(fullTableName), "%s_%s", chrom, hti->rootName);
freeMem(splitSqlTable);
splitSqlTable = cloneString(fullTableName);
if (debug)
jsonWriteString(jw, "splitSqlTable", splitSqlTable);
}
else
{
char *defaultChrom = hDefaultChrom(db);
char fullTableName[256];
safef(fullTableName, sizeof(fullTableName), "%s_%s", defaultChrom, hti->rootName);
freeMem(splitSqlTable);
splitSqlTable = cloneString(fullTableName);
if (debug)
jsonWriteString(jw, "splitSqlTable", splitSqlTable);
}
}
/* determine name for 'chrom' in table select */
if (! sqlColumnExists(conn, splitSqlTable, "chrom"))
{
if (sqlColumnExists(conn, splitSqlTable, "tName")) // track type psl
{
safef(chromName, sizeof(chromName), "tName");
safef(startName, sizeof(startName), "tStart");
safef(endName, sizeof(endName), "tEnd");
}
else if (sqlColumnExists(conn, splitSqlTable, "genoName"))// track type rmsk
{
safef(chromName, sizeof(chromName), "genoName");
safef(startName, sizeof(startName), "genoStart");
safef(endName, sizeof(endName), "genoEnd");
}
}
if (sqlColumnExists(conn, splitSqlTable, "txStart")) // track type genePred
{
safef(startName, sizeof(startName), "txStart");
safef(endName, sizeof(endName), "txEnd");
}
struct dyString *query = dyStringNew(64);
/* no chrom specified, return entire table */
if (isEmpty(chrom))
{
/* this setup here is for the case of non-split tables, will later
* determine if split, and then will go through each chrom
*/
sqlDyStringPrintf(query, "select * from %s", splitSqlTable);
}
else if (0 == (start + end)) /* have chrom, no start,end == full chr */
{
if (! sqlColumnExists(conn, splitSqlTable, chromName))
apiErrAbort(err400, err400Msg, "track '%s' is not a position track, request track without chrom specification, genome: '%s'", track, db);
jsonWriteString(jw, "chrom", chrom);
struct chromInfo *ci = hGetChromInfo(db, chrom);
jsonWriteNumber(jw, "start", (long long)0);
jsonWriteNumber(jw, "end", (long long)ci->size);
if (startsWith("wig", tdb->type))
{
if (jsonOutputArrays || debug)
wigColumnTypes(jw);
jsonWriteListStart(jw, chrom);
wigTableDataOutput(jw, db, splitSqlTable, chrom, 0, ci->size, 0);
jsonWriteListEnd(jw);
return; /* DONE */
}
else
{
sqlDyStringPrintf(query, "select * from %s where %s='%s'", splitSqlTable, chromName, chrom);
}
}
else /* fully specified chrom:start-end */
{
jsonWriteString(jw, "chrom", chrom);
// jsonWriteNumber(jw, "start", (long long)start); already printed out
// jsonWriteNumber(jw, "end", (long long)end); already printed out
if (startsWith("wig", tdb->type))
{
if (jsonOutputArrays || debug)
wigColumnTypes(jw);
jsonWriteListStart(jw, chrom);
wigTableDataOutput(jw, db, splitSqlTable, chrom, start, end, 0);
jsonWriteListEnd(jw);
return; /* DONE */
}
else
{
sqlDyStringPrintf(query, "select * from %s where ", splitSqlTable);
hAddBinToQuery(start, end, query);
sqlDyStringPrintf(query, "%s='%s' AND %s > %u AND %s < %u", chromName, chrom, endName, start, startName, end);
}
}
if (debug)
jsonWriteString(jw, "select", query->string);
/* continuing, could be wiggle output with no chrom specified */
char **columnNames = NULL;
char **columnTypes = NULL;
int *jsonTypes = NULL;
struct asObject *as = asForTable(conn, splitSqlTable, tdb);
struct asColumn *columnEl = as->columnList;
int asColumnCount = slCount(columnEl);
int columnCount = tableColumns(conn, jw, splitSqlTable, &columnNames, &columnTypes, &jsonTypes);
if (jsonOutputArrays || debug)
{
if (startsWith("wig", tdb->type))
{
wigColumnTypes(jw);
}
else
{
jsonWriteListStart(jw, "columnTypes");
int i = 0;
for (i = 0; i < columnCount; ++i)
{
jsonWriteObjectStart(jw, NULL);
jsonWriteString(jw, "name", columnNames[i]);
jsonWriteString(jw, "sqlType", columnTypes[i]);
jsonWriteString(jw, "jsonType", jsonTypeStrings[jsonTypes[i]]);
if ((0 == i) && (hti && hti->hasBin))
jsonWriteString(jw, "description", "Indexing field to speed chromosome range queries");
else if (columnEl && isNotEmpty(columnEl->comment))
jsonWriteString(jw, "description", columnEl->comment);
else
jsonWriteString(jw, "description", "");
/* perhaps move the comment pointer forward */
if (columnEl)
{
if (asColumnCount == columnCount)
columnEl = columnEl->next;
else if (! ((0 == i) && (hti && hti->hasBin)))
columnEl = columnEl->next;
}
jsonWriteObjectEnd(jw);
}
jsonWriteListEnd(jw);
}
}
unsigned itemsDone = 0;
/* empty chrom, needs to run through all chrom names */
if (isEmpty(chrom))
{
jsonWriteObjectStart(jw, track); /* begin track data output */
char fullTableName[256];
struct chromInfo *ciList = createChromInfoList(NULL, db);
slSort(&ciList, chromInfoCmp);
struct chromInfo *ci = ciList;
for ( ; ci && itemsDone < maxItemsOutput; ci = ci->next )
{
jsonWriteListStart(jw, ci->chrom); /* starting a chrom output */
freeDyString(&query);
query = dyStringNew(64);
if (hti && hti->isSplit) /* when split, make up split chr name */
{
safef(fullTableName, sizeof(fullTableName), "%s_%s", ci->chrom, hti->rootName);
sqlDyStringPrintf(query, "select * from %s", fullTableName);
}
else
sqlDyStringPrintf(query, "select * from %s", splitSqlTable);
if (startsWith("wig", tdb->type))
itemsDone += wigTableDataOutput(jw, db, splitSqlTable, chrom,
start, end, itemsDone);
else
itemsDone += sqlQueryJsonOutput(conn, jw, query->string,
columnCount, columnNames, jsonTypes, itemsDone);
jsonWriteListEnd(jw); /* chrom data output list end */
}
jsonWriteObjectEnd(jw); /* end track data output */
}
else
{ /* a single chrom has been requested, run it */
jsonWriteListStart(jw, track); /* data output list starting */
itemsDone += sqlQueryJsonOutput(conn, jw, query->string, columnCount,
columnNames, jsonTypes, itemsDone);
jsonWriteListEnd(jw); /* data output list end */
}
freeDyString(&query);
} /* static void tableDataOutput(char *db, struct trackDb *tdb, ... ) */
static unsigned bbiDataOutput(struct jsonWrite *jw, struct bbiFile *bbi,
char *chrom, unsigned start, unsigned end, struct sqlFieldType *fiList,
struct trackDb *tdb, unsigned itemsDone)
/* output bed data for one chrom in the given bbi file */
{
char *itemRgb = trackDbSetting(tdb, "itemRgb");
if (bbi->definedFieldCount > 8)
itemRgb = "on";
int *jsonTypes = NULL;
int columnCount = slCount(fiList);
AllocArray(jsonTypes, columnCount);
int i = 0;
struct sqlFieldType *fi;
for ( fi = fiList; fi; fi = fi->next)
{
if (itemRgb)
{
if (8 == i && sameWord("on", itemRgb))
jsonTypes[i++] = JSON_STRING;
else
jsonTypes[i++] = autoSqlToJsonType(fi->type);
}
else
jsonTypes[i++] = autoSqlToJsonType(fi->type);
}
struct lm *bbLm = lmInit(0);
struct bigBedInterval *iv, *ivList = NULL;
ivList = bigBedIntervalQuery(bbi,chrom, start, end, 0, bbLm);
char *row[bbi->fieldCount];
unsigned itemCount = 0;
for (iv = ivList; itemCount < maxItemsOutput && iv; iv = iv->next)
{
char startBuf[16], endBuf[16];
bigBedIntervalToRow(iv, chrom, startBuf, endBuf, row, bbi->fieldCount);
int i;
struct sqlFieldType *fi = fiList;
if (jsonOutputArrays)
{
jsonWriteListStart(jw, NULL);
for (i = 0; i < bbi->fieldCount; ++i)
jsonDatumOut(jw, NULL, row[i], jsonTypes[i]);
jsonWriteListEnd(jw);
}
else
{
jsonWriteObjectStart(jw, NULL);
for (i = 0; i < bbi->fieldCount; ++i, fi = fi->next)
jsonDatumOut(jw, fi->name, row[i], jsonTypes[i]);
jsonWriteObjectEnd(jw);
}
++itemCount;
}
lmCleanup(&bbLm);
return itemCount;
} /* static void bbiDataOutput(struct jsonWrite *jw, . . . ) */
static unsigned wigDataOutput(struct jsonWrite *jw, struct bbiFile *bwf,
char *chrom, unsigned start, unsigned end)
/* output wig data for one chrom in the given bwf file, return itemCount out */
{
unsigned itemCount = 0;
struct lm *lm = lmInit(0);
struct bbiInterval *iv, *ivList = bigWigIntervalQuery(bwf, chrom, start, end, lm);
if (NULL == ivList)
return itemCount;
jsonWriteListStart(jw, chrom);
for (iv = ivList; iv && itemCount < maxItemsOutput; iv = iv->next)
{
int s = max(iv->start, start);
int e = min(iv->end, end);
double val = iv->val;
if (jsonOutputArrays)
{
jsonWriteListStart(jw, NULL);
jsonWriteNumber(jw, NULL, (long long)s);
jsonWriteNumber(jw, NULL, (long long)e);
jsonWriteDouble(jw, NULL, val);
jsonWriteListEnd(jw);
}
else
{
jsonWriteObjectStart(jw, NULL);
jsonWriteNumber(jw, "start", (long long)s);
jsonWriteNumber(jw, "end", (long long)e);
jsonWriteDouble(jw, "value", val);
jsonWriteObjectEnd(jw);
}
++itemCount;
}
jsonWriteListEnd(jw);
return itemCount;
}
static void wigData(struct jsonWrite *jw, struct bbiFile *bwf, char *chrom,
unsigned start, unsigned end)
/* output the data for a bigWig bbi file */
{
struct bbiChromInfo *chromList = NULL;
if (isEmpty(chrom))
{
chromList = bbiChromList(bwf);
struct bbiChromInfo *bci;
unsigned itemsDone = 0;
for (bci = chromList; bci && (itemsDone < maxItemsOutput); bci = bci->next)
{
itemsDone += wigDataOutput(jw, bwf, bci->name, 0, bci->size);
}
}
else
(void) wigDataOutput(jw, bwf, chrom, start, end);
}
static void bigColumnTypes(struct jsonWrite *jw, struct sqlFieldType *fiList,
struct asObject *as)
/* show the column types from a big file autoSql definitions */
{
struct asColumn *columnEl = as->columnList;
jsonWriteListStart(jw, "columnTypes");
struct sqlFieldType *fi = fiList;
for ( ; fi; fi = fi->next, columnEl = columnEl->next)
{
int jsonType = autoSqlToJsonType(fi->type);
jsonWriteObjectStart(jw, NULL);
jsonWriteString(jw, "name", fi->name);
jsonWriteString(jw, "sqlType", fi->type);
jsonWriteString(jw, "jsonType",jsonTypeStrings[jsonType]);
if (columnEl && isNotEmpty(columnEl->comment))
jsonWriteString(jw, "description", columnEl->comment);
else
jsonWriteString(jw, "description", "");
jsonWriteObjectEnd(jw);
}
jsonWriteListEnd(jw);
}
static void getHubTrackData(char *hubUrl)
/* return data from a hub track, optionally just one chrom data,
* optionally just one section of that chrom data
*/
{
char *genome = cgiOptionalString("genome");
char *track = cgiOptionalString("track");
char *chrom = cgiOptionalString("chrom");
char *start = cgiOptionalString("start");
char *end = cgiOptionalString("end");
if (isEmpty(genome))
apiErrAbort(err400, err400Msg, "missing genome=<name> for endpoint '/getData/track' given hubUrl='%s'", hubUrl);
if (isEmpty(track))
apiErrAbort(err400, err400Msg, "missing track=<name> for endpoint '/getData/track' given hubUrl='%s'", hubUrl);
struct trackHub *hub = errCatchTrackHubOpen(hubUrl);
struct trackHubGenome *hubGenome = findHubGenome(hub, genome, "/getData/track",
hubUrl);
struct trackDb *tdb = obtainTdb(hubGenome, NULL);
if (NULL == tdb)
apiErrAbort(err400, err400Msg, "failed to find a track hub definition in genome=%s for endpoint '/getData/track' given hubUrl='%s'", genome, hubUrl);
struct trackDb *thisTrack = findTrackDb(track, tdb);
if (NULL == thisTrack)
apiErrAbort(err400, err400Msg, "failed to find specified track=%s in genome=%s for endpoint '/getData/track' given hubUrl='%s'", track, genome, hubUrl);
char *bigDataUrl = trackDbSetting(thisTrack, "bigDataUrl");
struct bbiFile *bbi = bigFileOpen(thisTrack->type, bigDataUrl);
if (NULL == bbi)
apiErrAbort(err400, err400Msg, "track type %s management not implemented yet TBD track=%s in genome=%s for endpoint '/getData/track' given hubUrl='%s'", thisTrack->type, track, genome, hubUrl);
struct jsonWrite *jw = apiStartOutput();
jsonWriteString(jw, "hubUrl", hubUrl);
jsonWriteString(jw, "genome", genome);
// jsonWriteString(jw, "track", track);
unsigned chromSize = 0;
struct bbiChromInfo *chromList = NULL;
if (isNotEmpty(chrom))
{
// jsonWriteString(jw, "chrom", chrom);
chromSize = bbiChromSize(bbi, chrom);
if (0 == chromSize)
apiErrAbort(err400, err400Msg, "can not find specified chrom=%s in bigBed file URL %s", chrom, bigDataUrl);
jsonWriteNumber(jw, "chromSize", (long long)chromSize);
}
else
{
chromList = bbiChromList(bbi);
jsonWriteNumber(jw, "chromCount", (long long)slCount(chromList));
}
unsigned uStart = 0;
unsigned uEnd = chromSize;
if ( ! (isEmpty(start) || isEmpty(end)) )
{
uStart = sqlUnsigned(start);
uEnd = sqlUnsigned(end);
jsonWriteNumber(jw, "start", uStart);
jsonWriteNumber(jw, "end", uEnd);
}
jsonWriteString(jw, "bigDataUrl", bigDataUrl);
jsonWriteString(jw, "trackType", thisTrack->type);
if (allowedBigBedType(thisTrack->type))
{
struct asObject *as = bigBedAsOrDefault(bbi);
struct sqlFieldType *fiList = sqlFieldTypesFromAs(as);
if (jsonOutputArrays || debug)
bigColumnTypes(jw, fiList, as);
jsonWriteListStart(jw, track);
unsigned itemsDone = 0;
if (isEmpty(chrom))
{
struct bbiChromInfo *bci;
for (bci = chromList; bci && (itemsDone < maxItemsOutput); bci = bci->next)
{
itemsDone += bbiDataOutput(jw, bbi, bci->name, 0, bci->size,
fiList, thisTrack, itemsDone);
}
}
else
itemsDone += bbiDataOutput(jw, bbi, chrom, uStart, uEnd, fiList,
thisTrack, itemsDone);
jsonWriteListEnd(jw);
}
else if (startsWith("bigWig", thisTrack->type))
{
if (jsonOutputArrays || debug)
wigColumnTypes(jw);
jsonWriteObjectStart(jw, track);
wigData(jw, bbi, chrom, uStart, uEnd);
jsonWriteObjectEnd(jw);
}
bbiFileClose(&bbi);
apiFinishOutput(0, NULL, jw);
} /* static void getHubTrackData(char *hubUrl) */
static void getTrackData()
/* return data from a track, optionally just one chrom data,
* optionally just one section of that chrom data
*/
{
char *db = cgiOptionalString("genome");
char *chrom = cgiOptionalString("chrom");
char *start = cgiOptionalString("start");
char *end = cgiOptionalString("end");
/* 'track' name in trackDb often refers to a SQL 'table' */
char *track = cgiOptionalString("track");
char *sqlTable = cloneString(track); /* might be something else */
/* depends upon 'table' setting in track db, or split table business */
unsigned chromSize = 0; /* maybe set later */
unsigned uStart = 0;
unsigned uEnd = chromSize; /* maybe set later */
if ( ! (isEmpty(start) || isEmpty(end)) )
{
uStart = sqlUnsigned(start);
uEnd = sqlUnsigned(end);
if (uEnd < uStart)
apiErrAbort(err400, err400Msg, "given start coordinate %u is greater than given end coordinate", uStart, uEnd);
}
if (isEmpty(db))
apiErrAbort(err400, err400Msg, "missing URL variable genome=<ucscDb> name for endpoint '/getData/track");
if (isEmpty(track))
apiErrAbort(err400, err400Msg, "missing URL variable track=<trackName> name for endpoint '/getData/track");
struct trackDb *thisTrack = hTrackDbForTrackAndAncestors(db, track);
if (NULL == thisTrack)
apiErrAbort(err400, err400Msg, "can not find track=%s name for endpoint '/getData/track", track);
/* might be a big* track with no table */
char *bigDataUrl = trackDbSetting(thisTrack, "bigDataUrl");
boolean tableTrack = TRUE;
/* might have a specific table defined instead of the track name */
char *tableName = trackDbSetting(thisTrack, "table");
if (isNotEmpty(tableName))
{
freeMem(sqlTable);
sqlTable = cloneString(tableName);
}
/* database existence has already been checked before now, might
* have disappeared in the mean time
*/
struct sqlConnection *conn = hAllocConnMaybe(db);
if (NULL == conn)
apiErrAbort(err400, err400Msg, "can not find genome 'genome=%s' for endpoint '/getData/track", db);
struct hTableInfo *hti = hFindTableInfoWithConn(conn, NULL, sqlTable);
char *splitSqlTable = NULL;
if (hti && hti->isSplit)
{
if (isNotEmpty(chrom))
{
char fullTableName[256];
safef(fullTableName, sizeof(fullTableName), "%s_%s", chrom, hti->rootName);
splitSqlTable = cloneString(fullTableName);
}
else
{
char *defaultChrom = hDefaultChrom(db);
char fullTableName[256];
safef(fullTableName, sizeof(fullTableName), "%s_%s", defaultChrom, hti->rootName);
splitSqlTable = cloneString(fullTableName);
}
}
if (! hTableOrSplitExists(db, sqlTable))
{
if (! bigDataUrl)
apiErrAbort(err400, err400Msg, "can not find specified 'track=%s' for endpoint: /getData/track?genome=%s;track=%s", track, db, track);
else
tableTrack = FALSE;
}
struct jsonWrite *jw = apiStartOutput();
jsonWriteString(jw, "genome", db);
if (tableTrack)
{
char *dataTime = NULL;
if (hti && hti->isSplit)
dataTime = sqlTableUpdate(conn, splitSqlTable);
else
dataTime = sqlTableUpdate(conn, sqlTable);
time_t dataTimeStamp = sqlDateToUnixTime(dataTime);
replaceChar(dataTime, ' ', 'T'); /* ISO 8601 */
jsonWriteString(jw, "dataTime", dataTime);
jsonWriteNumber(jw, "dataTimeStamp", (long long)dataTimeStamp);
if (differentStringNullOk(sqlTable,track))
jsonWriteString(jw, "sqlTable", sqlTable);
}
jsonWriteString(jw, "trackType", thisTrack->type);
jsonWriteString(jw, "track", track);
if (debug)
jsonWriteBoolean(jw, "jsonOutputArrays", jsonOutputArrays);
char query[4096];
struct bbiFile *bbi = NULL;
struct bbiChromInfo *chromList = NULL;
if (startsWith("big", thisTrack->type))
{
if (bigDataUrl)
bbi = bigFileOpen(thisTrack->type, bigDataUrl);
else
{
char quickReturn[2048];
sqlSafef(query, sizeof(query), "select fileName from %s", sqlTable);
if (sqlQuickQuery(conn, query, quickReturn, sizeof(quickReturn)))
{
bigDataUrl = cloneString(quickReturn);
bbi = bigFileOpen(thisTrack->type, bigDataUrl);
}
}
if (NULL == bbi)
apiErrAbort(err400, err400Msg, "failed to find bigDataUrl=%s for track=%s in database=%s for endpoint '/getData/track'", bigDataUrl, track, db);
if (isNotEmpty(chrom))
{
jsonWriteString(jw, "chrom", chrom);
chromSize = bbiChromSize(bbi, chrom);
if (0 == chromSize)
apiErrAbort(err400, err400Msg, "can not find specified chrom=%s in bigWig file URL %s", chrom, bigDataUrl);
if (uEnd < 1)
uEnd = chromSize;
jsonWriteNumber(jw, "chromSize", (long long)chromSize);
}
else
{
chromList = bbiChromList(bbi);
jsonWriteNumber(jw, "chromCount", (long long)slCount(chromList));
}
jsonWriteString(jw, "bigDataUrl", bigDataUrl);
}
/* when start, end given, show them */
if ( uEnd > uStart )
{
jsonWriteNumber(jw, "start", uStart);
jsonWriteNumber(jw, "end", uEnd);
}
if (allowedBigBedType(thisTrack->type))
{
struct asObject *as = bigBedAsOrDefault(bbi);
struct sqlFieldType *fiList = sqlFieldTypesFromAs(as);
if (jsonOutputArrays || debug)
bigColumnTypes(jw, fiList, as);
jsonWriteListStart(jw, track);
unsigned itemsDone = 0;
if (isEmpty(chrom))
{
struct bbiChromInfo *bci;
for (bci = chromList; bci && (itemsDone < maxItemsOutput); bci = bci->next)
{
itemsDone += bbiDataOutput(jw, bbi, bci->name, 0, bci->size,
fiList, thisTrack, itemsDone);
}
}
else
itemsDone += bbiDataOutput(jw, bbi, chrom, uStart, uEnd, fiList,
thisTrack, itemsDone);
jsonWriteListEnd(jw);
}
else if (startsWith("bigWig", thisTrack->type))
{
if (jsonOutputArrays || debug)
wigColumnTypes(jw);
jsonWriteObjectStart(jw, track);
wigData(jw, bbi, chrom, uStart, uEnd);
jsonWriteObjectEnd(jw);
bbiFileClose(&bbi);
}
else
tableDataOutput(db, thisTrack, conn, jw, track, chrom, uStart, uEnd);
apiFinishOutput(0, NULL, jw);
hFreeConn(&conn);
}
static void getSequenceData(char *db, char *hubUrl)
/* return DNA sequence, given at least a genome=name and chrom=chr,
optionally start and end, might be a track hub for UCSC database */
{
char *chrom = cgiOptionalString("chrom");
char *start = cgiOptionalString("start");
char *end = cgiOptionalString("end");
long timeStart = clock1000();
if (isEmpty(chrom))
apiErrAbort(err400, err400Msg, "missing URL chrom=<name> for endpoint '/getData/sequence?genome=%s'", db);
if (chromSeqFileExists(db, chrom))
{
struct chromInfo *ci = hGetChromInfo(db, chrom);
unsigned chromSize = ci->size;
struct dnaSeq *seq = NULL;
    if (isEmpty(start) || isEmpty(end))
        {
        if (chromSize > MAX_DNA_LENGTH)
            apiErrAbort(err400, err400Msg, "DNA sequence request %d too large, limit: %u for endpoint '/getData/sequence?genome=%s;chrom=%s'", chromSize, MAX_DNA_LENGTH, db, chrom);
        else
            seq = hChromSeqMixed(db, chrom, 0, 0);
        }
    else
        {
        if ( (sqlSigned(end) - sqlSigned(start)) > MAX_DNA_LENGTH)
            apiErrAbort(err400, err400Msg, "DNA sequence request %d too large, limit: %u for endpoint '/getData/sequence?genome=%s;chrom=%s;start=%s;end=%s'", sqlSigned(end) - sqlSigned(start), MAX_DNA_LENGTH, db, chrom, start, end);
        else
            seq = hChromSeqMixed(db, chrom, sqlSigned(start), sqlSigned(end));
        }
long endTime = clock1000();
long long et = endTime - timeStart;
if (NULL == seq)
apiErrAbort(err400, err400Msg, "can not find sequence for chrom=%s for endpoint '/getData/sequence?genome=%s;chrom=%s'", chrom, db, chrom);
struct jsonWrite *jw = apiStartOutput();
if (isNotEmpty(hubUrl))
jsonWriteString(jw, "hubUrl", hubUrl);
if (measureTiming)
jsonWriteNumber(jw, "dnaFetchTimeMs", et);
jsonWriteString(jw, "genome", db);
jsonWriteString(jw, "chrom", chrom);
if (isEmpty(start) || isEmpty(end))
{
jsonWriteNumber(jw, "start", (long long)0);
jsonWriteNumber(jw, "end", (long long)ci->size);
}
else
{
jsonWriteNumber(jw, "start", (long long)sqlSigned(start));
jsonWriteNumber(jw, "end", (long long)sqlSigned(end));
}
timeStart = clock1000();
jsonWriteString(jw, "dna", seq->dna);
endTime = clock1000();
et = endTime - timeStart;
if (measureTiming)
jsonWriteNumber(jw, "dnaJsonWriteTimeMs", et);
apiFinishOutput(0, NULL, jw);
freeDnaSeq(&seq);
}
else
    apiErrAbort(err400, err400Msg, "can not find specified chrom=%s in sequence for endpoint '/getData/sequence?genome=%s;chrom=%s'", chrom, db, chrom);
} /* static void getSequenceData(char *db, char *hubUrl) */
static void getHubSequenceData(char *hubUrl)
/* return DNA sequence, given at least a genome=name and chrom=chr,
optionally start and end */
{
char *genome = cgiOptionalString("genome");
char *chrom = cgiOptionalString("chrom");
char *start = cgiOptionalString("start");
char *end = cgiOptionalString("end");
if (isEmpty(genome))
apiErrAbort(err400, err400Msg, "missing genome=<name> for endpoint '/getData/sequence' given hubUrl='%s'", hubUrl);
if (isEmpty(chrom))
apiErrAbort(err400, err400Msg, "missing chrom=<name> for endpoint '/getData/sequence?genome=%s' given hubUrl='%s'", genome, hubUrl);
struct trackHub *hub = errCatchTrackHubOpen(hubUrl);
struct trackHubGenome *hubGenome = NULL;
for (hubGenome = hub->genomeList; hubGenome; hubGenome = hubGenome->next)
{
if (sameString(genome, hubGenome->name))
break;
}
if (NULL == hubGenome)
apiErrAbort(err400, err400Msg, "failed to find specified genome=%s for endpoint '/getData/sequence' given hubUrl '%s'", genome, hubUrl);
/* might be a UCSC database track hub, where hubGenome=name is the database */
if (isEmpty(hubGenome->twoBitPath))
{
getSequenceData(hubGenome->name, hubUrl);
return;
}
/* this MaybeChromInfo will open the twoBit file, if not already done */
struct chromInfo *ci = trackHubMaybeChromInfo(hubGenome->name, chrom);
if (NULL == ci)
apiErrAbort(err400, err400Msg, "can not find sequence for chrom=%s for endpoint '/getData/sequence?genome=%s;chrom=%s' given hubUrl='%s'", chrom, genome, chrom, hubUrl);
struct jsonWrite *jw = apiStartOutput();
jsonWriteString(jw, "hubUrl", hubUrl);
jsonWriteString(jw, "genome", genome);
jsonWriteString(jw, "chrom", chrom);
int fragStart = 0;
int fragEnd = 0;
if (isNotEmpty(start) && isNotEmpty(end))
{
fragStart = sqlSigned(start);
fragEnd = sqlSigned(end);
if ((fragEnd - fragStart) > MAX_DNA_LENGTH)
        apiErrAbort(err400, err400Msg, "DNA sequence request %d too large, limit: %u for endpoint '/getData/sequence?genome=%s;chrom=%s;start=%d;end=%d' given hubUrl='%s'", fragEnd-fragStart, MAX_DNA_LENGTH, genome, chrom, fragStart, fragEnd, hubUrl);
jsonWriteNumber(jw, "start", (long long)fragStart);
jsonWriteNumber(jw, "end", (long long)fragEnd);
}
else
{
if (ci->size > MAX_DNA_LENGTH)
apiErrAbort(err400, err400Msg, "DNA sequence request %d too large, limit: %u for endpoint '/getData/sequence?genome=%s;chrom=%s' given hubUrl='%s'", ci->size, MAX_DNA_LENGTH, genome, chrom, hubUrl);
jsonWriteNumber(jw, "start", (long long)0);
jsonWriteNumber(jw, "end", (long long)ci->size);
}
struct dnaSeq *seq = twoBitReadSeqFrag(hubGenome->tbf, chrom, fragStart, fragEnd);
if (NULL == seq)
{
if (fragEnd > fragStart)
        apiErrAbort(err400, err400Msg, "can not find sequence for chrom=%s;start=%s;end=%s for endpoint '/getData/sequence?genome=%s;chrom=%s;start=%s;end=%s' given hubUrl='%s'", chrom, start, end, genome, chrom, start, end, hubUrl);
    else
        apiErrAbort(err400, err400Msg, "can not find sequence for chrom=%s for endpoint '/getData/sequence?genome=%s;chrom=%s' given hubUrl='%s'", chrom, genome, chrom, hubUrl);
}
jsonWriteString(jw, "dna", seq->dna);
apiFinishOutput(0, NULL, jw);
}
void apiGetData(char *words[MAX_PATH_INFO])
/* 'getData' function, words[1] is the subCommand */
{
char *hubUrl = cgiOptionalString("hubUrl");
if (sameWord("track", words[1]))
{
char *extraArgs = verifyLegalArgs("genome;hubUrl;track;chrom;start;end;maxItemsOutput;jsonOutputArrays");
if (extraArgs)
apiErrAbort(err400, err400Msg, "extraneous arguments found for function /getData/track '%s'", extraArgs);
if (isNotEmpty(hubUrl))
getHubTrackData(hubUrl);
else
getTrackData();
}
else if (sameWord("sequence", words[1]))
{
char *extraArgs = verifyLegalArgs("genome;hubUrl;track;chrom;start;end");
if (extraArgs)
apiErrAbort(err400, err400Msg, "extraneous arguments found for function /getData/sequence '%s'", extraArgs);
if (isNotEmpty(hubUrl))
getHubSequenceData(hubUrl);
else
{
char *db = cgiOptionalString("genome");
if (isEmpty(db))
        apiErrAbort(err400, err400Msg, "missing URL genome=<ucscDb> name for endpoint '/getData/sequence'");
/* existence of db has already been proven before getting here */
getSequenceData(db, NULL);
}
}
else
apiErrAbort(err400, err400Msg, "do not recognize endpoint function: '/%s/%s'", words[0], words[1]);
}
|
197961.c | /*
* Copyright (c) 2018, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#include <arm_neon.h>
#include "config/aom_dsp_rtcd.h"
#include "config/aom_config.h"
#include "aom/aom_integer.h"
#include "av1/common/arm/mem_neon.h"
#include "av1/common/arm/transpose_neon.h"
static INLINE uint8x8_t lpf_mask(uint8x8_t p3q3, uint8x8_t p2q2, uint8x8_t p1q1,
uint8x8_t p0q0, const uint8_t blimit,
const uint8_t limit) {
// Calculate mask values for four samples
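  // The mask passes where |p3-p2|, |p2-p1| and |p1-p0| (and the mirrored
  // q-side differences) are all <= limit, and 2*|p0-q0| + |p1-q1|/2 <= blimit.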
uint32x2x2_t p0q0_p1q1;
uint16x8_t temp_16x8;
uint16x4_t temp0_16x4, temp1_16x4;
uint8x8_t mask_8x8, temp_8x8;
const uint8x8_t limit_8x8 = vdup_n_u8(limit);
const uint16x4_t blimit_16x4 = vdup_n_u16((uint16_t)blimit);
mask_8x8 = vabd_u8(p3q3, p2q2);
mask_8x8 = vmax_u8(mask_8x8, vabd_u8(p2q2, p1q1));
mask_8x8 = vmax_u8(mask_8x8, vabd_u8(p1q1, p0q0));
mask_8x8 = vcle_u8(mask_8x8, limit_8x8);
temp_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(mask_8x8)));
mask_8x8 = vand_u8(mask_8x8, temp_8x8);
p0q0_p1q1 = vtrn_u32(vreinterpret_u32_u8(p0q0), vreinterpret_u32_u8(p1q1));
temp_8x8 = vabd_u8(vreinterpret_u8_u32(p0q0_p1q1.val[0]),
vreinterpret_u8_u32(p0q0_p1q1.val[1]));
temp_16x8 = vmovl_u8(temp_8x8);
temp0_16x4 = vshl_n_u16(vget_low_u16(temp_16x8), 1);
temp1_16x4 = vshr_n_u16(vget_high_u16(temp_16x8), 1);
temp0_16x4 = vadd_u16(temp0_16x4, temp1_16x4);
temp0_16x4 = vcle_u16(temp0_16x4, blimit_16x4);
temp_8x8 = vmovn_u16(vcombine_u16(temp0_16x4, temp0_16x4));
mask_8x8 = vand_u8(mask_8x8, temp_8x8);
return mask_8x8;
}
static INLINE uint8x8_t lpf_flat_mask4(uint8x8_t p3q3, uint8x8_t p2q2,
uint8x8_t p1q1, uint8x8_t p0q0) {
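  // Flat where |p1-p0|, |p2-p0| and |p3-p0| are all <= 1 on both the p and q sides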
const uint8x8_t thresh_8x8 = vdup_n_u8(1); // for bd==8 threshold is always 1
uint8x8_t flat_8x8, temp_8x8;
flat_8x8 = vabd_u8(p1q1, p0q0);
flat_8x8 = vmax_u8(flat_8x8, vabd_u8(p2q2, p0q0));
flat_8x8 = vmax_u8(flat_8x8, vabd_u8(p3q3, p0q0));
flat_8x8 = vcle_u8(flat_8x8, thresh_8x8);
temp_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(flat_8x8)));
flat_8x8 = vand_u8(flat_8x8, temp_8x8);
return flat_8x8;
}
static INLINE uint8x8_t lpf_flat_mask3(uint8x8_t p2q2, uint8x8_t p1q1,
uint8x8_t p0q0) {
const uint8x8_t thresh_8x8 = vdup_n_u8(1); // for bd==8 threshold is always 1
uint8x8_t flat_8x8, temp_8x8;
flat_8x8 = vabd_u8(p1q1, p0q0);
flat_8x8 = vmax_u8(flat_8x8, vabd_u8(p2q2, p0q0));
flat_8x8 = vcle_u8(flat_8x8, thresh_8x8);
temp_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(flat_8x8)));
flat_8x8 = vand_u8(flat_8x8, temp_8x8);
return flat_8x8;
}
static INLINE uint8x8_t lpf_mask3_chroma(uint8x8_t p2q2, uint8x8_t p1q1,
uint8x8_t p0q0, const uint8_t blimit,
const uint8_t limit) {
// Calculate mask3 values for four samples
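  // Same idea as lpf_mask() but without p3/q3: |p2-p1| and |p1-p0| must be
  // <= limit, and 2*|p0-q0| + |p1-q1|/2 must be <= blimit.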
uint32x2x2_t p0q0_p1q1;
uint16x8_t temp_16x8;
uint16x4_t temp0_16x4, temp1_16x4;
uint8x8_t mask_8x8, temp_8x8;
const uint8x8_t limit_8x8 = vdup_n_u8(limit);
const uint16x4_t blimit_16x4 = vdup_n_u16((uint16_t)blimit);
mask_8x8 = vabd_u8(p2q2, p1q1);
mask_8x8 = vmax_u8(mask_8x8, vabd_u8(p1q1, p0q0));
mask_8x8 = vcle_u8(mask_8x8, limit_8x8);
temp_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(mask_8x8)));
mask_8x8 = vand_u8(mask_8x8, temp_8x8);
p0q0_p1q1 = vtrn_u32(vreinterpret_u32_u8(p0q0), vreinterpret_u32_u8(p1q1));
temp_8x8 = vabd_u8(vreinterpret_u8_u32(p0q0_p1q1.val[0]),
vreinterpret_u8_u32(p0q0_p1q1.val[1]));
temp_16x8 = vmovl_u8(temp_8x8);
temp0_16x4 = vshl_n_u16(vget_low_u16(temp_16x8), 1);
temp1_16x4 = vshr_n_u16(vget_high_u16(temp_16x8), 1);
temp0_16x4 = vadd_u16(temp0_16x4, temp1_16x4);
temp0_16x4 = vcle_u16(temp0_16x4, blimit_16x4);
temp_8x8 = vmovn_u16(vcombine_u16(temp0_16x4, temp0_16x4));
mask_8x8 = vand_u8(mask_8x8, temp_8x8);
return mask_8x8;
}
static void lpf_14_neon(uint8x8_t *p6q6, uint8x8_t *p5q5, uint8x8_t *p4q4,
uint8x8_t *p3q3, uint8x8_t *p2q2, uint8x8_t *p1q1,
uint8x8_t *p0q0, const uint8_t blimit,
const uint8_t limit, const uint8_t thresh) {
uint16x8_t out;
uint8x8_t out_f14_pq0, out_f14_pq1, out_f14_pq2, out_f14_pq3, out_f14_pq4,
out_f14_pq5;
uint8x8_t out_f7_pq0, out_f7_pq1, out_f7_pq2;
uint8x8_t out_f4_pq0, out_f4_pq1;
uint8x8_t mask_8x8, flat_8x8, flat2_8x8;
uint8x8_t q0p0, q1p1, q2p2;
// Calculate filter masks
mask_8x8 = lpf_mask(*p3q3, *p2q2, *p1q1, *p0q0, blimit, limit);
flat_8x8 = lpf_flat_mask4(*p3q3, *p2q2, *p1q1, *p0q0);
flat2_8x8 = lpf_flat_mask4(*p6q6, *p5q5, *p4q4, *p0q0);
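  // mask_8x8 gates the narrow (filter4) path, flat_8x8 selects the filter8
  // path, and flat2_8x8 (computed from the outer taps p6..p4) additionally
  // selects the filter14 path in the blend at the end of this function.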
{
// filter 4
int32x2x2_t ps0_qs0, ps1_qs1;
int16x8_t filter_s16;
const uint8x8_t thresh_f4 = vdup_n_u8(thresh);
uint8x8_t temp0_8x8, temp1_8x8;
int8x8_t ps0_s8, ps1_s8, qs0_s8, qs1_s8, temp_s8;
int8x8_t op0, oq0, op1, oq1;
int8x8_t pq_s0, pq_s1;
int8x8_t filter_s8, filter1_s8, filter2_s8;
int8x8_t hev_8x8;
const int8x8_t sign_mask = vdup_n_s8(0x80);
const int8x8_t val_4 = vdup_n_s8(4);
const int8x8_t val_3 = vdup_n_s8(3);
pq_s0 = veor_s8(vreinterpret_s8_u8(*p0q0), sign_mask);
pq_s1 = veor_s8(vreinterpret_s8_u8(*p1q1), sign_mask);
ps0_qs0 = vtrn_s32(vreinterpret_s32_s8(pq_s0), vreinterpret_s32_s8(pq_s0));
ps1_qs1 = vtrn_s32(vreinterpret_s32_s8(pq_s1), vreinterpret_s32_s8(pq_s1));
ps0_s8 = vreinterpret_s8_s32(ps0_qs0.val[0]);
qs0_s8 = vreinterpret_s8_s32(ps0_qs0.val[1]);
ps1_s8 = vreinterpret_s8_s32(ps1_qs1.val[0]);
qs1_s8 = vreinterpret_s8_s32(ps1_qs1.val[1]);
// hev_mask
temp0_8x8 = vcgt_u8(vabd_u8(*p0q0, *p1q1), thresh_f4);
temp1_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(temp0_8x8)));
hev_8x8 = vreinterpret_s8_u8(vorr_u8(temp0_8x8, temp1_8x8));
// add outer taps if we have high edge variance
filter_s8 = vqsub_s8(ps1_s8, qs1_s8);
filter_s8 = vand_s8(filter_s8, hev_8x8);
// inner taps
temp_s8 = vqsub_s8(qs0_s8, ps0_s8);
filter_s16 = vmovl_s8(filter_s8);
filter_s16 = vmlal_s8(filter_s16, temp_s8, val_3);
filter_s8 = vqmovn_s16(filter_s16);
filter_s8 = vand_s8(filter_s8, vreinterpret_s8_u8(mask_8x8));
filter1_s8 = vqadd_s8(filter_s8, val_4);
filter2_s8 = vqadd_s8(filter_s8, val_3);
filter1_s8 = vshr_n_s8(filter1_s8, 3);
filter2_s8 = vshr_n_s8(filter2_s8, 3);
oq0 = veor_s8(vqsub_s8(qs0_s8, filter1_s8), sign_mask);
op0 = veor_s8(vqadd_s8(ps0_s8, filter2_s8), sign_mask);
hev_8x8 = vmvn_s8(hev_8x8);
filter_s8 = vrshr_n_s8(filter1_s8, 1);
filter_s8 = vand_s8(filter_s8, hev_8x8);
oq1 = veor_s8(vqsub_s8(qs1_s8, filter_s8), sign_mask);
op1 = veor_s8(vqadd_s8(ps1_s8, filter_s8), sign_mask);
out_f4_pq0 = vreinterpret_u8_s8(vext_s8(op0, oq0, 4));
out_f4_pq1 = vreinterpret_u8_s8(vext_s8(op1, oq1, 4));
}
// reverse p and q
q0p0 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p0q0)));
q1p1 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p1q1)));
q2p2 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p2q2)));
{
// filter 8
uint16x8_t out_pq0, out_pq1, out_pq2;
out = vaddl_u8(*p3q3, *p2q2);
out = vaddw_u8(out, *p1q1);
out = vaddw_u8(out, *p0q0);
out = vaddw_u8(out, q0p0);
out_pq1 = vaddw_u8(out, *p3q3);
out_pq2 = vaddw_u8(out_pq1, *p3q3);
out_pq2 = vaddw_u8(out_pq2, *p2q2);
out_pq1 = vaddw_u8(out_pq1, *p1q1);
out_pq1 = vaddw_u8(out_pq1, q1p1);
out_pq0 = vaddw_u8(out, *p0q0);
out_pq0 = vaddw_u8(out_pq0, q1p1);
out_pq0 = vaddw_u8(out_pq0, q2p2);
out_f7_pq0 = vrshrn_n_u16(out_pq0, 3);
out_f7_pq1 = vrshrn_n_u16(out_pq1, 3);
out_f7_pq2 = vrshrn_n_u16(out_pq2, 3);
}
{
// filter 14
uint16x8_t out_pq0, out_pq1, out_pq2, out_pq3, out_pq4, out_pq5;
uint16x8_t p6q6_2, p6q6_temp, qp_sum;
uint8x8_t qp_rev;
out = vaddw_u8(out, *p4q4);
out = vaddw_u8(out, *p5q5);
out = vaddw_u8(out, *p6q6);
out_pq5 = vaddw_u8(out, *p4q4);
out_pq4 = vaddw_u8(out_pq5, *p3q3);
out_pq3 = vaddw_u8(out_pq4, *p2q2);
out_pq5 = vaddw_u8(out_pq5, *p5q5);
out_pq4 = vaddw_u8(out_pq4, *p5q5);
out_pq0 = vaddw_u8(out, *p1q1);
out_pq1 = vaddw_u8(out_pq0, *p2q2);
out_pq2 = vaddw_u8(out_pq1, *p3q3);
out_pq0 = vaddw_u8(out_pq0, *p0q0);
out_pq1 = vaddw_u8(out_pq1, *p0q0);
out_pq1 = vaddw_u8(out_pq1, *p6q6);
p6q6_2 = vaddl_u8(*p6q6, *p6q6);
out_pq2 = vaddq_u16(out_pq2, p6q6_2);
p6q6_temp = vaddw_u8(p6q6_2, *p6q6);
out_pq3 = vaddq_u16(out_pq3, p6q6_temp);
p6q6_temp = vaddw_u8(p6q6_temp, *p6q6);
out_pq4 = vaddq_u16(out_pq4, p6q6_temp);
p6q6_temp = vaddq_u16(p6q6_temp, p6q6_2);
out_pq5 = vaddq_u16(out_pq5, p6q6_temp);
out_pq4 = vaddw_u8(out_pq4, q1p1);
qp_sum = vaddl_u8(q2p2, q1p1);
out_pq3 = vaddq_u16(out_pq3, qp_sum);
qp_rev = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p3q3)));
qp_sum = vaddw_u8(qp_sum, qp_rev);
out_pq2 = vaddq_u16(out_pq2, qp_sum);
qp_rev = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p4q4)));
qp_sum = vaddw_u8(qp_sum, qp_rev);
out_pq1 = vaddq_u16(out_pq1, qp_sum);
qp_rev = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p5q5)));
qp_sum = vaddw_u8(qp_sum, qp_rev);
out_pq0 = vaddq_u16(out_pq0, qp_sum);
out_pq0 = vaddw_u8(out_pq0, q0p0);
out_f14_pq0 = vrshrn_n_u16(out_pq0, 4);
out_f14_pq1 = vrshrn_n_u16(out_pq1, 4);
out_f14_pq2 = vrshrn_n_u16(out_pq2, 4);
out_f14_pq3 = vrshrn_n_u16(out_pq3, 4);
out_f14_pq4 = vrshrn_n_u16(out_pq4, 4);
out_f14_pq5 = vrshrn_n_u16(out_pq5, 4);
}
{
uint8x8_t filter4_cond, filter8_cond, filter14_cond;
filter8_cond = vand_u8(flat_8x8, mask_8x8);
filter4_cond = vmvn_u8(filter8_cond);
filter14_cond = vand_u8(filter8_cond, flat2_8x8);
// filter4 outputs
*p0q0 = vbsl_u8(filter4_cond, out_f4_pq0, *p0q0);
*p1q1 = vbsl_u8(filter4_cond, out_f4_pq1, *p1q1);
// filter8 outputs
*p0q0 = vbsl_u8(filter8_cond, out_f7_pq0, *p0q0);
*p1q1 = vbsl_u8(filter8_cond, out_f7_pq1, *p1q1);
*p2q2 = vbsl_u8(filter8_cond, out_f7_pq2, *p2q2);
// filter14 outputs
*p0q0 = vbsl_u8(filter14_cond, out_f14_pq0, *p0q0);
*p1q1 = vbsl_u8(filter14_cond, out_f14_pq1, *p1q1);
*p2q2 = vbsl_u8(filter14_cond, out_f14_pq2, *p2q2);
*p3q3 = vbsl_u8(filter14_cond, out_f14_pq3, *p3q3);
*p4q4 = vbsl_u8(filter14_cond, out_f14_pq4, *p4q4);
*p5q5 = vbsl_u8(filter14_cond, out_f14_pq5, *p5q5);
}
}
static void lpf_8_neon(uint8x8_t *p3q3, uint8x8_t *p2q2, uint8x8_t *p1q1,
uint8x8_t *p0q0, const uint8_t blimit,
const uint8_t limit, const uint8_t thresh) {
uint16x8_t out;
uint8x8_t out_f7_pq0, out_f7_pq1, out_f7_pq2;
uint8x8_t out_f4_pq0, out_f4_pq1;
uint8x8_t mask_8x8, flat_8x8;
// Calculate filter masks
mask_8x8 = lpf_mask(*p3q3, *p2q2, *p1q1, *p0q0, blimit, limit);
flat_8x8 = lpf_flat_mask4(*p3q3, *p2q2, *p1q1, *p0q0);
{
// filter 4
int32x2x2_t ps0_qs0, ps1_qs1;
int16x8_t filter_s16;
const uint8x8_t thresh_f4 = vdup_n_u8(thresh);
uint8x8_t temp0_8x8, temp1_8x8;
int8x8_t ps0_s8, ps1_s8, qs0_s8, qs1_s8, temp_s8;
int8x8_t op0, oq0, op1, oq1;
int8x8_t pq_s0, pq_s1;
int8x8_t filter_s8, filter1_s8, filter2_s8;
int8x8_t hev_8x8;
const int8x8_t sign_mask = vdup_n_s8(0x80);
const int8x8_t val_4 = vdup_n_s8(4);
const int8x8_t val_3 = vdup_n_s8(3);
pq_s0 = veor_s8(vreinterpret_s8_u8(*p0q0), sign_mask);
pq_s1 = veor_s8(vreinterpret_s8_u8(*p1q1), sign_mask);
ps0_qs0 = vtrn_s32(vreinterpret_s32_s8(pq_s0), vreinterpret_s32_s8(pq_s0));
ps1_qs1 = vtrn_s32(vreinterpret_s32_s8(pq_s1), vreinterpret_s32_s8(pq_s1));
ps0_s8 = vreinterpret_s8_s32(ps0_qs0.val[0]);
qs0_s8 = vreinterpret_s8_s32(ps0_qs0.val[1]);
ps1_s8 = vreinterpret_s8_s32(ps1_qs1.val[0]);
qs1_s8 = vreinterpret_s8_s32(ps1_qs1.val[1]);
// hev_mask
temp0_8x8 = vcgt_u8(vabd_u8(*p0q0, *p1q1), thresh_f4);
temp1_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(temp0_8x8)));
hev_8x8 = vreinterpret_s8_u8(vorr_u8(temp0_8x8, temp1_8x8));
// add outer taps if we have high edge variance
filter_s8 = vqsub_s8(ps1_s8, qs1_s8);
filter_s8 = vand_s8(filter_s8, hev_8x8);
// inner taps
temp_s8 = vqsub_s8(qs0_s8, ps0_s8);
filter_s16 = vmovl_s8(filter_s8);
filter_s16 = vmlal_s8(filter_s16, temp_s8, val_3);
filter_s8 = vqmovn_s16(filter_s16);
filter_s8 = vand_s8(filter_s8, vreinterpret_s8_u8(mask_8x8));
filter1_s8 = vqadd_s8(filter_s8, val_4);
filter2_s8 = vqadd_s8(filter_s8, val_3);
filter1_s8 = vshr_n_s8(filter1_s8, 3);
filter2_s8 = vshr_n_s8(filter2_s8, 3);
oq0 = veor_s8(vqsub_s8(qs0_s8, filter1_s8), sign_mask);
op0 = veor_s8(vqadd_s8(ps0_s8, filter2_s8), sign_mask);
hev_8x8 = vmvn_s8(hev_8x8);
filter_s8 = vrshr_n_s8(filter1_s8, 1);
filter_s8 = vand_s8(filter_s8, hev_8x8);
oq1 = veor_s8(vqsub_s8(qs1_s8, filter_s8), sign_mask);
op1 = veor_s8(vqadd_s8(ps1_s8, filter_s8), sign_mask);
out_f4_pq0 = vreinterpret_u8_s8(vext_s8(op0, oq0, 4));
out_f4_pq1 = vreinterpret_u8_s8(vext_s8(op1, oq1, 4));
}
{
// filter 8
uint16x8_t out_pq0, out_pq1, out_pq2;
uint8x8_t q0p0, q1p1, q2p2;
out = vaddl_u8(*p3q3, *p2q2);
out = vaddw_u8(out, *p1q1);
out = vaddw_u8(out, *p0q0);
// reverse p and q
q0p0 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p0q0)));
q1p1 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p1q1)));
q2p2 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p2q2)));
out = vaddw_u8(out, q0p0);
out_pq1 = vaddw_u8(out, *p3q3);
out_pq2 = vaddw_u8(out_pq1, *p3q3);
out_pq2 = vaddw_u8(out_pq2, *p2q2);
out_pq1 = vaddw_u8(out_pq1, *p1q1);
out_pq1 = vaddw_u8(out_pq1, q1p1);
out_pq0 = vaddw_u8(out, *p0q0);
out_pq0 = vaddw_u8(out_pq0, q1p1);
out_pq0 = vaddw_u8(out_pq0, q2p2);
out_f7_pq0 = vrshrn_n_u16(out_pq0, 3);
out_f7_pq1 = vrshrn_n_u16(out_pq1, 3);
out_f7_pq2 = vrshrn_n_u16(out_pq2, 3);
}
{
uint8x8_t filter4_cond, filter8_cond;
filter8_cond = vand_u8(flat_8x8, mask_8x8);
filter4_cond = vmvn_u8(filter8_cond);
// filter4 outputs
*p0q0 = vbsl_u8(filter4_cond, out_f4_pq0, *p0q0);
*p1q1 = vbsl_u8(filter4_cond, out_f4_pq1, *p1q1);
// filter8 outputs
*p0q0 = vbsl_u8(filter8_cond, out_f7_pq0, *p0q0);
*p1q1 = vbsl_u8(filter8_cond, out_f7_pq1, *p1q1);
*p2q2 = vbsl_u8(filter8_cond, out_f7_pq2, *p2q2);
}
}
static void lpf_6_neon(uint8x8_t *p2q2, uint8x8_t *p1q1, uint8x8_t *p0q0,
const uint8_t blimit, const uint8_t limit,
const uint8_t thresh) {
uint16x8_t out;
uint8x8_t out_f6_pq0, out_f6_pq1;
uint8x8_t out_f4_pq0, out_f4_pq1;
uint8x8_t mask_8x8, flat_8x8;
// Calculate filter masks
mask_8x8 = lpf_mask3_chroma(*p2q2, *p1q1, *p0q0, blimit, limit);
flat_8x8 = lpf_flat_mask3(*p2q2, *p1q1, *p0q0);
{
// filter 4
int32x2x2_t ps0_qs0, ps1_qs1;
int16x8_t filter_s16;
const uint8x8_t thresh_f4 = vdup_n_u8(thresh);
uint8x8_t temp0_8x8, temp1_8x8;
int8x8_t ps0_s8, ps1_s8, qs0_s8, qs1_s8, temp_s8;
int8x8_t op0, oq0, op1, oq1;
int8x8_t pq_s0, pq_s1;
int8x8_t filter_s8, filter1_s8, filter2_s8;
int8x8_t hev_8x8;
const int8x8_t sign_mask = vdup_n_s8(0x80);
const int8x8_t val_4 = vdup_n_s8(4);
const int8x8_t val_3 = vdup_n_s8(3);
pq_s0 = veor_s8(vreinterpret_s8_u8(*p0q0), sign_mask);
pq_s1 = veor_s8(vreinterpret_s8_u8(*p1q1), sign_mask);
ps0_qs0 = vtrn_s32(vreinterpret_s32_s8(pq_s0), vreinterpret_s32_s8(pq_s0));
ps1_qs1 = vtrn_s32(vreinterpret_s32_s8(pq_s1), vreinterpret_s32_s8(pq_s1));
ps0_s8 = vreinterpret_s8_s32(ps0_qs0.val[0]);
qs0_s8 = vreinterpret_s8_s32(ps0_qs0.val[1]);
ps1_s8 = vreinterpret_s8_s32(ps1_qs1.val[0]);
qs1_s8 = vreinterpret_s8_s32(ps1_qs1.val[1]);
// hev_mask
temp0_8x8 = vcgt_u8(vabd_u8(*p0q0, *p1q1), thresh_f4);
temp1_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(temp0_8x8)));
hev_8x8 = vreinterpret_s8_u8(vorr_u8(temp0_8x8, temp1_8x8));
// add outer taps if we have high edge variance
filter_s8 = vqsub_s8(ps1_s8, qs1_s8);
filter_s8 = vand_s8(filter_s8, hev_8x8);
// inner taps
temp_s8 = vqsub_s8(qs0_s8, ps0_s8);
filter_s16 = vmovl_s8(filter_s8);
filter_s16 = vmlal_s8(filter_s16, temp_s8, val_3);
filter_s8 = vqmovn_s16(filter_s16);
filter_s8 = vand_s8(filter_s8, vreinterpret_s8_u8(mask_8x8));
filter1_s8 = vqadd_s8(filter_s8, val_4);
filter2_s8 = vqadd_s8(filter_s8, val_3);
filter1_s8 = vshr_n_s8(filter1_s8, 3);
filter2_s8 = vshr_n_s8(filter2_s8, 3);
oq0 = veor_s8(vqsub_s8(qs0_s8, filter1_s8), sign_mask);
op0 = veor_s8(vqadd_s8(ps0_s8, filter2_s8), sign_mask);
filter_s8 = vrshr_n_s8(filter1_s8, 1);
filter_s8 = vbic_s8(filter_s8, hev_8x8);
oq1 = veor_s8(vqsub_s8(qs1_s8, filter_s8), sign_mask);
op1 = veor_s8(vqadd_s8(ps1_s8, filter_s8), sign_mask);
out_f4_pq0 = vreinterpret_u8_s8(vext_s8(op0, oq0, 4));
out_f4_pq1 = vreinterpret_u8_s8(vext_s8(op1, oq1, 4));
}
{
// filter 6
uint16x8_t out_pq0, out_pq1;
uint8x8_t pq_rev;
out = vaddl_u8(*p0q0, *p1q1);
out = vaddq_u16(out, out);
out = vaddw_u8(out, *p2q2);
pq_rev = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p0q0)));
out = vaddw_u8(out, pq_rev);
out_pq0 = vaddw_u8(out, pq_rev);
pq_rev = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p1q1)));
out_pq0 = vaddw_u8(out_pq0, pq_rev);
out_pq1 = vaddw_u8(out, *p2q2);
out_pq1 = vaddw_u8(out_pq1, *p2q2);
out_f6_pq0 = vrshrn_n_u16(out_pq0, 3);
out_f6_pq1 = vrshrn_n_u16(out_pq1, 3);
}
{
uint8x8_t filter4_cond, filter6_cond;
filter6_cond = vand_u8(flat_8x8, mask_8x8);
filter4_cond = vmvn_u8(filter6_cond);
// filter4 outputs
*p0q0 = vbsl_u8(filter4_cond, out_f4_pq0, *p0q0);
*p1q1 = vbsl_u8(filter4_cond, out_f4_pq1, *p1q1);
// filter6 outputs
*p0q0 = vbsl_u8(filter6_cond, out_f6_pq0, *p0q0);
*p1q1 = vbsl_u8(filter6_cond, out_f6_pq1, *p1q1);
}
}
void aom_lpf_vertical_14_neon(uint8_t *src, int stride, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
uint8x16_t row0, row1, row2, row3;
uint8x8_t pxp3, p6p2, p5p1, p4p0;
uint8x8_t q0q4, q1q5, q2q6, q3qy;
uint32x2x2_t p6q6_p2q2, p5q5_p1q1, p4q4_p0q0, pxqx_p3q3;
uint32x2_t pq_rev;
uint8x8_t p0q0, p1q1, p2q2, p3q3, p4q4, p5q5, p6q6;
// row0: x p6 p5 p4 p3 p2 p1 p0 | q0 q1 q2 q3 q4 q5 q6 y
// row1: x p6 p5 p4 p3 p2 p1 p0 | q0 q1 q2 q3 q4 q5 q6 y
// row2: x p6 p5 p4 p3 p2 p1 p0 | q0 q1 q2 q3 q4 q5 q6 y
// row3: x p6 p5 p4 p3 p2 p1 p0 | q0 q1 q2 q3 q4 q5 q6 y
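  // After the transposes below, each pXqX vector holds pX of the four rows in
  // its low 32-bit lane and qX of the four rows in its high 32-bit lane.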
load_u8_8x16(src - 8, stride, &row0, &row1, &row2, &row3);
pxp3 = vget_low_u8(row0);
p6p2 = vget_low_u8(row1);
p5p1 = vget_low_u8(row2);
p4p0 = vget_low_u8(row3);
transpose_u8_8x4(&pxp3, &p6p2, &p5p1, &p4p0);
q0q4 = vget_high_u8(row0);
q1q5 = vget_high_u8(row1);
q2q6 = vget_high_u8(row2);
q3qy = vget_high_u8(row3);
transpose_u8_8x4(&q0q4, &q1q5, &q2q6, &q3qy);
pq_rev = vrev64_u32(vreinterpret_u32_u8(q3qy));
pxqx_p3q3 = vtrn_u32(vreinterpret_u32_u8(pxp3), pq_rev);
pq_rev = vrev64_u32(vreinterpret_u32_u8(q1q5));
p5q5_p1q1 = vtrn_u32(vreinterpret_u32_u8(p5p1), pq_rev);
pq_rev = vrev64_u32(vreinterpret_u32_u8(q0q4));
p4q4_p0q0 = vtrn_u32(vreinterpret_u32_u8(p4p0), pq_rev);
pq_rev = vrev64_u32(vreinterpret_u32_u8(q2q6));
p6q6_p2q2 = vtrn_u32(vreinterpret_u32_u8(p6p2), pq_rev);
p0q0 = vreinterpret_u8_u32(p4q4_p0q0.val[1]);
p1q1 = vreinterpret_u8_u32(p5q5_p1q1.val[1]);
p2q2 = vreinterpret_u8_u32(p6q6_p2q2.val[1]);
p3q3 = vreinterpret_u8_u32(pxqx_p3q3.val[1]);
p4q4 = vreinterpret_u8_u32(p4q4_p0q0.val[0]);
p5q5 = vreinterpret_u8_u32(p5q5_p1q1.val[0]);
p6q6 = vreinterpret_u8_u32(p6q6_p2q2.val[0]);
lpf_14_neon(&p6q6, &p5q5, &p4q4, &p3q3, &p2q2, &p1q1, &p0q0, *blimit, *limit,
*thresh);
pxqx_p3q3 = vtrn_u32(pxqx_p3q3.val[0], vreinterpret_u32_u8(p3q3));
p5q5_p1q1 = vtrn_u32(vreinterpret_u32_u8(p5q5), vreinterpret_u32_u8(p1q1));
p4q4_p0q0 = vtrn_u32(vreinterpret_u32_u8(p4q4), vreinterpret_u32_u8(p0q0));
p6q6_p2q2 = vtrn_u32(vreinterpret_u32_u8(p6q6), vreinterpret_u32_u8(p2q2));
pxqx_p3q3.val[1] = vrev64_u32(pxqx_p3q3.val[1]);
p5q5_p1q1.val[1] = vrev64_u32(p5q5_p1q1.val[1]);
p4q4_p0q0.val[1] = vrev64_u32(p4q4_p0q0.val[1]);
p6q6_p2q2.val[1] = vrev64_u32(p6q6_p2q2.val[1]);
q0q4 = vreinterpret_u8_u32(p4q4_p0q0.val[1]);
q1q5 = vreinterpret_u8_u32(p5q5_p1q1.val[1]);
q2q6 = vreinterpret_u8_u32(p6q6_p2q2.val[1]);
q3qy = vreinterpret_u8_u32(pxqx_p3q3.val[1]);
transpose_u8_8x4(&q0q4, &q1q5, &q2q6, &q3qy);
pxp3 = vreinterpret_u8_u32(pxqx_p3q3.val[0]);
p6p2 = vreinterpret_u8_u32(p6q6_p2q2.val[0]);
p5p1 = vreinterpret_u8_u32(p5q5_p1q1.val[0]);
p4p0 = vreinterpret_u8_u32(p4q4_p0q0.val[0]);
transpose_u8_8x4(&pxp3, &p6p2, &p5p1, &p4p0);
row0 = vcombine_u8(pxp3, q0q4);
row1 = vcombine_u8(p6p2, q1q5);
row2 = vcombine_u8(p5p1, q2q6);
row3 = vcombine_u8(p4p0, q3qy);
store_u8_8x16(src - 8, stride, row0, row1, row2, row3);
}
void aom_lpf_vertical_8_neon(uint8_t *src, int stride, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
uint32x2x2_t p2q2_p1q1, p3q3_p0q0;
uint32x2_t pq_rev;
uint8x8_t p3q0, p2q1, p1q2, p0q3;
uint8x8_t p0q0, p1q1, p2q2, p3q3;
// row0: p3 p2 p1 p0 | q0 q1 q2 q3
// row1: p3 p2 p1 p0 | q0 q1 q2 q3
// row2: p3 p2 p1 p0 | q0 q1 q2 q3
// row3: p3 p2 p1 p0 | q0 q1 q2 q3
load_u8_8x4(src - 4, stride, &p3q0, &p2q1, &p1q2, &p0q3);
transpose_u8_8x4(&p3q0, &p2q1, &p1q2, &p0q3);
pq_rev = vrev64_u32(vreinterpret_u32_u8(p0q3));
p3q3_p0q0 = vtrn_u32(vreinterpret_u32_u8(p3q0), pq_rev);
pq_rev = vrev64_u32(vreinterpret_u32_u8(p1q2));
p2q2_p1q1 = vtrn_u32(vreinterpret_u32_u8(p2q1), pq_rev);
p0q0 = vreinterpret_u8_u32(vrev64_u32(p3q3_p0q0.val[1]));
p1q1 = vreinterpret_u8_u32(vrev64_u32(p2q2_p1q1.val[1]));
p2q2 = vreinterpret_u8_u32(p2q2_p1q1.val[0]);
p3q3 = vreinterpret_u8_u32(p3q3_p0q0.val[0]);
lpf_8_neon(&p3q3, &p2q2, &p1q1, &p0q0, *blimit, *limit, *thresh);
pq_rev = vrev64_u32(vreinterpret_u32_u8(p0q0));
p3q3_p0q0 = vtrn_u32(vreinterpret_u32_u8(p3q3), pq_rev);
pq_rev = vrev64_u32(vreinterpret_u32_u8(p1q1));
p2q2_p1q1 = vtrn_u32(vreinterpret_u32_u8(p2q2), pq_rev);
p0q3 = vreinterpret_u8_u32(vrev64_u32(p3q3_p0q0.val[1]));
p1q2 = vreinterpret_u8_u32(vrev64_u32(p2q2_p1q1.val[1]));
p2q1 = vreinterpret_u8_u32(p2q2_p1q1.val[0]);
p3q0 = vreinterpret_u8_u32(p3q3_p0q0.val[0]);
transpose_u8_8x4(&p3q0, &p2q1, &p1q2, &p0q3);
store_u8_8x4(src - 4, stride, p3q0, p2q1, p1q2, p0q3);
}
void aom_lpf_horizontal_8_neon(uint8_t *src, int stride, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
uint8x8_t p0q0, p1q1, p2q2, p3q3;
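  // Each pXqX vector is built with pX of 4 pixels in its low 32-bit lane
  // (via vld1_dup) and qX of the same 4 pixels inserted into its high lane
  // (via vld1_lane on lane 1).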
p3q3 = vreinterpret_u8_u32(vld1_dup_u32((uint32_t *)(src - 4 * stride)));
p2q2 = vreinterpret_u8_u32(vld1_dup_u32((uint32_t *)(src - 3 * stride)));
p1q1 = vreinterpret_u8_u32(vld1_dup_u32((uint32_t *)(src - 2 * stride)));
p0q0 = vreinterpret_u8_u32(vld1_dup_u32((uint32_t *)(src - 1 * stride)));
p0q0 = vreinterpret_u8_u32(vld1_lane_u32((uint32_t *)(src + 0 * stride),
vreinterpret_u32_u8(p0q0), 1));
p1q1 = vreinterpret_u8_u32(vld1_lane_u32((uint32_t *)(src + 1 * stride),
vreinterpret_u32_u8(p1q1), 1));
p2q2 = vreinterpret_u8_u32(vld1_lane_u32((uint32_t *)(src + 2 * stride),
vreinterpret_u32_u8(p2q2), 1));
p3q3 = vreinterpret_u8_u32(vld1_lane_u32((uint32_t *)(src + 3 * stride),
vreinterpret_u32_u8(p3q3), 1));
lpf_8_neon(&p3q3, &p2q2, &p1q1, &p0q0, *blimit, *limit, *thresh);
vst1_lane_u32((uint32_t *)(src - 4 * stride), vreinterpret_u32_u8(p3q3), 0);
vst1_lane_u32((uint32_t *)(src - 3 * stride), vreinterpret_u32_u8(p2q2), 0);
vst1_lane_u32((uint32_t *)(src - 2 * stride), vreinterpret_u32_u8(p1q1), 0);
vst1_lane_u32((uint32_t *)(src - 1 * stride), vreinterpret_u32_u8(p0q0), 0);
vst1_lane_u32((uint32_t *)(src + 0 * stride), vreinterpret_u32_u8(p0q0), 1);
vst1_lane_u32((uint32_t *)(src + 1 * stride), vreinterpret_u32_u8(p1q1), 1);
vst1_lane_u32((uint32_t *)(src + 2 * stride), vreinterpret_u32_u8(p2q2), 1);
vst1_lane_u32((uint32_t *)(src + 3 * stride), vreinterpret_u32_u8(p3q3), 1);
}
void aom_lpf_horizontal_6_neon(uint8_t *src, int stride, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
uint8x8_t p0q0, p1q1, p2q2;
p2q2 = vreinterpret_u8_u32(vld1_dup_u32((uint32_t *)(src - 3 * stride)));
p1q1 = vreinterpret_u8_u32(vld1_dup_u32((uint32_t *)(src - 2 * stride)));
p0q0 = vreinterpret_u8_u32(vld1_dup_u32((uint32_t *)(src - 1 * stride)));
p0q0 = vreinterpret_u8_u32(vld1_lane_u32((uint32_t *)(src + 0 * stride),
vreinterpret_u32_u8(p0q0), 1));
p1q1 = vreinterpret_u8_u32(vld1_lane_u32((uint32_t *)(src + 1 * stride),
vreinterpret_u32_u8(p1q1), 1));
p2q2 = vreinterpret_u8_u32(vld1_lane_u32((uint32_t *)(src + 2 * stride),
vreinterpret_u32_u8(p2q2), 1));
lpf_6_neon(&p2q2, &p1q1, &p0q0, *blimit, *limit, *thresh);
vst1_lane_u32((uint32_t *)(src - 3 * stride), vreinterpret_u32_u8(p2q2), 0);
vst1_lane_u32((uint32_t *)(src - 2 * stride), vreinterpret_u32_u8(p1q1), 0);
vst1_lane_u32((uint32_t *)(src - 1 * stride), vreinterpret_u32_u8(p0q0), 0);
vst1_lane_u32((uint32_t *)(src + 0 * stride), vreinterpret_u32_u8(p0q0), 1);
vst1_lane_u32((uint32_t *)(src + 1 * stride), vreinterpret_u32_u8(p1q1), 1);
vst1_lane_u32((uint32_t *)(src + 2 * stride), vreinterpret_u32_u8(p2q2), 1);
}
|
271304.c | /**
 * This example takes a picture every 5 seconds and prints its size on the serial monitor.
*/
// =============================== SETUP ======================================
// 1. Board setup (Uncomment):
// #define BOARD_WROVER_KIT
// #define BOARD_ESP32CAM_AITHINKER
/**
* 2. Kconfig setup
*
 * If you already have a Kconfig file, copy the content from
 * https://github.com/espressif/esp32-camera/blob/master/Kconfig into it.
 * If you don't, copy that Kconfig file into the src directory.
 * The Kconfig file has definitions that allow more control over the camera and
 * how it will be initialized.
*/
/**
* 3. Enable PSRAM on sdkconfig:
*
* CONFIG_ESP32_SPIRAM_SUPPORT=y
*
* More info on
* https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-reference/kconfig.html#config-esp32-spiram-support
*/
// ================================ CODE ======================================
#include <esp_event_loop.h>
#include <esp_log.h>
#include <esp_system.h>
#include <nvs_flash.h>
#include <sys/param.h>
#include <string.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_camera.h"
// WROVER-KIT PIN Map
#ifdef BOARD_WROVER_KIT
#define CAM_PIN_PWDN -1 //power down is not used
#define CAM_PIN_RESET -1 //software reset will be performed
#define CAM_PIN_XCLK 21
#define CAM_PIN_SIOD 26
#define CAM_PIN_SIOC 27
#define CAM_PIN_D7 35
#define CAM_PIN_D6 34
#define CAM_PIN_D5 39
#define CAM_PIN_D4 36
#define CAM_PIN_D3 19
#define CAM_PIN_D2 18
#define CAM_PIN_D1 5
#define CAM_PIN_D0 4
#define CAM_PIN_VSYNC 25
#define CAM_PIN_HREF 23
#define CAM_PIN_PCLK 22
#endif
// ESP32Cam (AiThinker) PIN Map
#ifdef BOARD_ESP32CAM_AITHINKER
#define CAM_PIN_PWDN 32
#define CAM_PIN_RESET -1 //software reset will be performed
#define CAM_PIN_XCLK 0
#define CAM_PIN_SIOD 26
#define CAM_PIN_SIOC 27
#define CAM_PIN_D7 35
#define CAM_PIN_D6 34
#define CAM_PIN_D5 39
#define CAM_PIN_D4 36
#define CAM_PIN_D3 21
#define CAM_PIN_D2 19
#define CAM_PIN_D1 18
#define CAM_PIN_D0 5
#define CAM_PIN_VSYNC 25
#define CAM_PIN_HREF 23
#define CAM_PIN_PCLK 22
#endif
static const char *TAG = "example:take_picture";
static camera_config_t camera_config = {
.pin_pwdn = CAM_PIN_PWDN,
.pin_reset = CAM_PIN_RESET,
.pin_xclk = CAM_PIN_XCLK,
.pin_sscb_sda = CAM_PIN_SIOD,
.pin_sscb_scl = CAM_PIN_SIOC,
.pin_d7 = CAM_PIN_D7,
.pin_d6 = CAM_PIN_D6,
.pin_d5 = CAM_PIN_D5,
.pin_d4 = CAM_PIN_D4,
.pin_d3 = CAM_PIN_D3,
.pin_d2 = CAM_PIN_D2,
.pin_d1 = CAM_PIN_D1,
.pin_d0 = CAM_PIN_D0,
.pin_vsync = CAM_PIN_VSYNC,
.pin_href = CAM_PIN_HREF,
.pin_pclk = CAM_PIN_PCLK,
//XCLK 20MHz or 10MHz for OV2640 double FPS (Experimental)
.xclk_freq_hz = 20000000,
.ledc_timer = LEDC_TIMER_0,
.ledc_channel = LEDC_CHANNEL_0,
.pixel_format = PIXFORMAT_JPEG, //YUV422,GRAYSCALE,RGB565,JPEG
.frame_size = FRAMESIZE_VGA, //QQVGA-UXGA Do not use sizes above QVGA when not JPEG
.jpeg_quality = 12, //0-63 lower number means higher quality
.fb_count = 1 //if more than one, i2s runs in continuous mode. Use only with JPEG
};
static esp_err_t init_camera()
{
//initialize the camera
esp_err_t err = esp_camera_init(&camera_config);
if (err != ESP_OK)
{
ESP_LOGE(TAG, "Camera Init Failed");
return err;
}
return ESP_OK;
}
void app_main()
{
init_camera();
while (1)
{
ESP_LOGI(TAG, "Taking picture...");
        camera_fb_t *pic = esp_camera_fb_get();
        if (pic)
        {
            // use pic->buf to access the image
            ESP_LOGI(TAG, "Picture taken! Its size was: %zu bytes", pic->len);
            // hand the frame buffer back to the driver so it can be reused
            esp_camera_fb_return(pic);
        }
        vTaskDelay(5000 / portTICK_RATE_MS);
}
} |
844835.c | /*
 * Implement a postfix calculator that operates on complex numbers supplied
 * in the format (a,b).
*/
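/*
 * A minimal sketch of one possible solution, not part of the original
 * exercise file: it assumes whitespace-separated postfix input such as
 * "(1,2) (3,4) + (0,1) *" and supports only the operators + - *.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct { double re, im; } Complex;

static Complex stack[256];
static int top = 0;

static void push(Complex c)
{
    if (top >= 256) { fprintf(stderr, "stack overflow\n"); exit(1); }
    stack[top++] = c;
}

static Complex pop(void)
{
    if (top <= 0) { fprintf(stderr, "stack underflow\n"); exit(1); }
    return stack[--top];
}

int main(void)
{
    char tok[64];
    while (scanf("%63s", tok) == 1)
    {
        Complex a, b, r;
        if (sscanf(tok, "(%lf,%lf)", &r.re, &r.im) == 2)
        {
            push(r);               /* operand supplied as (a,b) */
            continue;
        }
        b = pop();                 /* operator: pop right then left operand */
        a = pop();
        switch (tok[0])
        {
        case '+': r.re = a.re + b.re; r.im = a.im + b.im; break;
        case '-': r.re = a.re - b.re; r.im = a.im - b.im; break;
        case '*': r.re = a.re * b.re - a.im * b.im;
                  r.im = a.re * b.im + a.im * b.re; break;
        default:  fprintf(stderr, "unknown token: %s\n", tok); exit(1);
        }
        push(r);
    }
    if (top == 1)                  /* a well-formed expression leaves one result */
    {
        Complex r = pop();
        printf("(%g,%g)\n", r.re, r.im);
    }
    return 0;
}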
|
559349.c | /**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** IAR Multithreaded Library Support */
/** */
/**************************************************************************/
/**************************************************************************/
#define TX_SOURCE_CODE
/* Define IAR library for tools prior to version 8. */
#if (__VER__ < 8000000)
/* IAR version 7 and below. */
/* Include necessary system files. */
#include "tx_api.h"
#include "tx_initialize.h"
#include "tx_thread.h"
#include "tx_mutex.h"
/* This implementation requires that the following macros are defined in the
tx_port.h file and <yvals.h> is included with the following code segments:
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#include <yvals.h>
#endif
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_iar_tls_pointer;
#else
#define TX_THREAD_EXTENSION_2
#endif
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#define TX_THREAD_CREATE_EXTENSION(thread_ptr) thread_ptr -> tx_thread_iar_tls_pointer = __iar_dlib_perthread_allocate();
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) __iar_dlib_perthread_deallocate(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL;
#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION __iar_dlib_perthread_access(0);
#else
#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#endif
This should be done automatically if TX_ENABLE_IAR_LIBRARY_SUPPORT is defined while building the ThreadX library and the
application.
Finally, the project options General Options -> Library Configuration should have the "Enable thread support in library" box selected.
*/
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#include <yvals.h>
#if _MULTI_THREAD
TX_MUTEX __tx_iar_system_lock_mutexes[_MAX_LOCK];
UINT __tx_iar_system_lock_next_free_mutex = 0;
/* Define error counters, just for debug purposes. */
UINT __tx_iar_system_lock_no_mutexes;
UINT __tx_iar_system_lock_internal_errors;
UINT __tx_iar_system_lock_isr_caller;
/* Define the TLS access function for the IAR library. */
void _DLIB_TLS_MEMORY *__iar_dlib_perthread_access(void _DLIB_TLS_MEMORY *symbp)
{
char _DLIB_TLS_MEMORY *p = 0;
/* Is there a current thread? */
if (_tx_thread_current_ptr)
p = (char _DLIB_TLS_MEMORY *) _tx_thread_current_ptr -> tx_thread_iar_tls_pointer;
else
p = (void _DLIB_TLS_MEMORY *) __segment_begin("__DLIB_PERTHREAD");
p += __IAR_DLIB_PERTHREAD_SYMBOL_OFFSET(symbp);
return (void _DLIB_TLS_MEMORY *) p;
}
/* Define mutexes for IAR library. */
void __iar_system_Mtxinit(__iar_Rmtx *m)
{
UINT i;
UINT status;
TX_MUTEX *mutex_ptr;
/* First, find a free mutex in the list. */
for (i = 0; i < _MAX_LOCK; i++)
{
/* Setup a pointer to the start of the next free mutex. */
mutex_ptr = &__tx_iar_system_lock_mutexes[__tx_iar_system_lock_next_free_mutex++];
/* Check for wrap-around on the next free mutex. */
if (__tx_iar_system_lock_next_free_mutex >= _MAX_LOCK)
{
/* Yes, set the free index back to 0. */
__tx_iar_system_lock_next_free_mutex = 0;
}
/* Is this mutex free? */
if (mutex_ptr -> tx_mutex_id != TX_MUTEX_ID)
{
/* Yes, this mutex is free, get out of the loop! */
break;
}
}
/* Determine if a free mutex was found. */
if (i >= _MAX_LOCK)
{
/* Error! No more free mutexes! */
/* Increment the no mutexes error counter. */
__tx_iar_system_lock_no_mutexes++;
/* Set return pointer to NULL. */
*m = TX_NULL;
/* Return. */
return;
}
/* Now create the ThreadX mutex for the IAR library. */
status = _tx_mutex_create(mutex_ptr, "IAR System Library Lock", TX_NO_INHERIT);
/* Determine if the creation was successful. */
if (status == TX_SUCCESS)
{
/* Yes, successful creation, return mutex pointer. */
*m = (VOID *) mutex_ptr;
}
else
{
/* Increment the internal error counter. */
__tx_iar_system_lock_internal_errors++;
/* Return a NULL pointer to indicate an error. */
*m = TX_NULL;
}
}
void __iar_system_Mtxdst(__iar_Rmtx *m)
{
/* Simply delete the mutex. */
_tx_mutex_delete((TX_MUTEX *) *m);
}
void __iar_system_Mtxlock(__iar_Rmtx *m)
{
UINT status;
/* Determine the caller's context. Mutex locks are only available from initialization and
threads. */
if ((_tx_thread_system_state == 0) || (_tx_thread_system_state >= TX_INITIALIZE_IN_PROGRESS))
{
/* Get the mutex. */
status = _tx_mutex_get((TX_MUTEX *) *m, TX_WAIT_FOREVER);
/* Check the status of the mutex release. */
if (status)
{
/* Internal error, increment the counter. */
__tx_iar_system_lock_internal_errors++;
}
}
else
{
/* Increment the ISR caller error. */
__tx_iar_system_lock_isr_caller++;
}
}
void __iar_system_Mtxunlock(__iar_Rmtx *m)
{
UINT status;
/* Determine the caller's context. Mutex unlocks are only available from initialization and
threads. */
if ((_tx_thread_system_state == 0) || (_tx_thread_system_state >= TX_INITIALIZE_IN_PROGRESS))
{
/* Release the mutex. */
status = _tx_mutex_put((TX_MUTEX *) *m);
/* Check the status of the mutex release. */
if (status)
{
/* Internal error, increment the counter. */
__tx_iar_system_lock_internal_errors++;
}
}
else
{
/* Increment the ISR caller error. */
__tx_iar_system_lock_isr_caller++;
}
}
#if _DLIB_FILE_DESCRIPTOR
TX_MUTEX __tx_iar_file_lock_mutexes[_MAX_FLOCK];
UINT __tx_iar_file_lock_next_free_mutex = 0;
/* Define error counters, just for debug purposes. */
UINT __tx_iar_file_lock_no_mutexes;
UINT __tx_iar_file_lock_internal_errors;
UINT __tx_iar_file_lock_isr_caller;
void __iar_file_Mtxinit(__iar_Rmtx *m)
{
UINT i;
UINT status;
TX_MUTEX *mutex_ptr;
/* First, find a free mutex in the list. */
for (i = 0; i < _MAX_FLOCK; i++)
{
/* Setup a pointer to the start of the next free mutex. */
mutex_ptr = &__tx_iar_file_lock_mutexes[__tx_iar_file_lock_next_free_mutex++];
/* Check for wrap-around on the next free mutex. */
        if (__tx_iar_file_lock_next_free_mutex >= _MAX_FLOCK)
{
/* Yes, set the free index back to 0. */
__tx_iar_file_lock_next_free_mutex = 0;
}
/* Is this mutex free? */
if (mutex_ptr -> tx_mutex_id != TX_MUTEX_ID)
{
/* Yes, this mutex is free, get out of the loop! */
break;
}
}
/* Determine if a free mutex was found. */
    if (i >= _MAX_FLOCK)
{
/* Error! No more free mutexes! */
/* Increment the no mutexes error counter. */
__tx_iar_file_lock_no_mutexes++;
/* Set return pointer to NULL. */
*m = TX_NULL;
/* Return. */
return;
}
/* Now create the ThreadX mutex for the IAR library. */
status = _tx_mutex_create(mutex_ptr, "IAR File Library Lock", TX_NO_INHERIT);
/* Determine if the creation was successful. */
if (status == TX_SUCCESS)
{
/* Yes, successful creation, return mutex pointer. */
*m = (VOID *) mutex_ptr;
}
else
{
/* Increment the internal error counter. */
__tx_iar_file_lock_internal_errors++;
/* Return a NULL pointer to indicate an error. */
*m = TX_NULL;
}
}
void __iar_file_Mtxdst(__iar_Rmtx *m)
{
/* Simply delete the mutex. */
_tx_mutex_delete((TX_MUTEX *) *m);
}
void __iar_file_Mtxlock(__iar_Rmtx *m)
{
UINT status;
/* Determine the caller's context. Mutex locks are only available from initialization and
threads. */
if ((_tx_thread_system_state == 0) || (_tx_thread_system_state >= TX_INITIALIZE_IN_PROGRESS))
{
/* Get the mutex. */
status = _tx_mutex_get((TX_MUTEX *) *m, TX_WAIT_FOREVER);
/* Check the status of the mutex release. */
if (status)
{
/* Internal error, increment the counter. */
__tx_iar_file_lock_internal_errors++;
}
}
else
{
/* Increment the ISR caller error. */
__tx_iar_file_lock_isr_caller++;
}
}
void __iar_file_Mtxunlock(__iar_Rmtx *m)
{
UINT status;
/* Determine the caller's context. Mutex unlocks are only available from initialization and
threads. */
if ((_tx_thread_system_state == 0) || (_tx_thread_system_state >= TX_INITIALIZE_IN_PROGRESS))
{
/* Release the mutex. */
status = _tx_mutex_put((TX_MUTEX *) *m);
/* Check the status of the mutex release. */
if (status)
{
/* Internal error, increment the counter. */
__tx_iar_file_lock_internal_errors++;
}
}
else
{
/* Increment the ISR caller error. */
__tx_iar_file_lock_isr_caller++;
}
}
#endif /* _DLIB_FILE_DESCRIPTOR */
#endif /* _MULTI_THREAD */
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#else /* IAR version 8 and above. */
/* Include necessary system files. */
#include "tx_api.h"
#include "tx_initialize.h"
#include "tx_thread.h"
#include "tx_mutex.h"
/* This implementation requires that the following macros are defined in the
tx_port.h file and <yvals.h> is included with the following code segments:
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#include <yvals.h>
#endif
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_iar_tls_pointer;
#else
#define TX_THREAD_EXTENSION_2
#endif
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
void *_tx_iar_create_per_thread_tls_area(void);
void _tx_iar_destroy_per_thread_tls_area(void *tls_ptr);
void __iar_Initlocks(void);
#define TX_THREAD_CREATE_EXTENSION(thread_ptr) thread_ptr -> tx_thread_iar_tls_pointer = __iar_dlib_perthread_allocate();
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {__iar_dlib_perthread_deallocate(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0);
#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION do {__iar_Initlocks();} while(0);
#else
#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#endif
This should be done automatically if TX_ENABLE_IAR_LIBRARY_SUPPORT is defined while building the ThreadX library and the
application.
Finally, the project options General Options -> Library Configuration should have the "Enable thread support in library" box selected.
*/
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#include <DLib_threads.h>
void * __aeabi_read_tp();
void* _tx_iar_create_per_thread_tls_area();
void _tx_iar_destroy_per_thread_tls_area(void *tls_ptr);
#pragma section="__iar_tls$$DATA"
/* Define the TLS access function for the IAR library. */
void * __aeabi_read_tp(void)
{
void *p = 0;
TX_THREAD *thread_ptr = _tx_thread_current_ptr;
if (thread_ptr)
{
p = thread_ptr->tx_thread_iar_tls_pointer;
}
else
{
p = __section_begin("__iar_tls$$DATA");
}
return p;
}
/* Define the TLS creation and destruction to use malloc/free. */
void* _tx_iar_create_per_thread_tls_area()
{
UINT tls_size = __iar_tls_size();
/* Get memory for TLS. */
void *p = malloc(tls_size);
/* Initialize TLS-area and run constructors for objects in TLS */
__iar_tls_init(p);
return p;
}
void _tx_iar_destroy_per_thread_tls_area(void *tls_ptr)
{
/* Destroy objects living in TLS */
__call_thread_dtors();
free(tls_ptr);
}
#ifndef _MAX_LOCK
#define _MAX_LOCK 4
#endif
static TX_MUTEX __tx_iar_system_lock_mutexes[_MAX_LOCK];
static UINT __tx_iar_system_lock_next_free_mutex = 0;
/* Define error counters, just for debug purposes. */
UINT __tx_iar_system_lock_no_mutexes;
UINT __tx_iar_system_lock_internal_errors;
UINT __tx_iar_system_lock_isr_caller;
/* Define mutexes for IAR library. */
void __iar_system_Mtxinit(__iar_Rmtx *m)
{
UINT i;
UINT status;
TX_MUTEX *mutex_ptr;
/* First, find a free mutex in the list. */
for (i = 0; i < _MAX_LOCK; i++)
{
/* Setup a pointer to the start of the next free mutex. */
mutex_ptr = &__tx_iar_system_lock_mutexes[__tx_iar_system_lock_next_free_mutex++];
/* Check for wrap-around on the next free mutex. */
if (__tx_iar_system_lock_next_free_mutex >= _MAX_LOCK)
{
/* Yes, set the free index back to 0. */
__tx_iar_system_lock_next_free_mutex = 0;
}
/* Is this mutex free? */
if (mutex_ptr -> tx_mutex_id != TX_MUTEX_ID)
{
/* Yes, this mutex is free, get out of the loop! */
break;
}
}
/* Determine if a free mutex was found. */
if (i >= _MAX_LOCK)
{
/* Error! No more free mutexes! */
/* Increment the no mutexes error counter. */
__tx_iar_system_lock_no_mutexes++;
/* Set return pointer to NULL. */
*m = TX_NULL;
/* Return. */
return;
}
/* Now create the ThreadX mutex for the IAR library. */
status = _tx_mutex_create(mutex_ptr, "IAR System Library Lock", TX_NO_INHERIT);
/* Determine if the creation was successful. */
if (status == TX_SUCCESS)
{
/* Yes, successful creation, return mutex pointer. */
*m = (VOID *) mutex_ptr;
}
else
{
/* Increment the internal error counter. */
__tx_iar_system_lock_internal_errors++;
/* Return a NULL pointer to indicate an error. */
*m = TX_NULL;
}
}
void __iar_system_Mtxdst(__iar_Rmtx *m)
{
/* Simply delete the mutex. */
_tx_mutex_delete((TX_MUTEX *) *m);
}
void __iar_system_Mtxlock(__iar_Rmtx *m)
{
if (*m)
{
UINT status;
/* Determine the caller's context. Mutex locks are only available from initialization and
threads. */
if ((_tx_thread_system_state == 0) || (_tx_thread_system_state >= TX_INITIALIZE_IN_PROGRESS))
{
/* Get the mutex. */
status = _tx_mutex_get((TX_MUTEX *) *m, TX_WAIT_FOREVER);
/* Check the status of the mutex release. */
if (status)
{
/* Internal error, increment the counter. */
__tx_iar_system_lock_internal_errors++;
}
}
else
{
/* Increment the ISR caller error. */
__tx_iar_system_lock_isr_caller++;
}
}
}
void __iar_system_Mtxunlock(__iar_Rmtx *m)
{
if (*m)
{
UINT status;
/* Determine the caller's context. Mutex unlocks are only available from initialization and
threads. */
if ((_tx_thread_system_state == 0) || (_tx_thread_system_state >= TX_INITIALIZE_IN_PROGRESS))
{
/* Release the mutex. */
status = _tx_mutex_put((TX_MUTEX *) *m);
/* Check the status of the mutex release. */
if (status)
{
/* Internal error, increment the counter. */
__tx_iar_system_lock_internal_errors++;
}
}
else
{
/* Increment the ISR caller error. */
__tx_iar_system_lock_isr_caller++;
}
}
}
#if _DLIB_FILE_DESCRIPTOR
#include <stdio.h> /* Added to get access to FOPEN_MAX */
#ifndef _MAX_FLOCK
#define _MAX_FLOCK FOPEN_MAX /* Define _MAX_FLOCK as the maximum number of open files */
#endif
TX_MUTEX __tx_iar_file_lock_mutexes[_MAX_FLOCK];
UINT __tx_iar_file_lock_next_free_mutex = 0;
/* Define error counters, just for debug purposes. */
UINT __tx_iar_file_lock_no_mutexes;
UINT __tx_iar_file_lock_internal_errors;
UINT __tx_iar_file_lock_isr_caller;
void __iar_file_Mtxinit(__iar_Rmtx *m)
{
UINT i;
UINT status;
TX_MUTEX *mutex_ptr;
/* First, find a free mutex in the list. */
for (i = 0; i < _MAX_FLOCK; i++)
{
/* Setup a pointer to the start of the next free mutex. */
mutex_ptr = &__tx_iar_file_lock_mutexes[__tx_iar_file_lock_next_free_mutex++];
/* Check for wrap-around on the next free mutex. */
        if (__tx_iar_file_lock_next_free_mutex >= _MAX_FLOCK)
{
/* Yes, set the free index back to 0. */
__tx_iar_file_lock_next_free_mutex = 0;
}
/* Is this mutex free? */
if (mutex_ptr -> tx_mutex_id != TX_MUTEX_ID)
{
/* Yes, this mutex is free, get out of the loop! */
break;
}
}
/* Determine if a free mutex was found. */
    if (i >= _MAX_FLOCK)
{
/* Error! No more free mutexes! */
/* Increment the no mutexes error counter. */
__tx_iar_file_lock_no_mutexes++;
/* Set return pointer to NULL. */
*m = TX_NULL;
/* Return. */
return;
}
/* Now create the ThreadX mutex for the IAR library. */
status = _tx_mutex_create(mutex_ptr, "IAR File Library Lock", TX_NO_INHERIT);
/* Determine if the creation was successful. */
if (status == TX_SUCCESS)
{
/* Yes, successful creation, return mutex pointer. */
*m = (VOID *) mutex_ptr;
}
else
{
/* Increment the internal error counter. */
__tx_iar_file_lock_internal_errors++;
/* Return a NULL pointer to indicate an error. */
*m = TX_NULL;
}
}
void __iar_file_Mtxdst(__iar_Rmtx *m)
{
/* Simply delete the mutex. */
_tx_mutex_delete((TX_MUTEX *) *m);
}
void __iar_file_Mtxlock(__iar_Rmtx *m)
{
UINT status;
/* Determine the caller's context. Mutex locks are only available from initialization and
threads. */
if ((_tx_thread_system_state == 0) || (_tx_thread_system_state >= TX_INITIALIZE_IN_PROGRESS))
{
/* Get the mutex. */
status = _tx_mutex_get((TX_MUTEX *) *m, TX_WAIT_FOREVER);
/* Check the status of the mutex release. */
if (status)
{
/* Internal error, increment the counter. */
__tx_iar_file_lock_internal_errors++;
}
}
else
{
/* Increment the ISR caller error. */
__tx_iar_file_lock_isr_caller++;
}
}
void __iar_file_Mtxunlock(__iar_Rmtx *m)
{
UINT status;
/* Determine the caller's context. Mutex unlocks are only available from initialization and
threads. */
if ((_tx_thread_system_state == 0) || (_tx_thread_system_state >= TX_INITIALIZE_IN_PROGRESS))
{
/* Release the mutex. */
status = _tx_mutex_put((TX_MUTEX *) *m);
/* Check the status of the mutex release. */
if (status)
{
/* Internal error, increment the counter. */
__tx_iar_file_lock_internal_errors++;
}
}
else
{
/* Increment the ISR caller error. */
__tx_iar_file_lock_isr_caller++;
}
}
#endif /* _DLIB_FILE_DESCRIPTOR */
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#endif /* IAR version 8 and above. */
|
256442.c | /*-
* Copyright 2005,2007,2009 Colin Percival
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include "core.h"
#include "crypto_auth_hmacsha256.h"
#include "crypto_pwhash_scryptsalsa208sha256.h"
#include "pbkdf2-sha256.h"
#include "private/common.h"
#include "utils.h"
/**
* PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, c, buf, dkLen):
* Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and
* write the output to buf. The value dkLen must be at most 32 * (2^32 - 1).
*/
void
PBKDF2_SHA256(const uint8_t *passwd, size_t passwdlen, const uint8_t *salt,
size_t saltlen, uint64_t c, uint8_t *buf, size_t dkLen)
{
crypto_auth_hmacsha256_state PShctx, hctx;
size_t i;
uint8_t ivec[4];
uint8_t U[32];
uint8_t T[32];
uint64_t j;
int k;
size_t clen;
#if SIZE_MAX > 0x1fffffffe0ULL
COMPILER_ASSERT(crypto_pwhash_scryptsalsa208sha256_BYTES_MAX
<= 0x1fffffffe0ULL);
if (dkLen > 0x1fffffffe0ULL) {
sodium_misuse(); /* LCOV_EXCL_LINE */
}
#endif
crypto_auth_hmacsha256_init(&PShctx, passwd, passwdlen);
crypto_auth_hmacsha256_update(&PShctx, salt, saltlen);
for (i = 0; i * 32 < dkLen; i++) {
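        /* Each 32-byte block T_i = U_1 ^ U_2 ^ ... ^ U_c, where
         * U_1 = HMAC(passwd, salt || INT_BE(i+1)) and U_j = HMAC(passwd, U_{j-1}). */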
STORE32_BE(ivec, (uint32_t)(i + 1));
memcpy(&hctx, &PShctx, sizeof(crypto_auth_hmacsha256_state));
crypto_auth_hmacsha256_update(&hctx, ivec, 4);
crypto_auth_hmacsha256_final(&hctx, U);
memcpy(T, U, 32);
/* LCOV_EXCL_START */
for (j = 2; j <= c; j++) {
crypto_auth_hmacsha256_init(&hctx, passwd, passwdlen);
crypto_auth_hmacsha256_update(&hctx, U, 32);
crypto_auth_hmacsha256_final(&hctx, U);
for (k = 0; k < 32; k++) {
T[k] ^= U[k];
}
}
/* LCOV_EXCL_STOP */
clen = dkLen - i * 32;
if (clen > 32) {
clen = 32;
}
memcpy(&buf[i * 32], T, clen);
}
sodium_memzero((void *) &PShctx, sizeof PShctx);
}
|
195579.c | #include "all.h"
// defined in noun/hashtable.c
c3_w _ch_skip_slot(c3_w mug_w, c3_w lef_w);
/* _setup(): prepare for tests.
*/
static void
_setup(void)
{
u3m_init();
u3m_pave(c3y, c3n);
}
/* _test_bit_manipulation():
*/
static c3_i
_test_bit_manipulation()
{
c3_i ret_i = 1;
if ( sizeof(u3_noun) != sizeof(u3h_slot) ) {
fprintf(stderr, "bit manipulation: wrong size\r\n");
ret_i = 0;
}
u3h_slot a = 0;
if (u3h_slot_is_null(a) != c3y) {
fprintf(stderr, "bit manipulation: nullity\r\n");
ret_i = 0;
}
a = u3h_noun_be_warm(a);
if (u3h_slot_is_warm(a) != c3y) {
fprintf(stderr, "bit manipulation: warmth\r\n");
ret_i = 0;
}
if (u3h_slot_is_null(a) != c3n) {
fprintf(stderr, "bit manipulation: nullity 2\r\n");
ret_i = 0;
}
a = u3h_noun_be_cold(a);
if (u3h_slot_is_warm(a) != c3n) {
fprintf(stderr, "bit manipulation: coldness\r\n");
ret_i = 0;
}
return ret_i;
}
/* _test_no_cache(): test a hashtable without caching.
*/
static c3_i
_test_no_cache(void)
{
c3_i ret_i = 1;
c3_w max_w = 1000;
c3_w i_w;
u3p(u3h_root) har_p = u3h_new();
for ( i_w = 0; i_w < max_w; i_w++ ) {
u3h_put(har_p, i_w, i_w + max_w);
}
for ( i_w = 0; i_w < max_w; i_w++ ) {
if ( (i_w + max_w) != u3h_get(har_p, i_w) ) {
fprintf(stderr, "bit test_no_cache: get failed\r\n");
ret_i = 0;
}
}
u3h_free(har_p);
return ret_i;
}
/* _test_skip_slot():
*/
static c3_i
_test_skip_slot(void)
{
c3_i ret_i = 1;
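  // the root table indexes 6 mug bits (64 slots) starting at bit 25, while
  // child nodes index 5 bits (32 slots); overflowing a child slot carries
  // into the bits above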
// root table
{
c3_w mug_w = 0x17 << 25;
c3_w res_w = _ch_skip_slot(mug_w, 25);
if ( (0x18 << 25) != res_w ) {
fprintf(stderr, "bit skip_slot (a): failed\r\n");
ret_i = 0;
}
}
{
c3_w mug_w = 63 << 25; // 6 bits, all ones
c3_w res_w = _ch_skip_slot(mug_w, 25);
if ( 0 != res_w ) {
fprintf(stderr, "bit skip_slot (b): failed\r\n");
ret_i = 0;
}
}
// child nodes
{
c3_w mug_w = 17 << 20;
c3_w res_w = _ch_skip_slot(mug_w, 20);
if ( (18 << 20) != res_w ) {
fprintf(stderr, "bit skip_slot (c): failed\r\n");
ret_i = 0;
}
}
{
c3_w mug_w = 31 << 20; // 5 bits, all ones
c3_w res_w = _ch_skip_slot(mug_w, 20);
c3_assert((1 << 25) == res_w);
if ( (1 << 25) != res_w ) {
fprintf(stderr, "bit skip_slot (d): failed\r\n");
ret_i = 0;
}
}
return ret_i;
}
/* _test_cache_trimming(): ensure a caching hashtable removes stale items.
*/
static c3_i
_test_cache_trimming(void)
{
c3_i ret_i = 1;
c3_w max_w = 620;
c3_w i_w;
//u3p(u3h_root) har_p = u3h_new_cache(max_w / 2);
u3p(u3h_root) har_p = u3h_new_cache(max_w / 10 );
u3h_root* har_u = u3to(u3h_root, har_p);
for ( i_w = 0; i_w < max_w; i_w++ ) {
u3h_put(har_p, i_w, i_w + max_w);
}
if ( ( max_w + max_w - 1) != u3h_get(har_p, max_w - 1) ) {
fprintf(stderr, "cache_trimming (a): fail\r\n");
ret_i = 0;
}
if ( ( max_w / 10 ) != har_u->use_w ) {
fprintf(stderr, "cache_trimming (b): fail\r\n");
ret_i = 0;
}
u3h_free(har_p);
return ret_i;
}
/* _test_cache_replace_value():
*/
static c3_i
_test_cache_replace_value(void)
{
c3_i ret_i = 1;
c3_w max_w = 100;
c3_w i_w;
u3p(u3h_root) har_p = u3h_new_cache(max_w);
u3h_root* har_u = u3to(u3h_root, har_p);
for ( i_w = 0; i_w < max_w; i_w++ ) {
u3h_put(har_p, i_w, i_w + max_w);
}
for ( i_w = 0; i_w < max_w; i_w++ ) {
u3h_put(har_p, i_w, i_w + max_w + 1);
}
if ( (2 * max_w) != u3h_get(har_p, max_w - 1) ) {
fprintf(stderr, "cache_replace (a): fail\r\n");
ret_i = 0;
}
if ( max_w != har_u->use_w ) {
fprintf(stderr, "cache_replace (b): fail\r\n");
ret_i = 0;
}
u3h_free(har_p);
return ret_i;
}
static c3_i
_test_hashtable(void)
{
c3_i ret_i = 1;
ret_i &= _test_bit_manipulation();
ret_i &= _test_no_cache();
ret_i &= _test_skip_slot();
ret_i &= _test_cache_trimming();
ret_i &= _test_cache_replace_value();
return ret_i;
}
/* main(): run all test cases.
*/
int
main(int argc, char* argv[])
{
_setup();
if ( !_test_hashtable() ) {
fprintf(stderr, "test_hashtable: failed\r\n");
exit(1);
}
// GC
//
u3m_grab(u3_none);
fprintf(stderr, "test_hashtable: ok\r\n");
return 0;
}
|
475862.c | /*
* RFC 1321 compliant MD5 implementation
*
* Copyright (C) 2006-2015, ARM Limited, All Rights Reserved
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of mbed TLS (https://tls.mbed.org)
*/
/*
* The MD5 algorithm was designed by Ron Rivest in 1991.
*
* http://www.ietf.org/rfc/rfc1321.txt
*/
#if !defined(MBEDTLS_CONFIG_FILE)
#include "mbedtls/config.h"
#else
#include MBEDTLS_CONFIG_FILE
#endif
#if defined(MBEDTLS_MD5_C)
#include "mbedtls/md5.h"
#include <string.h>
#if defined(MBEDTLS_PLATFORM_C)
#include "mbedtls/platform.h"
#else
#include <stdio.h>
#define mbedtls_printf printf
#endif /* MBEDTLS_PLATFORM_C */
#if defined(MBEDTLS_MD5_ALT)
#ifdef MBEDTLS_HW_ALGORITHM_CHANGE_CPU_CLOCK
#include "top.h"
#endif
/* Implementation that should never be optimized out by the compiler */
static void mbedtls_zeroize( void *v, size_t n ) {
volatile unsigned char *p = v; while( n-- ) *p++ = 0;
}
void mbedtls_md5_init( mbedtls_md5_context *ctx )
{
memset( ctx, 0, sizeof( mbedtls_md5_context ) );
}
void mbedtls_md5_free( mbedtls_md5_context *ctx )
{
if( ctx == NULL )
return;
mbedtls_zeroize( ctx, sizeof( mbedtls_md5_context ) );
}
void mbedtls_md5_clone( mbedtls_md5_context *dst,
const mbedtls_md5_context *src )
{
*dst = *src;
}
/*
* MD5 context setup
*/
int mbedtls_md5_starts_ret( mbedtls_md5_context *ctx )
{
hal_md5_status_t status = HAL_MD5_STATUS_ERROR;
#ifdef MBEDTLS_HW_ALGORITHM_CHANGE_CPU_CLOCK
cmnCpuClkSwitchTo96M();
#endif
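/* Retry while the HAL returns -100, treated throughout this file as a
 * transient "engine busy" status (an assumption based on the polling
 * pattern; the vendor HAL headers are not part of this source). */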
do {
status = hal_md5_init(ctx);
if( status == -100 )
hal_gpt_delay_ms(1);
} while( status == -100 );
#ifdef MBEDTLS_HW_ALGORITHM_CHANGE_CPU_CLOCK
cmnCpuClkSwitchTo192M();
#endif
return HAL_MD5_STATUS_OK == status ? 0 : (int)status;
}
#if !defined(MBEDTLS_DEPRECATED_REMOVED)
void mbedtls_md5_starts( mbedtls_md5_context *ctx )
{
mbedtls_md5_starts_ret( ctx );
}
#endif
/*
* MD5 process buffer
*/
int mbedtls_md5_update_ret( mbedtls_md5_context *ctx,
const unsigned char *input,
size_t ilen )
{
hal_md5_status_t status = HAL_MD5_STATUS_ERROR;
#ifdef MBEDTLS_HW_ALGORITHM_CHANGE_CPU_CLOCK
cmnCpuClkSwitchTo96M();
#endif
do {
status = hal_md5_append( ctx, (uint8_t*)input, (uint32_t)ilen);
if( status == -100 )
hal_gpt_delay_ms(1);
} while( status == -100 );
#ifdef MBEDTLS_HW_ALGORITHM_CHANGE_CPU_CLOCK
cmnCpuClkSwitchTo192M();
#endif
return HAL_MD5_STATUS_OK == status ? 0 : (int)status;
}
#if !defined(MBEDTLS_DEPRECATED_REMOVED)
void mbedtls_md5_update( mbedtls_md5_context *ctx,
const unsigned char *input,
size_t ilen )
{
mbedtls_md5_update_ret( ctx, input, ilen );
}
#endif
/*
* MD5 final digest
*/
int mbedtls_md5_finish_ret( mbedtls_md5_context *ctx,
unsigned char output[16] )
{
hal_md5_status_t status = HAL_MD5_STATUS_ERROR;
#ifdef MBEDTLS_HW_ALGORITHM_CHANGE_CPU_CLOCK
cmnCpuClkSwitchTo96M();
#endif
do {
status = hal_md5_end( ctx, (uint8_t*)output);
if( status == -100 )
hal_gpt_delay_ms(1);
} while( status == -100 );
#ifdef MBEDTLS_HW_ALGORITHM_CHANGE_CPU_CLOCK
cmnCpuClkSwitchTo192M();
#endif
return HAL_MD5_STATUS_OK == status ? 0 : (int)status;
}
#if !defined(MBEDTLS_DEPRECATED_REMOVED)
void mbedtls_md5_finish( mbedtls_md5_context *ctx,
unsigned char output[16] )
{
mbedtls_md5_finish_ret( ctx, output );
}
#endif
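/* Note (annotation, not from the upstream source): in this hardware ALT
 * build the single-block process entry point is emulated by running a full
 * starts/update/finish over the 64-byte block; the digest is written to a
 * local buffer and only the status code is propagated to the caller. */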
int mbedtls_internal_md5_process( mbedtls_md5_context *ctx,
const unsigned char data[64] )
{
unsigned char output[16] = {0};
int ret = -1;
ret = mbedtls_md5_starts_ret(ctx);
if (0 != ret)
{
return ret;
}
ret = mbedtls_md5_update_ret(ctx, data, 64);
if (0 != ret)
{
return ret;
}
ret = mbedtls_md5_finish_ret(ctx, output);
return ret;
}
#if !defined(MBEDTLS_DEPRECATED_REMOVED)
void mbedtls_md5_process( mbedtls_md5_context *ctx,
const unsigned char data[64] )
{
mbedtls_internal_md5_process( ctx, data );
}
#endif
#endif /* MBEDTLS_MD5_ALT */
#endif /* MBEDTLS_MD5_C */
|
83763.c | /* libmypaint - The MyPaint Brush Library
* Copyright (C) 2007-2008 Martin Renold <[email protected]>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef MAPPING_C
#define MAPPING_C
#include "config.h"
#include <stdlib.h>
#include <assert.h>
#if MYPAINT_CONFIG_USE_GLIB
#include <glib.h>
#endif
#include "mypaint-mapping.h"
#include "helpers.h"
// user-defined mappings
// (the curves you can edit in the brush settings)
typedef struct {
// a set of control points (stepwise linear)
float xvalues[64];
float yvalues[64];
int n;
} ControlPoints;
struct MyPaintMapping {
float base_value; // FIXME: accessed directly from mypaint-brush.c
int inputs;
ControlPoints * pointsList; // one for each input
int inputs_used; // optimization
};
MyPaintMapping *
mypaint_mapping_new(int inputs_)
{
MyPaintMapping *self = (MyPaintMapping *)malloc(sizeof(MyPaintMapping));
self->inputs = inputs_;
self->pointsList = (ControlPoints *)malloc(sizeof(ControlPoints)*self->inputs);
int i = 0;
for (i=0; i<self->inputs; i++) self->pointsList[i].n = 0;
self->inputs_used = 0;
self->base_value = 0;
return self;
}
void
mypaint_mapping_free(MyPaintMapping *self)
{
free(self->pointsList);
free(self);
}
float mypaint_mapping_get_base_value(MyPaintMapping *self)
{
return self->base_value;
}
void mypaint_mapping_set_base_value(MyPaintMapping *self, float value)
{
self->base_value = value;
}
void mypaint_mapping_set_n (MyPaintMapping * self, int input, int n)
{
assert (input >= 0 && input < self->inputs);
assert (n >= 0 && n <= 64);
assert (n != 1); // cannot build a linear mapping with only one point
ControlPoints * p = self->pointsList + input;
if (n != 0 && p->n == 0) self->inputs_used++;
if (n == 0 && p->n != 0) self->inputs_used--;
assert(self->inputs_used >= 0);
assert(self->inputs_used <= self->inputs);
p->n = n;
}
int mypaint_mapping_get_n (MyPaintMapping * self, int input)
{
assert (input >= 0 && input < self->inputs);
ControlPoints * p = self->pointsList + input;
return p->n;
}
void mypaint_mapping_set_point (MyPaintMapping * self, int input, int index, float x, float y)
{
assert (input >= 0 && input < self->inputs);
assert (index >= 0 && index < 64);
ControlPoints * p = self->pointsList + input;
assert (index < p->n);
if (index > 0) {
assert (x >= p->xvalues[index-1]);
}
p->xvalues[index] = x;
p->yvalues[index] = y;
}
void mypaint_mapping_get_point (MyPaintMapping * self, int input, int index, float *x, float *y)
{
assert (input >= 0 && input < self->inputs);
assert (index >= 0 && index < 64);
ControlPoints * p = self->pointsList + input;
assert (index < p->n);
*x = p->xvalues[index];
*y = p->yvalues[index];
}
gboolean mypaint_mapping_is_constant(MyPaintMapping * self)
{
return self->inputs_used == 0;
}
int
mypaint_mapping_get_inputs_used_n(MyPaintMapping *self)
{
return self->inputs_used;
}
float mypaint_mapping_calculate (MyPaintMapping * self, float * data)
{
int j;
float result;
result = self->base_value;
// constant mapping (common case)
if (self->inputs_used == 0) return result;
for (j=0; j<self->inputs; j++) {
ControlPoints * p = self->pointsList + j;
if (p->n) {
float x, y;
x = data[j];
// find the segment with the slope that we need to use
float x0, y0, x1, y1;
x0 = p->xvalues[0];
y0 = p->yvalues[0];
x1 = p->xvalues[1];
y1 = p->yvalues[1];
int i;
for (i=2; i<p->n && x>x1; i++) {
x0 = x1;
y0 = y1;
x1 = p->xvalues[i];
y1 = p->yvalues[i];
}
if (x0 == x1 || y0 == y1) {
y = y0;
} else {
// linear interpolation
y = (y1*(x - x0) + y0*(x1 - x)) / (x1 - x0);
}
result += y;
}
}
return result;
}
// used in mypaint itself for the global pressure mapping
float mypaint_mapping_calculate_single_input (MyPaintMapping * self, float input)
{
assert(self->inputs == 1);
return mypaint_mapping_calculate(self, &input);
}
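/*
 * Usage sketch (illustrative, not part of libmypaint): build a single-input
 * mapping with a two-point linear curve and evaluate it.  With base value 0.5
 * and the points (0,0) and (1,1), an input of 0.25 yields 0.75 (base 0.5 plus
 * interpolated 0.25).
 *
 *   MyPaintMapping *m = mypaint_mapping_new(1);
 *   mypaint_mapping_set_base_value(m, 0.5f);
 *   mypaint_mapping_set_n(m, 0, 2);
 *   mypaint_mapping_set_point(m, 0, 0, 0.0f, 0.0f);
 *   mypaint_mapping_set_point(m, 0, 1, 1.0f, 1.0f);
 *   float y = mypaint_mapping_calculate_single_input(m, 0.25f);
 *   mypaint_mapping_free(m);
 */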
#endif //MAPPING_C
|
793207.c | /* $OpenBSD: bounce.c,v 1.64 2014/04/19 17:27:40 gilles Exp $ */
/*
* Copyright (c) 2009 Gilles Chehade <[email protected]>
* Copyright (c) 2009 Jacek Masiulaniec <[email protected]>
* Copyright (c) 2012 Eric Faurot <[email protected]>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "includes.h"
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/tree.h>
#include <sys/socket.h>
#include <err.h>
#include <errno.h>
#include <event.h>
#include <imsg.h>
#include <inttypes.h>
#include <pwd.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <limits.h>
#include "smtpd.h"
#include "log.h"
#define BOUNCE_MAXRUN 2
#define BOUNCE_HIWAT 65535
enum {
BOUNCE_EHLO,
BOUNCE_MAIL,
BOUNCE_RCPT,
BOUNCE_DATA,
BOUNCE_DATA_NOTICE,
BOUNCE_DATA_MESSAGE,
BOUNCE_DATA_END,
BOUNCE_QUIT,
BOUNCE_CLOSE,
};
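/* These states drive the client side of the SMTP dialogue in bounce_next():
 * EHLO -> MAIL FROM -> RCPT TO -> DATA -> notice part -> original message ->
 * end of data -> QUIT (descriptive note added for clarity). */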
struct bounce_envelope {
TAILQ_ENTRY(bounce_envelope) entry;
uint64_t id;
struct mailaddr dest;
char *report;
uint8_t esc_class;
uint8_t esc_code;
};
struct bounce_message {
SPLAY_ENTRY(bounce_message) sp_entry;
TAILQ_ENTRY(bounce_message) entry;
uint32_t msgid;
struct delivery_bounce bounce;
char *smtpname;
char *to;
time_t timeout;
TAILQ_HEAD(, bounce_envelope) envelopes;
};
struct bounce_session {
char *smtpname;
struct bounce_message *msg;
FILE *msgfp;
int state;
struct iobuf iobuf;
struct io io;
uint64_t boundary;
};
SPLAY_HEAD(bounce_message_tree, bounce_message);
static int bounce_message_cmp(const struct bounce_message *,
const struct bounce_message *);
SPLAY_PROTOTYPE(bounce_message_tree, bounce_message, sp_entry,
bounce_message_cmp);
static void bounce_drain(void);
static void bounce_send(struct bounce_session *, const char *, ...);
static int bounce_next_message(struct bounce_session *);
static int bounce_next(struct bounce_session *);
static void bounce_delivery(struct bounce_message *, int, const char *);
static void bounce_status(struct bounce_session *, const char *, ...);
static void bounce_io(struct io *, int);
static void bounce_timeout(int, short, void *);
static void bounce_free(struct bounce_session *);
static const char *bounce_strtype(enum bounce_type);
static const char *action_str(const struct delivery_bounce *);
static struct tree wait_fd;
static struct bounce_message_tree messages;
static TAILQ_HEAD(, bounce_message) pending;
static int nmessage = 0;
static int running = 0;
static struct event ev_timer;
static void
bounce_init(void)
{
static int init = 0;
if (init == 0) {
TAILQ_INIT(&pending);
SPLAY_INIT(&messages);
tree_init(&wait_fd);
evtimer_set(&ev_timer, bounce_timeout, NULL);
init = 1;
}
}
void
bounce_add(uint64_t evpid)
{
char buf[LINE_MAX], *line;
struct envelope evp;
struct bounce_message key, *msg;
struct bounce_envelope *be;
bounce_init();
if (queue_envelope_load(evpid, &evp) == 0) {
m_create(p_scheduler, IMSG_QUEUE_DELIVERY_PERMFAIL, 0, 0, -1);
m_add_evpid(p_scheduler, evpid);
m_close(p_scheduler);
return;
}
if (evp.type != D_BOUNCE)
errx(1, "bounce: evp:%016" PRIx64 " is not of type D_BOUNCE!",
evp.id);
key.msgid = evpid_to_msgid(evpid);
key.bounce = evp.agent.bounce;
key.smtpname = evp.smtpname;
if (evp.errorline[0] == '4')
key.bounce.type = B_WARNING;
else if (evp.errorline[0] == '5')
key.bounce.type = B_ERROR;
else
key.bounce.type = B_DSN;
key.bounce.dsn_ret = evp.dsn_ret;
key.bounce.expire = evp.expire;
msg = SPLAY_FIND(bounce_message_tree, &messages, &key);
if (msg == NULL) {
msg = xcalloc(1, sizeof(*msg), "bounce_add");
msg->msgid = key.msgid;
msg->bounce = key.bounce;
TAILQ_INIT(&msg->envelopes);
msg->smtpname = xstrdup(evp.smtpname, "bounce_add");
(void)snprintf(buf, sizeof(buf), "%s@%s", evp.sender.user,
evp.sender.domain);
msg->to = xstrdup(buf, "bounce_add");
nmessage += 1;
SPLAY_INSERT(bounce_message_tree, &messages, msg);
log_debug("debug: bounce: new message %08" PRIx32,
msg->msgid);
stat_increment("bounce.message", 1);
} else
TAILQ_REMOVE(&pending, msg, entry);
line = evp.errorline;
if (strlen(line) > 4 && (*line == '1' || *line == '6'))
line += 4;
(void)snprintf(buf, sizeof(buf), "%s@%s: %s\n", evp.dest.user,
evp.dest.domain, line);
be = xmalloc(sizeof *be, "bounce_add");
be->id = evpid;
be->report = xstrdup(buf, "bounce_add");
(void)strlcpy(be->dest.user, evp.dest.user, sizeof(be->dest.user));
(void)strlcpy(be->dest.domain, evp.dest.domain,
sizeof(be->dest.domain));
be->esc_class = evp.esc_class;
be->esc_code = evp.esc_code;
TAILQ_INSERT_TAIL(&msg->envelopes, be, entry);
buf[strcspn(buf, "\n")] = '\0';
log_debug("debug: bounce: adding report %16"PRIx64": %s", be->id, buf);
msg->timeout = time(NULL) + 1;
TAILQ_INSERT_TAIL(&pending, msg, entry);
stat_increment("bounce.envelope", 1);
bounce_drain();
}
void
bounce_fd(int fd)
{
struct bounce_session *s;
struct bounce_message *msg;
log_debug("debug: bounce: got enqueue socket %d", fd);
if (fd == -1 || TAILQ_EMPTY(&pending)) {
log_debug("debug: bounce: cancelling");
if (fd != -1)
close(fd);
running -= 1;
bounce_drain();
return;
}
msg = TAILQ_FIRST(&pending);
s = xcalloc(1, sizeof(*s), "bounce_fd");
s->smtpname = xstrdup(msg->smtpname, "bounce_fd");
s->state = BOUNCE_EHLO;
iobuf_xinit(&s->iobuf, 0, 0, "bounce_run");
io_init(&s->io, fd, s, bounce_io, &s->iobuf);
io_set_timeout(&s->io, 30000);
io_set_read(&s->io);
s->boundary = generate_uid();
log_debug("debug: bounce: new session %p", s);
stat_increment("bounce.session", 1);
}
static void
bounce_timeout(int fd, short ev, void *arg)
{
log_debug("debug: bounce: timeout");
bounce_drain();
}
static void
bounce_drain()
{
struct bounce_message *msg;
struct timeval tv;
time_t t;
log_debug("debug: bounce: drain: nmessage=%d running=%d",
nmessage, running);
while (1) {
if (running >= BOUNCE_MAXRUN) {
log_debug("debug: bounce: max session reached");
return;
}
if (nmessage == 0) {
log_debug("debug: bounce: no more messages");
return;
}
if (running >= nmessage) {
log_debug("debug: bounce: enough sessions running");
return;
}
if ((msg = TAILQ_FIRST(&pending)) == NULL) {
log_debug("debug: bounce: no more pending messages");
return;
}
t = time(NULL);
if (msg->timeout > t) {
log_debug("debug: bounce: next message not ready yet");
if (!evtimer_pending(&ev_timer, NULL)) {
log_debug("debug: bounce: setting timer");
tv.tv_sec = msg->timeout - t;
tv.tv_usec = 0;
evtimer_add(&ev_timer, &tv);
}
return;
}
log_debug("debug: bounce: requesting new enqueue socket...");
m_compose(p_pony, IMSG_QUEUE_SMTP_SESSION, 0, 0, -1, NULL, 0);
running += 1;
}
}
static void
bounce_send(struct bounce_session *s, const char *fmt, ...)
{
va_list ap;
char *p;
int len;
va_start(ap, fmt);
if ((len = vasprintf(&p, fmt, ap)) == -1)
fatal("bounce: vasprintf");
va_end(ap);
log_trace(TRACE_BOUNCE, "bounce: %p: >>> %s", s, p);
iobuf_xfqueue(&s->iobuf, "bounce_send", "%s\n", p);
free(p);
}
static const char *
bounce_duration(long long int d)
{
static char buf[32];
if (d < 60) {
(void)snprintf(buf, sizeof buf, "%lld second%s", d,
(d == 1)?"":"s");
} else if (d < 3600) {
d = d / 60;
(void)snprintf(buf, sizeof buf, "%lld minute%s", d,
(d == 1)?"":"s");
}
else if (d < 3600 * 24) {
d = d / 3600;
(void)snprintf(buf, sizeof buf, "%lld hour%s", d,
(d == 1)?"":"s");
}
else {
d = d / (3600 * 24);
(void)snprintf(buf, sizeof buf, "%lld day%s", d,
(d == 1)?"":"s");
}
return (buf);
}
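/* Examples of the strings produced above (annotation added for clarity):
 * bounce_duration(1) -> "1 second", bounce_duration(90) -> "1 minute",
 * bounce_duration(7200) -> "2 hours", bounce_duration(172800) -> "2 days". */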
#define NOTICE_INTRO \
" Hi!\n\n" \
" This is the MAILER-DAEMON, please DO NOT REPLY to this e-mail.\n"
const char *notice_error =
" An error has occurred while attempting to deliver a message for\n"
" the following list of recipients:\n\n";
const char *notice_warning =
" A message is delayed for more than %s for the following\n"
" list of recipients:\n\n";
const char *notice_warning2 =
" Please note that this is only a temporary failure report.\n"
" The message is kept in the queue for up to %s.\n"
" You DO NOT NEED to re-send the message to these recipients.\n\n";
const char *notice_success =
" Your message was successfully delivered to these recipients.\n\n";
const char *notice_relay =
" Your message was relayed to these recipients.\n\n";
static int
bounce_next_message(struct bounce_session *s)
{
struct bounce_message *msg;
char buf[LINE_MAX];
int fd;
time_t now;
again:
now = time(NULL);
TAILQ_FOREACH(msg, &pending, entry) {
if (msg->timeout > now)
continue;
if (strcmp(msg->smtpname, s->smtpname))
continue;
break;
}
if (msg == NULL)
return (0);
TAILQ_REMOVE(&pending, msg, entry);
SPLAY_REMOVE(bounce_message_tree, &messages, msg);
if ((fd = queue_message_fd_r(msg->msgid)) == -1) {
bounce_delivery(msg, IMSG_QUEUE_DELIVERY_TEMPFAIL,
"Could not open message fd");
goto again;
}
if ((s->msgfp = fdopen(fd, "r")) == NULL) {
(void)snprintf(buf, sizeof(buf), "fdopen: %s", strerror(errno));
log_warn("warn: bounce: fdopen");
close(fd);
bounce_delivery(msg, IMSG_QUEUE_DELIVERY_TEMPFAIL, buf);
goto again;
}
s->msg = msg;
return (1);
}
static int
bounce_next(struct bounce_session *s)
{
struct bounce_envelope *evp;
char *line;
size_t len, n;
switch (s->state) {
case BOUNCE_EHLO:
bounce_send(s, "EHLO %s", s->smtpname);
s->state = BOUNCE_MAIL;
break;
case BOUNCE_MAIL:
case BOUNCE_DATA_END:
log_debug("debug: bounce: %p: getting next message...", s);
if (bounce_next_message(s) == 0) {
log_debug("debug: bounce: %p: no more messages", s);
bounce_send(s, "QUIT");
s->state = BOUNCE_CLOSE;
break;
}
log_debug("debug: bounce: %p: found message %08"PRIx32,
s, s->msg->msgid);
bounce_send(s, "MAIL FROM: <>");
s->state = BOUNCE_RCPT;
break;
case BOUNCE_RCPT:
bounce_send(s, "RCPT TO: <%s>", s->msg->to);
s->state = BOUNCE_DATA;
break;
case BOUNCE_DATA:
bounce_send(s, "DATA");
s->state = BOUNCE_DATA_NOTICE;
break;
case BOUNCE_DATA_NOTICE:
/* Construct an appropriate notice. */
iobuf_xfqueue(&s->iobuf, "bounce_next: HEADER",
"Subject: Delivery status notification: %s\n"
"From: Mailer Daemon <MAILER-DAEMON@%s>\n"
"To: %s\n"
"Date: %s\n"
"\n"
"This is a MIME-encapsulated message.\n"
"\n",
bounce_strtype(s->msg->bounce.type),
s->smtpname,
s->msg->to,
time_to_text(time(NULL)));
iobuf_xfqueue(&s->iobuf, "bounce_next: BODY",
"--%16" PRIu64 "/%s\n"
"Content-Description: Notification\n"
"Content-Type: text/plain; charset=us-ascii\n"
"\n"
NOTICE_INTRO
"\n",
s->boundary, s->smtpname);
switch (s->msg->bounce.type) {
case B_ERROR:
iobuf_xfqueue(&s->iobuf, "bounce_next: BODY",
notice_error);
break;
case B_WARNING:
iobuf_xfqueue(&s->iobuf, "bounce_next: BODY",
notice_warning,
bounce_duration(s->msg->bounce.delay));
break;
case B_DSN:
iobuf_xfqueue(&s->iobuf, "bounce_next: BODY",
s->msg->bounce.mta_without_dsn ?
notice_relay : notice_success);
break;
default:
log_warn("warn: bounce: unknown bounce_type");
}
TAILQ_FOREACH(evp, &s->msg->envelopes, entry) {
iobuf_xfqueue(&s->iobuf,
"bounce_next: DATA_NOTICE",
"%s", evp->report);
}
iobuf_xfqueue(&s->iobuf, "bounce_next: DATA_NOTICE", "\n");
if (s->msg->bounce.type == B_WARNING)
iobuf_xfqueue(&s->iobuf, "bounce_next: BODY",
notice_warning2,
bounce_duration(s->msg->bounce.expire));
iobuf_xfqueue(&s->iobuf, "bounce_next: DATA_NOTICE",
" Below is a copy of the original message:\n"
"\n");
iobuf_xfqueue(&s->iobuf, "bounce_next: BODY",
"--%16" PRIu64 "/%s\n"
"Content-Description: Delivery Report\n"
"Content-Type: message/delivery-status\n"
"\n",
s->boundary, s->smtpname);
iobuf_xfqueue(&s->iobuf, "bounce_next: BODY",
"Reporting-MTA: dns; %s\n"
"Arrival-Date: %s\n"
"\n",
s->smtpname,
time_to_text(time(NULL)));
TAILQ_FOREACH(evp, &s->msg->envelopes, entry) {
iobuf_xfqueue(&s->iobuf, "bounce_next: BODY",
"Final-Recipient: rfc822; %s@%s\n"
"Action: %s\n"
"Status: %s\n"
"Diagnostic-Code: smtp; %s\n"
"\n",
evp->dest.user,
evp->dest.domain,
action_str(&s->msg->bounce),
esc_code(evp->esc_class, evp->esc_code),
esc_description(evp->esc_code));
}
log_trace(TRACE_BOUNCE, "bounce: %p: >>> [... %zu bytes ...]",
s, iobuf_queued(&s->iobuf));
s->state = BOUNCE_DATA_MESSAGE;
break;
case BOUNCE_DATA_MESSAGE:
iobuf_xfqueue(&s->iobuf, "bounce_next: BODY",
"--%16" PRIu64 "/%s\n"
"Content-Type: message/rfc822\n"
"\n",
s->boundary, s->smtpname);
n = iobuf_queued(&s->iobuf);
while (iobuf_queued(&s->iobuf) < BOUNCE_HIWAT) {
line = fgetln(s->msgfp, &len);
if (line == NULL)
break;
if (len == 1 && line[0] == '\n' && /* end of headers */
s->msg->bounce.type == B_DSN &&
s->msg->bounce.dsn_ret == DSN_RETHDRS) {
fclose(s->msgfp);
s->msgfp = NULL;
iobuf_xfqueue(&s->iobuf, "bounce_next: BODY",
"\n--%16" PRIu64 "/%s--\n", s->boundary,
s->smtpname);
bounce_send(s, ".");
s->state = BOUNCE_DATA_END;
return (0);
}
line[len - 1] = '\0';
iobuf_xfqueue(&s->iobuf,
"bounce_next: DATA_MESSAGE", "%s%s\n",
(len == 2 && line[0] == '.') ? "." : "", line);
}
if (ferror(s->msgfp)) {
fclose(s->msgfp);
s->msgfp = NULL;
bounce_delivery(s->msg, IMSG_QUEUE_DELIVERY_TEMPFAIL,
"Error reading message");
s->msg = NULL;
return (-1);
}
iobuf_xfqueue(&s->iobuf, "bounce_next: BODY",
"\n--%16" PRIu64 "/%s--\n", s->boundary, s->smtpname);
log_trace(TRACE_BOUNCE, "bounce: %p: >>> [... %zu bytes ...]",
s, iobuf_queued(&s->iobuf) - n);
if (feof(s->msgfp)) {
fclose(s->msgfp);
s->msgfp = NULL;
bounce_send(s, ".");
s->state = BOUNCE_DATA_END;
}
break;
case BOUNCE_QUIT:
bounce_send(s, "QUIT");
s->state = BOUNCE_CLOSE;
break;
default:
fatalx("bounce: bad state");
}
return (0);
}
static void
bounce_delivery(struct bounce_message *msg, int delivery, const char *status)
{
struct bounce_envelope *be;
struct envelope evp;
size_t n;
const char *f;
n = 0;
while ((be = TAILQ_FIRST(&msg->envelopes))) {
if (delivery == IMSG_QUEUE_DELIVERY_TEMPFAIL) {
if (queue_envelope_load(be->id, &evp) == 0) {
fatalx("could not reload envelope!");
}
evp.retry++;
evp.lasttry = msg->timeout;
envelope_set_errormsg(&evp, "%s", status);
queue_envelope_update(&evp);
m_create(p_scheduler, delivery, 0, 0, -1);
m_add_envelope(p_scheduler, &evp);
m_close(p_scheduler);
} else {
m_create(p_scheduler, delivery, 0, 0, -1);
m_add_evpid(p_scheduler, be->id);
m_close(p_scheduler);
queue_envelope_delete(be->id);
}
TAILQ_REMOVE(&msg->envelopes, be, entry);
free(be->report);
free(be);
n += 1;
}
if (delivery == IMSG_QUEUE_DELIVERY_TEMPFAIL)
f = "TempFail";
else if (delivery == IMSG_QUEUE_DELIVERY_PERMFAIL)
f = "PermFail";
else
f = NULL;
if (f)
log_warnx("warn: %s injecting failure report on message %08"
PRIx32 " to <%s> for %zu envelope%s: %s",
f, msg->msgid, msg->to, n, n > 1 ? "s":"", status);
nmessage -= 1;
stat_decrement("bounce.message", 1);
stat_decrement("bounce.envelope", n);
free(msg->smtpname);
free(msg->to);
free(msg);
}
static void
bounce_status(struct bounce_session *s, const char *fmt, ...)
{
va_list ap;
char *status;
int len, delivery;
/* Ignore if there is no message */
if (s->msg == NULL)
return;
va_start(ap, fmt);
if ((len = vasprintf(&status, fmt, ap)) == -1)
fatal("bounce: vasprintf");
va_end(ap);
if (*status == '2')
delivery = IMSG_QUEUE_DELIVERY_OK;
else if (*status == '5' || *status == '6')
delivery = IMSG_QUEUE_DELIVERY_PERMFAIL;
else
delivery = IMSG_QUEUE_DELIVERY_TEMPFAIL;
bounce_delivery(s->msg, delivery, status);
s->msg = NULL;
if (s->msgfp)
fclose(s->msgfp);
free(status);
}
static void
bounce_free(struct bounce_session *s)
{
log_debug("debug: bounce: %p: deleting session", s);
iobuf_clear(&s->iobuf);
io_clear(&s->io);
free(s->smtpname);
free(s);
running -= 1;
stat_decrement("bounce.session", 1);
bounce_drain();
}
static void
bounce_io(struct io *io, int evt)
{
struct bounce_session *s = io->arg;
const char *error;
char *line, *msg;
int cont;
size_t len;
log_trace(TRACE_IO, "bounce: %p: %s %s", s, io_strevent(evt),
io_strio(io));
switch (evt) {
case IO_DATAIN:
nextline:
line = iobuf_getline(&s->iobuf, &len);
if (line == NULL && iobuf_len(&s->iobuf) >= LINE_MAX) {
bounce_status(s, "Input too long");
bounce_free(s);
return;
}
if (line == NULL) {
iobuf_normalize(&s->iobuf);
break;
}
log_trace(TRACE_BOUNCE, "bounce: %p: <<< %s", s, line);
if ((error = parse_smtp_response(line, len, &msg, &cont))) {
bounce_status(s, "Bad response: %s", error);
bounce_free(s);
return;
}
if (cont)
goto nextline;
if (s->state == BOUNCE_CLOSE) {
bounce_free(s);
return;
}
if (line[0] != '2' && line[0] != '3') { /* fail */
bounce_status(s, "%s", line);
s->state = BOUNCE_QUIT;
} else if (s->state == BOUNCE_DATA_END) { /* accepted */
bounce_status(s, "%s", line);
}
if (bounce_next(s) == -1) {
bounce_free(s);
return;
}
io_set_write(io);
break;
case IO_LOWAT:
if (s->state == BOUNCE_DATA_MESSAGE)
if (bounce_next(s) == -1) {
bounce_free(s);
return;
}
if (iobuf_queued(&s->iobuf) == 0)
io_set_read(io);
break;
default:
bounce_status(s, "442 i/o error %d", evt);
bounce_free(s);
break;
}
}
static int
bounce_message_cmp(const struct bounce_message *a,
const struct bounce_message *b)
{
int r;
if (a->msgid < b->msgid)
return (-1);
if (a->msgid > b->msgid)
return (1);
if ((r = strcmp(a->smtpname, b->smtpname)))
return (r);
return memcmp(&a->bounce, &b->bounce, sizeof (a->bounce));
}
static const char *
action_str(const struct delivery_bounce *b)
{
switch (b->type) {
case B_ERROR:
return ("failed");
case B_WARNING:
return ("delayed");
case B_DSN:
if (b->mta_without_dsn)
return ("relayed");
return ("delivered");
default:
log_warn("warn: bounce: unknown bounce_type");
return ("");
}
}
static const char *
bounce_strtype(enum bounce_type t)
{
switch (t) {
case B_ERROR:
return ("error");
case B_WARNING:
return ("warning");
case B_DSN:
return ("dsn");
default:
log_warn("warn: bounce: unknown bounce_type");
return ("");
}
}
SPLAY_GENERATE(bounce_message_tree, bounce_message, sp_entry,
bounce_message_cmp);
|
969096.c | /*******************************************************************************
** ICAP.c implements the Interaction Capping criterion from
**
** "Machine Learning Based on Attribute Interactions"
** A. Jakulin, PhD Thesis (2005)
**
** Initial Version - 19/08/2010
** Updated - 12/02/2013 - patched the use of DBL_MAX
** 22/02/2014 - Moved feature index increment to mex code.
** 22/02/2014 - Patched calloc.
** 12/03/2016 - Changed initial value of maxMI to -1.0 to prevent segfaults when I(X;Y) = 0.0 for all X.
** 17/12/2016 - Added feature scores.
**
** Author - Adam Pocock
**
** Part of the FEAture Selection Toolbox (FEAST), please reference
** "Conditional Likelihood Maximisation: A Unifying Framework for Information
** Theoretic Feature Selection"
** G. Brown, A. Pocock, M.-J. Zhao, M. Lujan
** Journal of Machine Learning Research (JMLR), 2012
**
** Please check www.github.com/Craigacp/FEAST for updates.
**
** Copyright (c) 2010-2017, A. Pocock, G. Brown, The University of Manchester
** All rights reserved.
**
** Redistribution and use in source and binary forms, with or without modification,
** are permitted provided that the following conditions are met:
**
** - Redistributions of source code must retain the above copyright notice, this
** list of conditions and the following disclaimer.
** - Redistributions in binary form must reproduce the above copyright notice,
** this list of conditions and the following disclaimer in the documentation
** and/or other materials provided with the distribution.
** - Neither the name of The University of Manchester nor the names of its
** contributors may be used to endorse or promote products derived from this
** software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
** DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
** ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
** ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**
*******************************************************************************/
#include "FEAST/FSAlgorithms.h"
#include "FEAST/FSToolbox.h"
/* MIToolbox includes */
#include "MIToolbox/ArrayOperations.h"
#include "MIToolbox/MutualInformation.h"
uint* ICAP(uint k, uint noOfSamples, uint noOfFeatures, uint **featureMatrix, uint *classColumn, uint *outputFeatures, double *featureScores) {
char *selectedFeatures = (char *) checkedCalloc(noOfFeatures,sizeof(char));
/*holds the class MI values*/
double *classMI = (double *) checkedCalloc(noOfFeatures,sizeof(double));
/*holds the intra feature MI values*/
int sizeOfMatrix = k*noOfFeatures;
double *featureInteractionMatrix = (double *) checkedCalloc(sizeOfMatrix,sizeof(double));
/*Changed to ensure it always picks a feature*/
double maxMI = -1.0;
int maxMICounter = -1;
double score, currentScore, mi, cmi;
int currentHighestFeature, arrayPosition;
int i, j, m;
for (i = 0; i < sizeOfMatrix; i++) {
featureInteractionMatrix[i] = -1;
}/*for featureInteractionMatrix - blank to -1*/
/*SETUP COMPLETE*/
/*Algorithm starts here*/
for (i = 0; i < noOfFeatures; i++) {
classMI[i] = calcMutualInformation(featureMatrix[i], classColumn, noOfSamples);
if (classMI[i] > maxMI) {
maxMI = classMI[i];
maxMICounter = i;
}/*if bigger than current maximum*/
}/*for noOfFeatures - filling classMI*/
selectedFeatures[maxMICounter] = 1;
outputFeatures[0] = maxMICounter;
featureScores[0] = maxMI;
/*************
** Now we have populated the classMI array, and selected the highest
** MI feature as the first output feature
*************/
for (i = 1; i < k; i++) {
/**********************************************************************
** to ensure it selects some features
**if this is zero then it will not pick features where the redundancy is greater than the
**relevance
**********************************************************************/
score = -DBL_MAX;
currentHighestFeature = 0;
currentScore = 0.0;
for (j = 0; j < noOfFeatures; j++) {
/*if we haven't selected j*/
if (!selectedFeatures[j]) {
currentScore = classMI[j];
for (m = 0; m < i; m++) {
arrayPosition = m * noOfFeatures + j;
if (featureInteractionMatrix[arrayPosition] == -1) {
/*work out interaction*/
/*double calcMutualInformation(uint *firstVector, uint *secondVector, int vectorLength);*/
mi = calcMutualInformation(featureMatrix[outputFeatures[m]], featureMatrix[j], noOfSamples);
/*double calcConditionalMutualInformation(uint *firstVector, uint *targetVector, uint *conditionVector, int vectorLength);*/
cmi = calcConditionalMutualInformation(featureMatrix[outputFeatures[m]], featureMatrix[j], classColumn, noOfSamples);
featureInteractionMatrix[arrayPosition] = cmi - mi;
}/*if not already known*/
if (featureInteractionMatrix[arrayPosition] < 0) {
currentScore += featureInteractionMatrix[arrayPosition];
}
}/*for the number of already selected features*/
if (currentScore > score) {
score = currentScore;
currentHighestFeature = j;
}
}/*if j is unselected*/
}/*for number of features*/
selectedFeatures[currentHighestFeature] = 1;
outputFeatures[i] = currentHighestFeature;
featureScores[i] = score;
}/*for the number of features to select*/
FREE_FUNC(classMI);
FREE_FUNC(featureInteractionMatrix);
FREE_FUNC(selectedFeatures);
classMI = NULL;
featureInteractionMatrix = NULL;
selectedFeatures = NULL;
return outputFeatures;
}/*ICAP(uint,uint,uint,uint[][],uint[],uint[],double[])*/
double* discICAP(uint k, uint noOfSamples, uint noOfFeatures, double **featureMatrix, double *classColumn, double *outputFeatures, double *featureScores) {
uint *intFeatures = (uint *) checkedCalloc(noOfSamples*noOfFeatures,sizeof(uint));
uint *intClass = (uint *) checkedCalloc(noOfSamples,sizeof(uint));
uint *intOutputs = (uint *) checkedCalloc(k,sizeof(uint));
uint **intFeature2D = (uint**) checkedCalloc(noOfFeatures,sizeof(uint*));
int i;
for (i = 0; i < noOfFeatures; i++) {
intFeature2D[i] = intFeatures + i*noOfSamples;
normaliseArray(featureMatrix[i],intFeature2D[i],noOfSamples);
}
normaliseArray(classColumn,intClass,noOfSamples);
ICAP(k, noOfSamples, noOfFeatures, intFeature2D, intClass, intOutputs, featureScores);
for (i = 0; i < k; i++) {
outputFeatures[i] = intOutputs[i];
}
FREE_FUNC(intFeatures);
FREE_FUNC(intClass);
FREE_FUNC(intOutputs);
FREE_FUNC(intFeature2D);
intFeatures = NULL;
intClass = NULL;
intOutputs = NULL;
intFeature2D = NULL;
return outputFeatures;
}/*discICAP(int,int,int,double[][],double[],double[],double[])*/
|
152779.c | # include<stdio.h>
int main()
{int bhaskar;
// hey welcome you all
printf("Hello BATHUKA BHASKAR");
return 0;
} |
988658.c | /*
* FLV muxer
* Copyright (c) 2003 The FFmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "libavutil/intfloat.h"
#include "libavutil/avassert.h"
#include "avc.h"
#include "avformat.h"
#include "flv.h"
#include "internal.h"
#include "metadata.h"
static const AVCodecTag flv_video_codec_ids[] = {
{ AV_CODEC_ID_FLV1, FLV_CODECID_H263 },
{ AV_CODEC_ID_H263, FLV_CODECID_REALH263 },
{ AV_CODEC_ID_MPEG4, FLV_CODECID_MPEG4 },
{ AV_CODEC_ID_FLASHSV, FLV_CODECID_SCREEN },
{ AV_CODEC_ID_FLASHSV2, FLV_CODECID_SCREEN2 },
{ AV_CODEC_ID_VP6F, FLV_CODECID_VP6 },
{ AV_CODEC_ID_VP6, FLV_CODECID_VP6 },
{ AV_CODEC_ID_VP6A, FLV_CODECID_VP6A },
{ AV_CODEC_ID_H264, FLV_CODECID_H264 },
{ AV_CODEC_ID_NONE, 0 }
};
static const AVCodecTag flv_audio_codec_ids[] = {
{ AV_CODEC_ID_MP3, FLV_CODECID_MP3 >> FLV_AUDIO_CODECID_OFFSET },
{ AV_CODEC_ID_PCM_U8, FLV_CODECID_PCM >> FLV_AUDIO_CODECID_OFFSET },
{ AV_CODEC_ID_PCM_S16BE, FLV_CODECID_PCM >> FLV_AUDIO_CODECID_OFFSET },
{ AV_CODEC_ID_PCM_S16LE, FLV_CODECID_PCM_LE >> FLV_AUDIO_CODECID_OFFSET },
{ AV_CODEC_ID_ADPCM_SWF, FLV_CODECID_ADPCM >> FLV_AUDIO_CODECID_OFFSET },
{ AV_CODEC_ID_AAC, FLV_CODECID_AAC >> FLV_AUDIO_CODECID_OFFSET },
{ AV_CODEC_ID_NELLYMOSER, FLV_CODECID_NELLYMOSER >> FLV_AUDIO_CODECID_OFFSET },
{ AV_CODEC_ID_PCM_MULAW, FLV_CODECID_PCM_MULAW >> FLV_AUDIO_CODECID_OFFSET },
{ AV_CODEC_ID_PCM_ALAW, FLV_CODECID_PCM_ALAW >> FLV_AUDIO_CODECID_OFFSET },
{ AV_CODEC_ID_SPEEX, FLV_CODECID_SPEEX >> FLV_AUDIO_CODECID_OFFSET },
{ AV_CODEC_ID_NONE, 0 }
};
typedef struct FLVContext {
int reserved;
int64_t duration_offset;
int64_t filesize_offset;
int64_t duration;
int64_t delay; ///< first dts delay (needed for AVC & Speex)
} FLVContext;
typedef struct FLVStreamContext {
int64_t last_ts; ///< last timestamp for each stream
} FLVStreamContext;
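/* The FLV audio tag header byte packs, from high to low bits: SoundFormat
 * (4 bits, codec id), SoundRate (2 bits), SoundSize (1 bit) and SoundType
 * (1 bit, mono/stereo) -- annotation added for clarity, matching the
 * FLV_CODECID_* / FLV_SAMPLERATE_* / FLV_SAMPLESSIZE_* / FLV_STEREO masks
 * combined in get_audio_flags() below. */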
static int get_audio_flags(AVFormatContext *s, AVCodecContext *enc)
{
int flags = (enc->bits_per_coded_sample == 16) ? FLV_SAMPLESSIZE_16BIT
: FLV_SAMPLESSIZE_8BIT;
if (enc->codec_id == AV_CODEC_ID_AAC) // specs force these parameters
return FLV_CODECID_AAC | FLV_SAMPLERATE_44100HZ |
FLV_SAMPLESSIZE_16BIT | FLV_STEREO;
else if (enc->codec_id == AV_CODEC_ID_SPEEX) {
if (enc->sample_rate != 16000) {
av_log(s, AV_LOG_ERROR,
"FLV only supports wideband (16kHz) Speex audio\n");
return AVERROR(EINVAL);
}
if (enc->channels != 1) {
av_log(s, AV_LOG_ERROR, "FLV only supports mono Speex audio\n");
return AVERROR(EINVAL);
}
return FLV_CODECID_SPEEX | FLV_SAMPLERATE_11025HZ | FLV_SAMPLESSIZE_16BIT;
} else {
switch (enc->sample_rate) {
case 44100:
flags |= FLV_SAMPLERATE_44100HZ;
break;
case 22050:
flags |= FLV_SAMPLERATE_22050HZ;
break;
case 11025:
flags |= FLV_SAMPLERATE_11025HZ;
break;
case 16000: // nellymoser only
case 8000: // nellymoser only
case 5512: // not MP3
if (enc->codec_id != AV_CODEC_ID_MP3) {
flags |= FLV_SAMPLERATE_SPECIAL;
break;
}
default:
av_log(s, AV_LOG_ERROR,
"FLV does not support sample rate %d, "
"choose from (44100, 22050, 11025)\n", enc->sample_rate);
return AVERROR(EINVAL);
}
}
if (enc->channels > 1)
flags |= FLV_STEREO;
switch (enc->codec_id) {
case AV_CODEC_ID_MP3:
flags |= FLV_CODECID_MP3 | FLV_SAMPLESSIZE_16BIT;
break;
case AV_CODEC_ID_PCM_U8:
flags |= FLV_CODECID_PCM | FLV_SAMPLESSIZE_8BIT;
break;
case AV_CODEC_ID_PCM_S16BE:
flags |= FLV_CODECID_PCM | FLV_SAMPLESSIZE_16BIT;
break;
case AV_CODEC_ID_PCM_S16LE:
flags |= FLV_CODECID_PCM_LE | FLV_SAMPLESSIZE_16BIT;
break;
case AV_CODEC_ID_ADPCM_SWF:
flags |= FLV_CODECID_ADPCM | FLV_SAMPLESSIZE_16BIT;
break;
case AV_CODEC_ID_NELLYMOSER:
if (enc->sample_rate == 8000)
flags |= FLV_CODECID_NELLYMOSER_8KHZ_MONO | FLV_SAMPLESSIZE_16BIT;
else if (enc->sample_rate == 16000)
flags |= FLV_CODECID_NELLYMOSER_16KHZ_MONO | FLV_SAMPLESSIZE_16BIT;
else
flags |= FLV_CODECID_NELLYMOSER | FLV_SAMPLESSIZE_16BIT;
break;
case AV_CODEC_ID_PCM_MULAW:
flags = FLV_CODECID_PCM_MULAW | FLV_SAMPLERATE_SPECIAL | FLV_SAMPLESSIZE_16BIT;
break;
case AV_CODEC_ID_PCM_ALAW:
flags = FLV_CODECID_PCM_ALAW | FLV_SAMPLERATE_SPECIAL | FLV_SAMPLESSIZE_16BIT;
break;
case 0:
flags |= enc->codec_tag << 4;
break;
default:
av_log(s, AV_LOG_ERROR, "Audio codec '%s' not compatible with FLV\n",
avcodec_get_name(enc->codec_id));
return AVERROR(EINVAL);
}
return flags;
}
static void put_amf_string(AVIOContext *pb, const char *str)
{
size_t len = strlen(str);
avio_wb16(pb, len);
avio_write(pb, str, len);
}
static void put_avc_eos_tag(AVIOContext *pb, unsigned ts)
{
avio_w8(pb, FLV_TAG_TYPE_VIDEO);
avio_wb24(pb, 5); /* Tag Data Size */
avio_wb24(pb, ts); /* lower 24 bits of timestamp in ms */
avio_w8(pb, (ts >> 24) & 0x7F); /* MSB of ts in ms */
avio_wb24(pb, 0); /* StreamId = 0 */
avio_w8(pb, 23); /* ub[4] FrameType = 1, ub[4] CodecId = 7 */
avio_w8(pb, 2); /* AVC end of sequence */
avio_wb24(pb, 0); /* Always 0 for AVC EOS. */
avio_wb32(pb, 16); /* Size of FLV tag */
}
static void put_amf_double(AVIOContext *pb, double d)
{
avio_w8(pb, AMF_DATA_TYPE_NUMBER);
avio_wb64(pb, av_double2int(d));
}
static void put_amf_bool(AVIOContext *pb, int b)
{
avio_w8(pb, AMF_DATA_TYPE_BOOL);
avio_w8(pb, !!b);
}
static int flv_write_header(AVFormatContext *s)
{
AVIOContext *pb = s->pb;
FLVContext *flv = s->priv_data;
AVCodecContext *audio_enc = NULL, *video_enc = NULL, *data_enc = NULL;
int i, metadata_count = 0;
double framerate = 0.0;
int64_t metadata_size_pos, data_size, metadata_count_pos;
AVDictionaryEntry *tag = NULL;
for (i = 0; i < s->nb_streams; i++) {
AVCodecContext *enc = s->streams[i]->codec;
FLVStreamContext *sc;
switch (enc->codec_type) {
case AVMEDIA_TYPE_VIDEO:
if (s->streams[i]->avg_frame_rate.den &&
s->streams[i]->avg_frame_rate.num) {
framerate = av_q2d(s->streams[i]->avg_frame_rate);
} else {
framerate = 1 / av_q2d(s->streams[i]->codec->time_base);
}
if (video_enc) {
av_log(s, AV_LOG_ERROR,
"at most one video stream is supported in flv\n");
return AVERROR(EINVAL);
}
video_enc = enc;
if (enc->codec_tag == 0) {
av_log(s, AV_LOG_ERROR, "Video codec '%s' for stream %d is not compatible with FLV\n",
avcodec_get_name(enc->codec_id), i);
return AVERROR(EINVAL);
}
break;
case AVMEDIA_TYPE_AUDIO:
if (audio_enc) {
av_log(s, AV_LOG_ERROR,
"at most one audio stream is supported in flv\n");
return AVERROR(EINVAL);
}
audio_enc = enc;
if (get_audio_flags(s, enc) < 0)
return AVERROR_INVALIDDATA;
break;
case AVMEDIA_TYPE_DATA:
if (enc->codec_id != AV_CODEC_ID_TEXT) {
av_log(s, AV_LOG_ERROR, "Data codec '%s' for stream %d is not compatible with FLV\n",
avcodec_get_name(enc->codec_id), i);
return AVERROR_INVALIDDATA;
}
data_enc = enc;
break;
default:
av_log(s, AV_LOG_ERROR, "Codec type '%s' for stream %d is not compatible with FLV\n",
av_get_media_type_string(enc->codec_type), i);
return AVERROR(EINVAL);
}
avpriv_set_pts_info(s->streams[i], 32, 1, 1000); /* 32 bit pts in ms */
sc = av_mallocz(sizeof(FLVStreamContext));
if (!sc)
return AVERROR(ENOMEM);
s->streams[i]->priv_data = sc;
sc->last_ts = -1;
}
flv->delay = AV_NOPTS_VALUE;
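/* FLV file header (annotation added for clarity): 3-byte "FLV" signature,
 * 1-byte version, 1-byte type flags (audio/video present), 4-byte data
 * offset (9 for version 1), followed by PreviousTagSize0 == 0. */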
avio_write(pb, "FLV", 3);
avio_w8(pb, 1);
avio_w8(pb, FLV_HEADER_FLAG_HASAUDIO * !!audio_enc +
FLV_HEADER_FLAG_HASVIDEO * !!video_enc);
avio_wb32(pb, 9);
avio_wb32(pb, 0);
for (i = 0; i < s->nb_streams; i++)
if (s->streams[i]->codec->codec_tag == 5) {
avio_w8(pb, 8); // message type
avio_wb24(pb, 0); // include flags
avio_wb24(pb, 0); // time stamp
avio_wb32(pb, 0); // reserved
avio_wb32(pb, 11); // size
flv->reserved = 5;
}
/* write meta_tag */
avio_w8(pb, 18); // tag type META
metadata_size_pos = avio_tell(pb);
avio_wb24(pb, 0); // size of data part (sum of all parts below)
avio_wb24(pb, 0); // timestamp
avio_wb32(pb, 0); // reserved
/* now data of data_size size */
/* first event name as a string */
avio_w8(pb, AMF_DATA_TYPE_STRING);
put_amf_string(pb, "onMetaData"); // 12 bytes
/* mixed array (hash) with size and string/type/data tuples */
avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY);
metadata_count_pos = avio_tell(pb);
metadata_count = 5 * !!video_enc +
5 * !!audio_enc +
1 * !!data_enc +
2; // +2 for duration and file size
avio_wb32(pb, metadata_count);
put_amf_string(pb, "duration");
flv->duration_offset= avio_tell(pb);
// fill in the guessed duration, it'll be corrected later if incorrect
put_amf_double(pb, s->duration / AV_TIME_BASE);
if (video_enc) {
put_amf_string(pb, "width");
put_amf_double(pb, video_enc->width);
put_amf_string(pb, "height");
put_amf_double(pb, video_enc->height);
put_amf_string(pb, "videodatarate");
put_amf_double(pb, video_enc->bit_rate / 1024.0);
put_amf_string(pb, "framerate");
put_amf_double(pb, framerate);
put_amf_string(pb, "videocodecid");
put_amf_double(pb, video_enc->codec_tag);
}
if (audio_enc) {
put_amf_string(pb, "audiodatarate");
put_amf_double(pb, audio_enc->bit_rate / 1024.0);
put_amf_string(pb, "audiosamplerate");
put_amf_double(pb, audio_enc->sample_rate);
put_amf_string(pb, "audiosamplesize");
put_amf_double(pb, audio_enc->codec_id == AV_CODEC_ID_PCM_U8 ? 8 : 16);
put_amf_string(pb, "stereo");
put_amf_bool(pb, audio_enc->channels == 2);
put_amf_string(pb, "audiocodecid");
put_amf_double(pb, audio_enc->codec_tag);
}
if (data_enc) {
put_amf_string(pb, "datastream");
put_amf_double(pb, 0.0);
}
while ((tag = av_dict_get(s->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
if( !strcmp(tag->key, "width")
||!strcmp(tag->key, "height")
||!strcmp(tag->key, "videodatarate")
||!strcmp(tag->key, "framerate")
||!strcmp(tag->key, "videocodecid")
||!strcmp(tag->key, "audiodatarate")
||!strcmp(tag->key, "audiosamplerate")
||!strcmp(tag->key, "audiosamplesize")
||!strcmp(tag->key, "stereo")
||!strcmp(tag->key, "audiocodecid")
||!strcmp(tag->key, "duration")
||!strcmp(tag->key, "onMetaData")
){
av_log(s, AV_LOG_DEBUG, "Ignoring metadata for %s\n", tag->key);
continue;
}
put_amf_string(pb, tag->key);
avio_w8(pb, AMF_DATA_TYPE_STRING);
put_amf_string(pb, tag->value);
metadata_count++;
}
put_amf_string(pb, "filesize");
flv->filesize_offset = avio_tell(pb);
put_amf_double(pb, 0); // delayed write
put_amf_string(pb, "");
avio_w8(pb, AMF_END_OF_OBJECT);
/* write total size of tag */
data_size = avio_tell(pb) - metadata_size_pos - 10;
avio_seek(pb, metadata_count_pos, SEEK_SET);
avio_wb32(pb, metadata_count);
avio_seek(pb, metadata_size_pos, SEEK_SET);
avio_wb24(pb, data_size);
avio_skip(pb, data_size + 10 - 3);
avio_wb32(pb, data_size + 11);
for (i = 0; i < s->nb_streams; i++) {
AVCodecContext *enc = s->streams[i]->codec;
if (enc->codec_id == AV_CODEC_ID_AAC || enc->codec_id == AV_CODEC_ID_H264 || enc->codec_id == AV_CODEC_ID_MPEG4) {
int64_t pos;
avio_w8(pb, enc->codec_type == AVMEDIA_TYPE_VIDEO ?
FLV_TAG_TYPE_VIDEO : FLV_TAG_TYPE_AUDIO);
avio_wb24(pb, 0); // size patched later
avio_wb24(pb, 0); // ts
avio_w8(pb, 0); // ts ext
avio_wb24(pb, 0); // streamid
pos = avio_tell(pb);
if (enc->codec_id == AV_CODEC_ID_AAC) {
avio_w8(pb, get_audio_flags(s, enc));
avio_w8(pb, 0); // AAC sequence header
avio_write(pb, enc->extradata, enc->extradata_size);
} else {
avio_w8(pb, enc->codec_tag | FLV_FRAME_KEY); // flags
avio_w8(pb, 0); // AVC sequence header
avio_wb24(pb, 0); // composition time
ff_isom_write_avcc(pb, enc->extradata, enc->extradata_size);
}
data_size = avio_tell(pb) - pos;
avio_seek(pb, -data_size - 10, SEEK_CUR);
avio_wb24(pb, data_size);
avio_skip(pb, data_size + 10 - 3);
avio_wb32(pb, data_size + 11); // previous tag size
}
}
return 0;
}
static int flv_write_trailer(AVFormatContext *s)
{
int64_t file_size;
AVIOContext *pb = s->pb;
FLVContext *flv = s->priv_data;
int i;
/* Add EOS tag */
for (i = 0; i < s->nb_streams; i++) {
AVCodecContext *enc = s->streams[i]->codec;
FLVStreamContext *sc = s->streams[i]->priv_data;
if (enc->codec_type == AVMEDIA_TYPE_VIDEO &&
(enc->codec_id == AV_CODEC_ID_H264 || enc->codec_id == AV_CODEC_ID_MPEG4))
put_avc_eos_tag(pb, sc->last_ts);
}
file_size = avio_tell(pb);
/* update information */
if (avio_seek(pb, flv->duration_offset, SEEK_SET) < 0)
av_log(s, AV_LOG_WARNING, "Failed to update header with correct duration.\n");
else
put_amf_double(pb, flv->duration / (double)1000);
if (avio_seek(pb, flv->filesize_offset, SEEK_SET) < 0)
av_log(s, AV_LOG_WARNING, "Failed to update header with correct filesize.\n");
else
put_amf_double(pb, file_size);
avio_seek(pb, file_size, SEEK_SET);
return 0;
}
static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
{
AVIOContext *pb = s->pb;
AVCodecContext *enc = s->streams[pkt->stream_index]->codec;
FLVContext *flv = s->priv_data;
FLVStreamContext *sc = s->streams[pkt->stream_index]->priv_data;
unsigned ts;
int size = pkt->size;
uint8_t *data = NULL;
int flags = -1, flags_size, ret;
if (enc->codec_id == AV_CODEC_ID_VP6 || enc->codec_id == AV_CODEC_ID_VP6F ||
enc->codec_id == AV_CODEC_ID_VP6A || enc->codec_id == AV_CODEC_ID_AAC)
flags_size = 2;
else if (enc->codec_id == AV_CODEC_ID_H264 || enc->codec_id == AV_CODEC_ID_MPEG4)
flags_size = 5;
else
flags_size = 1;
switch (enc->codec_type) {
case AVMEDIA_TYPE_VIDEO:
avio_w8(pb, FLV_TAG_TYPE_VIDEO);
flags = enc->codec_tag;
if (flags == 0) {
av_log(s, AV_LOG_ERROR,
"Video codec '%s' is not compatible with FLV\n",
avcodec_get_name(enc->codec_id));
return AVERROR(EINVAL);
}
flags |= pkt->flags & AV_PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER;
break;
case AVMEDIA_TYPE_AUDIO:
flags = get_audio_flags(s, enc);
av_assert0(size);
avio_w8(pb, FLV_TAG_TYPE_AUDIO);
break;
case AVMEDIA_TYPE_DATA:
avio_w8(pb, FLV_TAG_TYPE_META);
break;
default:
return AVERROR(EINVAL);
}
if (enc->codec_id == AV_CODEC_ID_H264 || enc->codec_id == AV_CODEC_ID_MPEG4) {
/* check if extradata looks like mp4-formatted */
if (enc->extradata_size > 0 && *(uint8_t*)enc->extradata != 1)
if ((ret = ff_avc_parse_nal_units_buf(pkt->data, &data, &size)) < 0)
return ret;
} else if (enc->codec_id == AV_CODEC_ID_AAC && pkt->size > 2 &&
(AV_RB16(pkt->data) & 0xfff0) == 0xfff0) {
if (!s->streams[pkt->stream_index]->nb_frames) {
av_log(s, AV_LOG_ERROR, "Malformed AAC bitstream detected: "
"use audio bitstream filter 'aac_adtstoasc' to fix it "
"('-bsf:a aac_adtstoasc' option with ffmpeg)\n");
return AVERROR_INVALIDDATA;
}
av_log(s, AV_LOG_WARNING, "aac bitstream error\n");
}
if (flv->delay == AV_NOPTS_VALUE)
flv->delay = -pkt->dts;
if (pkt->dts < -flv->delay) {
av_log(s, AV_LOG_WARNING,
"Packets are not in the proper order with respect to DTS\n");
return AVERROR(EINVAL);
}
ts = pkt->dts + flv->delay; // add delay to force positive dts
/* check Speex packet duration */
if (enc->codec_id == AV_CODEC_ID_SPEEX && ts - sc->last_ts > 160)
av_log(s, AV_LOG_WARNING, "Warning: Speex stream has more than "
"8 frames per packet. Adobe Flash "
"Player cannot handle this!\n");
if (sc->last_ts < ts)
sc->last_ts = ts;
avio_wb24(pb, size + flags_size);
avio_wb24(pb, ts);
avio_w8(pb, (ts >> 24) & 0x7F); // timestamps are 32 bits _signed_
avio_wb24(pb, flv->reserved);
if (enc->codec_type == AVMEDIA_TYPE_DATA) {
int data_size;
int metadata_size_pos = avio_tell(pb);
avio_w8(pb, AMF_DATA_TYPE_STRING);
put_amf_string(pb, "onTextData");
avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY);
avio_wb32(pb, 2);
put_amf_string(pb, "type");
avio_w8(pb, AMF_DATA_TYPE_STRING);
put_amf_string(pb, "Text");
put_amf_string(pb, "text");
avio_w8(pb, AMF_DATA_TYPE_STRING);
put_amf_string(pb, pkt->data);
put_amf_string(pb, "");
avio_w8(pb, AMF_END_OF_OBJECT);
/* write total size of tag */
data_size = avio_tell(pb) - metadata_size_pos;
avio_seek(pb, metadata_size_pos - 10, SEEK_SET);
avio_wb24(pb, data_size);
avio_seek(pb, data_size + 10 - 3, SEEK_CUR);
avio_wb32(pb, data_size + 11);
} else {
av_assert1(flags>=0);
avio_w8(pb,flags);
if (enc->codec_id == AV_CODEC_ID_VP6)
avio_w8(pb,0);
if (enc->codec_id == AV_CODEC_ID_VP6F || enc->codec_id == AV_CODEC_ID_VP6A)
avio_w8(pb, enc->extradata_size ? enc->extradata[0] : 0);
else if (enc->codec_id == AV_CODEC_ID_AAC)
avio_w8(pb,1); // AAC raw
else if (enc->codec_id == AV_CODEC_ID_H264 || enc->codec_id == AV_CODEC_ID_MPEG4) {
avio_w8(pb,1); // AVC NALU
avio_wb24(pb,pkt->pts - pkt->dts);
}
avio_write(pb, data ? data : pkt->data, size);
avio_wb32(pb, size + flags_size + 11); // previous tag size
flv->duration = FFMAX(flv->duration,
pkt->pts + flv->delay + pkt->duration);
}
av_free(data);
return pb->error;
}
AVOutputFormat ff_flv_muxer = {
.name = "flv",
.long_name = NULL_IF_CONFIG_SMALL("FLV (Flash Video)"),
.mime_type = "video/x-flv",
.extensions = "flv",
.priv_data_size = sizeof(FLVContext),
.audio_codec = CONFIG_LIBMP3LAME ? AV_CODEC_ID_MP3 : AV_CODEC_ID_ADPCM_SWF,
.video_codec = AV_CODEC_ID_FLV1,
.write_header = flv_write_header,
.write_packet = flv_write_packet,
.write_trailer = flv_write_trailer,
.codec_tag = (const AVCodecTag* const []) {
flv_video_codec_ids, flv_audio_codec_ids, 0
},
.flags = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS |
AVFMT_TS_NONSTRICT,
};
|
514255.c | /*
* Copyright (c) 2016-2018, 2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/**
* DOC: wlan_hdd_nan_datapath.c
*
* WLAN Host Device Driver nan datapath API implementation
*/
#include <wlan_hdd_includes.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include "wlan_hdd_includes.h"
#include "wlan_hdd_p2p.h"
#include "wma_api.h"
#include "wlan_hdd_assoc.h"
#include "sme_nan_datapath.h"
#include "wlan_hdd_object_manager.h"
#include <qca_vendor.h>
#include "os_if_nan.h"
#include "wlan_nan_api.h"
#include "nan_public_structs.h"
#include <cdp_txrx_misc.h>
/**
* hdd_ndp_print_ini_config()- Print nan datapath specific INI configuration
* @hdd_ctx: handle to hdd context
*
* Return: None
*/
void hdd_ndp_print_ini_config(struct hdd_context *hdd_ctx)
{
hdd_debug("Name = [%s] Value = [%u]", CFG_ENABLE_NAN_DATAPATH_NAME,
hdd_ctx->config->enable_nan_datapath);
hdd_debug("Name = [%s] Value = [%u]", CFG_ENABLE_NAN_NDI_CHANNEL_NAME,
hdd_ctx->config->nan_datapath_ndi_channel);
}
/**
* hdd_nan_datapath_target_config() - Configure NAN datapath features
* @hdd_ctx: Pointer to HDD context
* @cfg: Pointer to target device capability information
*
* NAN datapath functionality is enabled if it is enabled in
* .ini file and also supported on target device.
*
* Return: None
*/
void hdd_nan_datapath_target_config(struct hdd_context *hdd_ctx,
struct wma_tgt_cfg *cfg)
{
hdd_ctx->nan_datapath_enabled =
hdd_ctx->config->enable_nan_datapath &&
cfg->nan_datapath_enabled;
hdd_debug("final: %d, host: %d, fw: %d",
hdd_ctx->nan_datapath_enabled,
hdd_ctx->config->enable_nan_datapath,
cfg->nan_datapath_enabled);
}
/**
* hdd_close_ndi() - close NAN Data interface
* @adapter: adapter context
*
* Close the adapter if start BSS fails
*
* Returns: 0 on success, negative error code otherwise
*/
static int hdd_close_ndi(struct hdd_adapter *adapter)
{
int errno;
struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
hdd_enter();
/* check if the adapter is in NAN Data mode */
if (QDF_NDI_MODE != adapter->device_mode) {
hdd_err("Interface is not in NDI mode");
return -EINVAL;
}
wlan_hdd_netif_queue_control(adapter,
WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER,
WLAN_CONTROL_PATH);
#ifdef WLAN_OPEN_SOURCE
cancel_work_sync(&adapter->ipv4_notifier_work);
#endif
hdd_deregister_tx_flow_control(adapter);
#ifdef WLAN_NS_OFFLOAD
#ifdef WLAN_OPEN_SOURCE
cancel_work_sync(&adapter->ipv6_notifier_work);
#endif
#endif
errno = hdd_vdev_destroy(adapter);
if (errno)
hdd_err("failed to destroy vdev: %d", errno);
/* We are good to close the adapter */
hdd_close_adapter(hdd_ctx, adapter, true);
hdd_exit();
return 0;
}
/**
* hdd_is_ndp_allowed() - Indicates if NDP is allowed
* @hdd_ctx: hdd context
*
 * NDP is allowed to coexist only with the STA role; any other role with a
 * started BSS or an active/in-progress connection blocks it.
*
* Return: true if allowed, false otherwise
*/
static bool hdd_is_ndp_allowed(struct hdd_context *hdd_ctx)
{
struct hdd_adapter *adapter;
struct hdd_station_ctx *sta_ctx;
hdd_for_each_adapter(hdd_ctx, adapter) {
switch (adapter->device_mode) {
case QDF_P2P_GO_MODE:
case QDF_SAP_MODE:
if (test_bit(SOFTAP_BSS_STARTED,
&adapter->event_flags))
return false;
break;
case QDF_P2P_CLIENT_MODE:
case QDF_IBSS_MODE:
sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
if (hdd_conn_is_connected(sta_ctx) ||
hdd_is_connecting(sta_ctx))
return false;
break;
default:
break;
}
}
return true;
}
/**
* hdd_ndi_start_bss() - Start BSS on NAN data interface
* @adapter: adapter context
 * @operating_channel: channel on which the BSS is to be started
*
* Return: 0 on success, error value on failure
*/
static int hdd_ndi_start_bss(struct hdd_adapter *adapter,
uint8_t operating_channel)
{
QDF_STATUS status;
uint32_t roam_id;
struct csr_roam_profile *roam_profile;
mac_handle_t mac_handle;
hdd_enter();
roam_profile = hdd_roam_profile(adapter);
if (HDD_WMM_USER_MODE_NO_QOS ==
(WLAN_HDD_GET_CTX(adapter))->config->WmmMode) {
/* QoS not enabled in cfg file*/
roam_profile->uapsd_mask = 0;
} else {
/* QoS enabled, update uapsd mask from cfg file*/
roam_profile->uapsd_mask =
(WLAN_HDD_GET_CTX(adapter))->config->UapsdMask;
}
roam_profile->csrPersona = adapter->device_mode;
if (!operating_channel)
operating_channel = NAN_SOCIAL_CHANNEL_2_4GHZ;
roam_profile->ChannelInfo.numOfChannels = 1;
roam_profile->ChannelInfo.ChannelList = &operating_channel;
roam_profile->SSIDs.numOfSSIDs = 1;
roam_profile->SSIDs.SSIDList->SSID.length = 0;
roam_profile->phyMode = eCSR_DOT11_MODE_11ac;
roam_profile->BSSType = eCSR_BSS_TYPE_NDI;
roam_profile->BSSIDs.numOfBSSIDs = 1;
qdf_mem_copy((void *)(roam_profile->BSSIDs.bssid),
&adapter->mac_addr.bytes[0],
QDF_MAC_ADDR_SIZE);
roam_profile->AuthType.numEntries = 1;
roam_profile->AuthType.authType[0] = eCSR_AUTH_TYPE_OPEN_SYSTEM;
roam_profile->EncryptionType.numEntries = 1;
roam_profile->EncryptionType.encryptionType[0] = eCSR_ENCRYPT_TYPE_NONE;
mac_handle = hdd_adapter_get_mac_handle(adapter);
status = sme_roam_connect(mac_handle, adapter->session_id,
roam_profile, &roam_id);
if (QDF_IS_STATUS_ERROR(status)) {
hdd_err("NDI sme_RoamConnect session %d failed with status %d -> NotConnected",
adapter->session_id, status);
/* change back to NotConnected */
hdd_conn_set_connection_state(adapter,
eConnectionState_NotConnected);
} else {
hdd_info("sme_RoamConnect issued successfully for NDI");
}
roam_profile->ChannelInfo.ChannelList = NULL;
roam_profile->ChannelInfo.numOfChannels = 0;
hdd_exit();
return 0;
}
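/*
 * Note: sme_roam_connect() above only issues the connect request; this
 * function returns 0 even if that request fails immediately. The actual NDI
 * create result arrives asynchronously and is handled further below by
 * hdd_ndp_event_handler() and hdd_ndi_drv_ndi_create_rsp_handler().
 */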
/**
 * hdd_get_random_nan_mac_addr() - generate a random MAC address not already in use
* @hdd_ctx: hdd context pointer
* @mac_addr: mac address buffer to populate
*
* Return: status of operation
*/
static int hdd_get_random_nan_mac_addr(struct hdd_context *hdd_ctx,
struct qdf_mac_addr *mac_addr)
{
struct hdd_adapter *adapter;
uint8_t pos, bit_pos, byte_pos, mask;
uint8_t i, attempts, max_attempt = 16;
bool found;
for (attempts = 0; attempts < max_attempt; attempts++) {
found = false;
		/* if an NDI already exists, the next address must be exactly 1 bit apart */
adapter = hdd_get_adapter(hdd_ctx, QDF_NDI_MODE);
if (adapter) {
hdd_debug("NDI already exists, deriving next mac");
qdf_mem_copy(mac_addr, &adapter->mac_addr,
sizeof(*mac_addr));
cds_rand_get_bytes(0, &pos, sizeof(pos));
			/* skipping bytes 0 and 5 leaves 4 bytes, i.e. 8 * 4 = 32 bit positions */
pos = pos % 32;
bit_pos = pos % 8;
byte_pos = pos / 8;
mask = 1 << bit_pos;
/* flip the required bit */
mac_addr->bytes[byte_pos + 1] ^= mask;
} else {
cds_rand_get_bytes(0, (uint8_t *)mac_addr,
sizeof(*mac_addr));
/*
* Reset multicast bit (bit-0) and set
* locally-administered bit
*/
mac_addr->bytes[0] = 0x2;
			/*
			 * to avoid a potential conflict with the FW-generated
			 * NMI mac addr, the host clears the LSB of the 6th byte
			 */
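			/*
			 * Example (illustrative values): if the RNG produced
			 * 0xA7 for byte 5, clearing the LSB below yields 0xA6.
			 */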
mac_addr->bytes[5] &= 0xFE;
}
for (i = 0; i < hdd_ctx->num_provisioned_addr; i++) {
if ((!qdf_mem_cmp(hdd_ctx->
provisioned_mac_addr[i].bytes,
mac_addr, sizeof(*mac_addr)))) {
found = true;
break;
}
}
if (found)
continue;
for (i = 0; i < hdd_ctx->num_derived_addr; i++) {
if ((!qdf_mem_cmp(hdd_ctx->
derived_mac_addr[i].bytes,
mac_addr, sizeof(*mac_addr)))) {
found = true;
break;
}
}
if (found)
continue;
adapter = hdd_get_adapter_by_macaddr(hdd_ctx, mac_addr->bytes);
if (!adapter)
return 0;
}
hdd_err("unable to get non-pre-existing mac address in %d attempts",
max_attempt);
return -EINVAL;
}
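/*
 * Worked example for the bit-flip derivation above (illustrative values):
 * starting from an existing NDI address 02:aa:bb:cc:dd:ee, a random pos that
 * reduces to 19 gives bit_pos = 19 % 8 = 3, byte_pos = 19 / 8 = 2 and
 * mask = 0x08; the flip lands in bytes[byte_pos + 1] = bytes[3], so
 * 0xcc ^ 0x08 = 0xc4 and the derived address is 02:aa:bb:c4:dd:ee,
 * exactly one bit apart with bytes 0 and 5 untouched.
 */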
void hdd_ndp_event_handler(struct hdd_adapter *adapter,
struct csr_roam_info *roam_info,
uint32_t roam_id, eRoamCmdStatus roam_status,
eCsrRoamResult roam_result)
{
bool success;
struct wlan_objmgr_psoc *psoc = wlan_vdev_get_psoc(adapter->vdev);
uint8_t sta_id;
if (roam_status == eCSR_ROAM_NDP_STATUS_UPDATE) {
switch (roam_result) {
case eCSR_ROAM_RESULT_NDI_CREATE_RSP:
success = (roam_info->ndp.ndi_create_params.status ==
NAN_DATAPATH_RSP_STATUS_SUCCESS);
hdd_debug("posting ndi create status: %d (%s) to umac",
success, success ? "Success" : "Failure");
sta_id = roam_info->ndp.ndi_create_params.sta_id;
os_if_nan_post_ndi_create_rsp(psoc, adapter->session_id,
success, sta_id);
return;
case eCSR_ROAM_RESULT_NDI_DELETE_RSP:
success = (roam_info->ndp.ndi_create_params.status ==
NAN_DATAPATH_RSP_STATUS_SUCCESS);
hdd_debug("posting ndi delete status: %d (%s) to umac",
success, success ? "Success" : "Failure");
os_if_nan_post_ndi_delete_rsp(psoc, adapter->session_id,
success);
return;
default:
			hdd_err("incorrect roam_result: %d", roam_result);
return;
}
} else {
		hdd_err("incorrect roam_status: %d", roam_status);
return;
}
}
/**
 * __wlan_hdd_cfg80211_process_ndp_cmd() - handle NDP request
* @wiphy: pointer to wireless wiphy structure.
* @wdev: pointer to wireless_dev structure.
* @data: Pointer to the data to be passed via vendor interface
* @data_len:Length of the data to be passed
*
* This function is invoked to handle vendor command
*
* Return: 0 on success, negative errno on failure
*/
static int __wlan_hdd_cfg80211_process_ndp_cmd(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int data_len)
{
int ret_val;
struct hdd_context *hdd_ctx = wiphy_priv(wiphy);
ret_val = wlan_hdd_validate_context(hdd_ctx);
if (ret_val)
return ret_val;
if (QDF_GLOBAL_FTM_MODE == hdd_get_conparam()) {
hdd_err_rl("Command not allowed in FTM mode");
return -EPERM;
}
if (!WLAN_HDD_IS_NDP_ENABLED(hdd_ctx)) {
hdd_debug("NAN datapath is not enabled");
return -EPERM;
}
/* NAN data path coexists only with STA interface */
if (false == hdd_is_ndp_allowed(hdd_ctx)) {
hdd_err_rl("Unsupported concurrency for NAN datapath");
return -EPERM;
}
return os_if_nan_process_ndp_cmd(hdd_ctx->psoc,
data, data_len);
}
/**
* wlan_hdd_cfg80211_process_ndp_cmd() - handle NDP request
* @wiphy: pointer to wireless wiphy structure.
* @wdev: pointer to wireless_dev structure.
* @data: Pointer to the data to be passed via vendor interface
* @data_len:Length of the data to be passed
*
* This function is called to send a NAN request to
* firmware. This is an SSR-protected wrapper function.
*
* Return: 0 on success, negative errno on failure
*/
int wlan_hdd_cfg80211_process_ndp_cmd(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int data_len)
{
int ret;
cds_ssr_protect(__func__);
ret = __wlan_hdd_cfg80211_process_ndp_cmd(wiphy, wdev, data, data_len);
cds_ssr_unprotect(__func__);
return ret;
}
static int update_ndi_state(struct hdd_adapter *adapter, uint32_t state)
{
return os_if_nan_set_ndi_state(adapter->vdev, state);
}
/**
* hdd_init_nan_data_mode() - initialize nan data mode
* @adapter: adapter context
*
 * Returns: 0 on success, negative error code on error
*/
int hdd_init_nan_data_mode(struct hdd_adapter *adapter)
{
struct net_device *wlan_dev = adapter->dev;
struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
QDF_STATUS status;
int32_t ret_val;
mac_handle_t mac_handle;
ret_val = hdd_vdev_create(adapter, hdd_sme_roam_callback, adapter);
if (ret_val) {
hdd_err("failed to create vdev: %d", ret_val);
return ret_val;
}
mac_handle = hdd_ctx->mac_handle;
/* Configure self HT/VHT capabilities */
sme_set_curr_device_mode(mac_handle, adapter->device_mode);
sme_set_pdev_ht_vht_ies(mac_handle, hdd_ctx->config->enable2x2);
sme_set_vdev_ies_per_band(mac_handle, adapter->session_id);
hdd_roam_profile_init(adapter);
hdd_register_wext(wlan_dev);
status = hdd_init_tx_rx(adapter);
if (QDF_STATUS_SUCCESS != status) {
hdd_err("hdd_init_tx_rx() init failed, status %d", status);
ret_val = -EAGAIN;
goto error_init_txrx;
}
set_bit(INIT_TX_RX_SUCCESS, &adapter->event_flags);
status = hdd_wmm_adapter_init(adapter);
if (QDF_STATUS_SUCCESS != status) {
hdd_err("hdd_wmm_adapter_init() failed, status %d", status);
ret_val = -EAGAIN;
goto error_wmm_init;
}
set_bit(WMM_INIT_DONE, &adapter->event_flags);
ret_val = wma_cli_set_command((int)adapter->session_id,
(int)WMI_PDEV_PARAM_BURST_ENABLE,
(int)HDD_ENABLE_SIFS_BURST_DEFAULT,
PDEV_CMD);
if (0 != ret_val)
hdd_err("WMI_PDEV_PARAM_BURST_ENABLE set failed %d", ret_val);
update_ndi_state(adapter, NAN_DATA_NDI_CREATING_STATE);
return ret_val;
error_wmm_init:
clear_bit(INIT_TX_RX_SUCCESS, &adapter->event_flags);
hdd_deinit_tx_rx(adapter);
error_init_txrx:
hdd_unregister_wext(wlan_dev);
QDF_BUG(!hdd_vdev_destroy(adapter));
return ret_val;
}
int hdd_ndi_open(char *iface_name)
{
struct hdd_adapter *adapter;
struct qdf_mac_addr random_ndi_mac;
struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
uint8_t *ndi_mac_addr;
hdd_enter();
if (!hdd_ctx) {
hdd_err("hdd_ctx null");
return -EINVAL;
}
if (hdd_ctx->config->is_ndi_mac_randomized) {
if (hdd_get_random_nan_mac_addr(hdd_ctx, &random_ndi_mac)) {
hdd_err("get random mac address failed");
return -EFAULT;
}
ndi_mac_addr = &random_ndi_mac.bytes[0];
} else {
ndi_mac_addr = wlan_hdd_get_intf_addr(hdd_ctx, QDF_NDI_MODE);
if (!ndi_mac_addr) {
hdd_err("get intf address failed");
return -EFAULT;
}
}
adapter = hdd_open_adapter(hdd_ctx, QDF_NDI_MODE, iface_name,
ndi_mac_addr, NET_NAME_UNKNOWN, true);
if (!adapter) {
hdd_err("hdd_open_adapter failed");
return -EINVAL;
}
hdd_exit();
return 0;
}
int hdd_ndi_start(char *iface_name, uint16_t transaction_id)
{
int ret;
uint8_t op_channel;
QDF_STATUS status;
struct hdd_adapter *adapter;
struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
hdd_enter();
if (!hdd_ctx) {
hdd_err("hdd_ctx is null");
return -EINVAL;
}
op_channel = hdd_ctx->config->nan_datapath_ndi_channel;
adapter = hdd_get_adapter_by_iface_name(hdd_ctx, iface_name);
if (!adapter) {
hdd_err("adapter is null");
return -EINVAL;
}
/* create nan vdev */
status = hdd_init_nan_data_mode(adapter);
if (QDF_STATUS_SUCCESS != status) {
		hdd_err("failed to init nan data intf, status: %d", status);
ret = -EFAULT;
goto err_handler;
}
/*
	 * The create transaction id needs to be saved here since the firmware
	 * does not honor the transaction id for the create request
*/
ucfg_nan_set_ndp_create_transaction_id(adapter->vdev,
transaction_id);
ucfg_nan_set_ndi_state(adapter->vdev,
NAN_DATA_NDI_CREATING_STATE);
/*
* The NAN data interface has been created at this point.
* Unlike traditional device modes, where the higher application
* layer initiates connect / join / start, the NAN data
* interface does not have any such formal requests. The NDI
* create request is responsible for starting the BSS as well.
*/
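	/*
	 * Assumed channel numbering: the NAN social channels are typically
	 * channel 6 in 2.4 GHz and channels 44/149 in 5 GHz, so any other
	 * configured channel falls back to channel 6 below.
	 */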
if (op_channel != NAN_SOCIAL_CHANNEL_2_4GHZ &&
op_channel != NAN_SOCIAL_CHANNEL_5GHZ_LOWER_BAND &&
op_channel != NAN_SOCIAL_CHANNEL_5GHZ_UPPER_BAND) {
/* start NDI on the default 2.4 GHz social channel */
op_channel = NAN_SOCIAL_CHANNEL_2_4GHZ;
}
if (hdd_ndi_start_bss(adapter, op_channel)) {
hdd_err("NDI start bss failed");
ret = -EFAULT;
goto err_handler;
}
hdd_exit();
return 0;
err_handler:
/* Start BSS failed, delete the interface */
hdd_close_ndi(adapter);
return ret;
}
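/*
 * Rough NDI bring-up flow as implemented in this file (illustrative sketch,
 * error handling and the intervening SME/firmware events omitted):
 *
 *   hdd_ndi_open(iface_name);                  allocate the NDI adapter
 *   hdd_ndi_start(iface_name, transaction_id); create vdev and start BSS
 *       ... firmware completes the create ...
 *   hdd_ndi_drv_ndi_create_rsp_handler(vdev_id, ndi_rsp);
 *                                              register broadcast STA and
 *                                              wake the netif queues
 */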
int hdd_ndi_delete(uint8_t vdev_id, char *iface_name, uint16_t transaction_id)
{
int ret;
struct hdd_adapter *adapter;
struct hdd_station_ctx *sta_ctx;
struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
uint8_t sta_id;
if (!hdd_ctx) {
hdd_err("hdd_ctx is null");
return -EINVAL;
}
/* check if adapter by vdev_id is valid NDI */
adapter = hdd_get_adapter_by_vdev(hdd_ctx, vdev_id);
if (!adapter || !WLAN_HDD_IS_NDI(adapter)) {
hdd_err("NAN data interface %s is not available", iface_name);
return -EINVAL;
}
sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
if (!sta_ctx) {
hdd_err("sta_ctx is NULL");
return -EINVAL;
}
sta_id = sta_ctx->broadcast_staid;
if (sta_id >= HDD_MAX_ADAPTERS) {
hdd_err("Error: Invalid sta id %u", sta_id);
return -EINVAL;
}
/* Since, the interface is being deleted, remove the broadcast id. */
hdd_ctx->sta_to_adapter[sta_id] = NULL;
os_if_nan_set_ndp_delete_transaction_id(adapter->vdev,
transaction_id);
os_if_nan_set_ndi_state(adapter->vdev, NAN_DATA_NDI_DELETING_STATE);
/* Delete the interface */
ret = __wlan_hdd_del_virtual_intf(hdd_ctx->wiphy, &adapter->wdev);
if (ret)
hdd_err("NDI delete request failed");
else
		hdd_debug("NDI delete request successfully issued");
return ret;
}
#ifdef WLAN_FEATURE_NAN
static void hdd_nan_config_keep_alive_period(uint8_t vdev_id,
struct hdd_context *hdd_ctx)
{
sme_cli_set_command(vdev_id, WMI_VDEV_PARAM_NDP_KEEPALIVE_TIMEOUT,
hdd_ctx->config->ndp_keep_alive_period, VDEV_CMD);
}
#else
static inline void hdd_nan_config_keep_alive_period(
uint8_t vdev_id,
struct hdd_context *hdd_ctx)
{
}
#endif
void hdd_ndi_drv_ndi_create_rsp_handler(uint8_t vdev_id,
struct nan_datapath_inf_create_rsp *ndi_rsp)
{
struct hdd_context *hdd_ctx;
struct hdd_adapter *adapter;
struct hdd_station_ctx *sta_ctx;
struct csr_roam_info *roam_info;
struct bss_description tmp_bss_descp = {0};
struct qdf_mac_addr bc_mac_addr = QDF_MAC_ADDR_BCAST_INIT;
uint8_t sta_id;
hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
if (!hdd_ctx) {
hdd_err("hdd_ctx is null");
return;
}
adapter = hdd_get_adapter_by_vdev(hdd_ctx, vdev_id);
if (!adapter) {
hdd_err("adapter is null");
return;
}
sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
if (!sta_ctx) {
hdd_err("sta_ctx is null");
return;
}
sta_id = ndi_rsp->sta_id;
if (sta_id >= HDD_MAX_ADAPTERS) {
hdd_err("Error: Invalid sta id %u", sta_id);
return;
}
roam_info = qdf_mem_malloc(sizeof(*roam_info));
if (!roam_info)
return;
if (ndi_rsp->status == QDF_STATUS_SUCCESS) {
hdd_alert("NDI interface successfully created");
os_if_nan_set_ndp_create_transaction_id(adapter->vdev, 0);
os_if_nan_set_ndi_state(adapter->vdev,
NAN_DATA_NDI_CREATED_STATE);
wlan_hdd_netif_queue_control(adapter,
WLAN_START_ALL_NETIF_QUEUE_N_CARRIER,
WLAN_CONTROL_PATH);
sme_cli_set_command(vdev_id,
WMI_VDEV_PARAM_NDP_INACTIVITY_TIMEOUT,
hdd_ctx->config->ndp_inactivity_timeout,
VDEV_CMD);
hdd_nan_config_keep_alive_period(vdev_id, hdd_ctx);
} else {
hdd_alert("NDI interface creation failed with reason %d",
ndi_rsp->reason /* create_reason */);
}
sta_ctx->broadcast_staid = sta_id;
hdd_save_peer(sta_ctx, sta_id, &bc_mac_addr);
hdd_roam_register_sta(adapter, roam_info, sta_id,
&bc_mac_addr, &tmp_bss_descp);
hdd_ctx->sta_to_adapter[sta_id] = adapter;
qdf_mem_free(roam_info);
}
void hdd_ndi_close(uint8_t vdev_id)
{
struct hdd_context *hdd_ctx;
struct hdd_adapter *adapter;
hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
if (!hdd_ctx) {
hdd_err("hdd_ctx is null");
return;
}
adapter = hdd_get_adapter_by_vdev(hdd_ctx, vdev_id);
if (!adapter) {
hdd_err("adapter is null");
return;
}
hdd_close_ndi(adapter);
}
void hdd_ndi_drv_ndi_delete_rsp_handler(uint8_t vdev_id)
{
struct hdd_context *hdd_ctx;
struct hdd_adapter *adapter;
struct hdd_station_ctx *sta_ctx;
uint8_t sta_id;
hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
if (!hdd_ctx) {
hdd_err("hdd_ctx is null");
return;
}
adapter = hdd_get_adapter_by_vdev(hdd_ctx, vdev_id);
if (!adapter) {
hdd_err("adapter is null");
return;
}
sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
if (!sta_ctx) {
hdd_err("sta_ctx is null");
return;
}
sta_id = sta_ctx->broadcast_staid;
if (sta_id < HDD_MAX_ADAPTERS) {
hdd_ctx->sta_to_adapter[sta_id] = NULL;
hdd_roam_deregister_sta(adapter, sta_id);
hdd_delete_peer(sta_ctx, sta_id);
sta_ctx->broadcast_staid = HDD_WLAN_INVALID_STA_ID;
}
wlan_hdd_netif_queue_control(adapter,
WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER,
WLAN_CONTROL_PATH);
complete(&adapter->disconnect_comp_var);
}
void hdd_ndp_session_end_handler(struct hdd_adapter *adapter)
{
os_if_nan_ndi_session_end(adapter->vdev);
}
int hdd_ndp_get_peer_idx(uint8_t vdev_id, struct qdf_mac_addr *addr)
{
struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
struct hdd_adapter *adapter = hdd_get_adapter_by_vdev(hdd_ctx, vdev_id);
struct hdd_station_ctx *sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
return hdd_get_peer_idx(sta_ctx, addr);
}
/**
* hdd_ndp_new_peer_handler() - NDP new peer indication handler
 * @vdev_id: vdev id of the NDI on which the peer was added
 * @sta_id: station id assigned to the new peer
 * @peer_mac_addr: MAC address of the new peer
 * @first_peer: true if this is the first peer on the NDI
 *
 * Return: 0 on success, negative error code on failure
*/
int hdd_ndp_new_peer_handler(uint8_t vdev_id, uint16_t sta_id,
			struct qdf_mac_addr *peer_mac_addr, bool first_peer)
{
struct hdd_context *hdd_ctx;
struct hdd_adapter *adapter;
struct hdd_station_ctx *sta_ctx;
struct bss_description tmp_bss_descp = {0};
struct csr_roam_info *roam_info;
hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
if (!hdd_ctx) {
hdd_err("hdd_ctx is null");
return -EINVAL;
}
adapter = hdd_get_adapter_by_vdev(hdd_ctx, vdev_id);
if (!adapter) {
hdd_err("adapter is null");
return -EINVAL;
}
sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
if (!sta_ctx) {
hdd_err("sta_ctx is null");
return -EINVAL;
}
if (sta_id >= HDD_MAX_ADAPTERS) {
hdd_err("Error: Invalid sta_id: %u", sta_id);
return -EINVAL;
}
/* save peer in ndp ctx */
if (false == hdd_save_peer(sta_ctx, sta_id, peer_mac_addr)) {
hdd_err("Ndp peer table full. cannot save new peer");
return -EPERM;
}
roam_info = qdf_mem_malloc(sizeof(*roam_info));
if (!roam_info)
return -ENOMEM;
/* this function is called for each new peer */
hdd_roam_register_sta(adapter, roam_info, sta_id,
peer_mac_addr, &tmp_bss_descp);
hdd_ctx->sta_to_adapter[sta_id] = adapter;
	/* perform the following steps only for the first new peer indication */
	if (first_peer) {
hdd_debug("Set ctx connection state to connected");
/* Disable LRO/GRO for NDI Mode */
if (hdd_ctx->ol_enable) {
hdd_info("Disable LRO/GRO in NDI Mode");
hdd_disable_rx_ol_in_concurrency(true);
}
hdd_bus_bw_compute_prev_txrx_stats(adapter);
hdd_bus_bw_compute_timer_start(hdd_ctx);
sta_ctx->conn_info.connState = eConnectionState_NdiConnected;
hdd_wmm_connect(adapter, roam_info, eCSR_BSS_TYPE_NDI);
wlan_hdd_netif_queue_control(adapter,
WLAN_WAKE_ALL_NETIF_QUEUE, WLAN_CONTROL_PATH);
}
qdf_mem_free(roam_info);
return 0;
}
void hdd_cleanup_ndi(struct hdd_context *hdd_ctx,
struct hdd_adapter *adapter)
{
struct hdd_station_ctx *sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
if (sta_ctx->conn_info.connState != eConnectionState_NdiConnected) {
hdd_debug("NDI has no NDPs");
return;
}
sta_ctx->conn_info.connState = eConnectionState_NdiDisconnected;
hdd_conn_set_connection_state(adapter,
eConnectionState_NdiDisconnected);
hdd_debug("Stop netif tx queues.");
wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
WLAN_CONTROL_PATH);
hdd_bus_bw_compute_reset_prev_txrx_stats(adapter);
hdd_bus_bw_compute_timer_try_stop(hdd_ctx);
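	/*
	 * Re-enable RX offload (LRO/GRO) only when this NDI teardown leaves
	 * either no connections at all or a single remaining STA connection.
	 */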
if (hdd_ctx->ol_enable &&
((policy_mgr_get_connection_count(hdd_ctx->psoc) == 0) ||
((policy_mgr_get_connection_count(hdd_ctx->psoc) == 1) &&
(policy_mgr_mode_specific_connection_count(
hdd_ctx->psoc,
PM_STA_MODE,
NULL) == 1)))) {
hdd_info("Enable LRO/GRO");
hdd_disable_rx_ol_in_concurrency(false);
}
}
/**
* hdd_ndp_peer_departed_handler() - Handle NDP peer departed indication
 * @vdev_id: vdev id of the NDI from which the peer departed
 * @sta_id: station id of the departed peer
 * @peer_mac_addr: MAC address of the departed peer
 * @last_peer: true if this was the last peer on the NDI
*
* Return: none
*/
void hdd_ndp_peer_departed_handler(uint8_t vdev_id, uint16_t sta_id,
struct qdf_mac_addr *peer_mac_addr, bool last_peer)
{
struct hdd_context *hdd_ctx;
struct hdd_adapter *adapter;
struct hdd_station_ctx *sta_ctx;
hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
if (!hdd_ctx) {
hdd_err("hdd_ctx is null");
return;
}
adapter = hdd_get_adapter_by_vdev(hdd_ctx, vdev_id);
if (!adapter) {
hdd_err("adapter is null");
return;
}
sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
if (!sta_ctx) {
hdd_err("sta_ctx is null");
return;
}
if (sta_id >= HDD_MAX_ADAPTERS) {
hdd_err("Error: Invalid sta_id: %u", sta_id);
return;
}
hdd_roam_deregister_sta(adapter, sta_id);
hdd_delete_peer(sta_ctx, sta_id);
hdd_ctx->sta_to_adapter[sta_id] = NULL;
if (last_peer) {
hdd_debug("No more ndp peers.");
hdd_cleanup_ndi(hdd_ctx, adapter);
qdf_event_set(&adapter->peer_cleanup_done);
}
}
|
63225.c | /* A Bison parser, made by GNU Bison 3.0.4. */
/* Bison implementation for Yacc-like parsers in C
Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* As a special exception, you may create a larger work that contains
part or all of the Bison parser skeleton and distribute that work
under terms of your choice, so long as that work isn't itself a
parser generator using the skeleton or a modified version thereof
as a parser skeleton. Alternatively, if you modify or redistribute
the parser skeleton itself, you may (at your option) remove this
special exception, which will cause the skeleton and the resulting
Bison output files to be licensed under the GNU General Public
License without this special exception.
This special exception was added by the Free Software Foundation in
version 2.2 of Bison. */
/* C LALR(1) parser skeleton written by Richard Stallman, by
simplifying the original so-called "semantic" parser. */
/* All symbols defined below should begin with yy or YY, to avoid
infringing on user name space. This should be done even for local
variables, as they might otherwise be expanded by user macros.
There are some unavoidable exceptions within include files to
define necessary library symbols; they are noted "INFRINGES ON
USER NAME SPACE" below. */
/* Identify Bison output. */
#define YYBISON 1
/* Bison version. */
#define YYBISON_VERSION "3.0.4"
/* Skeleton name. */
#define YYSKELETON_NAME "yacc.c"
/* Pure parsers. */
#define YYPURE 1
/* Push parsers. */
#define YYPUSH 0
/* Pull parsers. */
#define YYPULL 1
/* Substitute the variable and function names. */
#define yyparse glcpp_parser_parse
#define yylex glcpp_parser_lex
#define yyerror glcpp_parser_error
#define yydebug glcpp_parser_debug
#define yynerrs glcpp_parser_nerrs
/* Copy the first part of user declarations. */
#line 1 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:339 */
/*
* Copyright © 2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <inttypes.h>
#include "glcpp.h"
#include "main/core.h" /* for struct gl_extensions */
#include "main/mtypes.h" /* for gl_api enum */
static void
yyerror(YYLTYPE *locp, glcpp_parser_t *parser, const char *error);
static void
_define_object_macro(glcpp_parser_t *parser,
YYLTYPE *loc,
const char *macro,
token_list_t *replacements);
static void
_define_function_macro(glcpp_parser_t *parser,
YYLTYPE *loc,
const char *macro,
string_list_t *parameters,
token_list_t *replacements);
static string_list_t *
_string_list_create(glcpp_parser_t *parser);
static void
_string_list_append_item(glcpp_parser_t *parser, string_list_t *list,
const char *str);
static int
_string_list_contains(string_list_t *list, const char *member, int *index);
static const char *
_string_list_has_duplicate(string_list_t *list);
static int
_string_list_length(string_list_t *list);
static int
_string_list_equal(string_list_t *a, string_list_t *b);
static argument_list_t *
_argument_list_create(glcpp_parser_t *parser);
static void
_argument_list_append(glcpp_parser_t *parser, argument_list_t *list,
token_list_t *argument);
static int
_argument_list_length(argument_list_t *list);
static token_list_t *
_argument_list_member_at(argument_list_t *list, int index);
static token_t *
_token_create_str(glcpp_parser_t *parser, int type, char *str);
static token_t *
_token_create_ival(glcpp_parser_t *parser, int type, int ival);
static token_list_t *
_token_list_create(glcpp_parser_t *parser);
static void
_token_list_append(glcpp_parser_t *parser, token_list_t *list, token_t *token);
static void
_token_list_append_list(token_list_t *list, token_list_t *tail);
static int
_token_list_equal_ignoring_space(token_list_t *a, token_list_t *b);
static void
_parser_active_list_push(glcpp_parser_t *parser, const char *identifier,
token_node_t *marker);
static void
_parser_active_list_pop(glcpp_parser_t *parser);
static int
_parser_active_list_contains(glcpp_parser_t *parser, const char *identifier);
typedef enum {
EXPANSION_MODE_IGNORE_DEFINED,
EXPANSION_MODE_EVALUATE_DEFINED
} expansion_mode_t;
/* Expand list, and begin lexing from the result (after first
* prefixing a token of type 'head_token_type').
*/
static void
_glcpp_parser_expand_and_lex_from(glcpp_parser_t *parser, int head_token_type,
token_list_t *list, expansion_mode_t mode);
/* Perform macro expansion in-place on the given list. */
static void
_glcpp_parser_expand_token_list(glcpp_parser_t *parser, token_list_t *list,
expansion_mode_t mode);
static void
_glcpp_parser_print_expanded_token_list(glcpp_parser_t *parser,
token_list_t *list);
static void
_glcpp_parser_skip_stack_push_if(glcpp_parser_t *parser, YYLTYPE *loc,
int condition);
static void
_glcpp_parser_skip_stack_change_if(glcpp_parser_t *parser, YYLTYPE *loc,
const char *type, int condition);
static void
_glcpp_parser_skip_stack_pop(glcpp_parser_t *parser, YYLTYPE *loc);
static void
_glcpp_parser_handle_version_declaration(glcpp_parser_t *parser, intmax_t version,
const char *ident, bool explicitly_set);
static int
glcpp_parser_lex(YYSTYPE *yylval, YYLTYPE *yylloc, glcpp_parser_t *parser);
static void
glcpp_parser_lex_from(glcpp_parser_t *parser, token_list_t *list);
static void
add_builtin_define(glcpp_parser_t *parser, const char *name, int value);
#line 229 "glsl/glcpp/glcpp-parse.c" /* yacc.c:339 */
# ifndef YY_NULLPTR
# if defined __cplusplus && 201103L <= __cplusplus
# define YY_NULLPTR nullptr
# else
# define YY_NULLPTR 0
# endif
# endif
/* Enabling verbose error messages. */
#ifdef YYERROR_VERBOSE
# undef YYERROR_VERBOSE
# define YYERROR_VERBOSE 1
#else
# define YYERROR_VERBOSE 1
#endif
/* In a future release of Bison, this section will be replaced
by #include "glcpp-parse.h". */
#ifndef YY_GLCPP_PARSER_GLSL_GLCPP_GLCPP_PARSE_H_INCLUDED
# define YY_GLCPP_PARSER_GLSL_GLCPP_GLCPP_PARSE_H_INCLUDED
/* Debug traces. */
#ifndef YYDEBUG
# define YYDEBUG 1
#endif
#if YYDEBUG
extern int glcpp_parser_debug;
#endif
/* Token type. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
enum yytokentype
{
DEFINED = 258,
ELIF_EXPANDED = 259,
HASH_TOKEN = 260,
DEFINE_TOKEN = 261,
FUNC_IDENTIFIER = 262,
OBJ_IDENTIFIER = 263,
ELIF = 264,
ELSE = 265,
ENDIF = 266,
ERROR_TOKEN = 267,
IF = 268,
IFDEF = 269,
IFNDEF = 270,
LINE = 271,
PRAGMA = 272,
UNDEF = 273,
VERSION_TOKEN = 274,
GARBAGE = 275,
IDENTIFIER = 276,
IF_EXPANDED = 277,
INTEGER = 278,
INTEGER_STRING = 279,
LINE_EXPANDED = 280,
NEWLINE = 281,
OTHER = 282,
PLACEHOLDER = 283,
SPACE = 284,
PLUS_PLUS = 285,
MINUS_MINUS = 286,
PASTE = 287,
OR = 288,
AND = 289,
EQUAL = 290,
NOT_EQUAL = 291,
LESS_OR_EQUAL = 292,
GREATER_OR_EQUAL = 293,
LEFT_SHIFT = 294,
RIGHT_SHIFT = 295,
UNARY = 296
};
#endif
/* Value type. */
/* Location type. */
#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED
typedef struct YYLTYPE YYLTYPE;
struct YYLTYPE
{
int first_line;
int first_column;
int last_line;
int last_column;
};
# define YYLTYPE_IS_DECLARED 1
# define YYLTYPE_IS_TRIVIAL 1
#endif
int glcpp_parser_parse (glcpp_parser_t *parser);
#endif /* !YY_GLCPP_PARSER_GLSL_GLCPP_GLCPP_PARSE_H_INCLUDED */
/* Copy the second part of user declarations. */
#line 330 "glsl/glcpp/glcpp-parse.c" /* yacc.c:358 */
#ifdef short
# undef short
#endif
#ifdef YYTYPE_UINT8
typedef YYTYPE_UINT8 yytype_uint8;
#else
typedef unsigned char yytype_uint8;
#endif
#ifdef YYTYPE_INT8
typedef YYTYPE_INT8 yytype_int8;
#else
typedef signed char yytype_int8;
#endif
#ifdef YYTYPE_UINT16
typedef YYTYPE_UINT16 yytype_uint16;
#else
typedef unsigned short int yytype_uint16;
#endif
#ifdef YYTYPE_INT16
typedef YYTYPE_INT16 yytype_int16;
#else
typedef short int yytype_int16;
#endif
#ifndef YYSIZE_T
# ifdef __SIZE_TYPE__
# define YYSIZE_T __SIZE_TYPE__
# elif defined size_t
# define YYSIZE_T size_t
# elif ! defined YYSIZE_T
# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
# define YYSIZE_T size_t
# else
# define YYSIZE_T unsigned int
# endif
#endif
#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
#ifndef YY_
# if defined YYENABLE_NLS && YYENABLE_NLS
# if ENABLE_NLS
# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
# define YY_(Msgid) dgettext ("bison-runtime", Msgid)
# endif
# endif
# ifndef YY_
# define YY_(Msgid) Msgid
# endif
#endif
#ifndef YY_ATTRIBUTE
# if (defined __GNUC__ \
&& (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \
|| defined __SUNPRO_C && 0x5110 <= __SUNPRO_C
# define YY_ATTRIBUTE(Spec) __attribute__(Spec)
# else
# define YY_ATTRIBUTE(Spec) /* empty */
# endif
#endif
#ifndef YY_ATTRIBUTE_PURE
# define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__))
#endif
#ifndef YY_ATTRIBUTE_UNUSED
# define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__))
#endif
#if !defined _Noreturn \
&& (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112)
# if defined _MSC_VER && 1200 <= _MSC_VER
# define _Noreturn __declspec (noreturn)
# else
# define _Noreturn YY_ATTRIBUTE ((__noreturn__))
# endif
#endif
/* Suppress unused-variable warnings by "using" E. */
#if ! defined lint || defined __GNUC__
# define YYUSE(E) ((void) (E))
#else
# define YYUSE(E) /* empty */
#endif
#if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__
/* Suppress an incorrect diagnostic about yylval being uninitialized. */
# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \
_Pragma ("GCC diagnostic push") \
_Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\
_Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")
# define YY_IGNORE_MAYBE_UNINITIALIZED_END \
_Pragma ("GCC diagnostic pop")
#else
# define YY_INITIAL_VALUE(Value) Value
#endif
#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
# define YY_IGNORE_MAYBE_UNINITIALIZED_END
#endif
#ifndef YY_INITIAL_VALUE
# define YY_INITIAL_VALUE(Value) /* Nothing. */
#endif
#if ! defined yyoverflow || YYERROR_VERBOSE
/* The parser invokes alloca or malloc; define the necessary symbols. */
# ifdef YYSTACK_USE_ALLOCA
# if YYSTACK_USE_ALLOCA
# ifdef __GNUC__
# define YYSTACK_ALLOC __builtin_alloca
# elif defined __BUILTIN_VA_ARG_INCR
# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
# elif defined _AIX
# define YYSTACK_ALLOC __alloca
# elif defined _MSC_VER
# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
# define alloca _alloca
# else
# define YYSTACK_ALLOC alloca
# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
/* Use EXIT_SUCCESS as a witness for stdlib.h. */
# ifndef EXIT_SUCCESS
# define EXIT_SUCCESS 0
# endif
# endif
# endif
# endif
# endif
# ifdef YYSTACK_ALLOC
/* Pacify GCC's 'empty if-body' warning. */
# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
# ifndef YYSTACK_ALLOC_MAXIMUM
/* The OS might guarantee only one guard page at the bottom of the stack,
and a page size can be as small as 4096 bytes. So we cannot safely
invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
to allow for a few compiler-allocated temporary stack slots. */
# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
# endif
# else
# define YYSTACK_ALLOC YYMALLOC
# define YYSTACK_FREE YYFREE
# ifndef YYSTACK_ALLOC_MAXIMUM
# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
# endif
# if (defined __cplusplus && ! defined EXIT_SUCCESS \
&& ! ((defined YYMALLOC || defined malloc) \
&& (defined YYFREE || defined free)))
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
# ifndef EXIT_SUCCESS
# define EXIT_SUCCESS 0
# endif
# endif
# ifndef YYMALLOC
# define YYMALLOC malloc
# if ! defined malloc && ! defined EXIT_SUCCESS
void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# ifndef YYFREE
# define YYFREE free
# if ! defined free && ! defined EXIT_SUCCESS
void free (void *); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# endif
#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
#if (! defined yyoverflow \
&& (! defined __cplusplus \
|| (defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL \
&& defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
/* A type that is properly aligned for any stack member. */
union yyalloc
{
yytype_int16 yyss_alloc;
YYSTYPE yyvs_alloc;
YYLTYPE yyls_alloc;
};
/* The size of the maximum gap between one aligned stack and the next. */
# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
/* The size of an array large enough to hold all stacks, each with
N elements. */
# define YYSTACK_BYTES(N) \
((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE) + sizeof (YYLTYPE)) \
+ 2 * YYSTACK_GAP_MAXIMUM)
# define YYCOPY_NEEDED 1
/* Relocate STACK from its old location to the new one. The
local variables YYSIZE and YYSTACKSIZE give the old and new number of
elements in the stack, and YYPTR gives the new location of the
stack. Advance YYPTR to a properly aligned location for the next
stack. */
# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
do \
{ \
YYSIZE_T yynewbytes; \
YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
Stack = &yyptr->Stack_alloc; \
yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
yyptr += yynewbytes / sizeof (*yyptr); \
} \
while (0)
#endif
#if defined YYCOPY_NEEDED && YYCOPY_NEEDED
/* Copy COUNT objects from SRC to DST. The source and destination do
not overlap. */
# ifndef YYCOPY
# if defined __GNUC__ && 1 < __GNUC__
# define YYCOPY(Dst, Src, Count) \
__builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src)))
# else
# define YYCOPY(Dst, Src, Count) \
do \
{ \
YYSIZE_T yyi; \
for (yyi = 0; yyi < (Count); yyi++) \
(Dst)[yyi] = (Src)[yyi]; \
} \
while (0)
# endif
# endif
#endif /* !YYCOPY_NEEDED */
/* YYFINAL -- State number of the termination state. */
#define YYFINAL 2
/* YYLAST -- Last index in YYTABLE. */
#define YYLAST 707
/* YYNTOKENS -- Number of terminals. */
#define YYNTOKENS 64
/* YYNNTS -- Number of nonterminals. */
#define YYNNTS 20
/* YYNRULES -- Number of rules. */
#define YYNRULES 113
/* YYNSTATES -- Number of states. */
#define YYNSTATES 180
/* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned
by yylex, with out-of-bounds checking. */
#define YYUNDEFTOK 2
#define YYMAXUTOK 296
#define YYTRANSLATE(YYX) \
((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM
as returned by yylex, without out-of-bounds checking. */
static const yytype_uint8 yytranslate[] =
{
0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 54, 2, 2, 2, 50, 37, 2,
52, 53, 48, 46, 56, 47, 61, 49, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 62,
40, 63, 41, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 57, 2, 58, 36, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 59, 35, 60, 55, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
38, 39, 42, 43, 44, 45, 51
};
#if YYDEBUG
/* YYRLINE[YYN] -- Source line where rule number YYN was defined. */
static const yytype_uint16 yyrline[] =
{
0, 202, 202, 204, 208, 209, 210, 214, 218, 223,
228, 236, 249, 252, 255, 261, 264, 265, 278, 279,
317, 338, 348, 354, 360, 386, 406, 406, 419, 419,
422, 428, 434, 437, 443, 446, 449, 455, 464, 469,
480, 484, 491, 502, 513, 520, 527, 534, 541, 548,
555, 562, 569, 576, 583, 590, 597, 604, 616, 628,
635, 639, 643, 647, 651, 657, 661, 668, 669, 673,
674, 677, 679, 685, 690, 697, 701, 705, 709, 713,
717, 724, 725, 726, 727, 728, 729, 730, 731, 732,
733, 734, 735, 736, 737, 738, 739, 740, 741, 742,
743, 744, 745, 746, 747, 748, 749, 750, 751, 752,
753, 754, 755, 756
};
#endif
#if YYDEBUG || YYERROR_VERBOSE || 1
/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
First, the terminals, then, starting at YYNTOKENS, nonterminals. */
static const char *const yytname[] =
{
"$end", "error", "$undefined", "DEFINED", "ELIF_EXPANDED", "HASH_TOKEN",
"DEFINE_TOKEN", "FUNC_IDENTIFIER", "OBJ_IDENTIFIER", "ELIF", "ELSE",
"ENDIF", "ERROR_TOKEN", "IF", "IFDEF", "IFNDEF", "LINE", "PRAGMA",
"UNDEF", "VERSION_TOKEN", "GARBAGE", "IDENTIFIER", "IF_EXPANDED",
"INTEGER", "INTEGER_STRING", "LINE_EXPANDED", "NEWLINE", "OTHER",
"PLACEHOLDER", "SPACE", "PLUS_PLUS", "MINUS_MINUS", "PASTE", "OR", "AND",
"'|'", "'^'", "'&'", "EQUAL", "NOT_EQUAL", "'<'", "'>'", "LESS_OR_EQUAL",
"GREATER_OR_EQUAL", "LEFT_SHIFT", "RIGHT_SHIFT", "'+'", "'-'", "'*'",
"'/'", "'%'", "UNARY", "'('", "')'", "'!'", "'~'", "','", "'['", "']'",
"'{'", "'}'", "'.'", "';'", "'='", "$accept", "input", "line",
"expanded_line", "define", "control_line", "control_line_success", "$@1",
"$@2", "control_line_error", "integer_constant", "version_constant",
"expression", "identifier_list", "text_line", "replacement_list", "junk",
"pp_tokens", "preprocessing_token", "operator", YY_NULLPTR
};
#endif
# ifdef YYPRINT
/* YYTOKNUM[NUM] -- (External) token number corresponding to the
(internal) symbol number NUM (which must be that of a token). */
static const yytype_uint16 yytoknum[] =
{
0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
285, 286, 287, 288, 289, 124, 94, 38, 290, 291,
60, 62, 292, 293, 294, 295, 43, 45, 42, 47,
37, 296, 40, 41, 33, 126, 44, 91, 93, 123,
125, 46, 59, 61
};
# endif
#define YYPACT_NINF -135
#define yypact_value_is_default(Yystate) \
(!!((Yystate) == (-135)))
#define YYTABLE_NINF -1
#define yytable_value_is_error(Yytable_value) \
0
/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
STATE-NUM. */
static const yytype_int16 yypact[] =
{
-135, 103, -135, -135, -18, 595, -135, -18, -135, -15,
-135, -135, 30, -135, -135, -135, -135, -135, -135, -135,
-135, -135, -135, -135, -135, -135, -135, -135, -135, -135,
-135, -135, -135, -135, -135, -135, -135, -135, -135, -135,
-135, -135, -135, -135, -135, -135, -135, -135, -135, -135,
-135, -135, 151, -135, -135, -135, -135, -135, -18, -18,
-18, -18, -18, -135, 525, 24, 199, -135, -135, 12,
247, 38, 39, 487, 37, 43, 44, 487, -135, 550,
25, -135, -135, -135, -135, -135, -135, -23, -135, -135,
-135, -18, -18, -18, -18, -18, -18, -18, -18, -18,
-18, -18, -18, -18, -18, -18, -18, -18, -18, 15,
487, -135, -135, -135, 295, 49, 51, -135, -135, 343,
487, 487, 391, -135, 52, -135, 36, 439, -135, -135,
79, -135, 588, 604, 619, 633, 646, 657, 657, -3,
-3, -3, -3, 34, 34, 63, 63, -135, -135, -135,
-14, 83, 487, -135, -135, -135, -135, 88, 487, 89,
-135, -135, 90, -135, -135, -135, -135, 487, 5, -135,
-135, -135, -135, 91, 487, 97, -135, 95, -135, -135
};
/* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM.
Performed when YYTABLE does not specify something else to do. Zero
means the default is an error. */
static const yytype_uint8 yydefact[] =
{
2, 0, 1, 78, 0, 0, 75, 0, 76, 0,
67, 79, 80, 112, 113, 111, 107, 106, 105, 104,
88, 102, 103, 98, 99, 100, 101, 96, 97, 90,
91, 89, 94, 95, 83, 84, 93, 92, 109, 81,
82, 85, 86, 87, 108, 110, 3, 7, 4, 15,
16, 6, 0, 73, 77, 41, 38, 37, 0, 0,
0, 0, 0, 40, 0, 0, 0, 26, 28, 0,
0, 0, 0, 0, 0, 0, 0, 0, 32, 0,
0, 5, 68, 80, 74, 63, 62, 0, 60, 61,
9, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
69, 35, 18, 25, 0, 0, 0, 34, 21, 0,
71, 71, 0, 33, 0, 39, 0, 0, 8, 10,
0, 64, 42, 43, 44, 45, 46, 48, 47, 52,
51, 50, 49, 54, 53, 56, 55, 59, 58, 57,
0, 0, 70, 24, 27, 29, 20, 0, 72, 0,
17, 19, 0, 30, 36, 11, 65, 69, 0, 12,
22, 23, 31, 0, 69, 0, 13, 0, 66, 14
};
/* YYPGOTO[NTERM-NUM]. */
static const yytype_int16 yypgoto[] =
{
-135, -135, -135, -135, -135, 58, -135, -135, -135, -135,
-7, -135, -6, -135, -135, -134, 1, -1, -48, -135
};
/* YYDEFGOTO[NTERM-NUM]. */
static const yytype_int16 yydefgoto[] =
{
-1, 1, 46, 47, 112, 48, 49, 115, 116, 50,
63, 126, 64, 168, 51, 151, 157, 152, 53, 54
};
/* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If
positive, shift that token. If negative, reduce the rule whose
number is the opposite. If YYTABLE_NINF, syntax error. */
static const yytype_uint8 yytable[] =
{
52, 79, 80, 55, 84, 56, 57, 166, 56, 57,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
101, 102, 103, 104, 105, 106, 107, 108, 58, 59,
131, 109, 110, 173, 60, 5, 61, 62, 117, 167,
177, 102, 103, 104, 105, 106, 107, 108, 56, 57,
111, 129, 85, 86, 87, 88, 89, 162, 174, 120,
121, 175, 163, 123, 124, 114, 84, 150, 125, 119,
81, 84, 122, 130, 84, 154, 127, 155, 161, 84,
104, 105, 106, 107, 108, 132, 133, 134, 135, 136,
137, 138, 139, 140, 141, 142, 143, 144, 145, 146,
147, 148, 149, 2, 84, 165, 3, 4, 5, 169,
84, 106, 107, 108, 170, 171, 172, 176, 178, 158,
158, 179, 159, 0, 6, 7, 0, 8, 9, 10,
11, 0, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 3, 34, 35, 36, 37, 38,
39, 40, 41, 42, 43, 44, 45, 0, 0, 0,
0, 0, 6, 0, 0, 8, 0, 82, 11, 0,
83, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 3, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 0, 0, 0, 0, 0,
6, 0, 0, 8, 0, 113, 11, 0, 83, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
3, 34, 35, 36, 37, 38, 39, 40, 41, 42,
43, 44, 45, 0, 0, 0, 0, 0, 6, 0,
0, 8, 0, 118, 11, 0, 83, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33, 3, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
45, 0, 0, 0, 0, 0, 6, 0, 0, 8,
0, 153, 11, 0, 83, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
28, 29, 30, 31, 32, 33, 3, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 45, 0,
0, 0, 0, 0, 6, 0, 0, 8, 0, 156,
11, 0, 83, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 3, 34, 35, 36, 37, 38,
39, 40, 41, 42, 43, 44, 45, 0, 0, 0,
0, 0, 6, 0, 0, 8, 0, 160, 11, 0,
83, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 3, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 0, 0, 0, 0, 0,
6, 0, 0, 8, 0, 164, 11, 0, 83, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
3, 34, 35, 36, 37, 38, 39, 40, 41, 42,
43, 44, 45, 0, 0, 0, 0, 0, 6, 0,
0, 8, 0, 0, 11, 0, 83, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33, 0, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
45, 90, 0, 0, 0, 0, 0, 0, 91, 92,
93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
103, 104, 105, 106, 107, 108, 128, 0, 0, 0,
0, 0, 0, 91, 92, 93, 94, 95, 96, 97,
98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
108, 65, 0, 0, 66, 67, 68, 69, 70, 71,
72, 73, 74, 75, 76, 77, 0, 0, 0, 0,
0, 78, 92, 93, 94, 95, 96, 97, 98, 99,
100, 101, 102, 103, 104, 105, 106, 107, 108, 93,
94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
104, 105, 106, 107, 108, 94, 95, 96, 97, 98,
99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
105, 106, 107, 108, 96, 97, 98, 99, 100, 101,
102, 103, 104, 105, 106, 107, 108, 98, 99, 100,
101, 102, 103, 104, 105, 106, 107, 108
};
static const yytype_int16 yycheck[] =
{
1, 7, 9, 21, 52, 23, 24, 21, 23, 24,
33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
43, 44, 45, 46, 47, 48, 49, 50, 46, 47,
53, 7, 8, 167, 52, 5, 54, 55, 26, 53,
174, 44, 45, 46, 47, 48, 49, 50, 23, 24,
26, 26, 58, 59, 60, 61, 62, 21, 53, 21,
21, 56, 26, 26, 21, 66, 114, 52, 24, 70,
12, 119, 73, 80, 122, 26, 77, 26, 26, 127,
46, 47, 48, 49, 50, 91, 92, 93, 94, 95,
96, 97, 98, 99, 100, 101, 102, 103, 104, 105,
106, 107, 108, 0, 152, 26, 3, 4, 5, 26,
158, 48, 49, 50, 26, 26, 26, 26, 21, 120,
121, 26, 121, -1, 21, 22, -1, 24, 25, 26,
27, -1, 29, 30, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
47, 48, 49, 50, 3, 52, 53, 54, 55, 56,
57, 58, 59, 60, 61, 62, 63, -1, -1, -1,
-1, -1, 21, -1, -1, 24, -1, 26, 27, -1,
29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 3, 52, 53, 54, 55, 56, 57, 58,
59, 60, 61, 62, 63, -1, -1, -1, -1, -1,
21, -1, -1, 24, -1, 26, 27, -1, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
3, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, -1, -1, -1, -1, -1, 21, -1,
-1, 24, -1, 26, 27, -1, 29, 30, 31, 32,
33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
43, 44, 45, 46, 47, 48, 49, 50, 3, 52,
53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
63, -1, -1, -1, -1, -1, 21, -1, -1, 24,
-1, 26, 27, -1, 29, 30, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
45, 46, 47, 48, 49, 50, 3, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, -1,
-1, -1, -1, -1, 21, -1, -1, 24, -1, 26,
27, -1, 29, 30, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
47, 48, 49, 50, 3, 52, 53, 54, 55, 56,
57, 58, 59, 60, 61, 62, 63, -1, -1, -1,
-1, -1, 21, -1, -1, 24, -1, 26, 27, -1,
29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 3, 52, 53, 54, 55, 56, 57, 58,
59, 60, 61, 62, 63, -1, -1, -1, -1, -1,
21, -1, -1, 24, -1, 26, 27, -1, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
3, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, -1, -1, -1, -1, -1, 21, -1,
-1, 24, -1, -1, 27, -1, 29, 30, 31, 32,
33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
43, 44, 45, 46, 47, 48, 49, 50, -1, 52,
53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
63, 26, -1, -1, -1, -1, -1, -1, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
45, 46, 47, 48, 49, 50, 26, -1, -1, -1,
-1, -1, -1, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 6, -1, -1, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, -1, -1, -1, -1,
-1, 26, 34, 35, 36, 37, 38, 39, 40, 41,
42, 43, 44, 45, 46, 47, 48, 49, 50, 35,
36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
46, 47, 48, 49, 50, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
47, 48, 49, 50, 38, 39, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 40, 41, 42,
43, 44, 45, 46, 47, 48, 49, 50
};
/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
symbol of state STATE-NUM. */
static const yytype_uint8 yystos[] =
{
0, 65, 0, 3, 4, 5, 21, 22, 24, 25,
26, 27, 29, 30, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
47, 48, 49, 50, 52, 53, 54, 55, 56, 57,
58, 59, 60, 61, 62, 63, 66, 67, 69, 70,
73, 78, 81, 82, 83, 21, 23, 24, 46, 47,
52, 54, 55, 74, 76, 6, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 26, 76,
74, 69, 26, 29, 82, 76, 76, 76, 76, 76,
26, 33, 34, 35, 36, 37, 38, 39, 40, 41,
42, 43, 44, 45, 46, 47, 48, 49, 50, 7,
8, 26, 68, 26, 81, 71, 72, 26, 26, 81,
21, 21, 81, 26, 21, 24, 75, 81, 26, 26,
74, 53, 76, 76, 76, 76, 76, 76, 76, 76,
76, 76, 76, 76, 76, 76, 76, 76, 76, 76,
52, 79, 81, 26, 26, 26, 26, 80, 81, 80,
26, 26, 21, 26, 26, 26, 21, 53, 77, 26,
26, 26, 26, 79, 53, 56, 26, 79, 21, 26
};
/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
static const yytype_uint8 yyr1[] =
{
0, 64, 65, 65, 66, 66, 66, 66, 67, 67,
67, 67, 68, 68, 68, 69, 69, 69, 70, 70,
70, 70, 70, 70, 70, 70, 71, 70, 72, 70,
70, 70, 70, 70, 73, 73, 73, 74, 74, 75,
76, 76, 76, 76, 76, 76, 76, 76, 76, 76,
76, 76, 76, 76, 76, 76, 76, 76, 76, 76,
76, 76, 76, 76, 76, 77, 77, 78, 78, 79,
79, 80, 80, 81, 81, 82, 82, 82, 82, 82,
82, 83, 83, 83, 83, 83, 83, 83, 83, 83,
83, 83, 83, 83, 83, 83, 83, 83, 83, 83,
83, 83, 83, 83, 83, 83, 83, 83, 83, 83,
83, 83, 83, 83
};
/* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */
static const yytype_uint8 yyr2[] =
{
0, 2, 0, 2, 1, 2, 1, 1, 3, 3,
3, 4, 3, 5, 6, 1, 1, 4, 3, 4,
4, 3, 5, 5, 4, 3, 0, 4, 0, 4,
4, 5, 2, 3, 3, 3, 4, 1, 1, 1,
1, 1, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2, 2, 2, 2, 3, 1, 3, 1, 2, 0,
1, 0, 1, 1, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1
};
#define yyerrok (yyerrstatus = 0)
#define yyclearin (yychar = YYEMPTY)
#define YYEMPTY (-2)
#define YYEOF 0
#define YYACCEPT goto yyacceptlab
#define YYABORT goto yyabortlab
#define YYERROR goto yyerrorlab
#define YYRECOVERING() (!!yyerrstatus)
#define YYBACKUP(Token, Value) \
do \
if (yychar == YYEMPTY) \
{ \
yychar = (Token); \
yylval = (Value); \
YYPOPSTACK (yylen); \
yystate = *yyssp; \
goto yybackup; \
} \
else \
{ \
yyerror (&yylloc, parser, YY_("syntax error: cannot back up")); \
YYERROR; \
} \
while (0)
/* Error token number */
#define YYTERROR 1
#define YYERRCODE 256
/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
If N is 0, then set CURRENT to the empty location which ends
the previous symbol: RHS[0] (always defined). */
#ifndef YYLLOC_DEFAULT
# define YYLLOC_DEFAULT(Current, Rhs, N) \
do \
if (N) \
{ \
(Current).first_line = YYRHSLOC (Rhs, 1).first_line; \
(Current).first_column = YYRHSLOC (Rhs, 1).first_column; \
(Current).last_line = YYRHSLOC (Rhs, N).last_line; \
(Current).last_column = YYRHSLOC (Rhs, N).last_column; \
} \
else \
{ \
(Current).first_line = (Current).last_line = \
YYRHSLOC (Rhs, 0).last_line; \
(Current).first_column = (Current).last_column = \
YYRHSLOC (Rhs, 0).last_column; \
} \
while (0)
#endif
#define YYRHSLOC(Rhs, K) ((Rhs)[K])
/* Enable debugging if requested. */
#if YYDEBUG
# ifndef YYFPRINTF
# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
# define YYFPRINTF fprintf
# endif
# define YYDPRINTF(Args) \
do { \
if (yydebug) \
YYFPRINTF Args; \
} while (0)
/* YY_LOCATION_PRINT -- Print the location on the stream.
   This macro was not mandated originally: define it only when we know
   we won't break user code, i.e. when these are the locations we know. */
#ifndef YY_LOCATION_PRINT
# if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL
/* Print *YYLOCP on YYO. Private, do not rely on its existence. */
YY_ATTRIBUTE_UNUSED
static unsigned
yy_location_print_ (FILE *yyo, YYLTYPE const * const yylocp)
{
unsigned res = 0;
int end_col = 0 != yylocp->last_column ? yylocp->last_column - 1 : 0;
if (0 <= yylocp->first_line)
{
res += YYFPRINTF (yyo, "%d", yylocp->first_line);
if (0 <= yylocp->first_column)
res += YYFPRINTF (yyo, ".%d", yylocp->first_column);
}
if (0 <= yylocp->last_line)
{
if (yylocp->first_line < yylocp->last_line)
{
res += YYFPRINTF (yyo, "-%d", yylocp->last_line);
if (0 <= end_col)
res += YYFPRINTF (yyo, ".%d", end_col);
}
else if (0 <= end_col && yylocp->first_column < end_col)
res += YYFPRINTF (yyo, "-%d", end_col);
}
return res;
}
# define YY_LOCATION_PRINT(File, Loc) \
yy_location_print_ (File, &(Loc))
# else
# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
# endif
#endif
# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
do { \
if (yydebug) \
{ \
YYFPRINTF (stderr, "%s ", Title); \
yy_symbol_print (stderr, \
Type, Value, Location, parser); \
YYFPRINTF (stderr, "\n"); \
} \
} while (0)
/*----------------------------------------.
| Print this symbol's value on YYOUTPUT. |
`----------------------------------------*/
static void
yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, YYLTYPE const * const yylocationp, glcpp_parser_t *parser)
{
FILE *yyo = yyoutput;
YYUSE (yyo);
YYUSE (yylocationp);
YYUSE (parser);
if (!yyvaluep)
return;
# ifdef YYPRINT
if (yytype < YYNTOKENS)
YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
# endif
YYUSE (yytype);
}
/*--------------------------------.
| Print this symbol on YYOUTPUT. |
`--------------------------------*/
static void
yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, YYLTYPE const * const yylocationp, glcpp_parser_t *parser)
{
YYFPRINTF (yyoutput, "%s %s (",
yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]);
YY_LOCATION_PRINT (yyoutput, *yylocationp);
YYFPRINTF (yyoutput, ": ");
yy_symbol_value_print (yyoutput, yytype, yyvaluep, yylocationp, parser);
YYFPRINTF (yyoutput, ")");
}
/*------------------------------------------------------------------.
| yy_stack_print -- Print the state stack from its BOTTOM up to its |
| TOP (included). |
`------------------------------------------------------------------*/
static void
yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
{
YYFPRINTF (stderr, "Stack now");
for (; yybottom <= yytop; yybottom++)
{
int yybot = *yybottom;
YYFPRINTF (stderr, " %d", yybot);
}
YYFPRINTF (stderr, "\n");
}
# define YY_STACK_PRINT(Bottom, Top) \
do { \
if (yydebug) \
yy_stack_print ((Bottom), (Top)); \
} while (0)
/*------------------------------------------------.
| Report that the YYRULE is going to be reduced. |
`------------------------------------------------*/
static void
yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, YYLTYPE *yylsp, int yyrule, glcpp_parser_t *parser)
{
unsigned long int yylno = yyrline[yyrule];
int yynrhs = yyr2[yyrule];
int yyi;
YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
yyrule - 1, yylno);
/* The symbols being reduced. */
for (yyi = 0; yyi < yynrhs; yyi++)
{
YYFPRINTF (stderr, " $%d = ", yyi + 1);
yy_symbol_print (stderr,
yystos[yyssp[yyi + 1 - yynrhs]],
&(yyvsp[(yyi + 1) - (yynrhs)])
, &(yylsp[(yyi + 1) - (yynrhs)]) , parser);
YYFPRINTF (stderr, "\n");
}
}
# define YY_REDUCE_PRINT(Rule) \
do { \
if (yydebug) \
yy_reduce_print (yyssp, yyvsp, yylsp, Rule, parser); \
} while (0)
/* Nonzero means print parse trace. It is left uninitialized so that
multiple parsers can coexist. */
int yydebug;
#else /* !YYDEBUG */
# define YYDPRINTF(Args)
# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
# define YY_STACK_PRINT(Bottom, Top)
# define YY_REDUCE_PRINT(Rule)
#endif /* !YYDEBUG */
/* YYINITDEPTH -- initial size of the parser's stacks. */
#ifndef YYINITDEPTH
# define YYINITDEPTH 200
#endif
/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
if the built-in stack extension method is used).
Do not make this value too large; the results are undefined if
YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
evaluated with infinite-precision integer arithmetic. */
#ifndef YYMAXDEPTH
# define YYMAXDEPTH 10000
#endif
#if YYERROR_VERBOSE
# ifndef yystrlen
# if defined __GLIBC__ && defined _STRING_H
# define yystrlen strlen
# else
/* Return the length of YYSTR. */
static YYSIZE_T
yystrlen (const char *yystr)
{
YYSIZE_T yylen;
for (yylen = 0; yystr[yylen]; yylen++)
continue;
return yylen;
}
# endif
# endif
# ifndef yystpcpy
# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
# define yystpcpy stpcpy
# else
/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
YYDEST. */
static char *
yystpcpy (char *yydest, const char *yysrc)
{
char *yyd = yydest;
const char *yys = yysrc;
while ((*yyd++ = *yys++) != '\0')
continue;
return yyd - 1;
}
# endif
# endif
# ifndef yytnamerr
/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
quotes and backslashes, so that it's suitable for yyerror. The
heuristic is that double-quoting is unnecessary unless the string
contains an apostrophe, a comma, or backslash (other than
backslash-backslash). YYSTR is taken from yytname. If YYRES is
null, do not copy; instead, return the length of what the result
would have been. */
static YYSIZE_T
yytnamerr (char *yyres, const char *yystr)
{
if (*yystr == '"')
{
YYSIZE_T yyn = 0;
char const *yyp = yystr;
for (;;)
switch (*++yyp)
{
case '\'':
case ',':
goto do_not_strip_quotes;
case '\\':
if (*++yyp != '\\')
goto do_not_strip_quotes;
/* Fall through. */
default:
if (yyres)
yyres[yyn] = *yyp;
yyn++;
break;
case '"':
if (yyres)
yyres[yyn] = '\0';
return yyn;
}
do_not_strip_quotes: ;
}
if (! yyres)
return yystrlen (yystr);
return yystpcpy (yyres, yystr) - yyres;
}
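/* For grammars that declare quoted token aliases, a yytname entry such as
* "\"<<\"" is reduced by yytnamerr to the bare characters <<, while a name
* containing an apostrophe, a comma or a lone backslash is left quoted so
* that the resulting error message stays unambiguous. */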
# endif
/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
about the unexpected token YYTOKEN for the state stack whose top is
YYSSP.
Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is
not large enough to hold the message. In that case, also set
*YYMSG_ALLOC to the required number of bytes. Return 2 if the
required number of bytes is too large to store. */
static int
yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
yytype_int16 *yyssp, int yytoken)
{
YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]);
YYSIZE_T yysize = yysize0;
enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
/* Internationalized format string. */
const char *yyformat = YY_NULLPTR;
/* Arguments of yyformat. */
char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
/* Number of reported tokens (one for the "unexpected", one per
"expected"). */
int yycount = 0;
/* There are many possibilities here to consider:
- If this state is a consistent state with a default action, then
the only way this function was invoked is if the default action
is an error action. In that case, don't check for expected
tokens because there are none.
- The only way there can be no lookahead present (in yychar) is if
this state is a consistent state with a default action. Thus,
detecting the absence of a lookahead is sufficient to determine
that there is no unexpected or expected token to report. In that
case, just report a simple "syntax error".
- Don't assume there isn't a lookahead just because this state is a
consistent state with a default action. There might have been a
previous inconsistent state, consistent state with a non-default
action, or user semantic action that manipulated yychar.
- Of course, the expected token list depends on states to have
correct lookahead information, and it depends on the parser not
to perform extra reductions after fetching a lookahead from the
scanner and before detecting a syntax error. Thus, state merging
(from LALR or IELR) and default reductions corrupt the expected
token list. However, the list is correct for canonical LR with
one exception: it will still contain any token that will not be
accepted due to an error action in a later state.
*/
if (yytoken != YYEMPTY)
{
int yyn = yypact[*yyssp];
yyarg[yycount++] = yytname[yytoken];
if (!yypact_value_is_default (yyn))
{
/* Start YYX at -YYN if negative to avoid negative indexes in
YYCHECK. In other words, skip the first -YYN actions for
this state because they are default actions. */
int yyxbegin = yyn < 0 ? -yyn : 0;
/* Stay within bounds of both yycheck and yytname. */
int yychecklim = YYLAST - yyn + 1;
int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
int yyx;
for (yyx = yyxbegin; yyx < yyxend; ++yyx)
if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR
&& !yytable_value_is_error (yytable[yyx + yyn]))
{
if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
{
yycount = 1;
yysize = yysize0;
break;
}
yyarg[yycount++] = yytname[yyx];
{
YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]);
if (! (yysize <= yysize1
&& yysize1 <= YYSTACK_ALLOC_MAXIMUM))
return 2;
yysize = yysize1;
}
}
}
}
switch (yycount)
{
# define YYCASE_(N, S) \
case N: \
yyformat = S; \
break
YYCASE_(0, YY_("syntax error"));
YYCASE_(1, YY_("syntax error, unexpected %s"));
YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s"));
YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s"));
YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s"));
YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"));
# undef YYCASE_
}
{
YYSIZE_T yysize1 = yysize + yystrlen (yyformat);
if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
return 2;
yysize = yysize1;
}
if (*yymsg_alloc < yysize)
{
*yymsg_alloc = 2 * yysize;
if (! (yysize <= *yymsg_alloc
&& *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))
*yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;
return 1;
}
/* Avoid sprintf, as that infringes on the user's name space.
Don't have undefined behavior even if the translation
produced a string with the wrong number of "%s"s. */
{
char *yyp = *yymsg;
int yyi = 0;
while ((*yyp = *yyformat) != '\0')
if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)
{
yyp += yytnamerr (yyp, yyarg[yyi++]);
yyformat += 2;
}
else
{
yyp++;
yyformat++;
}
}
return 0;
}
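/* Note that YYERROR_VERBOSE_ARGS_MAXIMUM (5) covers the unexpected token
* plus at most four expected tokens; when more candidates would apply,
* yycount is reset to 1 above and the message falls back to a plain
* "syntax error, unexpected %s". */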
#endif /* YYERROR_VERBOSE */
/*-----------------------------------------------.
| Release the memory associated to this symbol. |
`-----------------------------------------------*/
static void
yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, YYLTYPE *yylocationp, glcpp_parser_t *parser)
{
YYUSE (yyvaluep);
YYUSE (yylocationp);
YYUSE (parser);
if (!yymsg)
yymsg = "Deleting";
YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
YYUSE (yytype);
YY_IGNORE_MAYBE_UNINITIALIZED_END
}
/*----------.
| yyparse. |
`----------*/
int
yyparse (glcpp_parser_t *parser)
{
/* The lookahead symbol. */
int yychar;
/* The semantic value of the lookahead symbol. */
/* Default value used for initialization, for pacifying older GCCs
or non-GCC compilers. */
YY_INITIAL_VALUE (static YYSTYPE yyval_default;)
YYSTYPE yylval YY_INITIAL_VALUE (= yyval_default);
/* Location data for the lookahead symbol. */
static YYLTYPE yyloc_default
# if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL
= { 1, 1, 1, 1 }
# endif
;
YYLTYPE yylloc = yyloc_default;
/* Number of syntax errors so far. */
int yynerrs;
int yystate;
/* Number of tokens to shift before error messages enabled. */
int yyerrstatus;
/* The stacks and their tools:
'yyss': related to states.
'yyvs': related to semantic values.
'yyls': related to locations.
Refer to the stacks through separate pointers, to allow yyoverflow
to reallocate them elsewhere. */
/* The state stack. */
yytype_int16 yyssa[YYINITDEPTH];
yytype_int16 *yyss;
yytype_int16 *yyssp;
/* The semantic value stack. */
YYSTYPE yyvsa[YYINITDEPTH];
YYSTYPE *yyvs;
YYSTYPE *yyvsp;
/* The location stack. */
YYLTYPE yylsa[YYINITDEPTH];
YYLTYPE *yyls;
YYLTYPE *yylsp;
/* The locations where the error started and ended. */
YYLTYPE yyerror_range[3];
YYSIZE_T yystacksize;
int yyn;
int yyresult;
/* Lookahead token as an internal (translated) token number. */
int yytoken = 0;
/* The variables used to return semantic value and location from the
action routines. */
YYSTYPE yyval;
YYLTYPE yyloc;
#if YYERROR_VERBOSE
/* Buffer for error messages, and its allocated size. */
char yymsgbuf[128];
char *yymsg = yymsgbuf;
YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
#endif
#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N), yylsp -= (N))
/* The number of symbols on the RHS of the reduced rule.
Keep to zero when no symbol should be popped. */
int yylen = 0;
yyssp = yyss = yyssa;
yyvsp = yyvs = yyvsa;
yylsp = yyls = yylsa;
yystacksize = YYINITDEPTH;
YYDPRINTF ((stderr, "Starting parse\n"));
yystate = 0;
yyerrstatus = 0;
yynerrs = 0;
yychar = YYEMPTY; /* Cause a token to be read. */
/* User initialization code. */
#line 162 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1429 */
{
yylloc.first_line = 1;
yylloc.first_column = 1;
yylloc.last_line = 1;
yylloc.last_column = 1;
yylloc.source = 0;
}
#line 1569 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1429 */
yylsp[0] = yylloc;
goto yysetstate;
/*------------------------------------------------------------.
| yynewstate -- Push a new state, which is found in yystate. |
`------------------------------------------------------------*/
yynewstate:
/* In all cases, when you get here, the value and location stacks
have just been pushed. So pushing a state here evens the stacks. */
yyssp++;
yysetstate:
*yyssp = yystate;
if (yyss + yystacksize - 1 <= yyssp)
{
/* Get the current used size of the three stacks, in elements. */
YYSIZE_T yysize = yyssp - yyss + 1;
#ifdef yyoverflow
{
/* Give user a chance to reallocate the stack. Use copies of
these so that the &'s don't force the real ones into
memory. */
YYSTYPE *yyvs1 = yyvs;
yytype_int16 *yyss1 = yyss;
YYLTYPE *yyls1 = yyls;
/* Each stack pointer address is followed by the size of the
data in use in that stack, in bytes. This used to be a
conditional around just the two extra args, but that might
be undefined if yyoverflow is a macro. */
yyoverflow (YY_("memory exhausted"),
&yyss1, yysize * sizeof (*yyssp),
&yyvs1, yysize * sizeof (*yyvsp),
&yyls1, yysize * sizeof (*yylsp),
&yystacksize);
yyls = yyls1;
yyss = yyss1;
yyvs = yyvs1;
}
#else /* no yyoverflow */
# ifndef YYSTACK_RELOCATE
goto yyexhaustedlab;
# else
/* Extend the stack our own way. */
if (YYMAXDEPTH <= yystacksize)
goto yyexhaustedlab;
yystacksize *= 2;
if (YYMAXDEPTH < yystacksize)
yystacksize = YYMAXDEPTH;
{
yytype_int16 *yyss1 = yyss;
union yyalloc *yyptr =
(union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
if (! yyptr)
goto yyexhaustedlab;
YYSTACK_RELOCATE (yyss_alloc, yyss);
YYSTACK_RELOCATE (yyvs_alloc, yyvs);
YYSTACK_RELOCATE (yyls_alloc, yyls);
# undef YYSTACK_RELOCATE
if (yyss1 != yyssa)
YYSTACK_FREE (yyss1);
}
# endif
#endif /* no yyoverflow */
yyssp = yyss + yysize - 1;
yyvsp = yyvs + yysize - 1;
yylsp = yyls + yysize - 1;
YYDPRINTF ((stderr, "Stack size increased to %lu\n",
(unsigned long int) yystacksize));
if (yyss + yystacksize - 1 <= yyssp)
YYABORT;
}
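/* The three parser stacks grow in lockstep: either the user-supplied
* yyoverflow hook reallocates them, or the stack size is doubled (capped
* at YYMAXDEPTH) and YYSTACK_RELOCATE copies the state, value and
* location stacks into a single fresh allocation. */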
YYDPRINTF ((stderr, "Entering state %d\n", yystate));
if (yystate == YYFINAL)
YYACCEPT;
goto yybackup;
/*-----------.
| yybackup. |
`-----------*/
yybackup:
/* Do appropriate processing given the current state. Read a
lookahead token if we need one and don't already have one. */
/* First try to decide what to do without reference to lookahead token. */
yyn = yypact[yystate];
if (yypact_value_is_default (yyn))
goto yydefault;
/* Not known => get a lookahead token if don't already have one. */
/* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
if (yychar == YYEMPTY)
{
YYDPRINTF ((stderr, "Reading a token: "));
yychar = yylex (&yylval, &yylloc, parser);
}
if (yychar <= YYEOF)
{
yychar = yytoken = YYEOF;
YYDPRINTF ((stderr, "Now at end of input.\n"));
}
else
{
yytoken = YYTRANSLATE (yychar);
YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
}
/* If the proper action on seeing token YYTOKEN is to reduce or to
detect an error, take that action. */
yyn += yytoken;
if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
goto yydefault;
yyn = yytable[yyn];
if (yyn <= 0)
{
if (yytable_value_is_error (yyn))
goto yyerrlab;
yyn = -yyn;
goto yyreduce;
}
/* Count tokens shifted since error; after three, turn off error
status. */
if (yyerrstatus)
yyerrstatus--;
/* Shift the lookahead token. */
YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
/* Discard the shifted token. */
yychar = YYEMPTY;
yystate = yyn;
YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
*++yyvsp = yylval;
YY_IGNORE_MAYBE_UNINITIALIZED_END
*++yylsp = yylloc;
goto yynewstate;
/*-----------------------------------------------------------.
| yydefault -- do the default action for the current state. |
`-----------------------------------------------------------*/
yydefault:
yyn = yydefact[yystate];
if (yyn == 0)
goto yyerrlab;
goto yyreduce;
/*-----------------------------.
| yyreduce -- Do a reduction. |
`-----------------------------*/
yyreduce:
/* yyn is the number of a rule to reduce with. */
yylen = yyr2[yyn];
/* If YYLEN is nonzero, implement the default value of the action:
'$$ = $1'.
Otherwise, the following line sets YYVAL to garbage.
This behavior is undocumented and Bison
users should not rely upon it. Assigning to YYVAL
unconditionally makes the parser a bit smaller, and it avoids a
GCC warning that YYVAL may be used uninitialized. */
yyval = yyvsp[1-yylen];
/* Default location. */
YYLLOC_DEFAULT (yyloc, (yylsp - yylen), yylen);
YY_REDUCE_PRINT (yyn);
switch (yyn)
{
case 6:
#line 210 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
_glcpp_parser_print_expanded_token_list (parser, (yyvsp[0].token_list));
ralloc_asprintf_rewrite_tail (&parser->output, &parser->output_length, "\n");
}
#line 1761 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 8:
#line 218 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
if (parser->is_gles && (yyvsp[-1].expression_value).undefined_macro)
glcpp_error(& (yylsp[-2]), parser, "undefined macro %s in expression (illegal in GLES)", (yyvsp[-1].expression_value).undefined_macro);
_glcpp_parser_skip_stack_push_if (parser, & (yylsp[-2]), (yyvsp[-1].expression_value).value);
}
#line 1771 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 9:
#line 223 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
if (parser->is_gles && (yyvsp[-1].expression_value).undefined_macro)
glcpp_error(& (yylsp[-2]), parser, "undefined macro %s in expression (illegal in GLES)", (yyvsp[-1].expression_value).undefined_macro);
_glcpp_parser_skip_stack_change_if (parser, & (yylsp[-2]), "elif", (yyvsp[-1].expression_value).value);
}
#line 1781 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 10:
#line 228 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
parser->has_new_line_number = 1;
parser->new_line_number = (yyvsp[-1].ival);
ralloc_asprintf_rewrite_tail (&parser->output,
&parser->output_length,
"#line %" PRIiMAX "\n",
(yyvsp[-1].ival));
}
#line 1794 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 11:
#line 236 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
parser->has_new_line_number = 1;
parser->new_line_number = (yyvsp[-2].ival);
parser->has_new_source_number = 1;
parser->new_source_number = (yyvsp[-1].ival);
ralloc_asprintf_rewrite_tail (&parser->output,
&parser->output_length,
"#line %" PRIiMAX " %" PRIiMAX "\n",
(yyvsp[-2].ival), (yyvsp[-1].ival));
}
#line 1809 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 12:
#line 249 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
_define_object_macro (parser, & (yylsp[-2]), (yyvsp[-2].str), (yyvsp[-1].token_list));
}
#line 1817 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 13:
#line 252 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
_define_function_macro (parser, & (yylsp[-4]), (yyvsp[-4].str), NULL, (yyvsp[-1].token_list));
}
#line 1825 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 14:
#line 255 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
_define_function_macro (parser, & (yylsp[-5]), (yyvsp[-5].str), (yyvsp[-3].string_list), (yyvsp[-1].token_list));
}
#line 1833 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 15:
#line 261 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
ralloc_asprintf_rewrite_tail (&parser->output, &parser->output_length, "\n");
}
#line 1841 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 17:
#line 265 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
if (parser->skip_stack == NULL ||
parser->skip_stack->type == SKIP_NO_SKIP)
{
_glcpp_parser_expand_and_lex_from (parser,
LINE_EXPANDED, (yyvsp[-1].token_list),
EXPANSION_MODE_IGNORE_DEFINED);
}
}
#line 1856 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 19:
#line 279 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
struct hash_entry *entry;
/* Section 3.4 (Preprocessor) of the GLSL ES 3.00 spec says:
*
* It is an error to undefine or to redefine a built-in
* (pre-defined) macro name.
*
* The GLSL ES 1.00 spec does not contain this text.
*
* Section 3.3 (Preprocessor) of the GLSL 1.30 spec says:
*
* #define and #undef functionality are defined as is
* standard for C++ preprocessors for macro definitions
* both with and without macro parameters.
*
* At least as far as I can tell, GCC allows '#undef __FILE__'.
* Furthermore, there are desktop OpenGL conformance tests
* that expect '#undef __VERSION__' and '#undef
* GL_core_profile' to work.
*
* Only disallow #undef of pre-defined macros on GLSL ES >=
* 3.00 shaders.
*/
if (parser->is_gles &&
parser->version >= 300 &&
(strcmp("__LINE__", (yyvsp[-1].str)) == 0
|| strcmp("__FILE__", (yyvsp[-1].str)) == 0
|| strcmp("__VERSION__", (yyvsp[-1].str)) == 0
|| strncmp("GL_", (yyvsp[-1].str), 3) == 0))
glcpp_error(& (yylsp[-3]), parser, "Built-in (pre-defined)"
" macro names cannot be undefined.");
entry = _mesa_hash_table_search (parser->defines, (yyvsp[-1].str));
if (entry) {
_mesa_hash_table_remove (parser->defines, entry);
}
}
#line 1899 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 20:
#line 317 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
/* Be careful to only evaluate the 'if' expression if
* we are not skipping. When we are skipping, we
* simply push a new 0-valued 'if' onto the skip
* stack.
*
* This avoids generating diagnostics for invalid
* expressions that are being skipped. */
if (parser->skip_stack == NULL ||
parser->skip_stack->type == SKIP_NO_SKIP)
{
_glcpp_parser_expand_and_lex_from (parser,
IF_EXPANDED, (yyvsp[-1].token_list),
EXPANSION_MODE_EVALUATE_DEFINED);
}
else
{
_glcpp_parser_skip_stack_push_if (parser, & (yylsp[-3]), 0);
parser->skip_stack->type = SKIP_TO_ENDIF;
}
}
#line 1925 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
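/* #if with an expression (case 20 above): when no enclosing block is
* being skipped, the condition is macro-expanded and re-lexed as
* IF_EXPANDED so it can be evaluated; when we are already skipping, a
* 0-valued SKIP_TO_ENDIF entry is pushed instead, so the expression is
* never evaluated and never diagnosed. */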
case 21:
#line 338 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
/* #if without an expression is only an error if we
* are not skipping */
if (parser->skip_stack == NULL ||
parser->skip_stack->type == SKIP_NO_SKIP)
{
glcpp_error(& (yylsp[-2]), parser, "#if with no expression");
}
_glcpp_parser_skip_stack_push_if (parser, & (yylsp[-2]), 0);
}
#line 1940 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 22:
#line 348 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
struct hash_entry *entry =
_mesa_hash_table_search(parser->defines, (yyvsp[-2].str));
macro_t *macro = entry ? entry->data : NULL;
_glcpp_parser_skip_stack_push_if (parser, & (yylsp[-4]), macro != NULL);
}
#line 1951 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 23:
#line 354 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
struct hash_entry *entry =
_mesa_hash_table_search(parser->defines, (yyvsp[-2].str));
macro_t *macro = entry ? entry->data : NULL;
_glcpp_parser_skip_stack_push_if (parser, & (yylsp[-2]), macro == NULL);
}
#line 1962 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 24:
#line 360 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
/* Be careful to only evaluate the 'elif' expression
* if we are not skipping. When we are skipping, we
* simply change to a 0-valued 'elif' on the skip
* stack.
*
* This avoids generating diagnostics for invalid
* expressions that are being skipped. */
if (parser->skip_stack &&
parser->skip_stack->type == SKIP_TO_ELSE)
{
_glcpp_parser_expand_and_lex_from (parser,
ELIF_EXPANDED, (yyvsp[-1].token_list),
EXPANSION_MODE_EVALUATE_DEFINED);
}
else if (parser->skip_stack &&
parser->skip_stack->has_else)
{
glcpp_error(& (yylsp[-3]), parser, "#elif after #else");
}
else
{
_glcpp_parser_skip_stack_change_if (parser, & (yylsp[-3]),
"elif", 0);
}
}
#line 1993 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 25:
#line 386 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
/* #elif without an expression is an error unless we
* are skipping. */
if (parser->skip_stack &&
parser->skip_stack->type == SKIP_TO_ELSE)
{
glcpp_error(& (yylsp[-2]), parser, "#elif with no expression");
}
else if (parser->skip_stack &&
parser->skip_stack->has_else)
{
glcpp_error(& (yylsp[-2]), parser, "#elif after #else");
}
else
{
_glcpp_parser_skip_stack_change_if (parser, & (yylsp[-2]),
"elif", 0);
glcpp_warning(& (yylsp[-2]), parser, "ignoring illegal #elif without expression");
}
}
#line 2018 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 26:
#line 406 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ parser->lexing_directive = 1; }
#line 2024 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 27:
#line 406 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
if (parser->skip_stack &&
parser->skip_stack->has_else)
{
glcpp_error(& (yylsp[-3]), parser, "multiple #else");
}
else
{
_glcpp_parser_skip_stack_change_if (parser, & (yylsp[-3]), "else", 1);
if (parser->skip_stack)
parser->skip_stack->has_else = true;
}
}
#line 2042 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 28:
#line 419 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
_glcpp_parser_skip_stack_pop (parser, & (yylsp[-1]));
}
#line 2050 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 30:
#line 422 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
if (parser->version_set) {
glcpp_error(& (yylsp[-3]), parser, "#version must appear on the first line");
}
_glcpp_parser_handle_version_declaration(parser, (yyvsp[-1].ival), NULL, true);
}
#line 2061 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 31:
#line 428 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
if (parser->version_set) {
glcpp_error(& (yylsp[-4]), parser, "#version must appear on the first line");
}
_glcpp_parser_handle_version_declaration(parser, (yyvsp[-2].ival), (yyvsp[-1].str), true);
}
#line 2072 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 32:
#line 434 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
glcpp_parser_resolve_implicit_version(parser);
}
#line 2080 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 33:
#line 437 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
ralloc_asprintf_rewrite_tail (&parser->output, &parser->output_length, "#%s", (yyvsp[-1].str));
}
#line 2088 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 34:
#line 443 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
glcpp_error(& (yylsp[-2]), parser, "#%s", (yyvsp[-1].str));
}
#line 2096 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 35:
#line 446 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
glcpp_error (& (yylsp[-2]), parser, "#define without macro name");
}
#line 2104 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 36:
#line 449 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
glcpp_error (& (yylsp[-3]), parser, "Illegal non-directive after #");
}
#line 2112 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 37:
#line 455 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
if (strlen ((yyvsp[0].str)) >= 3 && strncmp ((yyvsp[0].str), "0x", 2) == 0) {
(yyval.ival) = strtoll ((yyvsp[0].str) + 2, NULL, 16);
} else if ((yyvsp[0].str)[0] == '0') {
(yyval.ival) = strtoll ((yyvsp[0].str), NULL, 8);
} else {
(yyval.ival) = strtoll ((yyvsp[0].str), NULL, 10);
}
}
#line 2126 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
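/* INTEGER_STRING to ival (case 37 above): the base follows the literal's
* prefix, so "0x1f" parses as 31, "010" as 8 and "10" as 10, matching
* the C rules for integer constants. */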
case 38:
#line 464 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.ival) = (yyvsp[0].ival);
}
#line 2134 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 39:
#line 469 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
/* Both octal and hexadecimal constants begin with 0. */
if ((yyvsp[0].str)[0] == '0' && (yyvsp[0].str)[1] != '\0') {
glcpp_error(&(yylsp[0]), parser, "invalid #version \"%s\" (not a decimal constant)", (yyvsp[0].str));
(yyval.ival) = 0;
} else {
(yyval.ival) = strtoll((yyvsp[0].str), NULL, 10);
}
}
#line 2148 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
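/* The #version argument (case 39 above) is stricter: "300" is accepted,
* but "0x12c" and "0300" are rejected as non-decimal constants because
* both octal and hexadecimal literals begin with 0. */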
case 40:
#line 480 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = (yyvsp[0].ival);
(yyval.expression_value).undefined_macro = NULL;
}
#line 2157 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 41:
#line 484 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = 0;
if (parser->is_gles)
(yyval.expression_value).undefined_macro = linear_strdup(parser->linalloc, (yyvsp[0].str));
else
(yyval.expression_value).undefined_macro = NULL;
}
#line 2169 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 42:
#line 491 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = (yyvsp[-2].expression_value).value || (yyvsp[0].expression_value).value;
/* Short-circuit: Only flag undefined from right side
* if left side evaluates to false.
*/
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else if (! (yyvsp[-2].expression_value).value)
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2185 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 43:
#line 502 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = (yyvsp[-2].expression_value).value && (yyvsp[0].expression_value).value;
/* Short-circuit: Only flag undefined from right side
* if left side evaluates to true.
*/
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else if ((yyvsp[-2].expression_value).value)
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2201 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
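/* For || (case 42) and && (case 43) the undefined-macro flag follows the
* short-circuit semantics: the left operand's flag wins, and the right
* operand's flag only propagates when the left side would not have
* short-circuited (false for ||, true for &&). The remaining binary
* operators below simply prefer the left operand's flag. */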
case 44:
#line 513 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = (yyvsp[-2].expression_value).value | (yyvsp[0].expression_value).value;
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2213 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 45:
#line 520 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = (yyvsp[-2].expression_value).value ^ (yyvsp[0].expression_value).value;
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2225 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 46:
#line 527 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = (yyvsp[-2].expression_value).value & (yyvsp[0].expression_value).value;
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2237 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 47:
#line 534 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = (yyvsp[-2].expression_value).value != (yyvsp[0].expression_value).value;
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2249 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 48:
#line 541 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = (yyvsp[-2].expression_value).value == (yyvsp[0].expression_value).value;
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2261 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 49:
#line 548 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = (yyvsp[-2].expression_value).value >= (yyvsp[0].expression_value).value;
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2273 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 50:
#line 555 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = (yyvsp[-2].expression_value).value <= (yyvsp[0].expression_value).value;
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2285 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 51:
#line 562 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = (yyvsp[-2].expression_value).value > (yyvsp[0].expression_value).value;
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2297 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 52:
#line 569 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = (yyvsp[-2].expression_value).value < (yyvsp[0].expression_value).value;
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2309 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 53:
#line 576 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = (yyvsp[-2].expression_value).value >> (yyvsp[0].expression_value).value;
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2321 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 54:
#line 583 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = (yyvsp[-2].expression_value).value << (yyvsp[0].expression_value).value;
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2333 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 55:
#line 590 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = (yyvsp[-2].expression_value).value - (yyvsp[0].expression_value).value;
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2345 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 56:
#line 597 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = (yyvsp[-2].expression_value).value + (yyvsp[0].expression_value).value;
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2357 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 57:
#line 604 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
if ((yyvsp[0].expression_value).value == 0) {
yyerror (& (yylsp[-2]), parser,
"zero modulus in preprocessor directive");
} else {
(yyval.expression_value).value = (yyvsp[-2].expression_value).value % (yyvsp[0].expression_value).value;
}
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2374 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 58:
#line 616 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
if ((yyvsp[0].expression_value).value == 0) {
yyerror (& (yylsp[-2]), parser,
"division by 0 in preprocessor directive");
} else {
(yyval.expression_value).value = (yyvsp[-2].expression_value).value / (yyvsp[0].expression_value).value;
}
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2391 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
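/* For % (case 57) and / (case 58) a zero right operand is reported via
* yyerror and the operation is skipped; $$ then keeps Bison's default of
* $1, so only the undefined-macro flag is updated after the check. */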
case 59:
#line 628 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = (yyvsp[-2].expression_value).value * (yyvsp[0].expression_value).value;
if ((yyvsp[-2].expression_value).undefined_macro)
(yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
else
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2403 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 60:
#line 635 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = ! (yyvsp[0].expression_value).value;
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2412 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 61:
#line 639 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = ~ (yyvsp[0].expression_value).value;
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2421 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 62:
#line 643 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = - (yyvsp[0].expression_value).value;
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2430 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 63:
#line 647 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value).value = + (yyvsp[0].expression_value).value;
(yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
}
#line 2439 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 64:
#line 651 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.expression_value) = (yyvsp[-1].expression_value);
}
#line 2447 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 65:
#line 657 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.string_list) = _string_list_create (parser);
_string_list_append_item (parser, (yyval.string_list), (yyvsp[0].str));
}
#line 2456 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 66:
#line 661 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.string_list) = (yyvsp[-2].string_list);
_string_list_append_item (parser, (yyval.string_list), (yyvsp[0].str));
}
#line 2465 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 67:
#line 668 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.token_list) = NULL; }
#line 2471 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 69:
#line 673 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.token_list) = NULL; }
#line 2477 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 72:
#line 679 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
glcpp_error(&(yylsp[0]), parser, "extra tokens at end of directive");
}
#line 2485 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 73:
#line 685 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
parser->space_tokens = 1;
(yyval.token_list) = _token_list_create (parser);
_token_list_append (parser, (yyval.token_list), (yyvsp[0].token));
}
#line 2495 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 74:
#line 690 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.token_list) = (yyvsp[-1].token_list);
_token_list_append (parser, (yyval.token_list), (yyvsp[0].token));
}
#line 2504 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 75:
#line 697 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.token) = _token_create_str (parser, IDENTIFIER, (yyvsp[0].str));
(yyval.token)->location = yylloc;
}
#line 2513 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 76:
#line 701 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.token) = _token_create_str (parser, INTEGER_STRING, (yyvsp[0].str));
(yyval.token)->location = yylloc;
}
#line 2522 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 77:
#line 705 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.token) = _token_create_ival (parser, (yyvsp[0].ival), (yyvsp[0].ival));
(yyval.token)->location = yylloc;
}
#line 2531 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 78:
#line 709 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.token) = _token_create_ival (parser, DEFINED, DEFINED);
(yyval.token)->location = yylloc;
}
#line 2540 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 79:
#line 713 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.token) = _token_create_str (parser, OTHER, (yyvsp[0].str));
(yyval.token)->location = yylloc;
}
#line 2549 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 80:
#line 717 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{
(yyval.token) = _token_create_ival (parser, SPACE, SPACE);
(yyval.token)->location = yylloc;
}
#line 2558 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 81:
#line 724 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '['; }
#line 2564 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 82:
#line 725 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = ']'; }
#line 2570 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 83:
#line 726 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '('; }
#line 2576 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 84:
#line 727 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = ')'; }
#line 2582 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 85:
#line 728 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '{'; }
#line 2588 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 86:
#line 729 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '}'; }
#line 2594 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 87:
#line 730 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '.'; }
#line 2600 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 88:
#line 731 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '&'; }
#line 2606 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 89:
#line 732 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '*'; }
#line 2612 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 90:
#line 733 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '+'; }
#line 2618 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 91:
#line 734 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '-'; }
#line 2624 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 92:
#line 735 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '~'; }
#line 2630 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 93:
#line 736 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '!'; }
#line 2636 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 94:
#line 737 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '/'; }
#line 2642 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 95:
#line 738 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '%'; }
#line 2648 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 96:
#line 739 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = LEFT_SHIFT; }
#line 2654 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 97:
#line 740 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = RIGHT_SHIFT; }
#line 2660 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 98:
#line 741 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '<'; }
#line 2666 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 99:
#line 742 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '>'; }
#line 2672 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 100:
#line 743 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = LESS_OR_EQUAL; }
#line 2678 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 101:
#line 744 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = GREATER_OR_EQUAL; }
#line 2684 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 102:
#line 745 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = EQUAL; }
#line 2690 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 103:
#line 746 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = NOT_EQUAL; }
#line 2696 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 104:
#line 747 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '^'; }
#line 2702 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 105:
#line 748 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '|'; }
#line 2708 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 106:
#line 749 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = AND; }
#line 2714 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 107:
#line 750 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = OR; }
#line 2720 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 108:
#line 751 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = ';'; }
#line 2726 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 109:
#line 752 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = ','; }
#line 2732 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 110:
#line 753 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = '='; }
#line 2738 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 111:
#line 754 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = PASTE; }
#line 2744 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 112:
#line 755 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = PLUS_PLUS; }
#line 2750 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
case 113:
#line 756 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1646 */
{ (yyval.ival) = MINUS_MINUS; }
#line 2756 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
break;
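/* Cases 81-113 map punctuator productions onto either plain character
* codes ('[', '+', ...) or the multi-character token enums (LEFT_SHIFT,
* EQUAL, PASTE, ...), so that later code such as _token_print and
* _token_paste can treat every operator as a single ival. */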
#line 2760 "glsl/glcpp/glcpp-parse.c" /* yacc.c:1646 */
default: break;
}
/* User semantic actions sometimes alter yychar, and that requires
that yytoken be updated with the new translation. We take the
approach of translating immediately before every use of yytoken.
One alternative is translating here after every semantic action,
but that translation would be missed if the semantic action invokes
YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an
incorrect destructor might then be invoked immediately. In the
case of YYERROR or YYBACKUP, subsequent parser actions might lead
to an incorrect destructor call or verbose syntax error message
before the lookahead is translated. */
YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
*++yyvsp = yyval;
*++yylsp = yyloc;
/* Now 'shift' the result of the reduction. Determine what state
that goes to, based on the state we popped back to and the rule
number reduced by. */
yyn = yyr1[yyn];
yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
yystate = yytable[yystate];
else
yystate = yydefgoto[yyn - YYNTOKENS];
goto yynewstate;
/*--------------------------------------.
| yyerrlab -- here on detecting error. |
`--------------------------------------*/
yyerrlab:
/* Make sure we have the latest lookahead translation. See comments at
user semantic actions for why this is necessary. */
yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
/* If not already recovering from an error, report this error. */
if (!yyerrstatus)
{
++yynerrs;
#if ! YYERROR_VERBOSE
yyerror (&yylloc, parser, YY_("syntax error"));
#else
# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
yyssp, yytoken)
{
char const *yymsgp = YY_("syntax error");
int yysyntax_error_status;
yysyntax_error_status = YYSYNTAX_ERROR;
if (yysyntax_error_status == 0)
yymsgp = yymsg;
else if (yysyntax_error_status == 1)
{
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc);
if (!yymsg)
{
yymsg = yymsgbuf;
yymsg_alloc = sizeof yymsgbuf;
yysyntax_error_status = 2;
}
else
{
yysyntax_error_status = YYSYNTAX_ERROR;
yymsgp = yymsg;
}
}
yyerror (&yylloc, parser, yymsgp);
if (yysyntax_error_status == 2)
goto yyexhaustedlab;
}
# undef YYSYNTAX_ERROR
#endif
}
yyerror_range[1] = yylloc;
if (yyerrstatus == 3)
{
/* If just tried and failed to reuse lookahead token after an
error, discard it. */
if (yychar <= YYEOF)
{
/* Return failure if at end of input. */
if (yychar == YYEOF)
YYABORT;
}
else
{
yydestruct ("Error: discarding",
yytoken, &yylval, &yylloc, parser);
yychar = YYEMPTY;
}
}
/* Else will try to reuse lookahead token after shifting the error
token. */
goto yyerrlab1;
/*---------------------------------------------------.
| yyerrorlab -- error raised explicitly by YYERROR. |
`---------------------------------------------------*/
yyerrorlab:
/* Pacify compilers like GCC when the user code never invokes
YYERROR and the label yyerrorlab therefore never appears in user
code. */
if (/*CONSTCOND*/ 0)
goto yyerrorlab;
yyerror_range[1] = yylsp[1-yylen];
/* Do not reclaim the symbols of the rule whose action triggered
this YYERROR. */
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
yystate = *yyssp;
goto yyerrlab1;
/*-------------------------------------------------------------.
| yyerrlab1 -- common code for both syntax error and YYERROR. |
`-------------------------------------------------------------*/
yyerrlab1:
yyerrstatus = 3; /* Each real token shifted decrements this. */
for (;;)
{
yyn = yypact[yystate];
if (!yypact_value_is_default (yyn))
{
yyn += YYTERROR;
if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
{
yyn = yytable[yyn];
if (0 < yyn)
break;
}
}
/* Pop the current state because it cannot handle the error token. */
if (yyssp == yyss)
YYABORT;
yyerror_range[1] = *yylsp;
yydestruct ("Error: popping",
yystos[yystate], yyvsp, yylsp, parser);
YYPOPSTACK (1);
yystate = *yyssp;
YY_STACK_PRINT (yyss, yyssp);
}
YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
*++yyvsp = yylval;
YY_IGNORE_MAYBE_UNINITIALIZED_END
yyerror_range[2] = yylloc;
/* Using YYLLOC is tempting, but would change the location of
the lookahead. YYLOC is available though. */
YYLLOC_DEFAULT (yyloc, yyerror_range, 2);
*++yylsp = yyloc;
/* Shift the error token. */
YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
yystate = yyn;
goto yynewstate;
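/* Error recovery (yyerrlab1 above) pops states until one is found whose
* action table can shift the error token, aborting if the stack empties
* first; yyerrstatus is set to 3 so further syntax errors are suppressed
* until three real tokens have been shifted successfully. */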
/*-------------------------------------.
| yyacceptlab -- YYACCEPT comes here. |
`-------------------------------------*/
yyacceptlab:
yyresult = 0;
goto yyreturn;
/*-----------------------------------.
| yyabortlab -- YYABORT comes here. |
`-----------------------------------*/
yyabortlab:
yyresult = 1;
goto yyreturn;
#if !defined yyoverflow || YYERROR_VERBOSE
/*-------------------------------------------------.
| yyexhaustedlab -- memory exhaustion comes here. |
`-------------------------------------------------*/
yyexhaustedlab:
yyerror (&yylloc, parser, YY_("memory exhausted"));
yyresult = 2;
/* Fall through. */
#endif
yyreturn:
if (yychar != YYEMPTY)
{
/* Make sure we have the latest lookahead translation. See comments at
user semantic actions for why this is necessary. */
yytoken = YYTRANSLATE (yychar);
yydestruct ("Cleanup: discarding lookahead",
yytoken, &yylval, &yylloc, parser);
}
/* Do not reclaim the symbols of the rule whose action triggered
this YYABORT or YYACCEPT. */
YYPOPSTACK (yylen);
YY_STACK_PRINT (yyss, yyssp);
while (yyssp != yyss)
{
yydestruct ("Cleanup: popping",
yystos[*yyssp], yyvsp, yylsp, parser);
YYPOPSTACK (1);
}
#ifndef yyoverflow
if (yyss != yyssa)
YYSTACK_FREE (yyss);
#endif
#if YYERROR_VERBOSE
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
#endif
return yyresult;
}
#line 759 "./glsl/glcpp/glcpp-parse.y" /* yacc.c:1906 */
string_list_t *
_string_list_create(glcpp_parser_t *parser)
{
string_list_t *list;
list = linear_alloc_child(parser->linalloc, sizeof(string_list_t));
list->head = NULL;
list->tail = NULL;
return list;
}
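/* The list helpers below allocate every node out of parser->linalloc
* (an arena-style linear allocator, judging by the linear_alloc_child and
* linear_strdup calls), which is presumably why none of them ever free
* individual nodes: the whole arena is expected to be released along with
* the parser. */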
void
_string_list_append_item(glcpp_parser_t *parser, string_list_t *list,
const char *str)
{
string_node_t *node;
node = linear_alloc_child(parser->linalloc, sizeof(string_node_t));
node->str = linear_strdup(parser->linalloc, str);
node->next = NULL;
if (list->head == NULL) {
list->head = node;
} else {
list->tail->next = node;
}
list->tail = node;
}
int
_string_list_contains(string_list_t *list, const char *member, int *index)
{
string_node_t *node;
int i;
if (list == NULL)
return 0;
for (i = 0, node = list->head; node; i++, node = node->next) {
if (strcmp (node->str, member) == 0) {
if (index)
*index = i;
return 1;
}
}
return 0;
}
/* Return duplicate string in list (if any), NULL otherwise. */
const char *
_string_list_has_duplicate(string_list_t *list)
{
string_node_t *node, *dup;
if (list == NULL)
return NULL;
for (node = list->head; node; node = node->next) {
for (dup = node->next; dup; dup = dup->next) {
if (strcmp (node->str, dup->str) == 0)
return node->str;
}
}
return NULL;
}
int
_string_list_length(string_list_t *list)
{
int length = 0;
string_node_t *node;
if (list == NULL)
return 0;
for (node = list->head; node; node = node->next)
length++;
return length;
}
int
_string_list_equal(string_list_t *a, string_list_t *b)
{
string_node_t *node_a, *node_b;
if (a == NULL && b == NULL)
return 1;
if (a == NULL || b == NULL)
return 0;
for (node_a = a->head, node_b = b->head;
node_a && node_b;
node_a = node_a->next, node_b = node_b->next)
{
if (strcmp (node_a->str, node_b->str))
return 0;
}
/* Catch the case of lists being different lengths (which
* would cause the loop above to terminate after the shorter
* list). */
return node_a == node_b;
}
argument_list_t *
_argument_list_create(glcpp_parser_t *parser)
{
argument_list_t *list;
list = linear_alloc_child(parser->linalloc, sizeof(argument_list_t));
list->head = NULL;
list->tail = NULL;
return list;
}
void
_argument_list_append(glcpp_parser_t *parser,
argument_list_t *list, token_list_t *argument)
{
argument_node_t *node;
node = linear_alloc_child(parser->linalloc, sizeof(argument_node_t));
node->argument = argument;
node->next = NULL;
if (list->head == NULL) {
list->head = node;
} else {
list->tail->next = node;
}
list->tail = node;
}
int
_argument_list_length(argument_list_t *list)
{
int length = 0;
argument_node_t *node;
if (list == NULL)
return 0;
for (node = list->head; node; node = node->next)
length++;
return length;
}
token_list_t *
_argument_list_member_at(argument_list_t *list, int index)
{
argument_node_t *node;
int i;
if (list == NULL)
return NULL;
node = list->head;
for (i = 0; i < index; i++) {
node = node->next;
if (node == NULL)
break;
}
if (node)
return node->argument;
return NULL;
}
token_t *
_token_create_str(glcpp_parser_t *parser, int type, char *str)
{
token_t *token;
token = linear_alloc_child(parser->linalloc, sizeof(token_t));
token->type = type;
token->value.str = str;
return token;
}
token_t *
_token_create_ival(glcpp_parser_t *parser, int type, int ival)
{
token_t *token;
token = linear_alloc_child(parser->linalloc, sizeof(token_t));
token->type = type;
token->value.ival = ival;
return token;
}
token_list_t *
_token_list_create(glcpp_parser_t *parser)
{
token_list_t *list;
list = linear_alloc_child(parser->linalloc, sizeof(token_list_t));
list->head = NULL;
list->tail = NULL;
list->non_space_tail = NULL;
return list;
}
void
_token_list_append(glcpp_parser_t *parser, token_list_t *list, token_t *token)
{
token_node_t *node;
node = linear_alloc_child(parser->linalloc, sizeof(token_node_t));
node->token = token;
node->next = NULL;
if (list->head == NULL) {
list->head = node;
} else {
list->tail->next = node;
}
list->tail = node;
if (token->type != SPACE)
list->non_space_tail = node;
}
void
_token_list_append_list(token_list_t *list, token_list_t *tail)
{
if (tail == NULL || tail->head == NULL)
return;
if (list->head == NULL) {
list->head = tail->head;
} else {
list->tail->next = tail->head;
}
list->tail = tail->tail;
list->non_space_tail = tail->non_space_tail;
}
static token_list_t *
_token_list_copy(glcpp_parser_t *parser, token_list_t *other)
{
token_list_t *copy;
token_node_t *node;
if (other == NULL)
return NULL;
copy = _token_list_create (parser);
for (node = other->head; node; node = node->next) {
token_t *new_token = linear_alloc_child(parser->linalloc, sizeof(token_t));
*new_token = *node->token;
_token_list_append (parser, copy, new_token);
}
return copy;
}
static void
_token_list_trim_trailing_space(token_list_t *list)
{
if (list->non_space_tail) {
list->non_space_tail->next = NULL;
list->tail = list->non_space_tail;
}
}
static int
_token_list_is_empty_ignoring_space(token_list_t *l)
{
token_node_t *n;
if (l == NULL)
return 1;
n = l->head;
while (n != NULL && n->token->type == SPACE)
n = n->next;
return n == NULL;
}
int
_token_list_equal_ignoring_space(token_list_t *a, token_list_t *b)
{
token_node_t *node_a, *node_b;
if (a == NULL || b == NULL) {
int a_empty = _token_list_is_empty_ignoring_space(a);
int b_empty = _token_list_is_empty_ignoring_space(b);
return a_empty == b_empty;
}
node_a = a->head;
node_b = b->head;
while (1)
{
if (node_a == NULL && node_b == NULL)
break;
if (node_a == NULL || node_b == NULL)
return 0;
/* Make sure whitespace appears in the same places in both.
* It need not be exactly the same amount of whitespace,
* though.
*/
if (node_a->token->type == SPACE && node_b->token->type == SPACE) {
while (node_a && node_a->token->type == SPACE)
node_a = node_a->next;
while (node_b && node_b->token->type == SPACE)
node_b = node_b->next;
continue;
}
if (node_a->token->type != node_b->token->type)
return 0;
switch (node_a->token->type) {
case INTEGER:
if (node_a->token->value.ival != node_b->token->value.ival) {
return 0;
}
break;
case IDENTIFIER:
case INTEGER_STRING:
case OTHER:
if (strcmp(node_a->token->value.str, node_b->token->value.str)) {
return 0;
}
break;
}
node_a = node_a->next;
node_b = node_b->next;
}
return 1;
}
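/* _token_list_equal_ignoring_space treats any run of SPACE tokens as a
* single separator, so the replacement lists of "a + b" and "a  +   b"
* compare equal while "a+b" does not (whitespace must appear in the same
* places); this comparison is presumably what lets the preprocessor
* decide whether a macro redefinition matches the original definition. */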
static void
_token_print(char **out, size_t *len, token_t *token)
{
if (token->type < 256) {
ralloc_asprintf_rewrite_tail (out, len, "%c", token->type);
return;
}
switch (token->type) {
case INTEGER:
ralloc_asprintf_rewrite_tail (out, len, "%" PRIiMAX, token->value.ival);
break;
case IDENTIFIER:
case INTEGER_STRING:
case OTHER:
ralloc_asprintf_rewrite_tail (out, len, "%s", token->value.str);
break;
case SPACE:
ralloc_asprintf_rewrite_tail (out, len, " ");
break;
case LEFT_SHIFT:
ralloc_asprintf_rewrite_tail (out, len, "<<");
break;
case RIGHT_SHIFT:
ralloc_asprintf_rewrite_tail (out, len, ">>");
break;
case LESS_OR_EQUAL:
ralloc_asprintf_rewrite_tail (out, len, "<=");
break;
case GREATER_OR_EQUAL:
ralloc_asprintf_rewrite_tail (out, len, ">=");
break;
case EQUAL:
ralloc_asprintf_rewrite_tail (out, len, "==");
break;
case NOT_EQUAL:
ralloc_asprintf_rewrite_tail (out, len, "!=");
break;
case AND:
ralloc_asprintf_rewrite_tail (out, len, "&&");
break;
case OR:
ralloc_asprintf_rewrite_tail (out, len, "||");
break;
case PASTE:
ralloc_asprintf_rewrite_tail (out, len, "##");
break;
case PLUS_PLUS:
ralloc_asprintf_rewrite_tail (out, len, "++");
break;
case MINUS_MINUS:
ralloc_asprintf_rewrite_tail (out, len, "--");
break;
case DEFINED:
ralloc_asprintf_rewrite_tail (out, len, "defined");
break;
case PLACEHOLDER:
/* Nothing to print. */
break;
default:
assert(!"Error: Don't know how to print token.");
break;
}
}
/* Return a new token formed by pasting 'token' and 'other'. Note that this
* function may return 'token' or 'other' directly rather than allocating
* anything new.
*
* Caution: Only very cursory error-checking is performed to see if
* the final result is a valid single token. */
static token_t *
_token_paste(glcpp_parser_t *parser, token_t *token, token_t *other)
{
token_t *combined = NULL;
/* Pasting a placeholder onto anything makes no change. */
if (other->type == PLACEHOLDER)
return token;
/* When 'token' is a placeholder, just return 'other'. */
if (token->type == PLACEHOLDER)
return other;
/* A very few single-character punctuators can be combined
* with another to form a multi-character punctuator. */
switch (token->type) {
case '<':
if (other->type == '<')
combined = _token_create_ival (parser, LEFT_SHIFT, LEFT_SHIFT);
else if (other->type == '=')
combined = _token_create_ival (parser, LESS_OR_EQUAL, LESS_OR_EQUAL);
break;
case '>':
if (other->type == '>')
combined = _token_create_ival (parser, RIGHT_SHIFT, RIGHT_SHIFT);
else if (other->type == '=')
combined = _token_create_ival (parser, GREATER_OR_EQUAL, GREATER_OR_EQUAL);
break;
case '=':
if (other->type == '=')
combined = _token_create_ival (parser, EQUAL, EQUAL);
break;
case '!':
if (other->type == '=')
combined = _token_create_ival (parser, NOT_EQUAL, NOT_EQUAL);
break;
case '&':
if (other->type == '&')
combined = _token_create_ival (parser, AND, AND);
break;
case '|':
if (other->type == '|')
combined = _token_create_ival (parser, OR, OR);
break;
}
if (combined != NULL) {
/* Inherit the location from the first token */
combined->location = token->location;
return combined;
}
/* Two string-valued (or integer) tokens can usually just be
* mashed together. (We also handle a string followed by an
* integer here as well.)
*
* There are some exceptions here. Notably, if the first token
* is an integer (or a string representing an integer), then
* the second token must also be an integer or must be a
* string representing an integer that begins with a digit.
*/
if ((token->type == IDENTIFIER || token->type == OTHER || token->type == INTEGER_STRING || token->type == INTEGER) &&
(other->type == IDENTIFIER || other->type == OTHER || other->type == INTEGER_STRING || other->type == INTEGER))
{
char *str;
int combined_type;
/* Check that pasting onto an integer doesn't create a
* non-integer, (that is, only digits can be
* pasted). */
if (token->type == INTEGER_STRING || token->type == INTEGER) {
switch (other->type) {
case INTEGER_STRING:
if (other->value.str[0] < '0' || other->value.str[0] > '9')
goto FAIL;
break;
case INTEGER:
if (other->value.ival < 0)
goto FAIL;
break;
default:
goto FAIL;
}
}
if (token->type == INTEGER)
str = linear_asprintf(parser->linalloc, "%" PRIiMAX, token->value.ival);
else
str = linear_strdup(parser->linalloc, token->value.str);
if (other->type == INTEGER)
linear_asprintf_append(parser->linalloc, &str, "%" PRIiMAX, other->value.ival);
else
linear_strcat(parser->linalloc, &str, other->value.str);
/* New token is same type as original token, unless we
* started with an integer, in which case we will be
* creating an integer-string. */
combined_type = token->type;
if (combined_type == INTEGER)
combined_type = INTEGER_STRING;
combined = _token_create_str (parser, combined_type, str);
combined->location = token->location;
return combined;
}
FAIL:
glcpp_error (&token->location, parser, "");
ralloc_asprintf_rewrite_tail (&parser->info_log, &parser->info_log_length, "Pasting \"");
_token_print (&parser->info_log, &parser->info_log_length, token);
ralloc_asprintf_rewrite_tail (&parser->info_log, &parser->info_log_length, "\" and \"");
_token_print (&parser->info_log, &parser->info_log_length, other);
ralloc_asprintf_rewrite_tail (&parser->info_log, &parser->info_log_length, "\" does not give a valid preprocessing token.\n");
return token;
}
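/* Illustrative examples (not part of the original source; the macro
* name CAT is hypothetical):
*
* #define CAT(a, b) a ## b
*
* CAT(vec, 4)  -> a single IDENTIFIER token "vec4"
* CAT(12, 34)  -> a single integer-string token "1234"
* CAT(12, xy)  -> error: pasting onto an integer must produce digits,
*                 so the FAIL path above reports an invalid paste
* CAT(<, <)    -> a single LEFT_SHIFT token "<<"
*/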
static void
_token_list_print(glcpp_parser_t *parser, token_list_t *list)
{
token_node_t *node;
if (list == NULL)
return;
for (node = list->head; node; node = node->next)
_token_print (&parser->output, &parser->output_length, node->token);
}
void
yyerror(YYLTYPE *locp, glcpp_parser_t *parser, const char *error)
{
glcpp_error(locp, parser, "%s", error);
}
static void
add_builtin_define(glcpp_parser_t *parser, const char *name, int value)
{
token_t *tok;
token_list_t *list;
tok = _token_create_ival (parser, INTEGER, value);
list = _token_list_create(parser);
_token_list_append(parser, list, tok);
_define_object_macro(parser, NULL, name, list);
}
glcpp_parser_t *
glcpp_parser_create(const struct gl_extensions *extension_list,
glcpp_extension_iterator extensions, void *state, gl_api api)
{
glcpp_parser_t *parser;
parser = ralloc (NULL, glcpp_parser_t);
glcpp_lex_init_extra (parser, &parser->scanner);
parser->defines = _mesa_hash_table_create(NULL, _mesa_key_hash_string,
_mesa_key_string_equal);
parser->linalloc = linear_alloc_parent(parser, 0);
parser->active = NULL;
parser->lexing_directive = 0;
parser->lexing_version_directive = 0;
parser->space_tokens = 1;
parser->last_token_was_newline = 0;
parser->last_token_was_space = 0;
parser->first_non_space_token_this_line = 1;
parser->newline_as_space = 0;
parser->in_control_line = 0;
parser->paren_count = 0;
parser->commented_newlines = 0;
parser->skip_stack = NULL;
parser->skipping = 0;
parser->lex_from_list = NULL;
parser->lex_from_node = NULL;
parser->output = ralloc_strdup(parser, "");
parser->output_length = 0;
parser->info_log = ralloc_strdup(parser, "");
parser->info_log_length = 0;
parser->error = 0;
parser->extensions = extensions;
parser->extension_list = extension_list;
parser->state = state;
parser->api = api;
parser->version = 0;
parser->version_set = false;
parser->has_new_line_number = 0;
parser->new_line_number = 1;
parser->has_new_source_number = 0;
parser->new_source_number = 0;
parser->is_gles = false;
return parser;
}
void
glcpp_parser_destroy(glcpp_parser_t *parser)
{
glcpp_lex_destroy (parser->scanner);
_mesa_hash_table_destroy(parser->defines, NULL);
ralloc_free (parser);
}
typedef enum function_status
{
FUNCTION_STATUS_SUCCESS,
FUNCTION_NOT_A_FUNCTION,
FUNCTION_UNBALANCED_PARENTHESES
} function_status_t;
/* Find a set of function-like macro arguments by looking for a
* balanced set of parentheses.
*
* When called, 'node' should be the token of the macro name; the
* opening parenthesis, (or perhaps preceding SPACE tokens), is
* expected to follow it. Upon successful return *last will
* be the last consumed node, (corresponding to the closing right
* parenthesis).
*
* Return values:
*
* FUNCTION_STATUS_SUCCESS:
*
* Successfully parsed a set of function arguments.
*
* FUNCTION_NOT_A_FUNCTION:
*
* Macro name not followed by a '('. This is not an error, but
* simply that the macro name should be treated as a non-macro.
*
* FUNCTION_UNBALANCED_PARENTHESES
*
* Macro name is not followed by a balanced set of parentheses.
*/
static function_status_t
_arguments_parse(glcpp_parser_t *parser,
argument_list_t *arguments, token_node_t *node,
token_node_t **last)
{
token_list_t *argument;
int paren_count;
node = node->next;
/* Ignore whitespace before first parenthesis. */
while (node && node->token->type == SPACE)
node = node->next;
if (node == NULL || node->token->type != '(')
return FUNCTION_NOT_A_FUNCTION;
node = node->next;
argument = _token_list_create (parser);
_argument_list_append (parser, arguments, argument);
for (paren_count = 1; node; node = node->next) {
if (node->token->type == '(') {
paren_count++;
} else if (node->token->type == ')') {
paren_count--;
if (paren_count == 0)
break;
}
if (node->token->type == ',' && paren_count == 1) {
_token_list_trim_trailing_space (argument);
argument = _token_list_create (parser);
_argument_list_append (parser, arguments, argument);
} else {
if (argument->head == NULL) {
/* Don't treat initial whitespace as part of the argument. */
if (node->token->type == SPACE)
continue;
}
_token_list_append(parser, argument, node->token);
}
}
if (paren_count)
return FUNCTION_UNBALANCED_PARENTHESES;
*last = node;
return FUNCTION_STATUS_SUCCESS;
}
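/* Illustrative example (not part of the original source; FOO is a
* hypothetical function-like macro): for the invocation
*
* FOO (a, (b, c), )
*
* the loop above collects three arguments: "a", "(b, c)" (the comma
* inside the nested parentheses does not split the argument), and an
* empty third argument, with *last left on the final ')'. Without that
* final ')', FUNCTION_UNBALANCED_PARENTHESES would be returned instead. */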
static token_list_t *
_token_list_create_with_one_ival(glcpp_parser_t *parser, int type, int ival)
{
token_list_t *list;
token_t *node;
list = _token_list_create(parser);
node = _token_create_ival(parser, type, ival);
_token_list_append(parser, list, node);
return list;
}
static token_list_t *
_token_list_create_with_one_space(glcpp_parser_t *parser)
{
return _token_list_create_with_one_ival(parser, SPACE, SPACE);
}
static token_list_t *
_token_list_create_with_one_integer(glcpp_parser_t *parser, int ival)
{
return _token_list_create_with_one_ival(parser, INTEGER, ival);
}
/* Evaluate a DEFINED token node (based on subsequent tokens in the list).
*
* Note: This function must only be called when "node" is a DEFINED token,
* (and will abort with an assertion failure otherwise).
*
* If "node" is followed, (ignoring any SPACE tokens), by an IDENTIFIER token
* (optionally preceded and followed by '(' and ')' tokens) then the following
* occurs:
*
* If the identifier is a defined macro, this function returns 1.
*
* If the identifier is not a defined macro, this function returns 0.
*
* In either case, *last will be updated to the last node in the list
* consumed by the evaluation, (either the token of the identifier or the
* token of the closing parenthesis).
*
* In all other cases, (such as "node is the final node of the list", or
* "missing closing parenthesis", etc.), this function generates a
* preprocessor error, returns -1 and *last will not be set.
*/
static int
_glcpp_parser_evaluate_defined(glcpp_parser_t *parser, token_node_t *node,
token_node_t **last)
{
token_node_t *argument, *defined = node;
assert(node->token->type == DEFINED);
node = node->next;
/* Ignore whitespace after DEFINED token. */
while (node && node->token->type == SPACE)
node = node->next;
if (node == NULL)
goto FAIL;
if (node->token->type == IDENTIFIER || node->token->type == OTHER) {
argument = node;
} else if (node->token->type == '(') {
node = node->next;
/* Ignore whitespace after '(' token. */
while (node && node->token->type == SPACE)
node = node->next;
if (node == NULL || (node->token->type != IDENTIFIER &&
node->token->type != OTHER)) {
goto FAIL;
}
argument = node;
node = node->next;
/* Ignore whitespace after identifier, before ')' token. */
while (node && node->token->type == SPACE)
node = node->next;
if (node == NULL || node->token->type != ')')
goto FAIL;
} else {
goto FAIL;
}
*last = node;
return _mesa_hash_table_search(parser->defines,
argument->token->value.str) ? 1 : 0;
FAIL:
glcpp_error (&defined->token->location, parser,
"\"defined\" not followed by an identifier");
return -1;
}
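/* Illustrative example (not part of the original source): in a #if
* expression, both "defined FOO" and "defined ( FOO )" are accepted,
* yielding 1 if FOO is currently in parser->defines and 0 otherwise,
* with *last set to the FOO token or to the closing ')'. Something like
* "#if defined + 1" takes the FAIL path and returns -1 after reporting
* the "defined" error above. */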
/* Evaluate all DEFINED nodes in a given list, modifying the list in place.
*/
static void
_glcpp_parser_evaluate_defined_in_list(glcpp_parser_t *parser,
token_list_t *list)
{
token_node_t *node, *node_prev, *replacement, *last = NULL;
int value;
if (list == NULL)
return;
node_prev = NULL;
node = list->head;
while (node) {
if (node->token->type != DEFINED)
goto NEXT;
value = _glcpp_parser_evaluate_defined (parser, node, &last);
if (value == -1)
goto NEXT;
replacement = linear_alloc_child(parser->linalloc, sizeof(token_node_t));
replacement->token = _token_create_ival (parser, INTEGER, value);
/* Splice replacement node into list, replacing from "node"
* through "last". */
if (node_prev)
node_prev->next = replacement;
else
list->head = replacement;
replacement->next = last->next;
if (last == list->tail)
list->tail = replacement;
node = replacement;
NEXT:
node_prev = node;
node = node->next;
}
}
/* Perform macro expansion on 'list', placing the resulting tokens
* into a new list which is initialized with a first token of type
* 'head_token_type'. Then begin lexing from the resulting list,
* (return to the current lexing source when this list is exhausted).
*
* See the documentation of _glcpp_parser_expand_token_list for a description
* of the "mode" parameter.
*/
static void
_glcpp_parser_expand_and_lex_from(glcpp_parser_t *parser, int head_token_type,
token_list_t *list, expansion_mode_t mode)
{
token_list_t *expanded;
token_t *token;
expanded = _token_list_create (parser);
token = _token_create_ival (parser, head_token_type, head_token_type);
_token_list_append (parser, expanded, token);
_glcpp_parser_expand_token_list (parser, list, mode);
_token_list_append_list (expanded, list);
glcpp_parser_lex_from (parser, expanded);
}
static void
_glcpp_parser_apply_pastes(glcpp_parser_t *parser, token_list_t *list)
{
token_node_t *node;
node = list->head;
while (node) {
token_node_t *next_non_space;
/* Look ahead for a PASTE token, skipping space. */
next_non_space = node->next;
while (next_non_space && next_non_space->token->type == SPACE)
next_non_space = next_non_space->next;
if (next_non_space == NULL)
break;
if (next_non_space->token->type != PASTE) {
node = next_non_space;
continue;
}
/* Now find the next non-space token after the PASTE. */
next_non_space = next_non_space->next;
while (next_non_space && next_non_space->token->type == SPACE)
next_non_space = next_non_space->next;
if (next_non_space == NULL) {
yyerror(&node->token->location, parser, "'##' cannot appear at either end of a macro expansion\n");
return;
}
node->token = _token_paste(parser, node->token, next_non_space->token);
node->next = next_non_space->next;
if (next_non_space == list->tail)
list->tail = node;
}
list->non_space_tail = list->tail;
}
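/* Illustrative example (not part of the original source; XY is a
* hypothetical macro):
*
* #define XY A ## B
*
* Expanding XY runs this loop over the replacement list "A ## B",
* pasting the two identifiers into the single token "AB". If nothing
* followed the '##', the "either end of a macro expansion" error above
* would be reported instead. */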
/* This is a helper function that's essentially part of the
* implementation of _glcpp_parser_expand_node. It shouldn't be called
* except by that function.
*
* Returns NULL if node is a simple token with no expansion, (that is,
* although 'node' corresponds to an identifier defined as a
* function-like macro, it is not followed with a parenthesized
* argument list).
*
* Compute the complete expansion of node (which is a function-like
* macro) and subsequent nodes which are arguments.
*
* Returns the token list that results from the expansion and sets
* *last to the last node in the list that was consumed by the
* expansion. Specifically, *last will be set as follows: as the
* token of the closing right parenthesis.
*
* See the documentation of _glcpp_parser_expand_token_list for a description
* of the "mode" parameter.
*/
static token_list_t *
_glcpp_parser_expand_function(glcpp_parser_t *parser, token_node_t *node,
token_node_t **last, expansion_mode_t mode)
{
struct hash_entry *entry;
macro_t *macro;
const char *identifier;
argument_list_t *arguments;
function_status_t status;
token_list_t *substituted;
int parameter_index;
identifier = node->token->value.str;
entry = _mesa_hash_table_search(parser->defines, identifier);
macro = entry ? entry->data : NULL;
assert(macro->is_function);
arguments = _argument_list_create(parser);
status = _arguments_parse(parser, arguments, node, last);
switch (status) {
case FUNCTION_STATUS_SUCCESS:
break;
case FUNCTION_NOT_A_FUNCTION:
return NULL;
case FUNCTION_UNBALANCED_PARENTHESES:
glcpp_error(&node->token->location, parser, "Macro %s call has unbalanced parentheses\n", identifier);
return NULL;
}
/* Replace a macro defined as empty with a SPACE token. */
if (macro->replacements == NULL) {
return _token_list_create_with_one_space(parser);
}
if (!((_argument_list_length (arguments) ==
_string_list_length (macro->parameters)) ||
(_string_list_length (macro->parameters) == 0 &&
_argument_list_length (arguments) == 1 &&
arguments->head->argument->head == NULL))) {
glcpp_error(&node->token->location, parser,
"Error: macro %s invoked with %d arguments (expected %d)\n",
identifier, _argument_list_length (arguments),
_string_list_length(macro->parameters));
return NULL;
}
/* Perform argument substitution on the replacement list. */
substituted = _token_list_create(parser);
for (node = macro->replacements->head; node; node = node->next) {
if (node->token->type == IDENTIFIER &&
_string_list_contains(macro->parameters, node->token->value.str,
¶meter_index)) {
token_list_t *argument;
argument = _argument_list_member_at(arguments, parameter_index);
/* Before substituting, we expand the argument tokens, or append a
* placeholder token for an empty argument. */
if (argument->head) {
token_list_t *expanded_argument;
expanded_argument = _token_list_copy(parser, argument);
_glcpp_parser_expand_token_list(parser, expanded_argument, mode);
_token_list_append_list(substituted, expanded_argument);
} else {
token_t *new_token;
new_token = _token_create_ival(parser, PLACEHOLDER,
PLACEHOLDER);
_token_list_append(parser, substituted, new_token);
}
} else {
_token_list_append(parser, substituted, node->token);
}
}
/* After argument substitution, and before further expansion
* below, implement token pasting. */
_token_list_trim_trailing_space(substituted);
_glcpp_parser_apply_pastes(parser, substituted);
return substituted;
}
/* Compute the complete expansion of node, (and subsequent nodes after
* 'node' in the case that 'node' is a function-like macro and
* subsequent nodes are arguments).
*
* Returns NULL if node is a simple token with no expansion.
*
* Otherwise, returns the token list that results from the expansion
* and sets *last to the last node in the list that was consumed by
* the expansion. Specifically, *last will be set as follows:
*
* As 'node' in the case of object-like macro expansion.
*
* As the token of the closing right parenthesis in the case of
* function-like macro expansion.
*
* See the documentation of _glcpp_parser_expand_token_list for a description
* of the "mode" parameter.
*/
static token_list_t *
_glcpp_parser_expand_node(glcpp_parser_t *parser, token_node_t *node,
token_node_t **last, expansion_mode_t mode)
{
token_t *token = node->token;
const char *identifier;
struct hash_entry *entry;
macro_t *macro;
/* We only expand identifiers */
if (token->type != IDENTIFIER) {
return NULL;
}
*last = node;
identifier = token->value.str;
/* Special handling for __LINE__ and __FILE__, (not through
* the hash table). */
if (strcmp(identifier, "__LINE__") == 0)
return _token_list_create_with_one_integer(parser, node->token->location.first_line);
if (strcmp(identifier, "__FILE__") == 0)
return _token_list_create_with_one_integer(parser, node->token->location.source);
/* Look up this identifier in the hash table. */
entry = _mesa_hash_table_search(parser->defines, identifier);
macro = entry ? entry->data : NULL;
/* Not a macro, so no expansion needed. */
if (macro == NULL)
return NULL;
/* Finally, don't expand this macro if we're already actively
* expanding it, (to avoid infinite recursion). */
if (_parser_active_list_contains (parser, identifier)) {
/* We change the token type here from IDENTIFIER to OTHER to prevent any
* future expansion of this unexpanded token. */
char *str;
token_list_t *expansion;
token_t *final;
str = linear_strdup(parser->linalloc, token->value.str);
final = _token_create_str(parser, OTHER, str);
expansion = _token_list_create(parser);
_token_list_append(parser, expansion, final);
return expansion;
}
if (! macro->is_function) {
token_list_t *replacement;
/* Replace a macro defined as empty with a SPACE token. */
if (macro->replacements == NULL)
return _token_list_create_with_one_space(parser);
replacement = _token_list_copy(parser, macro->replacements);
_glcpp_parser_apply_pastes(parser, replacement);
return replacement;
}
return _glcpp_parser_expand_function(parser, node, last, mode);
}
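/* Illustrative examples (not part of the original source; FOUR and SELF
* are hypothetical macros):
*
* #define FOUR (2 + 2)
* #define SELF (1 + SELF)
*
* "FOUR" expands to "(2 + 2)" via the object-like branch above, with
* *last == node. Inside the expansion of "SELF", the inner SELF is on
* the active list, so it is re-created as an OTHER token and left
* unexpanded, giving "(1 + SELF)". "__LINE__" expands to a single
* INTEGER token holding the current line number, bypassing the hash
* table entirely. */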
/* Push a new identifier onto the parser's active list.
*
* Here, 'marker' is the token node that appears in the list after the
* expansion of 'identifier'. That is, when the list iterator begins
* examining 'marker', then it is time to pop this node from the
* active stack.
*/
static void
_parser_active_list_push(glcpp_parser_t *parser, const char *identifier,
token_node_t *marker)
{
active_list_t *node;
node = linear_alloc_child(parser->linalloc, sizeof(active_list_t));
node->identifier = linear_strdup(parser->linalloc, identifier);
node->marker = marker;
node->next = parser->active;
parser->active = node;
}
static void
_parser_active_list_pop(glcpp_parser_t *parser)
{
active_list_t *node = parser->active;
if (node == NULL) {
parser->active = NULL;
return;
}
node = parser->active->next;
parser->active = node;
}
static int
_parser_active_list_contains(glcpp_parser_t *parser, const char *identifier)
{
active_list_t *node;
if (parser->active == NULL)
return 0;
for (node = parser->active; node; node = node->next)
if (strcmp(node->identifier, identifier) == 0)
return 1;
return 0;
}
/* Walk over the token list replacing nodes with their expansion.
* Whenever nodes are expanded the walking will walk over the new
* nodes, continuing to expand as necessary. The results are placed in
* 'list' itself.
*
* The "mode" argument controls the handling of any DEFINED tokens that
* result from expansion as follows:
*
* EXPANSION_MODE_IGNORE_DEFINED: Any resulting DEFINED tokens will be
* left in the final list, unevaluated. This is the correct mode
* for expanding any list in any context other than a
* preprocessor conditional, (#if or #elif).
*
* EXPANSION_MODE_EVALUATE_DEFINED: Any resulting DEFINED tokens will be
* evaluated to 0 or 1 tokens depending on whether the following
* token is the name of a defined macro. If the DEFINED token is
* not followed by an (optionally parenthesized) identifier, then
* an error will be generated. This is the correct mode for
* expanding any list in the context of a preprocessor
* conditional, (#if or #elif).
*/
static void
_glcpp_parser_expand_token_list(glcpp_parser_t *parser, token_list_t *list,
expansion_mode_t mode)
{
token_node_t *node_prev;
token_node_t *node, *last = NULL;
token_list_t *expansion;
active_list_t *active_initial = parser->active;
if (list == NULL)
return;
_token_list_trim_trailing_space (list);
node_prev = NULL;
node = list->head;
if (mode == EXPANSION_MODE_EVALUATE_DEFINED)
_glcpp_parser_evaluate_defined_in_list (parser, list);
while (node) {
while (parser->active && parser->active->marker == node)
_parser_active_list_pop (parser);
expansion = _glcpp_parser_expand_node (parser, node, &last, mode);
if (expansion) {
token_node_t *n;
if (mode == EXPANSION_MODE_EVALUATE_DEFINED) {
_glcpp_parser_evaluate_defined_in_list (parser, expansion);
}
for (n = node; n != last->next; n = n->next)
while (parser->active && parser->active->marker == n) {
_parser_active_list_pop (parser);
}
_parser_active_list_push(parser, node->token->value.str, last->next);
/* Splice expansion into list, supporting a simple deletion if the
* expansion is empty.
*/
if (expansion->head) {
if (node_prev)
node_prev->next = expansion->head;
else
list->head = expansion->head;
expansion->tail->next = last->next;
if (last == list->tail)
list->tail = expansion->tail;
} else {
if (node_prev)
node_prev->next = last->next;
else
list->head = last->next;
if (last == list->tail)
list->tail = NULL;
}
} else {
node_prev = node;
}
node = node_prev ? node_prev->next : list->head;
}
/* Remove any lingering effects of this invocation on the
* active list. That is, pop until the list looks like it did
* at the beginning of this function. */
while (parser->active && parser->active != active_initial)
_parser_active_list_pop (parser);
list->non_space_tail = list->tail;
}
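/* Illustrative example (not part of the original source; TEST is a
* hypothetical macro):
*
* #define TEST defined(FOO)
*
* Expanding "TEST" in ordinary shader text uses
* EXPANSION_MODE_IGNORE_DEFINED, so the DEFINED token survives in the
* output, whereas "#if TEST" expands with
* EXPANSION_MODE_EVALUATE_DEFINED and folds it to a single INTEGER
* token 0 or 1 before the #if expression is evaluated. */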
void
_glcpp_parser_print_expanded_token_list(glcpp_parser_t *parser,
token_list_t *list)
{
if (list == NULL)
return;
_glcpp_parser_expand_token_list (parser, list, EXPANSION_MODE_IGNORE_DEFINED);
_token_list_trim_trailing_space (list);
_token_list_print (parser, list);
}
static void
_check_for_reserved_macro_name(glcpp_parser_t *parser, YYLTYPE *loc,
const char *identifier)
{
/* Section 3.3 (Preprocessor) of the GLSL 1.30 spec (and later) and
* the GLSL ES spec (all versions) say:
*
* "All macro names containing two consecutive underscores ( __ )
* are reserved for future use as predefined macro names. All
* macro names prefixed with "GL_" ("GL" followed by a single
* underscore) are also reserved."
*
* The intention is that names containing __ are reserved for internal
* use by the implementation, and names prefixed with GL_ are reserved
* for use by Khronos. Since every extension adds a name prefixed
* with GL_ (i.e., the name of the extension), that should be an
* error. Names simply containing __ are dangerous to use, but should
* be allowed.
*
* A future version of the GLSL specification will clarify this.
*/
if (strstr(identifier, "__")) {
glcpp_warning(loc, parser, "Macro names containing \"__\" are reserved "
"for use by the implementation.\n");
}
if (strncmp(identifier, "GL_", 3) == 0) {
glcpp_error (loc, parser, "Macro names starting with \"GL_\" are reserved.\n");
}
if (strcmp(identifier, "defined") == 0) {
glcpp_error (loc, parser, "\"defined\" cannot be used as a macro name");
}
}
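/* Illustrative examples (not part of the original source):
*
* #define GL_MY_EXT 1   -> error, the "GL_" prefix is reserved
* #define FOO__BAR 1    -> warning only, "__" is reserved but tolerated
* #define defined 1     -> error, "defined" cannot be a macro name
*/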
static int
_macro_equal(macro_t *a, macro_t *b)
{
if (a->is_function != b->is_function)
return 0;
if (a->is_function) {
if (! _string_list_equal (a->parameters, b->parameters))
return 0;
}
return _token_list_equal_ignoring_space(a->replacements, b->replacements);
}
void
_define_object_macro(glcpp_parser_t *parser, YYLTYPE *loc,
const char *identifier, token_list_t *replacements)
{
macro_t *macro, *previous;
struct hash_entry *entry;
/* We define pre-defined macros before we've started parsing the actual
* file. So if there's no location defined yet, that's what we're doing and
* we don't want to generate an error for using the reserved names. */
if (loc != NULL)
_check_for_reserved_macro_name(parser, loc, identifier);
macro = linear_alloc_child(parser->linalloc, sizeof(macro_t));
macro->is_function = 0;
macro->parameters = NULL;
macro->identifier = linear_strdup(parser->linalloc, identifier);
macro->replacements = replacements;
entry = _mesa_hash_table_search(parser->defines, identifier);
previous = entry ? entry->data : NULL;
if (previous) {
if (_macro_equal (macro, previous)) {
return;
}
glcpp_error (loc, parser, "Redefinition of macro %s\n", identifier);
}
_mesa_hash_table_insert (parser->defines, identifier, macro);
}
void
_define_function_macro(glcpp_parser_t *parser, YYLTYPE *loc,
const char *identifier, string_list_t *parameters,
token_list_t *replacements)
{
macro_t *macro, *previous;
struct hash_entry *entry;
const char *dup;
_check_for_reserved_macro_name(parser, loc, identifier);
/* Check for any duplicate parameter names. */
if ((dup = _string_list_has_duplicate (parameters)) != NULL) {
glcpp_error (loc, parser, "Duplicate macro parameter \"%s\"", dup);
}
macro = linear_alloc_child(parser->linalloc, sizeof(macro_t));
macro->is_function = 1;
macro->parameters = parameters;
macro->identifier = linear_strdup(parser->linalloc, identifier);
macro->replacements = replacements;
entry = _mesa_hash_table_search(parser->defines, identifier);
previous = entry ? entry->data : NULL;
if (previous) {
if (_macro_equal (macro, previous)) {
return;
}
glcpp_error (loc, parser, "Redefinition of macro %s\n", identifier);
}
_mesa_hash_table_insert(parser->defines, identifier, macro);
}
static int
glcpp_parser_lex(YYSTYPE *yylval, YYLTYPE *yylloc, glcpp_parser_t *parser)
{
token_node_t *node;
int ret;
if (parser->lex_from_list == NULL) {
ret = glcpp_lex(yylval, yylloc, parser->scanner);
/* XXX: This ugly block of code exists for the sole
* purpose of converting a NEWLINE token into a SPACE
* token, but only in the case where we have seen a
* function-like macro name, but have not yet seen its
* closing parenthesis.
*
* There's perhaps a more compact way to do this with
* mid-rule actions in the grammar.
*
* I'm definitely not pleased with the complexity of
* this code here.
*/
if (parser->newline_as_space) {
if (ret == '(') {
parser->paren_count++;
} else if (ret == ')') {
parser->paren_count--;
if (parser->paren_count == 0)
parser->newline_as_space = 0;
} else if (ret == NEWLINE) {
ret = SPACE;
} else if (ret != SPACE) {
if (parser->paren_count == 0)
parser->newline_as_space = 0;
}
} else if (parser->in_control_line) {
if (ret == NEWLINE)
parser->in_control_line = 0;
}
else if (ret == DEFINE_TOKEN || ret == UNDEF || ret == IF ||
ret == IFDEF || ret == IFNDEF || ret == ELIF || ret == ELSE ||
ret == ENDIF || ret == HASH_TOKEN) {
parser->in_control_line = 1;
} else if (ret == IDENTIFIER) {
struct hash_entry *entry = _mesa_hash_table_search(parser->defines,
yylval->str);
macro_t *macro = entry ? entry->data : NULL;
if (macro && macro->is_function) {
parser->newline_as_space = 1;
parser->paren_count = 0;
}
}
return ret;
}
node = parser->lex_from_node;
if (node == NULL) {
parser->lex_from_list = NULL;
return NEWLINE;
}
*yylval = node->token->value;
ret = node->token->type;
parser->lex_from_node = node->next;
return ret;
}
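/* Illustrative example (not part of the original source; FOO is a
* hypothetical function-like macro): given
*
* #define FOO(x) x
* FOO(
* 1
* )
*
* the NEWLINE tokens between the '(' and its matching ')' are returned
* as SPACE by the block above, so the invocation is parsed as if it
* were written on a single line. */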
static void
glcpp_parser_lex_from(glcpp_parser_t *parser, token_list_t *list)
{
token_node_t *node;
assert (parser->lex_from_list == NULL);
/* Copy list, eliminating any space tokens. */
parser->lex_from_list = _token_list_create (parser);
for (node = list->head; node; node = node->next) {
if (node->token->type == SPACE)
continue;
_token_list_append (parser, parser->lex_from_list, node->token);
}
parser->lex_from_node = parser->lex_from_list->head;
/* It's possible the list consisted of nothing but whitespace. */
if (parser->lex_from_node == NULL) {
parser->lex_from_list = NULL;
}
}
static void
_glcpp_parser_skip_stack_push_if(glcpp_parser_t *parser, YYLTYPE *loc,
int condition)
{
skip_type_t current = SKIP_NO_SKIP;
skip_node_t *node;
if (parser->skip_stack)
current = parser->skip_stack->type;
node = linear_alloc_child(parser->linalloc, sizeof(skip_node_t));
node->loc = *loc;
if (current == SKIP_NO_SKIP) {
if (condition)
node->type = SKIP_NO_SKIP;
else
node->type = SKIP_TO_ELSE;
} else {
node->type = SKIP_TO_ENDIF;
}
node->has_else = false;
node->next = parser->skip_stack;
parser->skip_stack = node;
}
static void
_glcpp_parser_skip_stack_change_if(glcpp_parser_t *parser, YYLTYPE *loc,
const char *type, int condition)
{
if (parser->skip_stack == NULL) {
glcpp_error (loc, parser, "#%s without #if\n", type);
return;
}
if (parser->skip_stack->type == SKIP_TO_ELSE) {
if (condition)
parser->skip_stack->type = SKIP_NO_SKIP;
} else {
parser->skip_stack->type = SKIP_TO_ENDIF;
}
}
static void
_glcpp_parser_skip_stack_pop(glcpp_parser_t *parser, YYLTYPE *loc)
{
skip_node_t *node;
if (parser->skip_stack == NULL) {
glcpp_error (loc, parser, "#endif without #if\n");
return;
}
node = parser->skip_stack;
parser->skip_stack = node->next;
}
static void
_glcpp_parser_handle_version_declaration(glcpp_parser_t *parser, intmax_t version,
const char *es_identifier,
bool explicitly_set)
{
if (parser->version_set)
return;
parser->version = version;
parser->version_set = true;
add_builtin_define (parser, "__VERSION__", version);
parser->is_gles = (version == 100) ||
(es_identifier && (strcmp(es_identifier, "es") == 0));
/* Add pre-defined macros. */
if (parser->is_gles)
add_builtin_define(parser, "GL_ES", 1);
else if (version >= 150)
add_builtin_define(parser, "GL_core_profile", 1);
/* Currently, all ES2/ES3 implementations support highp in the
* fragment shader, so we always define this macro in ES2/ES3.
* If we ever get a driver that doesn't support highp, we'll
* need to add a flag to the gl_context and check that here.
*/
if (version >= 130 || parser->is_gles)
add_builtin_define (parser, "GL_FRAGMENT_PRECISION_HIGH", 1);
/* Add all the extension macros available in this context */
if (parser->extensions)
parser->extensions(parser->state, add_builtin_define, parser,
version, parser->is_gles);
if (parser->extension_list) {
/* If MESA_shader_integer_functions is supported, then the building
* blocks required for the 64x64 => 64 multiply exist. Add defines for
* those functions so that they can be tested.
*/
if (parser->extension_list->MESA_shader_integer_functions) {
add_builtin_define(parser, "__have_builtin_builtin_sign64", 1);
add_builtin_define(parser, "__have_builtin_builtin_umul64", 1);
add_builtin_define(parser, "__have_builtin_builtin_udiv64", 1);
add_builtin_define(parser, "__have_builtin_builtin_umod64", 1);
add_builtin_define(parser, "__have_builtin_builtin_idiv64", 1);
add_builtin_define(parser, "__have_builtin_builtin_imod64", 1);
}
}
if (explicitly_set) {
ralloc_asprintf_rewrite_tail(&parser->output, &parser->output_length,
"#version %" PRIiMAX "%s%s", version,
es_identifier ? " " : "",
es_identifier ? es_identifier : "");
}
}
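/* Illustrative example (not part of the original source): a shader that
* begins with "#version 300 es" ends up with __VERSION__ == 300,
* GL_ES == 1 and GL_FRAGMENT_PRECISION_HIGH == 1 predefined, while
* "#version 150" yields __VERSION__ == 150, GL_core_profile == 1 and,
* since 150 >= 130, GL_FRAGMENT_PRECISION_HIGH as well. */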
/* GLSL version if no version is explicitly specified. */
#define IMPLICIT_GLSL_VERSION 110
/* GLSL ES version if no version is explicitly specified. */
#define IMPLICIT_GLSL_ES_VERSION 100
void
glcpp_parser_resolve_implicit_version(glcpp_parser_t *parser)
{
int language_version = parser->api == API_OPENGLES2 ?
IMPLICIT_GLSL_ES_VERSION : IMPLICIT_GLSL_VERSION;
_glcpp_parser_handle_version_declaration(parser, language_version,
NULL, false);
}
|
44563.c | /* vi:set ts=8 sts=4 sw=4 noet:
*
* VIM - Vi IMproved by Bram Moolenaar
*
* Do ":help uganda" in Vim to read copying and usage conditions.
* Do ":help credits" in Vim to see a list of people who contributed.
* See README.txt for an overview of the Vim source code.
*/
#include "vim.h"
/*
* Vim originated from Stevie version 3.6 (Fish disk 217) by GRWalter (Fred)
* It has been changed beyond recognition since then.
*
* Differences between version 7.4 and 8.x can be found with ":help version8".
* Differences between version 6.4 and 7.x can be found with ":help version7".
* Differences between version 5.8 and 6.x can be found with ":help version6".
* Differences between version 4.x and 5.x can be found with ":help version5".
* Differences between version 3.0 and 4.x can be found with ":help version4".
* All the remarks about older versions have been removed; they are not very
* interesting.
*/
#include "version.h"
char *Version = VIM_VERSION_SHORT;
static char *mediumVersion = VIM_VERSION_MEDIUM;
#if defined(HAVE_DATE_TIME) || defined(PROTO)
# if (defined(VMS) && defined(VAXC)) || defined(PROTO)
char longVersion[sizeof(VIM_VERSION_LONG_DATE) + sizeof(__DATE__)
+ sizeof(__TIME__) + 3];
void
init_longVersion(void)
{
/*
* Construct the long version string. Necessary because
* VAX C can't concatenate strings in the preprocessor.
*/
strcpy(longVersion, VIM_VERSION_LONG_DATE);
#ifdef BUILD_DATE
strcat(longVersion, BUILD_DATE);
#else
strcat(longVersion, __DATE__);
strcat(longVersion, " ");
strcat(longVersion, __TIME__);
#endif
strcat(longVersion, ")");
}
# else
void
init_longVersion(void)
{
if (longVersion == NULL)
{
#ifdef BUILD_DATE
char *date_time = BUILD_DATE;
#else
char *date_time = __DATE__ " " __TIME__;
#endif
char *msg = _("%s (%s, compiled %s)");
size_t len = strlen(msg)
+ strlen(VIM_VERSION_LONG_ONLY)
+ strlen(VIM_VERSION_DATE_ONLY)
+ strlen(date_time);
longVersion = alloc(len);
if (longVersion == NULL)
longVersion = VIM_VERSION_LONG;
else
vim_snprintf(longVersion, len, msg,
VIM_VERSION_LONG_ONLY, VIM_VERSION_DATE_ONLY, date_time);
}
}
# endif
#else
char *longVersion = VIM_VERSION_LONG;
void
init_longVersion(void)
{
// nothing to do
}
#endif
static char *(features[]) =
{
#ifdef HAVE_ACL
"+acl",
#else
"-acl",
#endif
#ifdef AMIGA // only for Amiga systems
# ifdef FEAT_ARP
"+ARP",
# else
"-ARP",
# endif
#endif
#ifdef FEAT_ARABIC
"+arabic",
#else
"-arabic",
#endif
"+autocmd",
#ifdef FEAT_AUTOCHDIR
"+autochdir",
#else
"-autochdir",
#endif
#ifdef FEAT_AUTOSERVERNAME
"+autoservername",
#else
"-autoservername",
#endif
#ifdef FEAT_BEVAL_GUI
"+balloon_eval",
#else
"-balloon_eval",
#endif
#ifdef FEAT_BEVAL_TERM
"+balloon_eval_term",
#else
"-balloon_eval_term",
#endif
#ifdef FEAT_BROWSE
"+browse",
#else
"-browse",
#endif
#ifdef NO_BUILTIN_TCAPS
"-builtin_terms",
#endif
#ifdef SOME_BUILTIN_TCAPS
"+builtin_terms",
#endif
#ifdef ALL_BUILTIN_TCAPS
"++builtin_terms",
#endif
#ifdef FEAT_BYTEOFF
"+byte_offset",
#else
"-byte_offset",
#endif
#ifdef FEAT_JOB_CHANNEL
"+channel",
#else
"-channel",
#endif
#ifdef FEAT_CINDENT
"+cindent",
#else
"-cindent",
#endif
#ifdef FEAT_CLIENTSERVER
"+clientserver",
#else
"-clientserver",
#endif
#ifdef FEAT_CLIPBOARD
"+clipboard",
#else
"-clipboard",
#endif
"+cmdline_compl",
"+cmdline_hist",
#ifdef FEAT_CMDL_INFO
"+cmdline_info",
#else
"-cmdline_info",
#endif
"+comments",
#ifdef FEAT_CONCEAL
"+conceal",
#else
"-conceal",
#endif
#ifdef FEAT_CRYPT
"+cryptv",
#else
"-cryptv",
#endif
#ifdef FEAT_CSCOPE
"+cscope",
#else
"-cscope",
#endif
"+cursorbind",
#ifdef CURSOR_SHAPE
"+cursorshape",
#else
"-cursorshape",
#endif
#if defined(FEAT_CON_DIALOG) && defined(FEAT_GUI_DIALOG)
"+dialog_con_gui",
#else
# if defined(FEAT_CON_DIALOG)
"+dialog_con",
# else
# if defined(FEAT_GUI_DIALOG)
"+dialog_gui",
# else
"-dialog",
# endif
# endif
#endif
#ifdef FEAT_DIFF
"+diff",
#else
"-diff",
#endif
#ifdef FEAT_DIGRAPHS
"+digraphs",
#else
"-digraphs",
#endif
#ifdef FEAT_GUI_MSWIN
# ifdef FEAT_DIRECTX
"+directx",
# else
"-directx",
# endif
#endif
#ifdef FEAT_DND
"+dnd",
#else
"-dnd",
#endif
#ifdef EBCDIC
"+ebcdic",
#else
"-ebcdic",
#endif
#ifdef FEAT_EMACS_TAGS
"+emacs_tags",
#else
"-emacs_tags",
#endif
#ifdef FEAT_EVAL
"+eval",
#else
"-eval",
#endif
"+ex_extra",
#ifdef FEAT_SEARCH_EXTRA
"+extra_search",
#else
"-extra_search",
#endif
"-farsi",
#ifdef FEAT_SEARCHPATH
"+file_in_path",
#else
"-file_in_path",
#endif
#ifdef FEAT_FIND_ID
"+find_in_path",
#else
"-find_in_path",
#endif
#ifdef FEAT_FLOAT
"+float",
#else
"-float",
#endif
#ifdef FEAT_FOLDING
"+folding",
#else
"-folding",
#endif
#ifdef FEAT_FOOTER
"+footer",
#else
"-footer",
#endif
// only interesting on Unix systems
#if !defined(USE_SYSTEM) && defined(UNIX)
"+fork()",
#endif
#ifdef FEAT_GETTEXT
# ifdef DYNAMIC_GETTEXT
"+gettext/dyn",
# else
"+gettext",
# endif
#else
"-gettext",
#endif
"-hangul_input",
#if (defined(HAVE_ICONV_H) && defined(USE_ICONV)) || defined(DYNAMIC_ICONV)
# ifdef DYNAMIC_ICONV
"+iconv/dyn",
# else
"+iconv",
# endif
#else
"-iconv",
#endif
"+insert_expand",
#ifdef FEAT_IPV6
"+ipv6",
#else
"-ipv6",
#endif
#ifdef FEAT_JOB_CHANNEL
"+job",
#else
"-job",
#endif
#ifdef FEAT_JUMPLIST
"+jumplist",
#else
"-jumplist",
#endif
#ifdef FEAT_KEYMAP
"+keymap",
#else
"-keymap",
#endif
#ifdef FEAT_EVAL
"+lambda",
#else
"-lambda",
#endif
#ifdef FEAT_LANGMAP
"+langmap",
#else
"-langmap",
#endif
#ifdef FEAT_LIBCALL
"+libcall",
#else
"-libcall",
#endif
#ifdef FEAT_LINEBREAK
"+linebreak",
#else
"-linebreak",
#endif
#ifdef FEAT_LISP
"+lispindent",
#else
"-lispindent",
#endif
"+listcmds",
"+localmap",
#ifdef FEAT_LUA
# ifdef DYNAMIC_LUA
"+lua/dyn",
# else
"+lua",
# endif
#else
"-lua",
#endif
#ifdef FEAT_MENU
"+menu",
#else
"-menu",
#endif
#ifdef FEAT_SESSION
"+mksession",
#else
"-mksession",
#endif
"+modify_fname",
"+mouse",
#ifdef FEAT_MOUSESHAPE
"+mouseshape",
#else
"-mouseshape",
#endif
#if defined(UNIX) || defined(VMS)
# ifdef FEAT_MOUSE_DEC
"+mouse_dec",
# else
"-mouse_dec",
# endif
# ifdef FEAT_MOUSE_GPM
"+mouse_gpm",
# else
"-mouse_gpm",
# endif
# ifdef FEAT_MOUSE_JSB
"+mouse_jsbterm",
# else
"-mouse_jsbterm",
# endif
# ifdef FEAT_MOUSE_NET
"+mouse_netterm",
# else
"-mouse_netterm",
# endif
#endif
#ifdef __QNX__
# ifdef FEAT_MOUSE_PTERM
"+mouse_pterm",
# else
"-mouse_pterm",
# endif
#endif
#if defined(UNIX) || defined(VMS)
"+mouse_sgr",
# ifdef FEAT_SYSMOUSE
"+mouse_sysmouse",
# else
"-mouse_sysmouse",
# endif
# ifdef FEAT_MOUSE_URXVT
"+mouse_urxvt",
# else
"-mouse_urxvt",
# endif
"+mouse_xterm",
#endif
#ifdef FEAT_MBYTE_IME
# ifdef DYNAMIC_IME
"+multi_byte_ime/dyn",
# else
"+multi_byte_ime",
# endif
#else
"+multi_byte",
#endif
#ifdef FEAT_MULTI_LANG
"+multi_lang",
#else
"-multi_lang",
#endif
#ifdef FEAT_MZSCHEME
# ifdef DYNAMIC_MZSCHEME
"+mzscheme/dyn",
# else
"+mzscheme",
# endif
#else
"-mzscheme",
#endif
#ifdef FEAT_NETBEANS_INTG
"+netbeans_intg",
#else
"-netbeans_intg",
#endif
"+num64",
#ifdef FEAT_GUI_MSWIN
# ifdef FEAT_OLE
"+ole",
# else
"-ole",
# endif
#endif
#ifdef FEAT_EVAL
"+packages",
#else
"-packages",
#endif
#ifdef FEAT_PATH_EXTRA
"+path_extra",
#else
"-path_extra",
#endif
#ifdef FEAT_PERL
# ifdef DYNAMIC_PERL
"+perl/dyn",
# else
"+perl",
# endif
#else
"-perl",
#endif
#ifdef FEAT_PERSISTENT_UNDO
"+persistent_undo",
#else
"-persistent_undo",
#endif
#ifdef FEAT_PROP_POPUP
"+popupwin",
#else
"-popupwin",
#endif
#ifdef FEAT_PRINTER
# ifdef FEAT_POSTSCRIPT
"+postscript",
# else
"-postscript",
# endif
"+printer",
#else
"-printer",
#endif
#ifdef FEAT_PROFILE
"+profile",
#else
"-profile",
#endif
#ifdef FEAT_PYTHON
# ifdef DYNAMIC_PYTHON
"+python/dyn",
# else
"+python",
# endif
#else
"-python",
#endif
#ifdef FEAT_PYTHON3
# ifdef DYNAMIC_PYTHON3
"+python3/dyn",
# else
"+python3",
# endif
#else
"-python3",
#endif
#ifdef FEAT_QUICKFIX
"+quickfix",
#else
"-quickfix",
#endif
#ifdef FEAT_RELTIME
"+reltime",
#else
"-reltime",
#endif
#ifdef FEAT_RIGHTLEFT
"+rightleft",
#else
"-rightleft",
#endif
#ifdef FEAT_RUBY
# ifdef DYNAMIC_RUBY
"+ruby/dyn",
# else
"+ruby",
# endif
#else
"-ruby",
#endif
"+scrollbind",
#ifdef FEAT_SIGNS
"+signs",
#else
"-signs",
#endif
#ifdef FEAT_SMARTINDENT
"+smartindent",
#else
"-smartindent",
#endif
#ifdef FEAT_SOUND
"+sound",
#else
"-sound",
#endif
#ifdef FEAT_SPELL
"+spell",
#else
"-spell",
#endif
#ifdef STARTUPTIME
"+startuptime",
#else
"-startuptime",
#endif
#ifdef FEAT_STL_OPT
"+statusline",
#else
"-statusline",
#endif
"-sun_workshop",
#ifdef FEAT_SYN_HL
"+syntax",
#else
"-syntax",
#endif
// only interesting on Unix systems
#if defined(USE_SYSTEM) && defined(UNIX)
"+system()",
#endif
#ifdef FEAT_TAG_BINS
"+tag_binary",
#else
"-tag_binary",
#endif
"-tag_old_static",
"-tag_any_white",
#ifdef FEAT_TCL
# ifdef DYNAMIC_TCL
"+tcl/dyn",
# else
"+tcl",
# endif
#else
"-tcl",
#endif
#ifdef FEAT_TERMGUICOLORS
"+termguicolors",
#else
"-termguicolors",
#endif
#ifdef FEAT_TERMINAL
"+terminal",
#else
"-terminal",
#endif
#if defined(UNIX)
// only Unix can have terminfo instead of termcap
# ifdef TERMINFO
"+terminfo",
# else
"-terminfo",
# endif
#endif
#ifdef FEAT_TERMRESPONSE
"+termresponse",
#else
"-termresponse",
#endif
#ifdef FEAT_TEXTOBJ
"+textobjects",
#else
"-textobjects",
#endif
#ifdef FEAT_PROP_POPUP
"+textprop",
#else
"-textprop",
#endif
#if !defined(UNIX)
// unix always includes termcap support
# ifdef HAVE_TGETENT
"+tgetent",
# else
"-tgetent",
# endif
#endif
#ifdef FEAT_TIMERS
"+timers",
#else
"-timers",
#endif
#ifdef FEAT_TITLE
"+title",
#else
"-title",
#endif
#ifdef FEAT_TOOLBAR
"+toolbar",
#else
"-toolbar",
#endif
"+user_commands",
#ifdef FEAT_VARTABS
"+vartabs",
#else
"-vartabs",
#endif
"+vertsplit",
"+virtualedit",
"+visual",
"+visualextra",
#ifdef FEAT_VIMINFO
"+viminfo",
#else
"-viminfo",
#endif
"+vreplace",
#ifdef MSWIN
# ifdef FEAT_VTP
"+vtp",
# else
"-vtp",
# endif
#endif
#ifdef FEAT_WILDIGN
"+wildignore",
#else
"-wildignore",
#endif
#ifdef FEAT_WILDMENU
"+wildmenu",
#else
"-wildmenu",
#endif
"+windows",
#ifdef FEAT_WRITEBACKUP
"+writebackup",
#else
"-writebackup",
#endif
#if defined(UNIX) || defined(VMS)
# ifdef FEAT_X11
"+X11",
# else
"-X11",
# endif
#endif
#ifdef FEAT_XFONTSET
"+xfontset",
#else
"-xfontset",
#endif
#ifdef FEAT_XIM
"+xim",
#else
"-xim",
#endif
#ifdef MSWIN
# ifdef FEAT_XPM_W32
"+xpm_w32",
# else
"-xpm_w32",
# endif
#else
# ifdef HAVE_XPM
"+xpm",
# else
"-xpm",
# endif
#endif
#if defined(UNIX) || defined(VMS)
# ifdef USE_XSMP_INTERACT
"+xsmp_interact",
# else
# ifdef USE_XSMP
"+xsmp",
# else
"-xsmp",
# endif
# endif
# ifdef FEAT_XCLIPBOARD
"+xterm_clipboard",
# else
"-xterm_clipboard",
# endif
#endif
#ifdef FEAT_XTERM_SAVE
"+xterm_save",
#else
"-xterm_save",
#endif
NULL
};
static int included_patches[] =
{ /* Add new patch number below this line */
/**/
1869,
/**/
1868,
/**/
1867,
/**/
1866,
/**/
1865,
/**/
1864,
/**/
1863,
/**/
1862,
/**/
1861,
/**/
1860,
/**/
1859,
/**/
1858,
/**/
1857,
/**/
1856,
/**/
1855,
/**/
1854,
/**/
1853,
/**/
1852,
/**/
1851,
/**/
1850,
/**/
1849,
/**/
1848,
/**/
1847,
/**/
1846,
/**/
1845,
/**/
1844,
/**/
1843,
/**/
1842,
/**/
1841,
/**/
1840,
/**/
1839,
/**/
1838,
/**/
1837,
/**/
1836,
/**/
1835,
/**/
1834,
/**/
1833,
/**/
1832,
/**/
1831,
/**/
1830,
/**/
1829,
/**/
1828,
/**/
1827,
/**/
1826,
/**/
1825,
/**/
1824,
/**/
1823,
/**/
1822,
/**/
1821,
/**/
1820,
/**/
1819,
/**/
1818,
/**/
1817,
/**/
1816,
/**/
1815,
/**/
1814,
/**/
1813,
/**/
1812,
/**/
1811,
/**/
1810,
/**/
1809,
/**/
1808,
/**/
1807,
/**/
1806,
/**/
1805,
/**/
1804,
/**/
1803,
/**/
1802,
/**/
1801,
/**/
1800,
/**/
1799,
/**/
1798,
/**/
1797,
/**/
1796,
/**/
1795,
/**/
1794,
/**/
1793,
/**/
1792,
/**/
1791,
/**/
1790,
/**/
1789,
/**/
1788,
/**/
1787,
/**/
1786,
/**/
1785,
/**/
1784,
/**/
1783,
/**/
1782,
/**/
1781,
/**/
1780,
/**/
1779,
/**/
1778,
/**/
1777,
/**/
1776,
/**/
1775,
/**/
1774,
/**/
1773,
/**/
1772,
/**/
1771,
/**/
1770,
/**/
1769,
/**/
1768,
/**/
1767,
/**/
1766,
/**/
1765,
/**/
1764,
/**/
1763,
/**/
1762,
/**/
1761,
/**/
1760,
/**/
1759,
/**/
1758,
/**/
1757,
/**/
1756,
/**/
1755,
/**/
1754,
/**/
1753,
/**/
1752,
/**/
1751,
/**/
1750,
/**/
1749,
/**/
1748,
/**/
1747,
/**/
1746,
/**/
1745,
/**/
1744,
/**/
1743,
/**/
1742,
/**/
1741,
/**/
1740,
/**/
1739,
/**/
1738,
/**/
1737,
/**/
1736,
/**/
1735,
/**/
1734,
/**/
1733,
/**/
1732,
/**/
1731,
/**/
1730,
/**/
1729,
/**/
1728,
/**/
1727,
/**/
1726,
/**/
1725,
/**/
1724,
/**/
1723,
/**/
1722,
/**/
1721,
/**/
1720,
/**/
1719,
/**/
1718,
/**/
1717,
/**/
1716,
/**/
1715,
/**/
1714,
/**/
1713,
/**/
1712,
/**/
1711,
/**/
1710,
/**/
1709,
/**/
1708,
/**/
1707,
/**/
1706,
/**/
1705,
/**/
1704,
/**/
1703,
/**/
1702,
/**/
1701,
/**/
1700,
/**/
1699,
/**/
1698,
/**/
1697,
/**/
1696,
/**/
1695,
/**/
1694,
/**/
1693,
/**/
1692,
/**/
1691,
/**/
1690,
/**/
1689,
/**/
1688,
/**/
1687,
/**/
1686,
/**/
1685,
/**/
1684,
/**/
1683,
/**/
1682,
/**/
1681,
/**/
1680,
/**/
1679,
/**/
1678,
/**/
1677,
/**/
1676,
/**/
1675,
/**/
1674,
/**/
1673,
/**/
1672,
/**/
1671,
/**/
1670,
/**/
1669,
/**/
1668,
/**/
1667,
/**/
1666,
/**/
1665,
/**/
1664,
/**/
1663,
/**/
1662,
/**/
1661,
/**/
1660,
/**/
1659,
/**/
1658,
/**/
1657,
/**/
1656,
/**/
1655,
/**/
1654,
/**/
1653,
/**/
1652,
/**/
1651,
/**/
1650,
/**/
1649,
/**/
1648,
/**/
1647,
/**/
1646,
/**/
1645,
/**/
1644,
/**/
1643,
/**/
1642,
/**/
1641,
/**/
1640,
/**/
1639,
/**/
1638,
/**/
1637,
/**/
1636,
/**/
1635,
/**/
1634,
/**/
1633,
/**/
1632,
/**/
1631,
/**/
1630,
/**/
1629,
/**/
1628,
/**/
1627,
/**/
1626,
/**/
1625,
/**/
1624,
/**/
1623,
/**/
1622,
/**/
1621,
/**/
1620,
/**/
1619,
/**/
1618,
/**/
1617,
/**/
1616,
/**/
1615,
/**/
1614,
/**/
1613,
/**/
1612,
/**/
1611,
/**/
1610,
/**/
1609,
/**/
1608,
/**/
1607,
/**/
1606,
/**/
1605,
/**/
1604,
/**/
1603,
/**/
1602,
/**/
1601,
/**/
1600,
/**/
1599,
/**/
1598,
/**/
1597,
/**/
1596,
/**/
1595,
/**/
1594,
/**/
1593,
/**/
1592,
/**/
1591,
/**/
1590,
/**/
1589,
/**/
1588,
/**/
1587,
/**/
1586,
/**/
1585,
/**/
1584,
/**/
1583,
/**/
1582,
/**/
1581,
/**/
1580,
/**/
1579,
/**/
1578,
/**/
1577,
/**/
1576,
/**/
1575,
/**/
1574,
/**/
1573,
/**/
1572,
/**/
1571,
/**/
1570,
/**/
1569,
/**/
1568,
/**/
1567,
/**/
1566,
/**/
1565,
/**/
1564,
/**/
1563,
/**/
1562,
/**/
1561,
/**/
1560,
/**/
1559,
/**/
1558,
/**/
1557,
/**/
1556,
/**/
1555,
/**/
1554,
/**/
1553,
/**/
1552,
/**/
1551,
/**/
1550,
/**/
1549,
/**/
1548,
/**/
1547,
/**/
1546,
/**/
1545,
/**/
1544,
/**/
1543,
/**/
1542,
/**/
1541,
/**/
1540,
/**/
1539,
/**/
1538,
/**/
1537,
/**/
1536,
/**/
1535,
/**/
1534,
/**/
1533,
/**/
1532,
/**/
1531,
/**/
1530,
/**/
1529,
/**/
1528,
/**/
1527,
/**/
1526,
/**/
1525,
/**/
1524,
/**/
1523,
/**/
1522,
/**/
1521,
/**/
1520,
/**/
1519,
/**/
1518,
/**/
1517,
/**/
1516,
/**/
1515,
/**/
1514,
/**/
1513,
/**/
1512,
/**/
1511,
/**/
1510,
/**/
1509,
/**/
1508,
/**/
1507,
/**/
1506,
/**/
1505,
/**/
1504,
/**/
1503,
/**/
1502,
/**/
1501,
/**/
1500,
/**/
1499,
/**/
1498,
/**/
1497,
/**/
1496,
/**/
1495,
/**/
1494,
/**/
1493,
/**/
1492,
/**/
1491,
/**/
1490,
/**/
1489,
/**/
1488,
/**/
1487,
/**/
1486,
/**/
1485,
/**/
1484,
/**/
1483,
/**/
1482,
/**/
1481,
/**/
1480,
/**/
1479,
/**/
1478,
/**/
1477,
/**/
1476,
/**/
1475,
/**/
1474,
/**/
1473,
/**/
1472,
/**/
1471,
/**/
1470,
/**/
1469,
/**/
1468,
/**/
1467,
/**/
1466,
/**/
1465,
/**/
1464,
/**/
1463,
/**/
1462,
/**/
1461,
/**/
1460,
/**/
1459,
/**/
1458,
/**/
1457,
/**/
1456,
/**/
1455,
/**/
1454,
/**/
1453,
/**/
1452,
/**/
1451,
/**/
1450,
/**/
1449,
/**/
1448,
/**/
1447,
/**/
1446,
/**/
1445,
/**/
1444,
/**/
1443,
/**/
1442,
/**/
1441,
/**/
1440,
/**/
1439,
/**/
1438,
/**/
1437,
/**/
1436,
/**/
1435,
/**/
1434,
/**/
1433,
/**/
1432,
/**/
1431,
/**/
1430,
/**/
1429,
/**/
1428,
/**/
1427,
/**/
1426,
/**/
1425,
/**/
1424,
/**/
1423,
/**/
1422,
/**/
1421,
/**/
1420,
/**/
1419,
/**/
1418,
/**/
1417,
/**/
1416,
/**/
1415,
/**/
1414,
/**/
1413,
/**/
1412,
/**/
1411,
/**/
1410,
/**/
1409,
/**/
1408,
/**/
1407,
/**/
1406,
/**/
1405,
/**/
1404,
/**/
1403,
/**/
1402,
/**/
1401,
/**/
1400,
/**/
1399,
/**/
1398,
/**/
1397,
/**/
1396,
/**/
1395,
/**/
1394,
/**/
1393,
/**/
1392,
/**/
1391,
/**/
1390,
/**/
1389,
/**/
1388,
/**/
1387,
/**/
1386,
/**/
1385,
/**/
1384,
/**/
1383,
/**/
1382,
/**/
1381,
/**/
1380,
/**/
1379,
/**/
1378,
/**/
1377,
/**/
1376,
/**/
1375,
/**/
1374,
/**/
1373,
/**/
1372,
/**/
1371,
/**/
1370,
/**/
1369,
/**/
1368,
/**/
1367,
/**/
1366,
/**/
1365,
/**/
1364,
/**/
1363,
/**/
1362,
/**/
1361,
/**/
1360,
/**/
1359,
/**/
1358,
/**/
1357,
/**/
1356,
/**/
1355,
/**/
1354,
/**/
1353,
/**/
1352,
/**/
1351,
/**/
1350,
/**/
1349,
/**/
1348,
/**/
1347,
/**/
1346,
/**/
1345,
/**/
1344,
/**/
1343,
/**/
1342,
/**/
1341,
/**/
1340,
/**/
1339,
/**/
1338,
/**/
1337,
/**/
1336,
/**/
1335,
/**/
1334,
/**/
1333,
/**/
1332,
/**/
1331,
/**/
1330,
/**/
1329,
/**/
1328,
/**/
1327,
/**/
1326,
/**/
1325,
/**/
1324,
/**/
1323,
/**/
1322,
/**/
1321,
/**/
1320,
/**/
1319,
/**/
1318,
/**/
1317,
/**/
1316,
/**/
1315,
/**/
1314,
/**/
1313,
/**/
1312,
/**/
1311,
/**/
1310,
/**/
1309,
/**/
1308,
/**/
1307,
/**/
1306,
/**/
1305,
/**/
1304,
/**/
1303,
/**/
1302,
/**/
1301,
/**/
1300,
/**/
1299,
/**/
1298,
/**/
1297,
/**/
1296,
/**/
1295,
/**/
1294,
/**/
1293,
/**/
1292,
/**/
1291,
/**/
1290,
/**/
1289,
/**/
1288,
/**/
1287,
/**/
1286,
/**/
1285,
/**/
1284,
/**/
1283,
/**/
1282,
/**/
1281,
/**/
1280,
/**/
1279,
/**/
1278,
/**/
1277,
/**/
1276,
/**/
1275,
/**/
1274,
/**/
1273,
/**/
1272,
/**/
1271,
/**/
1270,
/**/
1269,
/**/
1268,
/**/
1267,
/**/
1266,
/**/
1265,
/**/
1264,
/**/
1263,
/**/
1262,
/**/
1261,
/**/
1260,
/**/
1259,
/**/
1258,
/**/
1257,
/**/
1256,
/**/
1255,
/**/
1254,
/**/
1253,
/**/
1252,
/**/
1251,
/**/
1250,
/**/
1249,
/**/
1248,
/**/
1247,
/**/
1246,
/**/
1245,
/**/
1244,
/**/
1243,
/**/
1242,
/**/
1241,
/**/
1240,
/**/
1239,
/**/
1238,
/**/
1237,
/**/
1236,
/**/
1235,
/**/
1234,
/**/
1233,
/**/
1232,
/**/
1231,
/**/
1230,
/**/
1229,
/**/
1228,
/**/
1227,
/**/
1226,
/**/
1225,
/**/
1224,
/**/
1223,
/**/
1222,
/**/
1221,
/**/
1220,
/**/
1219,
/**/
1218,
/**/
1217,
/**/
1216,
/**/
1215,
/**/
1214,
/**/
1213,
/**/
1212,
/**/
1211,
/**/
1210,
/**/
1209,
/**/
1208,
/**/
1207,
/**/
1206,
/**/
1205,
/**/
1204,
/**/
1203,
/**/
1202,
/**/
1201,
/**/
1200,
/**/
1199,
/**/
1198,
/**/
1197,
/**/
1196,
/**/
1195,
/**/
1194,
/**/
1193,
/**/
1192,
/**/
1191,
/**/
1190,
/**/
1189,
/**/
1188,
/**/
1187,
/**/
1186,
/**/
1185,
/**/
1184,
/**/
1183,
/**/
1182,
/**/
1181,
/**/
1180,
/**/
1179,
/**/
1178,
/**/
1177,
/**/
1176,
/**/
1175,
/**/
1174,
/**/
1173,
/**/
1172,
/**/
1171,
/**/
1170,
/**/
1169,
/**/
1168,
/**/
1167,
/**/
1166,
/**/
1165,
/**/
1164,
/**/
1163,
/**/
1162,
/**/
1161,
/**/
1160,
/**/
1159,
/**/
1158,
/**/
1157,
/**/
1156,
/**/
1155,
/**/
1154,
/**/
1153,
/**/
1152,
/**/
1151,
/**/
1150,
/**/
1149,
/**/
1148,
/**/
1147,
/**/
1146,
/**/
1145,
/**/
1144,
/**/
1143,
/**/
1142,
/**/
1141,
/**/
1140,
/**/
1139,
/**/
1138,
/**/
1137,
/**/
1136,
/**/
1135,
/**/
1134,
/**/
1133,
/**/
1132,
/**/
1131,
/**/
1130,
/**/
1129,
/**/
1128,
/**/
1127,
/**/
1126,
/**/
1125,
/**/
1124,
/**/
1123,
/**/
1122,
/**/
1121,
/**/
1120,
/**/
1119,
/**/
1118,
/**/
1117,
/**/
1116,
/**/
1115,
/**/
1114,
/**/
1113,
/**/
1112,
/**/
1111,
/**/
1110,
/**/
1109,
/**/
1108,
/**/
1107,
/**/
1106,
/**/
1105,
/**/
1104,
/**/
1103,
/**/
1102,
/**/
1101,
/**/
1100,
/**/
1099,
/**/
1098,
/**/
1097,
/**/
1096,
/**/
1095,
/**/
1094,
/**/
1093,
/**/
1092,
/**/
1091,
/**/
1090,
/**/
1089,
/**/
1088,
/**/
1087,
/**/
1086,
/**/
1085,
/**/
1084,
/**/
1083,
/**/
1082,
/**/
1081,
/**/
1080,
/**/
1079,
/**/
1078,
/**/
1077,
/**/
1076,
/**/
1075,
/**/
1074,
/**/
1073,
/**/
1072,
/**/
1071,
/**/
1070,
/**/
1069,
/**/
1068,
/**/
1067,
/**/
1066,
/**/
1065,
/**/
1064,
/**/
1063,
/**/
1062,
/**/
1061,
/**/
1060,
/**/
1059,
/**/
1058,
/**/
1057,
/**/
1056,
/**/
1055,
/**/
1054,
/**/
1053,
/**/
1052,
/**/
1051,
/**/
1050,
/**/
1049,
/**/
1048,
/**/
1047,
/**/
1046,
/**/
1045,
/**/
1044,
/**/
1043,
/**/
1042,
/**/
1041,
/**/
1040,
/**/
1039,
/**/
1038,
/**/
1037,
/**/
1036,
/**/
1035,
/**/
1034,
/**/
1033,
/**/
1032,
/**/
1031,
/**/
1030,
/**/
1029,
/**/
1028,
/**/
1027,
/**/
1026,
/**/
1025,
/**/
1024,
/**/
1023,
/**/
1022,
/**/
1021,
/**/
1020,
/**/
1019,
/**/
1018,
/**/
1017,
/**/
1016,
/**/
1015,
/**/
1014,
/**/
1013,
/**/
1012,
/**/
1011,
/**/
1010,
/**/
1009,
/**/
1008,
/**/
1007,
/**/
1006,
/**/
1005,
/**/
1004,
/**/
1003,
/**/
1002,
/**/
1001,
/**/
1000,
/**/
999,
/**/
998,
/**/
997,
/**/
996,
/**/
995,
/**/
994,
/**/
993,
/**/
992,
/**/
991,
/**/
990,
/**/
989,
/**/
988,
/**/
987,
/**/
986,
/**/
985,
/**/
984,
/**/
983,
/**/
982,
/**/
981,
/**/
980,
/**/
979,
/**/
978,
/**/
977,
/**/
976,
/**/
975,
/**/
974,
/**/
973,
/**/
972,
/**/
971,
/**/
970,
/**/
969,
/**/
968,
/**/
967,
/**/
966,
/**/
965,
/**/
964,
/**/
963,
/**/
962,
/**/
961,
/**/
960,
/**/
959,
/**/
958,
/**/
957,
/**/
956,
/**/
955,
/**/
954,
/**/
953,
/**/
952,
/**/
951,
/**/
950,
/**/
949,
/**/
948,
/**/
947,
/**/
946,
/**/
945,
/**/
944,
/**/
943,
/**/
942,
/**/
941,
/**/
940,
/**/
939,
/**/
938,
/**/
937,
/**/
936,
/**/
935,
/**/
934,
/**/
933,
/**/
932,
/**/
931,
/**/
930,
/**/
929,
/**/
928,
/**/
927,
/**/
926,
/**/
925,
/**/
924,
/**/
923,
/**/
922,
/**/
921,
/**/
920,
/**/
919,
/**/
918,
/**/
917,
/**/
916,
/**/
915,
/**/
914,
/**/
913,
/**/
912,
/**/
911,
/**/
910,
/**/
909,
/**/
908,
/**/
907,
/**/
906,
/**/
905,
/**/
904,
/**/
903,
/**/
902,
/**/
901,
/**/
900,
/**/
899,
/**/
898,
/**/
897,
/**/
896,
/**/
895,
/**/
894,
/**/
893,
/**/
892,
/**/
891,
/**/
890,
/**/
889,
/**/
888,
/**/
887,
/**/
886,
/**/
885,
/**/
884,
/**/
883,
/**/
882,
/**/
881,
/**/
880,
/**/
879,
/**/
878,
/**/
877,
/**/
876,
/**/
875,
/**/
874,
/**/
873,
/**/
872,
/**/
871,
/**/
870,
/**/
869,
/**/
868,
/**/
867,
/**/
866,
/**/
865,
/**/
864,
/**/
863,
/**/
862,
/**/
861,
/**/
860,
/**/
859,
/**/
858,
/**/
857,
/**/
856,
/**/
855,
/**/
854,
/**/
853,
/**/
852,
/**/
851,
/**/
850,
/**/
849,
/**/
848,
/**/
847,
/**/
846,
/**/
845,
/**/
844,
/**/
843,
/**/
842,
/**/
841,
/**/
840,
/**/
839,
/**/
838,
/**/
837,
/**/
836,
/**/
835,
/**/
834,
/**/
833,
/**/
832,
/**/
831,
/**/
830,
/**/
829,
/**/
828,
/**/
827,
/**/
826,
/**/
825,
/**/
824,
/**/
823,
/**/
822,
/**/
821,
/**/
820,
/**/
819,
/**/
818,
/**/
817,
/**/
816,
/**/
815,
/**/
814,
/**/
813,
/**/
812,
/**/
811,
/**/
810,
/**/
809,
/**/
808,
/**/
807,
/**/
806,
/**/
805,
/**/
804,
/**/
803,
/**/
802,
/**/
801,
/**/
800,
/**/
799,
/**/
798,
/**/
797,
/**/
796,
/**/
795,
/**/
794,
/**/
793,
/**/
792,
/**/
791,
/**/
790,
/**/
789,
/**/
788,
/**/
787,
/**/
786,
/**/
785,
/**/
784,
/**/
783,
/**/
782,
/**/
781,
/**/
780,
/**/
779,
/**/
778,
/**/
777,
/**/
776,
/**/
775,
/**/
774,
/**/
773,
/**/
772,
/**/
771,
/**/
770,
/**/
769,
/**/
768,
/**/
767,
/**/
766,
/**/
765,
/**/
764,
/**/
763,
/**/
762,
/**/
761,
/**/
760,
/**/
759,
/**/
758,
/**/
757,
/**/
756,
/**/
755,
/**/
754,
/**/
753,
/**/
752,
/**/
751,
/**/
750,
/**/
749,
/**/
748,
/**/
747,
/**/
746,
/**/
745,
/**/
744,
/**/
743,
/**/
742,
/**/
741,
/**/
740,
/**/
739,
/**/
738,
/**/
737,
/**/
736,
/**/
735,
/**/
734,
/**/
733,
/**/
732,
/**/
731,
/**/
730,
/**/
729,
/**/
728,
/**/
727,
/**/
726,
/**/
725,
/**/
724,
/**/
723,
/**/
722,
/**/
721,
/**/
720,
/**/
719,
/**/
718,
/**/
717,
/**/
716,
/**/
715,
/**/
714,
/**/
713,
/**/
712,
/**/
711,
/**/
710,
/**/
709,
/**/
708,
/**/
707,
/**/
706,
/**/
705,
/**/
704,
/**/
703,
/**/
702,
/**/
701,
/**/
700,
/**/
699,
/**/
698,
/**/
697,
/**/
696,
/**/
695,
/**/
694,
/**/
693,
/**/
692,
/**/
691,
/**/
690,
/**/
689,
/**/
688,
/**/
687,
/**/
686,
/**/
685,
/**/
684,
/**/
683,
/**/
682,
/**/
681,
/**/
680,
/**/
679,
/**/
678,
/**/
677,
/**/
676,
/**/
675,
/**/
674,
/**/
673,
/**/
672,
/**/
671,
/**/
670,
/**/
669,
/**/
668,
/**/
667,
/**/
666,
/**/
665,
/**/
664,
/**/
663,
/**/
662,
/**/
661,
/**/
660,
/**/
659,
/**/
658,
/**/
657,
/**/
656,
/**/
655,
/**/
654,
/**/
653,
/**/
652,
/**/
651,
/**/
650,
/**/
649,
/**/
648,
/**/
647,
/**/
646,
/**/
645,
/**/
644,
/**/
643,
/**/
642,
/**/
641,
/**/
640,
/**/
639,
/**/
638,
/**/
637,
/**/
636,
/**/
635,
/**/
634,
/**/
633,
/**/
632,
/**/
631,
/**/
630,
/**/
629,
/**/
628,
/**/
627,
/**/
626,
/**/
625,
/**/
624,
/**/
623,
/**/
622,
/**/
621,
/**/
620,
/**/
619,
/**/
618,
/**/
617,
/**/
616,
/**/
615,
/**/
614,
/**/
613,
/**/
612,
/**/
611,
/**/
610,
/**/
609,
/**/
608,
/**/
607,
/**/
606,
/**/
605,
/**/
604,
/**/
603,
/**/
602,
/**/
601,
/**/
600,
/**/
599,
/**/
598,
/**/
597,
/**/
596,
/**/
595,
/**/
594,
/**/
593,
/**/
592,
/**/
591,
/**/
590,
/**/
589,
/**/
588,
/**/
587,
/**/
586,
/**/
585,
/**/
584,
/**/
583,
/**/
582,
/**/
581,
/**/
580,
/**/
579,
/**/
578,
/**/
577,
/**/
576,
/**/
575,
/**/
574,
/**/
573,
/**/
572,
/**/
571,
/**/
570,
/**/
569,
/**/
568,
/**/
567,
/**/
566,
/**/
565,
/**/
564,
/**/
563,
/**/
562,
/**/
561,
/**/
560,
/**/
559,
/**/
558,
/**/
557,
/**/
556,
/**/
555,
/**/
554,
/**/
553,
/**/
552,
/**/
551,
/**/
550,
/**/
549,
/**/
548,
/**/
547,
/**/
546,
/**/
545,
/**/
544,
/**/
543,
/**/
542,
/**/
541,
/**/
540,
/**/
539,
/**/
538,
/**/
537,
/**/
536,
/**/
535,
/**/
534,
/**/
533,
/**/
532,
/**/
531,
/**/
530,
/**/
529,
/**/
528,
/**/
527,
/**/
526,
/**/
525,
/**/
524,
/**/
523,
/**/
522,
/**/
521,
/**/
520,
/**/
519,
/**/
518,
/**/
517,
/**/
516,
/**/
515,
/**/
514,
/**/
513,
/**/
512,
/**/
511,
/**/
510,
/**/
509,
/**/
508,
/**/
507,
/**/
506,
/**/
505,
/**/
504,
/**/
503,
/**/
502,
/**/
501,
/**/
500,
/**/
499,
/**/
498,
/**/
497,
/**/
496,
/**/
495,
/**/
494,
/**/
493,
/**/
492,
/**/
491,
/**/
490,
/**/
489,
/**/
488,
/**/
487,
/**/
486,
/**/
485,
/**/
484,
/**/
483,
/**/
482,
/**/
481,
/**/
480,
/**/
479,
/**/
478,
/**/
477,
/**/
476,
/**/
475,
/**/
474,
/**/
473,
/**/
472,
/**/
471,
/**/
470,
/**/
469,
/**/
468,
/**/
467,
/**/
466,
/**/
465,
/**/
464,
/**/
463,
/**/
462,
/**/
461,
/**/
460,
/**/
459,
/**/
458,
/**/
457,
/**/
456,
/**/
455,
/**/
454,
/**/
453,
/**/
452,
/**/
451,
/**/
450,
/**/
449,
/**/
448,
/**/
447,
/**/
446,
/**/
445,
/**/
444,
/**/
443,
/**/
442,
/**/
441,
/**/
440,
/**/
439,
/**/
438,
/**/
437,
/**/
436,
/**/
435,
/**/
434,
/**/
433,
/**/
432,
/**/
431,
/**/
430,
/**/
429,
/**/
428,
/**/
427,
/**/
426,
/**/
425,
/**/
424,
/**/
423,
/**/
422,
/**/
421,
/**/
420,
/**/
419,
/**/
418,
/**/
417,
/**/
416,
/**/
415,
/**/
414,
/**/
413,
/**/
412,
/**/
411,
/**/
410,
/**/
409,
/**/
408,
/**/
407,
/**/
406,
/**/
405,
/**/
404,
/**/
403,
/**/
402,
/**/
401,
/**/
400,
/**/
399,
/**/
398,
/**/
397,
/**/
396,
/**/
395,
/**/
394,
/**/
393,
/**/
392,
/**/
391,
/**/
390,
/**/
389,
/**/
388,
/**/
387,
/**/
386,
/**/
385,
/**/
384,
/**/
383,
/**/
382,
/**/
381,
/**/
380,
/**/
379,
/**/
378,
/**/
377,
/**/
376,
/**/
375,
/**/
374,
/**/
373,
/**/
372,
/**/
371,
/**/
370,
/**/
369,
/**/
368,
/**/
367,
/**/
366,
/**/
365,
/**/
364,
/**/
363,
/**/
362,
/**/
361,
/**/
360,
/**/
359,
/**/
358,
/**/
357,
/**/
356,
/**/
355,
/**/
354,
/**/
353,
/**/
352,
/**/
351,
/**/
350,
/**/
349,
/**/
348,
/**/
347,
/**/
346,
/**/
345,
/**/
344,
/**/
343,
/**/
342,
/**/
341,
/**/
340,
/**/
339,
/**/
338,
/**/
337,
/**/
336,
/**/
335,
/**/
334,
/**/
333,
/**/
332,
/**/
331,
/**/
330,
/**/
329,
/**/
328,
/**/
327,
/**/
326,
/**/
325,
/**/
324,
/**/
323,
/**/
322,
/**/
321,
/**/
320,
/**/
319,
/**/
318,
/**/
317,
/**/
316,
/**/
315,
/**/
314,
/**/
313,
/**/
312,
/**/
311,
/**/
310,
/**/
309,
/**/
308,
/**/
307,
/**/
306,
/**/
305,
/**/
304,
/**/
303,
/**/
302,
/**/
301,
/**/
300,
/**/
299,
/**/
298,
/**/
297,
/**/
296,
/**/
295,
/**/
294,
/**/
293,
/**/
292,
/**/
291,
/**/
290,
/**/
289,
/**/
288,
/**/
287,
/**/
286,
/**/
285,
/**/
284,
/**/
283,
/**/
282,
/**/
281,
/**/
280,
/**/
279,
/**/
278,
/**/
277,
/**/
276,
/**/
275,
/**/
274,
/**/
273,
/**/
272,
/**/
271,
/**/
270,
/**/
269,
/**/
268,
/**/
267,
/**/
266,
/**/
265,
/**/
264,
/**/
263,
/**/
262,
/**/
261,
/**/
260,
/**/
259,
/**/
258,
/**/
257,
/**/
256,
/**/
255,
/**/
254,
/**/
253,
/**/
252,
/**/
251,
/**/
250,
/**/
249,
/**/
248,
/**/
247,
/**/
246,
/**/
245,
/**/
244,
/**/
243,
/**/
242,
/**/
241,
/**/
240,
/**/
239,
/**/
238,
/**/
237,
/**/
236,
/**/
235,
/**/
234,
/**/
233,
/**/
232,
/**/
231,
/**/
230,
/**/
229,
/**/
228,
/**/
227,
/**/
226,
/**/
225,
/**/
224,
/**/
223,
/**/
222,
/**/
221,
/**/
220,
/**/
219,
/**/
218,
/**/
217,
/**/
216,
/**/
215,
/**/
214,
/**/
213,
/**/
212,
/**/
211,
/**/
210,
/**/
209,
/**/
208,
/**/
207,
/**/
206,
/**/
205,
/**/
204,
/**/
203,
/**/
202,
/**/
201,
/**/
200,
/**/
199,
/**/
198,
/**/
197,
/**/
196,
/**/
195,
/**/
194,
/**/
193,
/**/
192,
/**/
191,
/**/
190,
/**/
189,
/**/
188,
/**/
187,
/**/
186,
/**/
185,
/**/
184,
/**/
183,
/**/
182,
/**/
181,
/**/
180,
/**/
179,
/**/
178,
/**/
177,
/**/
176,
/**/
175,
/**/
174,
/**/
173,
/**/
172,
/**/
171,
/**/
170,
/**/
169,
/**/
168,
/**/
167,
/**/
166,
/**/
165,
/**/
164,
/**/
163,
/**/
162,
/**/
161,
/**/
160,
/**/
159,
/**/
158,
/**/
157,
/**/
156,
/**/
155,
/**/
154,
/**/
153,
/**/
152,
/**/
151,
/**/
150,
/**/
149,
/**/
148,
/**/
147,
/**/
146,
/**/
145,
/**/
144,
/**/
143,
/**/
142,
/**/
141,
/**/
140,
/**/
139,
/**/
138,
/**/
137,
/**/
136,
/**/
135,
/**/
134,
/**/
133,
/**/
132,
/**/
131,
/**/
130,
/**/
129,
/**/
128,
/**/
127,
/**/
126,
/**/
125,
/**/
124,
/**/
123,
/**/
122,
/**/
121,
/**/
120,
/**/
119,
/**/
118,
/**/
117,
/**/
116,
/**/
115,
/**/
114,
/**/
113,
/**/
112,
/**/
111,
/**/
110,
/**/
109,
/**/
108,
/**/
107,
/**/
106,
/**/
105,
/**/
104,
/**/
103,
/**/
102,
/**/
101,
/**/
100,
/**/
99,
/**/
98,
/**/
97,
/**/
96,
/**/
95,
/**/
94,
/**/
93,
/**/
92,
/**/
91,
/**/
90,
/**/
89,
/**/
88,
/**/
87,
/**/
86,
/**/
85,
/**/
84,
/**/
83,
/**/
82,
/**/
81,
/**/
80,
/**/
79,
/**/
78,
/**/
77,
/**/
76,
/**/
75,
/**/
74,
/**/
73,
/**/
72,
/**/
71,
/**/
70,
/**/
69,
/**/
68,
/**/
67,
/**/
66,
/**/
65,
/**/
64,
/**/
63,
/**/
62,
/**/
61,
/**/
60,
/**/
59,
/**/
58,
/**/
57,
/**/
56,
/**/
55,
/**/
54,
/**/
53,
/**/
52,
/**/
51,
/**/
50,
/**/
49,
/**/
48,
/**/
47,
/**/
46,
/**/
45,
/**/
44,
/**/
43,
/**/
42,
/**/
41,
/**/
40,
/**/
39,
/**/
38,
/**/
37,
/**/
36,
/**/
35,
/**/
34,
/**/
33,
/**/
32,
/**/
31,
/**/
30,
/**/
29,
/**/
28,
/**/
27,
/**/
26,
/**/
25,
/**/
24,
/**/
23,
/**/
22,
/**/
21,
/**/
20,
/**/
19,
/**/
18,
/**/
17,
/**/
16,
/**/
15,
/**/
14,
/**/
13,
/**/
12,
/**/
11,
/**/
10,
/**/
9,
/**/
8,
/**/
7,
/**/
6,
/**/
5,
/**/
4,
/**/
3,
/**/
2,
/**/
1,
/**/
0
};
/*
* Place to put a short description when adding a feature with a patch.
* Keep it short, e.g.,: "relative numbers", "persistent undo".
* Also add a comment marker to separate the lines.
* See the official Vim patches for the diff format: It must use a context of
* one line only. Create it by hand or use "diff -C2" and edit the patch.
*/
static char *(extra_patches[]) =
{ /* Add your patch description below this line */
/**/
NULL
};
int
highest_patch(void)
{
    // this relies on the highest patch number being the first entry
return included_patches[0];
}
#if defined(FEAT_EVAL) || defined(PROTO)
/*
* Return TRUE if patch "n" has been included.
*/
int
has_patch(int n)
{
int i;
for (i = 0; included_patches[i] != 0; ++i)
if (included_patches[i] == n)
return TRUE;
return FALSE;
}
#endif
void
ex_version(exarg_T *eap)
{
/*
* Ignore a ":version 9.99" command.
*/
if (*eap->arg == NUL)
{
msg_putchar('\n');
list_version();
}
}
/*
* Output a string for the version message. If it's going to wrap, output a
* newline, unless the message is too long to fit on the screen anyway.
* When "wrap" is TRUE wrap the string in [].
*/
static void
version_msg_wrap(char_u *s, int wrap)
{
int len = (int)vim_strsize(s) + (wrap ? 2 : 0);
if (!got_int && len < (int)Columns && msg_col + len >= (int)Columns
&& *s != '\n')
msg_putchar('\n');
if (!got_int)
{
if (wrap)
msg_puts("[");
msg_puts((char *)s);
if (wrap)
msg_puts("]");
}
}
static void
version_msg(char *s)
{
version_msg_wrap((char_u *)s, FALSE);
}
/*
* List all features aligned in columns, dictionary style.
*/
static void
list_features(void)
{
list_in_columns((char_u **)features, -1, -1);
}
/*
* List string items nicely aligned in columns.
* When "size" is < 0 then the last entry is marked with NULL.
 * The entry with index "current" is enclosed in [].
*/
void
list_in_columns(char_u **items, int size, int current)
{
int i;
int ncol;
int nrow;
int cur_row = 1;
int item_count = 0;
int width = 0;
#ifdef FEAT_SYN_HL
int use_highlight = (items == (char_u **)features);
#endif
// Find the length of the longest item, use that + 1 as the column
// width.
for (i = 0; size < 0 ? items[i] != NULL : i < size; ++i)
{
int l = (int)vim_strsize(items[i]) + (i == current ? 2 : 0);
if (l > width)
width = l;
++item_count;
}
width += 1;
if (Columns < width)
{
// Not enough screen columns - show one per line
for (i = 0; i < item_count; ++i)
{
version_msg_wrap(items[i], i == current);
if (msg_col > 0 && i < item_count - 1)
msg_putchar('\n');
}
return;
}
// The rightmost column doesn't need a separator.
// Sacrifice it to fit in one more column if possible.
ncol = (int) (Columns + 1) / width;
nrow = item_count / ncol + (item_count % ncol ? 1 : 0);
// "i" counts columns then rows. "idx" counts rows then columns.
for (i = 0; !got_int && i < nrow * ncol; ++i)
{
int idx = (i / ncol) + (i % ncol) * nrow;
if (idx < item_count)
{
int last_col = (i + 1) % ncol == 0;
if (idx == current)
msg_putchar('[');
#ifdef FEAT_SYN_HL
if (use_highlight && items[idx][0] == '-')
msg_puts_attr((char *)items[idx], HL_ATTR(HLF_W));
else
#endif
msg_puts((char *)items[idx]);
if (idx == current)
msg_putchar(']');
if (last_col)
{
if (msg_col > 0 && cur_row < nrow)
msg_putchar('\n');
++cur_row;
}
else
{
while (msg_col % width)
msg_putchar(' ');
}
}
else
{
            // no more items for this cell, thus at the end of the row
if (msg_col > 0)
{
if (cur_row < nrow)
msg_putchar('\n');
++cur_row;
}
}
}
}
void
list_version(void)
{
int i;
int first;
char *s = "";
/*
* When adding features here, don't forget to update the list of
* internal variables in eval.c!
*/
init_longVersion();
msg(longVersion);
#ifdef MSWIN
# ifdef FEAT_GUI_MSWIN
# ifdef VIMDLL
# ifdef _WIN64
msg_puts(_("\nMS-Windows 64-bit GUI/console version"));
# else
msg_puts(_("\nMS-Windows 32-bit GUI/console version"));
# endif
# else
# ifdef _WIN64
msg_puts(_("\nMS-Windows 64-bit GUI version"));
# else
msg_puts(_("\nMS-Windows 32-bit GUI version"));
# endif
# endif
# ifdef FEAT_OLE
msg_puts(_(" with OLE support"));
# endif
# else
# ifdef _WIN64
msg_puts(_("\nMS-Windows 64-bit console version"));
# else
msg_puts(_("\nMS-Windows 32-bit console version"));
# endif
# endif
#endif
#if defined(MACOS_X)
# if defined(MACOS_X_DARWIN)
msg_puts(_("\nmacOS version"));
# else
msg_puts(_("\nmacOS version w/o darwin feat."));
# endif
#endif
#ifdef VMS
msg_puts(_("\nOpenVMS version"));
# ifdef HAVE_PATHDEF
if (*compiled_arch != NUL)
{
msg_puts(" - ");
msg_puts((char *)compiled_arch);
}
# endif
#endif
// Print the list of patch numbers if there is at least one.
// Print a range when patches are consecutive: "1-10, 12, 15-40, 42-45"
if (included_patches[0] != 0)
{
msg_puts(_("\nIncluded patches: "));
first = -1;
// find last one
for (i = 0; included_patches[i] != 0; ++i)
;
while (--i >= 0)
{
if (first < 0)
first = included_patches[i];
if (i == 0 || included_patches[i - 1] != included_patches[i] + 1)
{
msg_puts(s);
s = ", ";
msg_outnum((long)first);
if (first != included_patches[i])
{
msg_puts("-");
msg_outnum((long)included_patches[i]);
}
first = -1;
}
}
}
// Print the list of extra patch descriptions if there is at least one.
if (extra_patches[0] != NULL)
{
msg_puts(_("\nExtra patches: "));
s = "";
for (i = 0; extra_patches[i] != NULL; ++i)
{
msg_puts(s);
s = ", ";
msg_puts(extra_patches[i]);
}
}
#ifdef MODIFIED_BY
msg_puts("\n");
msg_puts(_("Modified by "));
msg_puts(MODIFIED_BY);
#endif
#ifdef HAVE_PATHDEF
if (*compiled_user != NUL || *compiled_sys != NUL)
{
msg_puts(_("\nCompiled "));
if (*compiled_user != NUL)
{
msg_puts(_("by "));
msg_puts((char *)compiled_user);
}
if (*compiled_sys != NUL)
{
msg_puts("@");
msg_puts((char *)compiled_sys);
}
}
#endif
#ifdef FEAT_HUGE
msg_puts(_("\nHuge version "));
#else
# ifdef FEAT_BIG
msg_puts(_("\nBig version "));
# else
# ifdef FEAT_NORMAL
msg_puts(_("\nNormal version "));
# else
# ifdef FEAT_SMALL
msg_puts(_("\nSmall version "));
# else
msg_puts(_("\nTiny version "));
# endif
# endif
# endif
#endif
#ifndef FEAT_GUI
msg_puts(_("without GUI."));
#else
# ifdef FEAT_GUI_GTK
# ifdef USE_GTK3
msg_puts(_("with GTK3 GUI."));
# else
# ifdef FEAT_GUI_GNOME
msg_puts(_("with GTK2-GNOME GUI."));
# else
msg_puts(_("with GTK2 GUI."));
# endif
# endif
# else
# ifdef FEAT_GUI_MOTIF
msg_puts(_("with X11-Motif GUI."));
# else
# ifdef FEAT_GUI_ATHENA
# ifdef FEAT_GUI_NEXTAW
msg_puts(_("with X11-neXtaw GUI."));
# else
msg_puts(_("with X11-Athena GUI."));
# endif
# else
# ifdef FEAT_GUI_HAIKU
msg_puts(_("with Haiku GUI."));
# else
# ifdef FEAT_GUI_PHOTON
msg_puts(_("with Photon GUI."));
# else
# if defined(MSWIN)
msg_puts(_("with GUI."));
# endif
# endif
# endif
# endif
# endif
# endif
#endif
version_msg(_(" Features included (+) or not (-):\n"));
list_features();
if (msg_col > 0)
msg_putchar('\n');
#ifdef SYS_VIMRC_FILE
version_msg(_(" system vimrc file: \""));
version_msg(SYS_VIMRC_FILE);
version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE
version_msg(_(" user vimrc file: \""));
version_msg(USR_VIMRC_FILE);
version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE2
version_msg(_(" 2nd user vimrc file: \""));
version_msg(USR_VIMRC_FILE2);
version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE3
version_msg(_(" 3rd user vimrc file: \""));
version_msg(USR_VIMRC_FILE3);
version_msg("\"\n");
#endif
#ifdef USR_EXRC_FILE
version_msg(_(" user exrc file: \""));
version_msg(USR_EXRC_FILE);
version_msg("\"\n");
#endif
#ifdef USR_EXRC_FILE2
version_msg(_(" 2nd user exrc file: \""));
version_msg(USR_EXRC_FILE2);
version_msg("\"\n");
#endif
#ifdef FEAT_GUI
# ifdef SYS_GVIMRC_FILE
version_msg(_(" system gvimrc file: \""));
version_msg(SYS_GVIMRC_FILE);
version_msg("\"\n");
# endif
version_msg(_(" user gvimrc file: \""));
version_msg(USR_GVIMRC_FILE);
version_msg("\"\n");
# ifdef USR_GVIMRC_FILE2
version_msg(_("2nd user gvimrc file: \""));
version_msg(USR_GVIMRC_FILE2);
version_msg("\"\n");
# endif
# ifdef USR_GVIMRC_FILE3
version_msg(_("3rd user gvimrc file: \""));
version_msg(USR_GVIMRC_FILE3);
version_msg("\"\n");
# endif
#endif
version_msg(_(" defaults file: \""));
version_msg(VIM_DEFAULTS_FILE);
version_msg("\"\n");
#ifdef FEAT_GUI
# ifdef SYS_MENU_FILE
version_msg(_(" system menu file: \""));
version_msg(SYS_MENU_FILE);
version_msg("\"\n");
# endif
#endif
#ifdef HAVE_PATHDEF
if (*default_vim_dir != NUL)
{
version_msg(_(" fall-back for $VIM: \""));
version_msg((char *)default_vim_dir);
version_msg("\"\n");
}
if (*default_vimruntime_dir != NUL)
{
version_msg(_(" f-b for $VIMRUNTIME: \""));
version_msg((char *)default_vimruntime_dir);
version_msg("\"\n");
}
version_msg(_("Compilation: "));
version_msg((char *)all_cflags);
version_msg("\n");
#ifdef VMS
if (*compiler_version != NUL)
{
version_msg(_("Compiler: "));
version_msg((char *)compiler_version);
version_msg("\n");
}
#endif
version_msg(_("Linking: "));
version_msg((char *)all_lflags);
#endif
#ifdef DEBUG
version_msg("\n");
version_msg(_(" DEBUG BUILD"));
#endif
}
static void do_intro_line(int row, char_u *mesg, int add_version, int attr);
static void intro_message(int colon);
/*
* Show the intro message when not editing a file.
*/
void
maybe_intro_message(void)
{
if (BUFEMPTY()
&& curbuf->b_fname == NULL
&& firstwin->w_next == NULL
&& vim_strchr(p_shm, SHM_INTRO) == NULL)
intro_message(FALSE);
}
/*
* Give an introductory message about Vim.
* Only used when starting Vim on an empty file, without a file name.
* Or with the ":intro" command (for Sven :-).
*/
static void
intro_message(
int colon) // TRUE for ":intro"
{
int i;
int row;
int blanklines;
int sponsor;
char *p;
static char *(lines[]) =
{
N_("VIM - Vi IMproved"),
"",
N_("version "),
N_("by Bram Moolenaar et al."),
#ifdef MODIFIED_BY
" ",
#endif
N_("Vim is open source and freely distributable"),
"",
N_("Help poor children in Uganda!"),
N_("type :help iccf<Enter> for information "),
"",
N_("type :q<Enter> to exit "),
N_("type :help<Enter> or <F1> for on-line help"),
N_("type :help version8<Enter> for version info"),
NULL,
"",
N_("Running in Vi compatible mode"),
N_("type :set nocp<Enter> for Vim defaults"),
N_("type :help cp-default<Enter> for info on this"),
};
#ifdef FEAT_GUI
static char *(gui_lines[]) =
{
NULL,
NULL,
NULL,
NULL,
#ifdef MODIFIED_BY
NULL,
#endif
NULL,
NULL,
NULL,
N_("menu Help->Orphans for information "),
NULL,
N_("Running modeless, typed text is inserted"),
N_("menu Edit->Global Settings->Toggle Insert Mode "),
N_(" for two modes "),
NULL,
NULL,
NULL,
N_("menu Edit->Global Settings->Toggle Vi Compatible"),
N_(" for Vim defaults "),
};
#endif
// blanklines = screen height - # message lines
blanklines = (int)Rows - ((sizeof(lines) / sizeof(char *)) - 1);
if (!p_cp)
blanklines += 4; // add 4 for not showing "Vi compatible" message
// Don't overwrite a statusline. Depends on 'cmdheight'.
if (p_ls > 1)
blanklines -= Rows - topframe->fr_height;
if (blanklines < 0)
blanklines = 0;
// Show the sponsor and register message one out of four times, the Uganda
// message two out of four times.
sponsor = (int)time(NULL);
sponsor = ((sponsor & 2) == 0) - ((sponsor & 4) == 0);
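    // The result is -1, 0 or +1: -1 selects the sponsor message, +1 the
    // register message, and 0 keeps the default Uganda lines.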
// start displaying the message lines after half of the blank lines
row = blanklines / 2;
if ((row >= 2 && Columns >= 50) || colon)
{
for (i = 0; i < (int)(sizeof(lines) / sizeof(char *)); ++i)
{
p = lines[i];
#ifdef FEAT_GUI
if (p_im && gui.in_use && gui_lines[i] != NULL)
p = gui_lines[i];
#endif
if (p == NULL)
{
if (!p_cp)
break;
continue;
}
if (sponsor != 0)
{
if (strstr(p, "children") != NULL)
p = sponsor < 0
? N_("Sponsor Vim development!")
: N_("Become a registered Vim user!");
else if (strstr(p, "iccf") != NULL)
p = sponsor < 0
? N_("type :help sponsor<Enter> for information ")
: N_("type :help register<Enter> for information ");
else if (strstr(p, "Orphans") != NULL)
p = N_("menu Help->Sponsor/Register for information ");
}
if (*p != NUL)
do_intro_line(row, (char_u *)_(p), i == 2, 0);
++row;
}
}
// Make the wait-return message appear just below the text.
if (colon)
msg_row = row;
}
static void
do_intro_line(
int row,
char_u *mesg,
int add_version,
int attr)
{
char_u vers[20];
int col;
char_u *p;
int l;
int clen;
#ifdef MODIFIED_BY
# define MODBY_LEN 150
char_u modby[MODBY_LEN];
if (*mesg == ' ')
{
vim_strncpy(modby, (char_u *)_("Modified by "), MODBY_LEN - 1);
l = (int)STRLEN(modby);
vim_strncpy(modby + l, (char_u *)MODIFIED_BY, MODBY_LEN - l - 1);
mesg = modby;
}
#endif
// Center the message horizontally.
col = vim_strsize(mesg);
if (add_version)
{
STRCPY(vers, mediumVersion);
if (highest_patch())
{
// Check for 9.9x or 9.9xx, alpha/beta version
if (isalpha((int)vers[3]))
{
int len = (isalpha((int)vers[4])) ? 5 : 4;
sprintf((char *)vers + len, ".%d%s", highest_patch(),
mediumVersion + len);
}
else
sprintf((char *)vers + 3, ".%d", highest_patch());
}
col += (int)STRLEN(vers);
}
col = (Columns - col) / 2;
if (col < 0)
col = 0;
// Split up in parts to highlight <> items differently.
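    // A part ends just before the next '<' or right after a '>', so each
    // "<...>" key name becomes its own part and is drawn with HLF_8.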
for (p = mesg; *p != NUL; p += l)
{
clen = 0;
for (l = 0; p[l] != NUL
&& (l == 0 || (p[l] != '<' && p[l - 1] != '>')); ++l)
{
if (has_mbyte)
{
clen += ptr2cells(p + l);
l += (*mb_ptr2len)(p + l) - 1;
}
else
clen += byte2cells(p[l]);
}
screen_puts_len(p, l, row, col, *p == '<' ? HL_ATTR(HLF_8) : attr);
col += clen;
}
// Add the version number to the version line.
if (add_version)
screen_puts(vers, row, col, 0);
}
/*
* ":intro": clear screen, display intro screen and wait for return.
*/
void
ex_intro(exarg_T *eap UNUSED)
{
screenclear();
intro_message(TRUE);
wait_return(TRUE);
}
|
533513.c | /******************************************************************************
* Copyright (C) 2010 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xqspips_flash_polled_example.c
*
*
* This file contains a design example using the QSPI driver (XQspiPs) in
 * polled mode with a serial FLASH device. This example performs
* some transfers in Auto mode and Manual start mode, to illustrate the modes
* available. It is recommended to use Manual CS + Auto start for
* best performance.
 * The hardware which this example runs on must have a serial FLASH (Numonyx
* N25Q, Winbond W25Q, Spansion S25FL, ISSI IS25WP) for it to run. This example
* has been tested with the Numonyx Serial Flash (N25Q128) and IS25WP series
* flash parts.
*
* @note
*
* None.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- --- -------- -----------------------------------------------
* 1.00 sdm 11/25/10 First release
* 1.01 srt 06/12/12 Changed to meet frequency requirements of READ command
* for CR 663787
* 2.00a kka 22/08/12 Updated the example as XQspiPs_PolledTransfer API has
* changed. Changed the prescalar to use divide by 8.
* The user can change the prescalar to a maximum of
* divide by 2 based on the reference clock in the
* system.
* Set the Holdb_dr bit in the configuration register using
* XQSPIPS_HOLD_B_DRIVE_OPTION. Setting this bit
* drives the HOLD bit of the QSPI controller.
* This is required for QSPI to be used in Non QSPI boot
* mode else there needs to be an external pullup on this
* line.
* See http://www.xilinx.com/support/answers/47596.htm.
* 2.01a sg 02/03/13 Created a function FlashReadID. Removed multiple
* initialization using SetOptions.
* ms 04/05/17 Modified Comment lines in functions to
* recognize it as documentation block for doxygen
* generation.
* 3.5 tjs 07/16/18 Added support for low density ISSI flash parts.
* Added FlashQuadEnable API to enable quad mode in flash.
 * 3.6 akm 04/15/19 Modified FlashQuadEnable, FlashWrite and FlashErase APIs,
* to wait for the on going operation to complete before
* performing the next operation.
*</pre>
*
******************************************************************************/
/***************************** Include Files *********************************/
#include "xparameters.h" /* SDK generated parameters */
#include "xqspips.h" /* QSPI device driver */
#include "xil_printf.h"
/************************** Constant Definitions *****************************/
/*
* The following constants map to the XPAR parameters created in the
* xparameters.h file. They are defined here such that a user can easily
* change all the needed parameters in one place.
*/
#define QSPI_DEVICE_ID XPAR_XQSPIPS_0_DEVICE_ID
/*
* The following constants define the commands which may be sent to the FLASH
* device.
*/
#define WRITE_STATUS_CMD 0x01
#define WRITE_CMD 0x02
#define READ_CMD 0x03
#define WRITE_DISABLE_CMD 0x04
#define READ_STATUS_CMD 0x05
#define WRITE_ENABLE_CMD 0x06
#define FAST_READ_CMD 0x0B
#define DUAL_READ_CMD 0x3B
#define QUAD_READ_CMD 0x6B
#define BULK_ERASE_CMD 0xC7
#define SEC_ERASE_CMD 0xD8
#define READ_ID 0x9F
/*
* The following constants define the offsets within a FlashBuffer data
* type for each kind of data. Note that the read data offset is not the
* same as the write data because the QSPI driver is designed to allow full
* duplex transfers such that the number of bytes received is the number
* sent and received.
*/
#define COMMAND_OFFSET 0 /* FLASH instruction */
#define ADDRESS_1_OFFSET 1 /* MSB byte of address to read or write */
#define ADDRESS_2_OFFSET 2 /* Middle byte of address to read or write */
#define ADDRESS_3_OFFSET 3 /* LSB byte of address to read or write */
#define DATA_OFFSET 4 /* Start of Data for Read/Write */
#define DUMMY_OFFSET 4 /* Dummy byte offset for fast, dual and quad
* reads
*/
#define DUMMY_SIZE 1 /* Number of dummy bytes for fast, dual and
* quad reads
*/
#define RD_ID_SIZE 4 /* Read ID command + 3 bytes ID response */
#define BULK_ERASE_SIZE 1 /* Bulk Erase command size */
#define SEC_ERASE_SIZE 4 /* Sector Erase command + Sector address */
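/*
 * Resulting transfer buffer layout (derived from the offsets above): byte 0
 * holds the command, bytes 1-3 the 24-bit address, and data follows at
 * DATA_OFFSET. For Fast/Dual/Quad reads one dummy byte occupies DUMMY_OFFSET,
 * so valid read data starts at DATA_OFFSET + DUMMY_SIZE.
 */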
/*
* The following constants specify the extra bytes which are sent to the
* FLASH on the QSPI interface, that are not data, but control information
* which includes the command and address
*/
#define OVERHEAD_SIZE 4
/*
* The following constants specify the page size, sector size, and number of
* pages and sectors for the FLASH. The page size specifies a max number of
* bytes that can be written to the FLASH with a single transfer.
*/
#define SECTOR_SIZE 0x10000
#define NUM_SECTORS 0x100
#define NUM_PAGES 0x10000
#define PAGE_SIZE 256
/* Number of flash pages to be written.*/
#define PAGE_COUNT 16
/* Flash address to which data is to be written.*/
#define TEST_ADDRESS 0x00055000
#define UNIQUE_VALUE 0x05
/*
* The following constants specify the max amount of data and the size of the
* the buffer required to hold the data and overhead to transfer the data to
* and from the FLASH.
*/
#define MAX_DATA (PAGE_COUNT * PAGE_SIZE)
/**************************** Type Definitions *******************************/
/***************** Macros (Inline Functions) Definitions *********************/
/************************** Function Prototypes ******************************/
void FlashErase(XQspiPs *QspiPtr, u32 Address, u32 ByteCount);
void FlashWrite(XQspiPs *QspiPtr, u32 Address, u32 ByteCount, u8 Command);
void FlashRead(XQspiPs *QspiPtr, u32 Address, u32 ByteCount, u8 Command);
int FlashReadID(void);
void FlashQuadEnable(XQspiPs *QspiPtr);
int QspiFlashPolledExample(XQspiPs *QspiInstancePtr, u16 QspiDeviceId);
/************************** Variable Definitions *****************************/
/*
* The instances to support the device drivers are global such that they
* are initialized to zero each time the program runs. They could be local
* but should at least be static so they are zeroed.
*/
static XQspiPs QspiInstance;
/*
* The following variable allows a test value to be added to the values that
* are written to the FLASH such that unique values can be generated to
* guarantee the writes to the FLASH were successful
*/
int Test = 5;
/*
* The following variables are used to read and write to the flash and they
* are global to avoid having large buffers on the stack
*/
u8 ReadBuffer[MAX_DATA + DATA_OFFSET + DUMMY_SIZE];
u8 WriteBuffer[PAGE_SIZE + DATA_OFFSET];
/*****************************************************************************/
/**
*
* Main function to call the QSPI Flash example.
*
* @param None
*
* @return XST_SUCCESS if successful, otherwise XST_FAILURE.
*
* @note None
*
******************************************************************************/
int main(void)
{
int Status;
xil_printf("QSPI FLASH Polled Example Test \r\n");
	/* Run the QSPI flash polled example.*/
Status = QspiFlashPolledExample(&QspiInstance, QSPI_DEVICE_ID);
if (Status != XST_SUCCESS) {
xil_printf("QSPI FLASH Polled Example Test Failed\r\n");
return XST_FAILURE;
}
xil_printf("Successfully ran QSPI FLASH Polled Example Test\r\n");
return XST_SUCCESS;
}
/*****************************************************************************/
/**
*
* The purpose of this function is to illustrate how to use the XQspiPs
* device driver in polled mode. This function writes and reads data
* from a serial FLASH.
*
 * @param	QspiInstancePtr is a pointer to the QSPI driver instance to use.
 * @param	QspiDeviceId is the device ID of the QSPI device (from xparameters.h).
*
* @return XST_SUCCESS if successful, else XST_FAILURE.
*
* @note None.
*
*****************************************************************************/
int QspiFlashPolledExample(XQspiPs *QspiInstancePtr, u16 QspiDeviceId)
{
int Status;
u8 *BufferPtr;
u8 UniqueValue;
int Count;
int Page;
XQspiPs_Config *QspiConfig;
/* Initialize the QSPI driver so that it's ready to use*/
QspiConfig = XQspiPs_LookupConfig(QspiDeviceId);
if (QspiConfig == NULL) {
return XST_FAILURE;
}
Status = XQspiPs_CfgInitialize(QspiInstancePtr, QspiConfig,
QspiConfig->BaseAddress);
if (Status != XST_SUCCESS) {
return XST_FAILURE;
}
/* Perform a self-test to check hardware build*/
Status = XQspiPs_SelfTest(QspiInstancePtr);
if (Status != XST_SUCCESS) {
return XST_FAILURE;
}
/*
* Initialize the write buffer for a pattern to write to the FLASH
* and the read buffer to zero so it can be verified after the read,
* the test value that is added to the unique value allows the value
 * to be changed in a debug environment to guarantee the writes to the
 * FLASH were successful.
*/
for (UniqueValue = UNIQUE_VALUE, Count = 0; Count < PAGE_SIZE;
Count++, UniqueValue++) {
WriteBuffer[DATA_OFFSET + Count] = (u8)(UniqueValue + Test);
}
memset(ReadBuffer, 0x00, sizeof(ReadBuffer));
/*
* Set Manual Start and Manual Chip select options and drive HOLD_B
* pin high.
*/
XQspiPs_SetOptions(QspiInstancePtr, XQSPIPS_MANUAL_START_OPTION |
XQSPIPS_FORCE_SSELECT_OPTION |
XQSPIPS_HOLD_B_DRIVE_OPTION);
/* Set the prescaler for QSPI clock*/
XQspiPs_SetClkPrescaler(QspiInstancePtr, XQSPIPS_CLK_PRESCALE_8);
/* Assert the FLASH chip select.*/
XQspiPs_SetSlaveSelect(QspiInstancePtr);
FlashReadID();
FlashQuadEnable(QspiInstancePtr);
/* Erase the flash.*/
FlashErase(QspiInstancePtr, TEST_ADDRESS, MAX_DATA);
/*
* Write the data in the write buffer to the serial FLASH a page at a
* time, starting from TEST_ADDRESS
*/
for (Page = 0; Page < PAGE_COUNT; Page++) {
FlashWrite(QspiInstancePtr, (Page * PAGE_SIZE) + TEST_ADDRESS,
PAGE_SIZE, WRITE_CMD);
}
/*
* Read the contents of the FLASH from TEST_ADDRESS, using Normal Read
* command. Change the prescaler as the READ command operates at a
* lower frequency.
*/
FlashRead(QspiInstancePtr, TEST_ADDRESS, MAX_DATA, READ_CMD);
/*
* Setup a pointer to the start of the data that was read into the read
* buffer and verify the data read is the data that was written
*/
BufferPtr = &ReadBuffer[DATA_OFFSET];
for (UniqueValue = UNIQUE_VALUE, Count = 0; Count < MAX_DATA;
Count++, UniqueValue++) {
if (BufferPtr[Count] != (u8)(UniqueValue + Test)) {
return XST_FAILURE;
}
}
/*
* Read the contents of the FLASH from TEST_ADDRESS, using Fast Read
* command
*/
memset(ReadBuffer, 0x00, sizeof(ReadBuffer));
FlashRead(QspiInstancePtr, TEST_ADDRESS, MAX_DATA, FAST_READ_CMD);
/*
* Setup a pointer to the start of the data that was read into the read
* buffer and verify the data read is the data that was written
*/
BufferPtr = &ReadBuffer[DATA_OFFSET + DUMMY_SIZE];
for (UniqueValue = UNIQUE_VALUE, Count = 0; Count < MAX_DATA;
Count++, UniqueValue++) {
if (BufferPtr[Count] != (u8)(UniqueValue + Test)) {
return XST_FAILURE;
}
}
/*
* Read the contents of the FLASH from TEST_ADDRESS, using Dual Read
* command
*/
memset(ReadBuffer, 0x00, sizeof(ReadBuffer));
FlashRead(QspiInstancePtr, TEST_ADDRESS, MAX_DATA, DUAL_READ_CMD);
/*
* Setup a pointer to the start of the data that was read into the read
* buffer and verify the data read is the data that was written
*/
BufferPtr = &ReadBuffer[DATA_OFFSET + DUMMY_SIZE];
for (UniqueValue = UNIQUE_VALUE, Count = 0; Count < MAX_DATA;
Count++, UniqueValue++) {
if (BufferPtr[Count] != (u8)(UniqueValue + Test)) {
return XST_FAILURE;
}
}
/*
* Read the contents of the FLASH from TEST_ADDRESS, using Quad Read
* command
*/
memset(ReadBuffer, 0x00, sizeof(ReadBuffer));
FlashRead(QspiInstancePtr, TEST_ADDRESS, MAX_DATA, QUAD_READ_CMD);
/*
* Setup a pointer to the start of the data that was read into the read
* buffer and verify the data read is the data that was written
*/
BufferPtr = &ReadBuffer[DATA_OFFSET + DUMMY_SIZE];
for (UniqueValue = UNIQUE_VALUE, Count = 0; Count < MAX_DATA;
Count++, UniqueValue++) {
if (BufferPtr[Count] != (u8)(UniqueValue + Test)) {
return XST_FAILURE;
}
}
/*
* Initialize the write buffer for a pattern to write to the FLASH
* and the read buffer to zero so it can be verified after the read,
* the test value that is added to the unique value allows the value
 * to be changed in a debug environment to guarantee the writes to the
 * FLASH were successful.
*/
for (UniqueValue = UNIQUE_VALUE, Count = 0; Count < PAGE_SIZE;
Count++, UniqueValue++) {
WriteBuffer[DATA_OFFSET + Count] = (u8)(UniqueValue + Test);
}
memset(ReadBuffer, 0x00, sizeof(ReadBuffer));
/*
* Set Auto Start and Manual Chip select options and drive HOLD_B
* pin high.
*/
XQspiPs_SetOptions(QspiInstancePtr, XQSPIPS_FORCE_SSELECT_OPTION |
XQSPIPS_HOLD_B_DRIVE_OPTION);
/* Erase the flash.*/
FlashErase(QspiInstancePtr, TEST_ADDRESS, MAX_DATA);
/*
* Write the data in the write buffer to the serial FLASH a page at a
* time, starting from TEST_ADDRESS
*/
for (Page = 0; Page < PAGE_COUNT; Page++) {
FlashWrite(QspiInstancePtr, (Page * PAGE_SIZE) + TEST_ADDRESS,
PAGE_SIZE, WRITE_CMD);
}
/*
* Read the contents of the FLASH from TEST_ADDRESS, using Normal Read
* command. Change the prescaler as the READ command operates at a
* lower frequency.
*/
FlashRead(QspiInstancePtr, TEST_ADDRESS, MAX_DATA, READ_CMD);
/*
* Setup a pointer to the start of the data that was read into the read
* buffer and verify the data read is the data that was written
*/
BufferPtr = &ReadBuffer[DATA_OFFSET];
for (UniqueValue = UNIQUE_VALUE, Count = 0; Count < MAX_DATA;
Count++, UniqueValue++) {
if (BufferPtr[Count] != (u8)(UniqueValue + Test)) {
return XST_FAILURE;
}
}
/*
* Read the contents of the FLASH from TEST_ADDRESS, using Fast Read
* command
*/
memset(ReadBuffer, 0x00, sizeof(ReadBuffer));
FlashRead(QspiInstancePtr, TEST_ADDRESS, MAX_DATA, FAST_READ_CMD);
/*
* Setup a pointer to the start of the data that was read into the read
* buffer and verify the data read is the data that was written
*/
BufferPtr = &ReadBuffer[DATA_OFFSET + DUMMY_SIZE];
for (UniqueValue = UNIQUE_VALUE, Count = 0; Count < MAX_DATA;
Count++, UniqueValue++) {
if (BufferPtr[Count] != (u8)(UniqueValue + Test)) {
return XST_FAILURE;
}
}
/*
* Read the contents of the FLASH from TEST_ADDRESS, using Dual Read
* command
*/
memset(ReadBuffer, 0x00, sizeof(ReadBuffer));
FlashRead(QspiInstancePtr, TEST_ADDRESS, MAX_DATA, DUAL_READ_CMD);
/*
* Setup a pointer to the start of the data that was read into the read
* buffer and verify the data read is the data that was written
*/
BufferPtr = &ReadBuffer[DATA_OFFSET + DUMMY_SIZE];
for (UniqueValue = UNIQUE_VALUE, Count = 0; Count < MAX_DATA;
Count++, UniqueValue++) {
if (BufferPtr[Count] != (u8)(UniqueValue + Test)) {
return XST_FAILURE;
}
}
/*
* Read the contents of the FLASH from TEST_ADDRESS, using Quad Read
* command
*/
memset(ReadBuffer, 0x00, sizeof(ReadBuffer));
FlashRead(QspiInstancePtr, TEST_ADDRESS, MAX_DATA, QUAD_READ_CMD);
/*
* Setup a pointer to the start of the data that was read into the read
* buffer and verify the data read is the data that was written
*/
BufferPtr = &ReadBuffer[DATA_OFFSET + DUMMY_SIZE];
for (UniqueValue = UNIQUE_VALUE, Count = 0; Count < MAX_DATA;
Count++, UniqueValue++) {
if (BufferPtr[Count] != (u8)(UniqueValue + Test)) {
return XST_FAILURE;
}
}
return XST_SUCCESS;
}
/*****************************************************************************/
/**
*
* This function writes to the serial FLASH connected to the QSPI interface.
* All the data put into the buffer must be in the same page of the device with
* page boundaries being on 256 byte boundaries.
*
* @param QspiPtr is a pointer to the QSPI driver component to use.
* @param Address contains the address to write data to in the FLASH.
* @param ByteCount contains the number of bytes to write.
* @param Command is the command used to write data to the flash. QSPI
* device supports only Page Program command to write data to the
* flash.
*
* @return None.
*
* @note None.
*
******************************************************************************/
void FlashWrite(XQspiPs *QspiPtr, u32 Address, u32 ByteCount, u8 Command)
{
u8 WriteEnableCmd = { WRITE_ENABLE_CMD };
u8 ReadStatusCmd[] = { READ_STATUS_CMD, 0 }; /* must send 2 bytes */
u8 FlashStatus[2];
/*
* Send the write enable command to the FLASH so that it can be
* written to, this needs to be sent as a separate transfer before
* the write
*/
XQspiPs_PolledTransfer(QspiPtr, &WriteEnableCmd, NULL,
sizeof(WriteEnableCmd));
/*
* Setup the write command with the specified address and data for the
* FLASH
*/
WriteBuffer[COMMAND_OFFSET] = Command;
WriteBuffer[ADDRESS_1_OFFSET] = (u8)((Address & 0xFF0000) >> 16);
WriteBuffer[ADDRESS_2_OFFSET] = (u8)((Address & 0xFF00) >> 8);
WriteBuffer[ADDRESS_3_OFFSET] = (u8)(Address & 0xFF);
/*
* Send the write command, address, and data to the FLASH to be
* written, no receive buffer is specified since there is nothing to
* receive
*/
XQspiPs_PolledTransfer(QspiPtr, WriteBuffer, NULL,
ByteCount + OVERHEAD_SIZE);
/*
* Wait for the write command to the FLASH to be completed, it takes
* some time for the data to be written
*/
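	/* Bit 0 of the received status byte is the write-in-progress flag. */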
while (1) {
/*
* Poll the status register of the FLASH to determine when it
* completes, by sending a read status command and receiving the
* status byte
*/
XQspiPs_PolledTransfer(QspiPtr, ReadStatusCmd, FlashStatus,
sizeof(ReadStatusCmd));
/*
* If the status indicates the write is done, then stop waiting,
* if a value of 0xFF in the status byte is read from the
* device and this loop never exits, the device slave select is
* possibly incorrect such that the device status is not being
* read
*/
FlashStatus[1] |= FlashStatus[0];
if ((FlashStatus[1] & 0x01) == 0) {
break;
}
}
}
/*****************************************************************************/
/**
*
* This function reads from the serial FLASH connected to the
* QSPI interface.
*
* @param QspiPtr is a pointer to the QSPI driver component to use.
* @param Address contains the address to read data from in the FLASH.
* @param ByteCount contains the number of bytes to read.
* @param Command is the command used to read data from the flash. QSPI
 *		device supports one of the Read, Fast Read, Dual Read and Quad
 *		Read commands to read data from the flash.
*
* @return None.
*
* @note None.
*
******************************************************************************/
void FlashRead(XQspiPs *QspiPtr, u32 Address, u32 ByteCount, u8 Command)
{
/*
* Setup the write command with the specified address and data for the
* FLASH
*/
WriteBuffer[COMMAND_OFFSET] = Command;
WriteBuffer[ADDRESS_1_OFFSET] = (u8)((Address & 0xFF0000) >> 16);
WriteBuffer[ADDRESS_2_OFFSET] = (u8)((Address & 0xFF00) >> 8);
WriteBuffer[ADDRESS_3_OFFSET] = (u8)(Address & 0xFF);
if ((Command == FAST_READ_CMD) || (Command == DUAL_READ_CMD) ||
(Command == QUAD_READ_CMD)) {
ByteCount += DUMMY_SIZE;
}
/*
* Send the read command to the FLASH to read the specified number
* of bytes from the FLASH, send the read command and address and
* receive the specified number of bytes of data in the data buffer
*/
XQspiPs_PolledTransfer(QspiPtr, WriteBuffer, ReadBuffer,
ByteCount + OVERHEAD_SIZE);
}
/*****************************************************************************/
/**
*
* This function erases the sectors in the serial FLASH connected to the
* QSPI interface.
*
* @param QspiPtr is a pointer to the QSPI driver component to use.
* @param Address contains the address of the first sector which needs to
* be erased.
* @param ByteCount contains the total size to be erased.
*
* @return None.
*
* @note None.
*
******************************************************************************/
void FlashErase(XQspiPs *QspiPtr, u32 Address, u32 ByteCount)
{
u8 WriteEnableCmd = { WRITE_ENABLE_CMD };
u8 ReadStatusCmd[] = { READ_STATUS_CMD, 0 }; /* must send 2 bytes */
u8 FlashStatus[2];
int Sector;
/*
* If erase size is same as the total size of the flash, use bulk erase
* command
*/
if (ByteCount == (NUM_SECTORS * SECTOR_SIZE)) {
/*
* Send the write enable command to the FLASH so that it can be
* written to, this needs to be sent as a separate transfer
* before the erase
*/
XQspiPs_PolledTransfer(QspiPtr, &WriteEnableCmd, NULL,
sizeof(WriteEnableCmd));
/* Setup the bulk erase command*/
WriteBuffer[COMMAND_OFFSET] = BULK_ERASE_CMD;
/*
* Send the bulk erase command; no receive buffer is specified
* since there is nothing to receive
*/
XQspiPs_PolledTransfer(QspiPtr, WriteBuffer, NULL,
BULK_ERASE_SIZE);
/* Wait for the erase command to the FLASH to be completed*/
while (1) {
/*
* Poll the status register of the device to determine
* when it completes, by sending a read status command
* and receiving the status byte
*/
XQspiPs_PolledTransfer(QspiPtr, ReadStatusCmd,
FlashStatus,
sizeof(ReadStatusCmd));
/*
* If the status indicates the write is done, then stop
* waiting; if a value of 0xFF in the status byte is
* read from the device and this loop never exits, the
* device slave select is possibly incorrect such that
* the device status is not being read
*/
FlashStatus[1] |= FlashStatus[0];
if ((FlashStatus[1] & 0x01) == 0) {
break;
}
}
return;
}
/*
* If the erase size is less than the total size of the flash, use
* sector erase command
*/
for (Sector = 0; Sector < ((ByteCount / SECTOR_SIZE) + 1); Sector++) {
/*
		 * Send the write enable command to the FLASH so that it can be
* written to, this needs to be sent as a separate transfer
* before the write
*/
XQspiPs_PolledTransfer(QspiPtr, &WriteEnableCmd, NULL,
sizeof(WriteEnableCmd));
/*
* Setup the write command with the specified address and data
* for the FLASH
*/
WriteBuffer[COMMAND_OFFSET] = SEC_ERASE_CMD;
WriteBuffer[ADDRESS_1_OFFSET] = (u8)(Address >> 16);
WriteBuffer[ADDRESS_2_OFFSET] = (u8)(Address >> 8);
WriteBuffer[ADDRESS_3_OFFSET] = (u8)(Address & 0xFF);
/*
* Send the sector erase command and address; no receive buffer
* is specified since there is nothing to receive
*/
XQspiPs_PolledTransfer(QspiPtr, WriteBuffer, NULL,
SEC_ERASE_SIZE);
/*
		 * Wait for the sector erase command to the
* FLASH to be completed
*/
while (1) {
/*
* Poll the status register of the device to determine
* when it completes, by sending a read status command
* and receiving the status byte
*/
XQspiPs_PolledTransfer(QspiPtr, ReadStatusCmd,
FlashStatus,
sizeof(ReadStatusCmd));
/*
* If the status indicates the write is done, then stop
* waiting, if a value of 0xFF in the status byte is
* read from the device and this loop never exits, the
* device slave select is possibly incorrect such that
* the device status is not being read
*/
FlashStatus[1] |= FlashStatus[0];
if ((FlashStatus[1] & 0x01) == 0) {
break;
}
}
Address += SECTOR_SIZE;
}
}
/*****************************************************************************/
/**
*
 * This function reads the ID of the serial FLASH connected to the SPI interface.
*
* @param None.
*
* @return XST_SUCCESS if read id, otherwise XST_FAILURE.
*
* @note None.
*
******************************************************************************/
int FlashReadID(void)
{
int Status;
/* Read ID in Auto mode.*/
WriteBuffer[COMMAND_OFFSET] = READ_ID;
WriteBuffer[ADDRESS_1_OFFSET] = 0x23; /* 3 dummy bytes */
WriteBuffer[ADDRESS_2_OFFSET] = 0x08;
WriteBuffer[ADDRESS_3_OFFSET] = 0x09;
Status = XQspiPs_PolledTransfer(&QspiInstance, WriteBuffer, ReadBuffer,
RD_ID_SIZE);
if (Status != XST_SUCCESS) {
return XST_FAILURE;
}
xil_printf("FlashID=0x%x 0x%x 0x%x\n\r", ReadBuffer[1], ReadBuffer[2],
ReadBuffer[3]);
return XST_SUCCESS;
}
/*****************************************************************************/
/**
*
* This function enables quad mode in the serial flash connected to the
* SPI interface.
*
* @param QspiPtr is a pointer to the QSPI driver component to use.
*
* @return None.
*
* @note None.
*
******************************************************************************/
void FlashQuadEnable(XQspiPs *QspiPtr)
{
u8 WriteEnableCmd = {WRITE_ENABLE_CMD};
u8 ReadStatusCmd[] = {READ_STATUS_CMD, 0};
u8 QuadEnableCmd[] = {WRITE_STATUS_CMD, 0};
u8 FlashStatus[2];
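	/*
	 * The quad-enable write below is only issued for ISSI devices;
	 * 0x9D is the ISSI JEDEC manufacturer ID that FlashReadID() left
	 * in ReadBuffer[1].
	 */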
if (ReadBuffer[1] == 0x9D) {
XQspiPs_PolledTransfer(QspiPtr, ReadStatusCmd,
FlashStatus,
sizeof(ReadStatusCmd));
QuadEnableCmd[1] = FlashStatus[1] | 1 << 6;
XQspiPs_PolledTransfer(QspiPtr, &WriteEnableCmd, NULL,
sizeof(WriteEnableCmd));
XQspiPs_PolledTransfer(QspiPtr, QuadEnableCmd, NULL,
sizeof(QuadEnableCmd));
while (1) {
/*
* Poll the status register of the FLASH to determine when
* Quad Mode is enabled and the device is ready, by sending
* a read status command and receiving the status byte
*/
XQspiPs_PolledTransfer(QspiPtr, ReadStatusCmd, FlashStatus,
sizeof(ReadStatusCmd));
/*
* If 6th bit is set & 0th bit is reset, then Quad is Enabled
* and device is ready.
*/
if ((FlashStatus[0] == 0x40) && (FlashStatus[1] == 0x40)) {
break;
}
}
}
}
|
809929.c | // SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
*
* Copyright (c) 2019, Intel Corporation.
*/
#define pr_fmt(fmt) "MPTCP: " fmt
#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"
/* path manager command handlers */
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr,
bool echo)
{
u8 add_addr = READ_ONCE(msk->pm.addr_signal);
pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);
lockdep_assert_held(&msk->pm.lock);
if (add_addr &
(echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) {
pr_warn("addr_signal error, add_addr=%d, echo=%d", add_addr, echo);
return -EINVAL;
}
if (echo) {
msk->pm.remote = *addr;
add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
} else {
msk->pm.local = *addr;
add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
}
WRITE_ONCE(msk->pm.addr_signal, add_addr);
return 0;
}
int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
u8 rm_addr = READ_ONCE(msk->pm.addr_signal);
pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);
if (rm_addr) {
pr_warn("addr_signal error, rm_addr=%d", rm_addr);
return -EINVAL;
}
msk->pm.rm_list_tx = *rm_list;
rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
WRITE_ONCE(msk->pm.addr_signal, rm_addr);
mptcp_pm_nl_addr_send_ack(msk);
return 0;
}
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);
spin_lock_bh(&msk->pm.lock);
mptcp_pm_nl_rm_subflow_received(msk, rm_list);
spin_unlock_bh(&msk->pm.lock);
return 0;
}
/* path manager event handlers */
void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
{
struct mptcp_pm_data *pm = &msk->pm;
pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);
WRITE_ONCE(pm->server_side, server_side);
mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
}
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
struct mptcp_pm_data *pm = &msk->pm;
unsigned int subflows_max;
int ret = 0;
subflows_max = mptcp_pm_get_subflows_max(msk);
pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
subflows_max, READ_ONCE(pm->accept_subflow));
/* try to avoid acquiring the lock below */
if (!READ_ONCE(pm->accept_subflow))
return false;
spin_lock_bh(&pm->lock);
if (READ_ONCE(pm->accept_subflow)) {
ret = pm->subflows < subflows_max;
if (ret && ++pm->subflows == subflows_max)
WRITE_ONCE(pm->accept_subflow, false);
}
spin_unlock_bh(&pm->lock);
return ret;
}
/* return true if the new status bit is currently cleared, that is, this event
 * can be served, possibly by an already scheduled work
*/
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
enum mptcp_pm_status new_status)
{
pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
BIT(new_status));
if (msk->pm.status & BIT(new_status))
return false;
msk->pm.status |= BIT(new_status);
mptcp_schedule_work((struct sock *)msk);
return true;
}
void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp)
{
struct mptcp_pm_data *pm = &msk->pm;
bool announce = false;
pr_debug("msk=%p", msk);
spin_lock_bh(&pm->lock);
/* mptcp_pm_fully_established() can be invoked by multiple
* racing paths - accept() and check_fully_established()
* be sure to serve this event only once.
*/
if (READ_ONCE(pm->work_pending) &&
!(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);
if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0)
announce = true;
msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
spin_unlock_bh(&pm->lock);
if (announce)
mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, gfp);
}
void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
pr_debug("msk=%p", msk);
}
void mptcp_pm_subflow_established(struct mptcp_sock *msk)
{
struct mptcp_pm_data *pm = &msk->pm;
pr_debug("msk=%p", msk);
if (!READ_ONCE(pm->work_pending))
return;
spin_lock_bh(&pm->lock);
if (READ_ONCE(pm->work_pending))
mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);
spin_unlock_bh(&pm->lock);
}
void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
const struct mptcp_subflow_context *subflow)
{
struct mptcp_pm_data *pm = &msk->pm;
bool update_subflows;
update_subflows = (ssk->sk_state == TCP_CLOSE) &&
(subflow->request_join || subflow->mp_join);
if (!READ_ONCE(pm->work_pending) && !update_subflows)
return;
spin_lock_bh(&pm->lock);
if (update_subflows)
pm->subflows--;
/* Even if this subflow is not really established, tell the PM to try
* to pick the next ones, if possible.
*/
if (mptcp_pm_nl_check_work_pending(msk))
mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);
spin_unlock_bh(&pm->lock);
}
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr)
{
struct mptcp_pm_data *pm = &msk->pm;
pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
READ_ONCE(pm->accept_addr));
mptcp_event_addr_announced(msk, addr);
spin_lock_bh(&pm->lock);
if (!READ_ONCE(pm->accept_addr)) {
mptcp_pm_announce_addr(msk, addr, true);
mptcp_pm_add_addr_send_ack(msk);
} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
pm->remote = *addr;
} else {
__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
}
spin_unlock_bh(&pm->lock);
}
void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr)
{
struct mptcp_pm_data *pm = &msk->pm;
pr_debug("msk=%p", msk);
spin_lock_bh(&pm->lock);
if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending))
mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);
spin_unlock_bh(&pm->lock);
}
void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
if (!mptcp_pm_should_add_signal(msk))
return;
mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
const struct mptcp_rm_list *rm_list)
{
struct mptcp_pm_data *pm = &msk->pm;
u8 i;
pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);
for (i = 0; i < rm_list->nr; i++)
mptcp_event_addr_removed(msk, rm_list->ids[i]);
spin_lock_bh(&pm->lock);
if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
pm->rm_list_rx = *rm_list;
else
__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
spin_unlock_bh(&pm->lock);
}
void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
subflow->backup = bkup;
mptcp_event(MPTCP_EVENT_SUB_PRIORITY, mptcp_sk(subflow->conn), sk, GFP_ATOMIC);
}
void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
{
pr_debug("fail_seq=%llu", fail_seq);
}
/* path manager helpers */
bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
unsigned int opt_size, unsigned int remaining,
struct mptcp_addr_info *addr, bool *echo,
bool *drop_other_suboptions)
{
int ret = false;
u8 add_addr;
u8 family;
bool port;
spin_lock_bh(&msk->pm.lock);
/* double check after the lock is acquired */
if (!mptcp_pm_should_add_signal(msk))
goto out_unlock;
/* always drop every other options for pure ack ADD_ADDR; this is a
* plain dup-ack from TCP perspective. The other MPTCP-relevant info,
* if any, will be carried by the 'original' TCP ack
*/
if (skb && skb_is_tcp_pure_ack(skb)) {
remaining += opt_size;
*drop_other_suboptions = true;
}
*echo = mptcp_pm_should_add_signal_echo(msk);
port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port);
family = *echo ? msk->pm.remote.family : msk->pm.local.family;
if (remaining < mptcp_add_addr_len(family, *echo, port))
goto out_unlock;
if (*echo) {
*addr = msk->pm.remote;
add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO);
} else {
*addr = msk->pm.local;
add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL);
}
WRITE_ONCE(msk->pm.addr_signal, add_addr);
ret = true;
out_unlock:
spin_unlock_bh(&msk->pm.lock);
return ret;
}
bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
struct mptcp_rm_list *rm_list)
{
int ret = false, len;
u8 rm_addr;
spin_lock_bh(&msk->pm.lock);
/* double check after the lock is acquired */
if (!mptcp_pm_should_rm_signal(msk))
goto out_unlock;
rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL);
len = mptcp_rm_addr_len(&msk->pm.rm_list_tx);
if (len < 0) {
WRITE_ONCE(msk->pm.addr_signal, rm_addr);
goto out_unlock;
}
if (remaining < len)
goto out_unlock;
*rm_list = msk->pm.rm_list_tx;
WRITE_ONCE(msk->pm.addr_signal, rm_addr);
ret = true;
out_unlock:
spin_unlock_bh(&msk->pm.lock);
return ret;
}
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
return mptcp_pm_nl_get_local_id(msk, skc);
}
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp);
/* keep track of rtx periods with no progress */
if (!subflow->stale_count) {
subflow->stale_rcv_tstamp = rcv_tstamp;
subflow->stale_count++;
} else if (subflow->stale_rcv_tstamp == rcv_tstamp) {
if (subflow->stale_count < U8_MAX)
subflow->stale_count++;
mptcp_pm_nl_subflow_chk_stale(msk, ssk);
} else {
subflow->stale_count = 0;
mptcp_subflow_set_active(subflow);
}
}
void mptcp_pm_data_reset(struct mptcp_sock *msk)
{
msk->pm.add_addr_signaled = 0;
msk->pm.add_addr_accepted = 0;
msk->pm.local_addr_used = 0;
msk->pm.subflows = 0;
msk->pm.rm_list_tx.nr = 0;
msk->pm.rm_list_rx.nr = 0;
WRITE_ONCE(msk->pm.work_pending, false);
WRITE_ONCE(msk->pm.addr_signal, 0);
WRITE_ONCE(msk->pm.accept_addr, false);
WRITE_ONCE(msk->pm.accept_subflow, false);
WRITE_ONCE(msk->pm.remote_deny_join_id0, false);
msk->pm.status = 0;
bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
mptcp_pm_nl_data_init(msk);
}
void mptcp_pm_data_init(struct mptcp_sock *msk)
{
spin_lock_init(&msk->pm.lock);
INIT_LIST_HEAD(&msk->pm.anno_list);
mptcp_pm_data_reset(msk);
}
void __init mptcp_pm_init(void)
{
mptcp_pm_nl_init();
}
|
773741.c | #pragma ident "$Id: root.c,v 1.4 2007/10/05 19:37:25 dechavez Exp $"
/*======================================================================
*
* Lookup root directory for a particular app
*
*====================================================================*/
#include "isi/db.h"
#include "util.h"
static BOOL FlatFileLookup(DBIO *db, char *app, char *root)
{
IDAFF_GLOB ffglob;
idaffLookupGlob(db->ff, &ffglob);
strlcpy(root, ffglob.root, MAXPATHLEN+1);
return TRUE;
}
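/*
 * NOTE: real MySQL support is not implemented yet (see the revision history
 * below: "faked MySQL support"); this path just reads the isi.cfg flat file,
 * or falls back to built-in defaults, like the flat-file lookup above.
 */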
static BOOL MysqlLookup(DBIO *db, char *app, char *root)
{
IDAFF_GLOB ffglob;
char path[MAXPATHLEN+1];
char *home;
static char *isicfg = "isi.cfg";
static char *default_home = "/usr/nrts";
static char *NRTS_HOME = "NRTS_HOME";
if ((home = getenv(NRTS_HOME)) == NULL) home = default_home;
sprintf(path, "%s%cetc%c%s", home, PATH_DELIMITER, PATH_DELIMITER, isicfg);
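    /* e.g. "/usr/nrts/etc/isi.cfg" when NRTS_HOME is unset and the path
       delimiter is '/' */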
if (utilFileExists(path)) {
idaffReadGlobalInitFile(path, &ffglob);
} else {
idaffDefaultGlob(&ffglob);
}
strlcpy(root, ffglob.root, MAXPATHLEN);
return TRUE;
}
BOOL isidbLookupRoot(DBIO *db, char *app, char *root)
{
LOGIO *lp;
static char *fid = "isidbLookupRoot";
if (db == NULL || app == NULL || root == NULL) {
lp = (db == NULL) ? NULL : db->lp;
logioMsg(lp, LOG_DEBUG, "%s: NULL input(s)", fid);
errno = EINVAL;
return FALSE;
}
if (db->engine != DBIO_MYSQL) {
return FlatFileLookup(db, app, root);
} else {
return MysqlLookup(db, app, root);
}
}
/* Revision History
*
* $Log: root.c,v $
* Revision 1.4 2007/10/05 19:37:25 dechavez
* added support for NRTS_HOME environment variable
*
* Revision 1.3 2007/01/11 21:59:25 dechavez
* use new isidl and/or isidb function prefixes
*
* Revision 1.2 2007/01/08 16:00:51 dechavez
* switch to size-bounded string operations
*
* Revision 1.1 2006/03/13 22:26:47 dechavez
* initial release (faked MySQL support)
*
*/
|
754007.c | /******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <[email protected]>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <[email protected]>
*
****************************************************************************/
#include "../wifi.h"
#include "../pci.h"
#include "../usb.h"
#include "../ps.h"
#include "../cam.h"
#include "../stats.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "rf.h"
#include "dm.h"
#include "mac.h"
#include "trx.h"
#include "../rtl8192c/fw_common.h"
#include <linux/module.h>
/* macro to shorten lines */
#define LINK_Q ui_link_quality
#define RX_EVM rx_evm_percentage
#define RX_SIGQ rx_mimo_sig_qual
void rtl92c_read_chip_version(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
enum version_8192c chip_version = VERSION_UNKNOWN;
const char *versionid;
u32 value32;
value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
if (value32 & TRP_VAUX_EN) {
chip_version = (value32 & TYPE_ID) ? VERSION_TEST_CHIP_92C :
VERSION_TEST_CHIP_88C;
} else {
/* Normal mass production chip. */
chip_version = NORMAL_CHIP;
chip_version |= ((value32 & TYPE_ID) ? CHIP_92C : 0);
chip_version |= ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0);
if (IS_VENDOR_UMC(chip_version))
chip_version |= ((value32 & CHIP_VER_RTL_MASK) ?
CHIP_VENDOR_UMC_B_CUT : 0);
if (IS_92C_SERIAL(chip_version)) {
value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
chip_version |= ((CHIP_BONDING_IDENTIFIER(value32) ==
CHIP_BONDING_92C_1T2R) ? CHIP_92C_1T2R : 0);
}
}
rtlhal->version = (enum version_8192c)chip_version;
pr_info("Chip version 0x%x\n", chip_version);
switch (rtlhal->version) {
case VERSION_NORMAL_TSMC_CHIP_92C_1T2R:
versionid = "NORMAL_B_CHIP_92C";
break;
case VERSION_NORMAL_TSMC_CHIP_92C:
versionid = "NORMAL_TSMC_CHIP_92C";
break;
case VERSION_NORMAL_TSMC_CHIP_88C:
versionid = "NORMAL_TSMC_CHIP_88C";
break;
case VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT:
versionid = "NORMAL_UMC_CHIP_i92C_1T2R_A_CUT";
break;
case VERSION_NORMAL_UMC_CHIP_92C_A_CUT:
versionid = "NORMAL_UMC_CHIP_92C_A_CUT";
break;
case VERSION_NORMAL_UMC_CHIP_88C_A_CUT:
versionid = "NORMAL_UMC_CHIP_88C_A_CUT";
break;
case VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT:
versionid = "NORMAL_UMC_CHIP_92C_1T2R_B_CUT";
break;
case VERSION_NORMAL_UMC_CHIP_92C_B_CUT:
versionid = "NORMAL_UMC_CHIP_92C_B_CUT";
break;
case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
versionid = "NORMAL_UMC_CHIP_88C_B_CUT";
break;
case VERSION_TEST_CHIP_92C:
versionid = "TEST_CHIP_92C";
break;
case VERSION_TEST_CHIP_88C:
versionid = "TEST_CHIP_88C";
break;
default:
versionid = "UNKNOWN";
break;
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Chip Version ID: %s\n", versionid);
if (IS_92C_SERIAL(rtlhal->version))
rtlphy->rf_type =
(IS_92C_1T2R(rtlhal->version)) ? RF_1T2R : RF_2T2R;
else
rtlphy->rf_type = RF_1T1R;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Chip RF Type: %s\n",
rtlphy->rf_type == RF_2T2R ? "RF_2T2R" : "RF_1T1R");
if (get_rf_type(rtlphy) == RF_1T1R)
rtlpriv->dm.rfpath_rxenable[0] = true;
else
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
rtlhal->version);
}
/**
* rtl92c_llt_write - LLT table write access
* @hw: ieee80211 hardware context
* @address: LLT logical address.
* @data: LLT data content
*
* Realtek hardware access function.
*
*/
bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
bool status = true;
long count = 0;
u32 value = _LLT_INIT_ADDR(address) |
_LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
do {
value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
break;
if (count > POLLING_LLT_THRESHOLD) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"Failed to polling write LLT done at address %d! _LLT_OP_VALUE(%x)\n",
address, _LLT_OP_VALUE(value));
status = false;
break;
}
} while (++count);
return status;
}
/**
* rtl92c_init_llt_table - Init LLT table
* @hw: ieee80211 hardware context
* @boundary: first page of the ring buffer; pages below it form the
*            TX packet buffer list
*
* Realtek hardware access function.
*
*/
bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary)
{
bool rst = true;
u32 i;
for (i = 0; i < (boundary - 1); i++) {
rst = rtl92c_llt_write(hw, i, i + 1);
if (true != rst) {
pr_err("===> %s #1 fail\n", __func__);
return rst;
}
}
/* end of list */
rst = rtl92c_llt_write(hw, (boundary - 1), 0xFF);
if (true != rst) {
pr_err("===> %s #2 fail\n", __func__);
return rst;
}
/* Make the other pages as ring buffer
* This ring buffer is used as beacon buffer if we config this MAC
* as two MAC transfer.
* Otherwise used as local loopback buffer.
*/
for (i = boundary; i < LLT_LAST_ENTRY_OF_TX_PKT_BUFFER; i++) {
rst = rtl92c_llt_write(hw, i, (i + 1));
if (true != rst) {
pr_err("===> %s #3 fail\n", __func__);
return rst;
}
}
/* Let last entry point to the start entry of ring buffer */
rst = rtl92c_llt_write(hw, LLT_LAST_ENTRY_OF_TX_PKT_BUFFER, boundary);
if (true != rst) {
pr_err("===> %s #4 fail\n", __func__);
return rst;
}
return rst;
}
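/* For reference (comment added for illustration, not in the original source):
 * with a boundary of B the writes above leave the LLT page chain as
 *
 *   TX packet buffer : 0 -> 1 -> ... -> B-1 -> 0xFF      (0xFF ends the list)
 *   ring buffer      : B -> B+1 -> ... -> LLT_LAST_ENTRY_OF_TX_PKT_BUFFER -> B
 *
 * so pages below the boundary form a terminated list for normal TX, while the
 * remaining pages wrap around for beacon/loopback use as described above.
 */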
void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 *p_macaddr, bool is_group, u8 enc_algo,
bool is_wepkey, bool clear_all)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u8 *macaddr = p_macaddr;
u32 entry_id = 0;
bool is_pairwise = false;
static u8 cam_const_addr[4][6] = {
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
};
static u8 cam_const_broad[] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
if (clear_all) {
u8 idx = 0;
u8 cam_offset = 0;
u8 clear_number = 5;
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
rtl_cam_empty_entry(hw, cam_offset + idx);
if (idx < 5) {
memset(rtlpriv->sec.key_buf[idx], 0,
MAX_KEY_LEN);
rtlpriv->sec.key_len[idx] = 0;
}
}
} else {
switch (enc_algo) {
case WEP40_ENCRYPTION:
enc_algo = CAM_WEP40;
break;
case WEP104_ENCRYPTION:
enc_algo = CAM_WEP104;
break;
case TKIP_ENCRYPTION:
enc_algo = CAM_TKIP;
break;
case AESCCMP_ENCRYPTION:
enc_algo = CAM_AES;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"illegal switch case\n");
enc_algo = CAM_TKIP;
break;
}
if (is_wepkey || rtlpriv->sec.use_defaultkey) {
macaddr = cam_const_addr[key_index];
entry_id = key_index;
} else {
if (is_group) {
macaddr = cam_const_broad;
entry_id = key_index;
} else {
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_MESH_POINT) {
entry_id = rtl_cam_get_free_entry(hw,
p_macaddr);
if (entry_id >= TOTAL_CAM_ENTRY) {
RT_TRACE(rtlpriv, COMP_SEC,
DBG_EMERG,
"Can not find free hw security cam entry\n");
return;
}
} else {
entry_id = CAM_PAIRWISE_KEY_POSITION;
}
key_index = PAIRWISE_KEYIDX;
is_pairwise = true;
}
}
if (rtlpriv->sec.key_len[key_index] == 0) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
"delete one entry\n");
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_MESH_POINT)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
"The insert KEY length is %d\n",
rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
"The insert KEY is %x %x\n",
rtlpriv->sec.key_buf[0][0],
rtlpriv->sec.key_buf[0][1]);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
"add one entry\n");
if (is_pairwise) {
RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
"Pairwise Key content",
rtlpriv->sec.pairwise_key,
rtlpriv->sec.
key_len[PAIRWISE_KEYIDX]);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
"set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.
key_buf[key_index]);
} else {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
"set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
rtlefuse->dev_addr,
PAIRWISE_KEYIDX,
CAM_PAIRWISE_KEY_POSITION,
enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.key_buf
[entry_id]);
}
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.key_buf[entry_id]);
}
}
}
}
u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
return rtl_read_dword(rtlpriv, REG_TXDMA_STATUS);
}
void rtl92c_enable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
if (IS_HARDWARE_TYPE_8192CE(rtlhal)) {
rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] &
0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] &
0xFFFFFFFF);
} else {
rtl_write_dword(rtlpriv, REG_HIMR, rtlusb->irq_mask[0] &
0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE, rtlusb->irq_mask[1] &
0xFFFFFFFF);
}
}
void rtl92c_init_interrupt(struct ieee80211_hw *hw)
{
rtl92c_enable_interrupt(hw);
}
void rtl92c_disable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
}
void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl92c_dm_init_edca_turbo(hw);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, (u8 *)&aci);
}
void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, size);
}
int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
{
u8 value;
struct rtl_priv *rtlpriv = rtl_priv(hw);
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
value = NT_NO_LINK;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
value = NT_LINK_AD_HOC;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
value = NT_LINK_AP;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
value = NT_AS_AP;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"Set Network type to AP!\n");
break;
default:
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"Network type %d not supported!\n", type);
return -EOPNOTSUPP;
}
rtl_write_byte(rtlpriv, MSR, value);
return 0;
}
void rtl92c_init_network_type(struct ieee80211_hw *hw)
{
rtl92c_set_network_type(hw, NL80211_IFTYPE_UNSPECIFIED);
}
void rtl92c_init_adaptive_ctrl(struct ieee80211_hw *hw)
{
u16 value16;
u32 value32;
struct rtl_priv *rtlpriv = rtl_priv(hw);
/* Response Rate Set */
value32 = rtl_read_dword(rtlpriv, REG_RRSR);
value32 &= ~RATE_BITMAP_ALL;
value32 |= RATE_RRSR_CCK_ONLY_1M;
rtl_write_dword(rtlpriv, REG_RRSR, value32);
/* SIFS (used in NAV) */
value16 = _SPEC_SIFS_CCK(0x10) | _SPEC_SIFS_OFDM(0x10);
rtl_write_word(rtlpriv, REG_SPEC_SIFS, value16);
/* Retry Limit */
value16 = _LRL(0x30) | _SRL(0x30);
rtl_write_dword(rtlpriv, REG_RL, value16);
}
void rtl92c_init_rate_fallback(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
/* Set Data Auto Rate Fallback Retry Count register. */
rtl_write_dword(rtlpriv, REG_DARFRC, 0x00000000);
rtl_write_dword(rtlpriv, REG_DARFRC+4, 0x10080404);
rtl_write_dword(rtlpriv, REG_RARFRC, 0x04030201);
rtl_write_dword(rtlpriv, REG_RARFRC+4, 0x08070605);
}
static void rtl92c_set_cck_sifs(struct ieee80211_hw *hw, u8 trx_sifs,
u8 ctx_sifs)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl_write_byte(rtlpriv, REG_SIFS_CCK, trx_sifs);
rtl_write_byte(rtlpriv, (REG_SIFS_CCK + 1), ctx_sifs);
}
static void rtl92c_set_ofdm_sifs(struct ieee80211_hw *hw, u8 trx_sifs,
u8 ctx_sifs)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl_write_byte(rtlpriv, REG_SIFS_OFDM, trx_sifs);
rtl_write_byte(rtlpriv, (REG_SIFS_OFDM + 1), ctx_sifs);
}
void rtl92c_init_edca_param(struct ieee80211_hw *hw,
u16 queue, u16 txop, u8 cw_min, u8 cw_max, u8 aifs)
{
/* sequence: VO, VI, BE, BK ==> the same as 92C hardware design.
* reference: enum nl80211_txq_q or the ieee80211_set_wmm_default() function.
*/
u32 value;
struct rtl_priv *rtlpriv = rtl_priv(hw);
value = (u32)aifs;
value |= ((u32)cw_min & 0xF) << 8;
value |= ((u32)cw_max & 0xF) << 12;
value |= (u32)txop << 16;
/* 92C hardware register sequence is the same as queue number. */
rtl_write_dword(rtlpriv, (REG_EDCA_VO_PARAM + (queue * 4)), value);
}
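/* Worked example (illustration only): the VO default 0x002FA226 programmed by
 * rtl92c_init_edca() below decomposes into the fields packed above:
 *   aifs   = 0x26   -> bits  7:0
 *   cw_min = 0x2    -> bits 11:8
 *   cw_max = 0xA    -> bits 15:12
 *   txop   = 0x002F -> bits 31:16
 * i.e. 0x26 | (0x2 << 8) | (0xA << 12) | (0x2F << 16) == 0x002FA226.
 */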
void rtl92c_init_edca(struct ieee80211_hw *hw)
{
u16 value16;
struct rtl_priv *rtlpriv = rtl_priv(hw);
/* disable EDCCA count down, to reduce collision and retry */
value16 = rtl_read_word(rtlpriv, REG_RD_CTRL);
value16 |= DIS_EDCA_CNT_DWN;
rtl_write_word(rtlpriv, REG_RD_CTRL, value16);
/* Update SIFS timing (reference value: pHalData->SifsTime = 0x0e0e0a0a). */
rtl92c_set_cck_sifs(hw, 0xa, 0xa);
rtl92c_set_ofdm_sifs(hw, 0xe, 0xe);
/* Set CCK/OFDM SIFS to be 10us. */
rtl_write_word(rtlpriv, REG_SIFS_CCK, 0x0a0a);
rtl_write_word(rtlpriv, REG_SIFS_OFDM, 0x1010);
rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0204);
rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x014004);
/* TXOP */
rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, 0x005EA42B);
rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0x0000A44F);
rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x005EA324);
rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x002FA226);
/* PIFS */
rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
/* AGGR BREAK TIME Register */
rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040);
rtl_write_byte(rtlpriv, REG_BCNDMATIM, 0x02);
rtl_write_byte(rtlpriv, REG_ATIMWND, 0x02);
}
void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x99997631);
rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
/* init AMPDU aggregation number, tuned for Tx throughput */
rtl_write_word(rtlpriv, 0x4CA, 0x0708);
}
void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
}
void rtl92c_init_rdg_setting(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl_write_byte(rtlpriv, REG_RD_CTRL, 0xFF);
rtl_write_word(rtlpriv, REG_RD_NAV_NXT, 0x200);
rtl_write_byte(rtlpriv, REG_RD_RESP_PKT_TH, 0x05);
}
void rtl92c_init_retry_function(struct ieee80211_hw *hw)
{
u8 value8;
struct rtl_priv *rtlpriv = rtl_priv(hw);
value8 = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL);
value8 |= EN_AMPDU_RTY_NEW;
rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL, value8);
/* Set ACK timeout */
rtl_write_byte(rtlpriv, REG_ACKTO, 0x40);
}
void rtl92c_disable_fast_edca(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl_write_word(rtlpriv, REG_FAST_EDCA_CTRL, 0);
}
void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 value = is2T ? MAX_MSS_DENSITY_2T : MAX_MSS_DENSITY_1T;
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, value);
}
/*==============================================================*/
static u8 _rtl92c_query_rxpwrpercentage(s8 antpower)
{
if ((antpower <= -100) || (antpower >= 20))
return 0;
else if (antpower >= 0)
return 100;
else
return 100 + antpower;
}
static u8 _rtl92c_evm_db_to_percentage(s8 value)
{
s8 ret_val;
ret_val = value;
if (ret_val >= 0)
ret_val = 0;
if (ret_val <= -33)
ret_val = -33;
ret_val = 0 - ret_val;
ret_val *= 3;
if (ret_val == 99)
ret_val = 100;
return ret_val;
}
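/* Example values (illustration only): an EVM report of -20 dB maps to
 * 20 * 3 = 60 %, a report of -33 dB or worse saturates at 99 -> 100 %,
 * and 0 dB or better clamps to 0 %.
 */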
static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
long currsig)
{
long retsig;
if (currsig >= 61 && currsig <= 100)
retsig = 90 + ((currsig - 60) / 4);
else if (currsig >= 41 && currsig <= 60)
retsig = 78 + ((currsig - 40) / 2);
else if (currsig >= 31 && currsig <= 40)
retsig = 66 + (currsig - 30);
else if (currsig >= 21 && currsig <= 30)
retsig = 54 + (currsig - 20);
else if (currsig >= 5 && currsig <= 20)
retsig = 42 + (((currsig - 5) * 2) / 3);
else if (currsig == 4)
retsig = 36;
else if (currsig == 3)
retsig = 27;
else if (currsig == 2)
retsig = 18;
else if (currsig == 1)
retsig = 9;
else
retsig = currsig;
return retsig;
}
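/* Example values (illustration only): a raw percentage of 80 falls in the
 * 61..100 band and maps to 90 + (80 - 60) / 4 = 95, while 50 falls in the
 * 41..60 band and maps to 78 + (50 - 40) / 2 = 83.
 */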
static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstats,
struct rx_desc_92c *p_desc,
struct rx_fwinfo_92c *p_drvinfo,
bool packet_match_bssid,
bool packet_toself,
bool packet_beacon)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct phy_sts_cck_8192s_t *cck_buf;
s8 rx_pwr_all = 0, rx_pwr[4];
u8 rf_rx_num = 0, evm, pwdb_all;
u8 i, max_spatial_stream;
u32 rssi, total_rssi = 0;
bool in_powersavemode = false;
bool is_cck_rate;
u8 *pdesc = (u8 *)p_desc;
is_cck_rate = RX_HAL_IS_CCK_RATE(p_desc->rxmcs);
pstats->packet_matchbssid = packet_match_bssid;
pstats->packet_toself = packet_toself;
pstats->packet_beacon = packet_beacon;
pstats->is_cck = is_cck_rate;
pstats->RX_SIGQ[0] = -1;
pstats->RX_SIGQ[1] = -1;
if (is_cck_rate) {
u8 report, cck_highpwr;
cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
if (!in_powersavemode)
cck_highpwr = rtlphy->cck_high_power;
else
cck_highpwr = false;
if (!cck_highpwr) {
u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
report = cck_buf->cck_agc_rpt & 0xc0;
report = report >> 6;
switch (report) {
case 0x3:
rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
break;
case 0x2:
rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
break;
case 0x1:
rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
break;
case 0x0:
rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
break;
}
} else {
u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
report = p_drvinfo->cfosho[0] & 0x60;
report = report >> 5;
switch (report) {
case 0x3:
rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
break;
case 0x2:
rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
break;
case 0x1:
rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
break;
case 0x0:
rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
break;
}
}
pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
pstats->rx_pwdb_all = pwdb_all;
pstats->recvsignalpower = rx_pwr_all;
if (packet_match_bssid) {
u8 sq;
if (pstats->rx_pwdb_all > 40)
sq = 100;
else {
sq = cck_buf->sq_rpt;
if (sq > 64)
sq = 0;
else if (sq < 20)
sq = 100;
else
sq = ((64 - sq) * 100) / 44;
}
pstats->signalquality = sq;
pstats->RX_SIGQ[0] = sq;
pstats->RX_SIGQ[1] = -1;
}
} else {
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
if (rtlpriv->dm.rfpath_rxenable[i])
rf_rx_num++;
rx_pwr[i] =
((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]);
total_rssi += rssi;
rtlpriv->stats.rx_snr_db[i] =
(long)(p_drvinfo->rxsnr[i] / 2);
if (packet_match_bssid)
pstats->rx_mimo_signalstrength[i] = (u8) rssi;
}
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
pstats->rx_pwdb_all = pwdb_all;
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
if (GET_RX_DESC_RX_MCS(pdesc) &&
GET_RX_DESC_RX_MCS(pdesc) >= DESC_RATEMCS8 &&
GET_RX_DESC_RX_MCS(pdesc) <= DESC_RATEMCS15)
max_spatial_stream = 2;
else
max_spatial_stream = 1;
for (i = 0; i < max_spatial_stream; i++) {
evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]);
if (packet_match_bssid) {
if (i == 0)
pstats->signalquality =
(u8) (evm & 0xff);
pstats->RX_SIGQ[i] =
(u8) (evm & 0xff);
}
}
}
if (is_cck_rate)
pstats->signalstrength =
(u8) (_rtl92c_signal_scale_mapping(hw, pwdb_all));
else if (rf_rx_num != 0)
pstats->signalstrength =
(u8) (_rtl92c_signal_scale_mapping
(hw, total_rssi /= rf_rx_num));
}
void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct rtl_stats *pstats,
struct rx_desc_92c *pdesc,
struct rx_fwinfo_92c *p_drvinfo)
{
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct ieee80211_hdr *hdr;
u8 *tmp_buf;
u8 *praddr;
__le16 fc;
u16 type, cpu_fc;
bool packet_matchbssid, packet_toself, packet_beacon = false;
tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
hdr = (struct ieee80211_hdr *)tmp_buf;
fc = hdr->frame_control;
cpu_fc = le16_to_cpu(fc);
type = WLAN_FC_GET_TYPE(fc);
praddr = hdr->addr1;
packet_matchbssid =
((IEEE80211_FTYPE_CTL != type) &&
ether_addr_equal(mac->bssid,
(cpu_fc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
(cpu_fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
hdr->addr3) &&
(!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
packet_toself = packet_matchbssid &&
ether_addr_equal(praddr, rtlefuse->dev_addr);
if (ieee80211_is_beacon(fc))
packet_beacon = true;
_rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
packet_matchbssid, packet_toself,
packet_beacon);
rtl_process_phyinfo(hw, tmp_buf, pstats);
}
|
342963.c | // Monocypher version 3.0.0
//
// This file is dual-licensed. Choose whichever licence you want from
// the two licences listed below.
//
// The first licence is a regular 2-clause BSD licence. The second licence
// is the CC-0 from Creative Commons. It is intended to release Monocypher
// to the public domain. The BSD licence serves as a fallback option.
//
// SPDX-License-Identifier: BSD-2-Clause OR CC0-1.0
//
// ------------------------------------------------------------------------
//
// Copyright (c) 2017-2020, Loup Vaillant
// All rights reserved.
//
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ------------------------------------------------------------------------
//
// Written in 2017-2020 by Loup Vaillant
//
// To the extent possible under law, the author(s) have dedicated all copyright
// and related neighboring rights to this software to the public domain
// worldwide. This software is distributed without any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication along
// with this software. If not, see
// <https://creativecommons.org/publicdomain/zero/1.0/>
#include "monocypher.h"
/////////////////
/// Utilities ///
/////////////////
#define FOR_T(type, i, start, end) for (type i = (start); i < (end); i++)
#define FOR(i, start, end) FOR_T(size_t, i, start, end)
#define WIPE_CTX(ctx) crypto_wipe(ctx , sizeof(*(ctx)))
#define WIPE_BUFFER(buffer) crypto_wipe(buffer, sizeof(buffer))
#define MIN(a, b) ((a) <= (b) ? (a) : (b))
#define MAX(a, b) ((a) >= (b) ? (a) : (b))
#define ALIGN(x, block_size) ((~(x) + 1) & ((block_size) - 1))
typedef int8_t i8;
typedef uint8_t u8;
typedef int16_t i16;
typedef uint32_t u32;
typedef int32_t i32;
typedef int64_t i64;
typedef uint64_t u64;
static const u8 zero[128] = {0};
static u32 load24_le(const u8 s[3])
{
return (u32)s[0]
| ((u32)s[1] << 8)
| ((u32)s[2] << 16);
}
static u32 load32_le(const u8 s[4])
{
return (u32)s[0]
| ((u32)s[1] << 8)
| ((u32)s[2] << 16)
| ((u32)s[3] << 24);
}
static u64 load64_le(const u8 s[8])
{
return load32_le(s) | ((u64)load32_le(s+4) << 32);
}
static void store32_le(u8 out[4], u32 in)
{
out[0] = in & 0xff;
out[1] = (in >> 8) & 0xff;
out[2] = (in >> 16) & 0xff;
out[3] = (in >> 24) & 0xff;
}
static void store64_le(u8 out[8], u64 in)
{
store32_le(out , (u32)in );
store32_le(out + 4, in >> 32);
}
static u64 rotr64(u64 x, u64 n) { return (x >> n) ^ (x << (64 - n)); }
static u32 rotl32(u32 x, u32 n) { return (x << n) ^ (x >> (32 - n)); }
static int neq0(u64 diff)
{ // constant time comparison to zero
// return diff != 0 ? -1 : 0
u64 half = (diff >> 32) | ((u32)diff);
return (1 & ((half - 1) >> 32)) - 1;
}
static u64 x16(const u8 a[16], const u8 b[16])
{
return (load64_le(a + 0) ^ load64_le(b + 0))
| (load64_le(a + 8) ^ load64_le(b + 8));
}
static u64 x32(const u8 a[32],const u8 b[32]){return x16(a,b)| x16(a+16, b+16);}
static u64 x64(const u8 a[64],const u8 b[64]){return x32(a,b)| x32(a+32, b+32);}
int crypto_verify16(const u8 a[16], const u8 b[16]){ return neq0(x16(a, b)); }
int crypto_verify32(const u8 a[32], const u8 b[32]){ return neq0(x32(a, b)); }
int crypto_verify64(const u8 a[64], const u8 b[64]){ return neq0(x64(a, b)); }
static int zerocmp32(const u8 p[32])
{
return crypto_verify32(p, zero);
}
void crypto_wipe(void *secret, size_t size)
{
volatile u8 *v_secret = (u8*)secret;
FOR (i, 0, size) {
v_secret[i] = 0;
}
}
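// Usage sketch (not part of the upstream file, guarded out by default):
// comparing a 16-byte MAC in constant time, then wiping the local copy.
// The buffer names are placeholders.
#ifdef MONOCYPHER_VERIFY_EXAMPLE
static int check_and_discard(u8 expected[16], const u8 received[16])
{
    int ok = crypto_verify16(expected, received) == 0; // 0 means "equal"
    crypto_wipe(expected, 16);                         // erase the local copy
    return ok;
}
#endif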
/////////////////
/// Chacha 20 ///
/////////////////
#define QUARTERROUND(a, b, c, d) \
a += b; d = rotl32(d ^ a, 16); \
c += d; b = rotl32(b ^ c, 12); \
a += b; d = rotl32(d ^ a, 8); \
c += d; b = rotl32(b ^ c, 7)
static void chacha20_rounds(u32 out[16], const u32 in[16])
{
// The temporary variables make Chacha20 10% faster.
u32 t0 = in[ 0]; u32 t1 = in[ 1]; u32 t2 = in[ 2]; u32 t3 = in[ 3];
u32 t4 = in[ 4]; u32 t5 = in[ 5]; u32 t6 = in[ 6]; u32 t7 = in[ 7];
u32 t8 = in[ 8]; u32 t9 = in[ 9]; u32 t10 = in[10]; u32 t11 = in[11];
u32 t12 = in[12]; u32 t13 = in[13]; u32 t14 = in[14]; u32 t15 = in[15];
FOR (i, 0, 10) { // 20 rounds, 2 rounds per loop.
QUARTERROUND(t0, t4, t8 , t12); // column 0
QUARTERROUND(t1, t5, t9 , t13); // column 1
QUARTERROUND(t2, t6, t10, t14); // column 2
QUARTERROUND(t3, t7, t11, t15); // column 3
QUARTERROUND(t0, t5, t10, t15); // diagonal 0
QUARTERROUND(t1, t6, t11, t12); // diagonal 1
QUARTERROUND(t2, t7, t8 , t13); // diagonal 2
QUARTERROUND(t3, t4, t9 , t14); // diagonal 3
}
out[ 0] = t0; out[ 1] = t1; out[ 2] = t2; out[ 3] = t3;
out[ 4] = t4; out[ 5] = t5; out[ 6] = t6; out[ 7] = t7;
out[ 8] = t8; out[ 9] = t9; out[10] = t10; out[11] = t11;
out[12] = t12; out[13] = t13; out[14] = t14; out[15] = t15;
}
static void chacha20_init_key(u32 block[16], const u8 key[32])
{
// constant
block[0] = load32_le((const u8*)"expa");
block[1] = load32_le((const u8*)"nd 3");
block[2] = load32_le((const u8*)"2-by");
block[3] = load32_le((const u8*)"te k");
// key
FOR (i, 0, 8) {
block[i+4] = load32_le(key + i*4);
}
}
static u64 chacha20_core(u32 input[16], u8 *cipher_text, const u8 *plain_text,
size_t text_size)
{
// Whole blocks
u32 pool[16];
size_t nb_blocks = text_size >> 6;
FOR (i, 0, nb_blocks) {
chacha20_rounds(pool, input);
if (plain_text != 0) {
FOR (j, 0, 16) {
u32 p = pool[j] + input[j];
store32_le(cipher_text, p ^ load32_le(plain_text));
cipher_text += 4;
plain_text += 4;
}
} else {
FOR (j, 0, 16) {
u32 p = pool[j] + input[j];
store32_le(cipher_text, p);
cipher_text += 4;
}
}
input[12]++;
if (input[12] == 0) {
input[13]++;
}
}
text_size &= 63;
// Last (incomplete) block
if (text_size > 0) {
if (plain_text == 0) {
plain_text = zero;
}
chacha20_rounds(pool, input);
u8 tmp[64];
FOR (i, 0, 16) {
store32_le(tmp + i*4, pool[i] + input[i]);
}
FOR (i, 0, text_size) {
cipher_text[i] = tmp[i] ^ plain_text[i];
}
WIPE_BUFFER(tmp);
}
WIPE_BUFFER(pool);
return input[12] + ((u64)input[13] << 32) + (text_size > 0);
}
void crypto_hchacha20(u8 out[32], const u8 key[32], const u8 in [16])
{
u32 block[16];
chacha20_init_key(block, key);
// input
FOR (i, 0, 4) {
block[i+12] = load32_le(in + i*4);
}
chacha20_rounds(block, block);
// prevents reversal of the rounds by revealing only half of the buffer.
FOR (i, 0, 4) {
store32_le(out + i*4, block[i ]); // constant
store32_le(out + 16 + i*4, block[i + 12]); // counter and nonce
}
WIPE_BUFFER(block);
}
u64 crypto_chacha20_ctr(u8 *cipher_text, const u8 *plain_text,
size_t text_size, const u8 key[32], const u8 nonce[8],
u64 ctr)
{
u32 input[16];
chacha20_init_key(input, key);
input[12] = (u32) ctr;
input[13] = (u32)(ctr >> 32);
input[14] = load32_le(nonce);
input[15] = load32_le(nonce + 4);
ctr = chacha20_core(input, cipher_text, plain_text, text_size);
WIPE_BUFFER(input);
return ctr;
}
u32 crypto_ietf_chacha20_ctr(u8 *cipher_text, const u8 *plain_text,
size_t text_size,
const u8 key[32], const u8 nonce[12], u32 ctr)
{
u32 input[16];
chacha20_init_key(input, key);
input[12] = (u32) ctr;
input[13] = load32_le(nonce);
input[14] = load32_le(nonce + 4);
input[15] = load32_le(nonce + 8);
ctr = (u32)chacha20_core(input, cipher_text, plain_text, text_size);
WIPE_BUFFER(input);
return ctr;
}
u64 crypto_xchacha20_ctr(u8 *cipher_text, const u8 *plain_text,
size_t text_size,
const u8 key[32], const u8 nonce[24], u64 ctr)
{
u8 sub_key[32];
crypto_hchacha20(sub_key, key, nonce);
ctr = crypto_chacha20_ctr(cipher_text, plain_text, text_size,
sub_key, nonce+16, ctr);
WIPE_BUFFER(sub_key);
return ctr;
}
void crypto_chacha20(u8 *cipher_text, const u8 *plain_text, size_t text_size,
const u8 key[32], const u8 nonce[8])
{
crypto_chacha20_ctr(cipher_text, plain_text, text_size, key, nonce, 0);
}
void crypto_ietf_chacha20(u8 *cipher_text, const u8 *plain_text,
size_t text_size,
const u8 key[32], const u8 nonce[12])
{
crypto_ietf_chacha20_ctr(cipher_text, plain_text, text_size, key, nonce, 0);
}
void crypto_xchacha20(u8 *cipher_text, const u8 *plain_text, size_t text_size,
const u8 key[32], const u8 nonce[24])
{
crypto_xchacha20_ctr(cipher_text, plain_text, text_size, key, nonce, 0);
}
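// Usage sketch (not part of the upstream file, guarded out by default):
// XChaCha20 is a stream cipher, so applying it twice with the same key,
// nonce and counter recovers the plaintext. The key and nonce below are
// placeholders; real callers need a secret random key and a nonce that is
// never reused with that key.
#ifdef MONOCYPHER_XCHACHA20_EXAMPLE
static void xchacha20_round_trip(void)
{
    u8 key  [32] = {1};                 // placeholder key
    u8 nonce[24] = {2};                 // placeholder 24-byte XChaCha20 nonce
    u8 text [12] = "hello world";
    crypto_xchacha20(text, text, sizeof(text), key, nonce); // encrypt in place
    crypto_xchacha20(text, text, sizeof(text), key, nonce); // decrypt in place
}
#endif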
/////////////////
/// Poly 1305 ///
/////////////////
// h = (h + c) * r
// preconditions:
// ctx->h <= 4_ffffffff_ffffffff_ffffffff_ffffffff
// ctx->c <= 1_ffffffff_ffffffff_ffffffff_ffffffff
// ctx->r <= 0ffffffc_0ffffffc_0ffffffc_0fffffff
// Postcondition:
// ctx->h <= 4_ffffffff_ffffffff_ffffffff_ffffffff
static void poly_block(crypto_poly1305_ctx *ctx)
{
// s = h + c, without carry propagation
const u64 s0 = ctx->h[0] + (u64)ctx->c[0]; // s0 <= 1_fffffffe
const u64 s1 = ctx->h[1] + (u64)ctx->c[1]; // s1 <= 1_fffffffe
const u64 s2 = ctx->h[2] + (u64)ctx->c[2]; // s2 <= 1_fffffffe
const u64 s3 = ctx->h[3] + (u64)ctx->c[3]; // s3 <= 1_fffffffe
const u32 s4 = ctx->h[4] + ctx->c[4]; // s4 <= 5
// Local all the things!
const u32 r0 = ctx->r[0]; // r0 <= 0fffffff
const u32 r1 = ctx->r[1]; // r1 <= 0ffffffc
const u32 r2 = ctx->r[2]; // r2 <= 0ffffffc
const u32 r3 = ctx->r[3]; // r3 <= 0ffffffc
const u32 rr0 = (r0 >> 2) * 5; // rr0 <= 13fffffb // lose 2 bits...
const u32 rr1 = (r1 >> 2) + r1; // rr1 <= 13fffffb // rr1 == (r1 >> 2) * 5
const u32 rr2 = (r2 >> 2) + r2; // rr2 <= 13fffffb // rr2 == (r2 >> 2) * 5
const u32 rr3 = (r3 >> 2) + r3; // rr3 <= 13fffffb // rr3 == (r3 >> 2) * 5
// (h + c) * r, without carry propagation
const u64 x0 = s0*r0+ s1*rr3+ s2*rr2+ s3*rr1+ s4*rr0; // <= 97ffffe007fffff8
const u64 x1 = s0*r1+ s1*r0 + s2*rr3+ s3*rr2+ s4*rr1; // <= 8fffffe20ffffff6
const u64 x2 = s0*r2+ s1*r1 + s2*r0 + s3*rr3+ s4*rr2; // <= 87ffffe417fffff4
const u64 x3 = s0*r3+ s1*r2 + s2*r1 + s3*r0 + s4*rr3; // <= 7fffffe61ffffff2
const u32 x4 = s4 * (r0 & 3); // ...recover 2 bits // <= f
// partial reduction modulo 2^130 - 5
const u32 u5 = x4 + (x3 >> 32); // u5 <= 7ffffff5
const u64 u0 = (u5 >> 2) * 5 + (x0 & 0xffffffff);
const u64 u1 = (u0 >> 32) + (x1 & 0xffffffff) + (x0 >> 32);
const u64 u2 = (u1 >> 32) + (x2 & 0xffffffff) + (x1 >> 32);
const u64 u3 = (u2 >> 32) + (x3 & 0xffffffff) + (x2 >> 32);
const u64 u4 = (u3 >> 32) + (u5 & 3);
// Update the hash
ctx->h[0] = (u32)u0; // u0 <= 1_9ffffff0
ctx->h[1] = (u32)u1; // u1 <= 1_97ffffe0
ctx->h[2] = (u32)u2; // u2 <= 1_8fffffe2
ctx->h[3] = (u32)u3; // u3 <= 1_87ffffe4
ctx->h[4] = (u32)u4; // u4 <= 4
}
// (re-)initialises the input counter and input buffer
static void poly_clear_c(crypto_poly1305_ctx *ctx)
{
FOR (i, 0, 4) {
ctx->c[i] = 0;
}
ctx->c_idx = 0;
}
static void poly_take_input(crypto_poly1305_ctx *ctx, u8 input)
{
size_t word = ctx->c_idx >> 2;
size_t byte = ctx->c_idx & 3;
ctx->c[word] |= (u32)input << (byte * 8);
ctx->c_idx++;
}
static void poly_update(crypto_poly1305_ctx *ctx,
const u8 *message, size_t message_size)
{
FOR (i, 0, message_size) {
poly_take_input(ctx, message[i]);
if (ctx->c_idx == 16) {
poly_block(ctx);
poly_clear_c(ctx);
}
}
}
void crypto_poly1305_init(crypto_poly1305_ctx *ctx, const u8 key[32])
{
// Initial hash is zero
FOR (i, 0, 5) {
ctx->h[i] = 0;
}
// add 2^130 to every input block
ctx->c[4] = 1;
poly_clear_c(ctx);
// load r and pad (r has some of its bits cleared)
FOR (i, 0, 1) { ctx->r [0] = load32_le(key ) & 0x0fffffff; }
FOR (i, 1, 4) { ctx->r [i] = load32_le(key + i*4 ) & 0x0ffffffc; }
FOR (i, 0, 4) { ctx->pad[i] = load32_le(key + i*4 + 16); }
}
void crypto_poly1305_update(crypto_poly1305_ctx *ctx,
const u8 *message, size_t message_size)
{
// Align ourselves with block boundaries
size_t align = MIN(ALIGN(ctx->c_idx, 16), message_size);
poly_update(ctx, message, align);
message += align;
message_size -= align;
// Process the message block by block
size_t nb_blocks = message_size >> 4;
FOR (i, 0, nb_blocks) {
FOR (j, 0, 4) {
ctx->c[j] = load32_le(message + j*4);
}
poly_block(ctx);
message += 16;
}
if (nb_blocks > 0) {
poly_clear_c(ctx);
}
message_size &= 15;
// remaining bytes
poly_update(ctx, message, message_size);
}
void crypto_poly1305_final(crypto_poly1305_ctx *ctx, u8 mac[16])
{
// Process the last block (if any)
if (ctx->c_idx != 0) {
// move the final 1 according to remaining input length
// (We may add less than 2^130 to the last input block)
ctx->c[4] = 0;
poly_take_input(ctx, 1);
// one last hash update
poly_block(ctx);
}
// check if we should subtract 2^130-5 by performing the
// corresponding carry propagation.
const u64 u0 = (u64)5 + ctx->h[0]; // <= 1_00000004
const u64 u1 = (u0 >> 32) + ctx->h[1]; // <= 1_00000000
const u64 u2 = (u1 >> 32) + ctx->h[2]; // <= 1_00000000
const u64 u3 = (u2 >> 32) + ctx->h[3]; // <= 1_00000000
const u64 u4 = (u3 >> 32) + ctx->h[4]; // <= 5
// u4 indicates how many times we should subtract 2^130-5 (0 or 1)
// h + pad, minus 2^130-5 if u4 exceeds 3
const u64 uu0 = (u4 >> 2) * 5 + ctx->h[0] + ctx->pad[0]; // <= 2_00000003
const u64 uu1 = (uu0 >> 32) + ctx->h[1] + ctx->pad[1]; // <= 2_00000000
const u64 uu2 = (uu1 >> 32) + ctx->h[2] + ctx->pad[2]; // <= 2_00000000
const u64 uu3 = (uu2 >> 32) + ctx->h[3] + ctx->pad[3]; // <= 2_00000000
store32_le(mac , (u32)uu0);
store32_le(mac + 4, (u32)uu1);
store32_le(mac + 8, (u32)uu2);
store32_le(mac + 12, (u32)uu3);
WIPE_CTX(ctx);
}
void crypto_poly1305(u8 mac[16], const u8 *message,
size_t message_size, const u8 key[32])
{
crypto_poly1305_ctx ctx;
crypto_poly1305_init (&ctx, key);
crypto_poly1305_update(&ctx, message, message_size);
crypto_poly1305_final (&ctx, mac);
}
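// Usage sketch (not part of the upstream file, guarded out by default):
// computing a one-time authenticator. A Poly1305 key must never be reused
// across messages; in practice it is derived from the cipher (for instance
// from a fresh Chacha20 block). The key bytes here are placeholders.
#ifdef MONOCYPHER_POLY1305_EXAMPLE
static void poly1305_example(u8 mac[16], const u8 *msg, size_t msg_size)
{
    u8 one_time_key[32] = {0x42};             // placeholder, must be unique
    crypto_poly1305(mac, msg, msg_size, one_time_key);
    crypto_wipe(one_time_key, 32);            // discard the one-time key
}
#endif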
////////////////
/// Blake2 b ///
////////////////
static const u64 iv[8] = {
0x6a09e667f3bcc908, 0xbb67ae8584caa73b,
0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
0x510e527fade682d1, 0x9b05688c2b3e6c1f,
0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
};
// increment the input offset
static void blake2b_incr(crypto_blake2b_ctx *ctx)
{
u64 *x = ctx->input_offset;
size_t y = ctx->input_idx;
x[0] += y;
if (x[0] < y) {
x[1]++;
}
}
static void blake2b_compress(crypto_blake2b_ctx *ctx, int is_last_block)
{
static const u8 sigma[12][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
};
// init work vector
u64 v0 = ctx->hash[0]; u64 v8 = iv[0];
u64 v1 = ctx->hash[1]; u64 v9 = iv[1];
u64 v2 = ctx->hash[2]; u64 v10 = iv[2];
u64 v3 = ctx->hash[3]; u64 v11 = iv[3];
u64 v4 = ctx->hash[4]; u64 v12 = iv[4] ^ ctx->input_offset[0];
u64 v5 = ctx->hash[5]; u64 v13 = iv[5] ^ ctx->input_offset[1];
u64 v6 = ctx->hash[6]; u64 v14 = iv[6] ^ (u64)~(is_last_block - 1);
u64 v7 = ctx->hash[7]; u64 v15 = iv[7];
// mangle work vector
u64 *input = ctx->input;
#define BLAKE2_G(v, a, b, c, d, x, y) \
v##a += v##b + x; v##d = rotr64(v##d ^ v##a, 32); \
v##c += v##d; v##b = rotr64(v##b ^ v##c, 24); \
v##a += v##b + y; v##d = rotr64(v##d ^ v##a, 16); \
v##c += v##d; v##b = rotr64(v##b ^ v##c, 63)
#define BLAKE2_ROUND(i) \
BLAKE2_G(v, 0, 4, 8, 12, input[sigma[i][ 0]], input[sigma[i][ 1]]);\
BLAKE2_G(v, 1, 5, 9, 13, input[sigma[i][ 2]], input[sigma[i][ 3]]);\
BLAKE2_G(v, 2, 6, 10, 14, input[sigma[i][ 4]], input[sigma[i][ 5]]);\
BLAKE2_G(v, 3, 7, 11, 15, input[sigma[i][ 6]], input[sigma[i][ 7]]);\
BLAKE2_G(v, 0, 5, 10, 15, input[sigma[i][ 8]], input[sigma[i][ 9]]);\
BLAKE2_G(v, 1, 6, 11, 12, input[sigma[i][10]], input[sigma[i][11]]);\
BLAKE2_G(v, 2, 7, 8, 13, input[sigma[i][12]], input[sigma[i][13]]);\
BLAKE2_G(v, 3, 4, 9, 14, input[sigma[i][14]], input[sigma[i][15]])
#ifdef BLAKE2_NO_UNROLLING
FOR (i, 0, 12) {
BLAKE2_ROUND(i);
}
#else
BLAKE2_ROUND(0); BLAKE2_ROUND(1); BLAKE2_ROUND(2); BLAKE2_ROUND(3);
BLAKE2_ROUND(4); BLAKE2_ROUND(5); BLAKE2_ROUND(6); BLAKE2_ROUND(7);
BLAKE2_ROUND(8); BLAKE2_ROUND(9); BLAKE2_ROUND(0); BLAKE2_ROUND(1);
#endif
// update hash
ctx->hash[0] ^= v0 ^ v8;
ctx->hash[1] ^= v1 ^ v9;
ctx->hash[2] ^= v2 ^ v10;
ctx->hash[3] ^= v3 ^ v11;
ctx->hash[4] ^= v4 ^ v12;
ctx->hash[5] ^= v5 ^ v13;
ctx->hash[6] ^= v6 ^ v14;
ctx->hash[7] ^= v7 ^ v15;
}
static void blake2b_set_input(crypto_blake2b_ctx *ctx, u8 input, size_t index)
{
if (index == 0) {
FOR (i, 0, 16) {
ctx->input[i] = 0;
}
}
size_t word = index >> 3;
size_t byte = index & 7;
ctx->input[word] |= (u64)input << (byte << 3);
}
static void blake2b_end_block(crypto_blake2b_ctx *ctx)
{
if (ctx->input_idx == 128) { // If buffer is full,
blake2b_incr(ctx); // update the input offset
blake2b_compress(ctx, 0); // and compress the (not last) block
ctx->input_idx = 0;
}
}
static void blake2b_update(crypto_blake2b_ctx *ctx,
const u8 *message, size_t message_size)
{
FOR (i, 0, message_size) {
blake2b_end_block(ctx);
blake2b_set_input(ctx, message[i], ctx->input_idx);
ctx->input_idx++;
}
}
void crypto_blake2b_general_init(crypto_blake2b_ctx *ctx, size_t hash_size,
const u8 *key, size_t key_size)
{
// initial hash
FOR (i, 0, 8) {
ctx->hash[i] = iv[i];
}
ctx->hash[0] ^= 0x01010000 ^ (key_size << 8) ^ hash_size;
ctx->input_offset[0] = 0; // beginning of the input, no offset
ctx->input_offset[1] = 0; // beginning of the input, no offset
ctx->hash_size = hash_size; // remember the hash size we want
ctx->input_idx = 0;
// if there is a key, the first block is that key (padded with zeroes)
if (key_size > 0) {
crypto_blake2b_update(ctx, key , key_size);
crypto_blake2b_update(ctx, zero, 128 - key_size);
}
}
void crypto_blake2b_init(crypto_blake2b_ctx *ctx)
{
crypto_blake2b_general_init(ctx, 64, 0, 0);
}
void crypto_blake2b_update(crypto_blake2b_ctx *ctx,
const u8 *message, size_t message_size)
{
// Align ourselves with block boundaries
size_t align = MIN(ALIGN(ctx->input_idx, 128), message_size);
blake2b_update(ctx, message, align);
message += align;
message_size -= align;
// Process the message block by block
FOR (i, 0, message_size >> 7) { // number of blocks
blake2b_end_block(ctx);
FOR (j, 0, 16) {
ctx->input[j] = load64_le(message + j*8);
}
message += 128;
ctx->input_idx = 128;
}
message_size &= 127;
// remaining bytes
blake2b_update(ctx, message, message_size);
}
void crypto_blake2b_final(crypto_blake2b_ctx *ctx, u8 *hash)
{
// Pad the end of the block with zeroes
FOR (i, ctx->input_idx, 128) {
blake2b_set_input(ctx, 0, i);
}
blake2b_incr(ctx); // update the input offset
blake2b_compress(ctx, 1); // compress the last block
size_t nb_words = ctx->hash_size >> 3;
FOR (i, 0, nb_words) {
store64_le(hash + i*8, ctx->hash[i]);
}
FOR (i, nb_words << 3, ctx->hash_size) {
hash[i] = (ctx->hash[i >> 3] >> (8 * (i & 7))) & 0xff;
}
WIPE_CTX(ctx);
}
void crypto_blake2b_general(u8 *hash , size_t hash_size,
const u8 *key , size_t key_size,
const u8 *message, size_t message_size)
{
crypto_blake2b_ctx ctx;
crypto_blake2b_general_init(&ctx, hash_size, key, key_size);
crypto_blake2b_update(&ctx, message, message_size);
crypto_blake2b_final(&ctx, hash);
}
void crypto_blake2b(u8 hash[64], const u8 *message, size_t message_size)
{
crypto_blake2b_general(hash, 64, 0, 0, message, message_size);
}
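// Usage sketch (not part of the upstream file, guarded out by default):
// the one-shot and incremental interfaces produce the same 64-byte digest.
#ifdef MONOCYPHER_BLAKE2B_EXAMPLE
static void blake2b_example(u8 hash[64], const u8 *msg, size_t msg_size)
{
    crypto_blake2b(hash, msg, msg_size);      // one shot
    crypto_blake2b_ctx ctx;                   // incremental, same result
    crypto_blake2b_init  (&ctx);
    crypto_blake2b_update(&ctx, msg, msg_size);
    crypto_blake2b_final (&ctx, hash);
}
#endif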
static void blake2b_vtable_init(void *ctx)
{
crypto_blake2b_init(&((crypto_sign_ctx*)ctx)->hash);
}
static void blake2b_vtable_update(void *ctx, const u8 *m, size_t s)
{
crypto_blake2b_update(&((crypto_sign_ctx*)ctx)->hash, m, s);
}
static void blake2b_vtable_final(void *ctx, u8 *h)
{
crypto_blake2b_final(&((crypto_sign_ctx*)ctx)->hash, h);
}
const crypto_sign_vtable crypto_blake2b_vtable = {
crypto_blake2b,
blake2b_vtable_init,
blake2b_vtable_update,
blake2b_vtable_final,
sizeof(crypto_sign_ctx),
};
////////////////
/// Argon2 i ///
////////////////
// references to R, Z, Q etc. come from the spec
// Argon2 operates on 1024 byte blocks.
typedef struct { u64 a[128]; } block;
static void wipe_block(block *b)
{
volatile u64* a = b->a;
FOR (i, 0, 128) {
a[i] = 0;
}
}
// updates a Blake2 hash with a 32 bit word, little endian.
static void blake_update_32(crypto_blake2b_ctx *ctx, u32 input)
{
u8 buf[4];
store32_le(buf, input);
crypto_blake2b_update(ctx, buf, 4);
WIPE_BUFFER(buf);
}
static void load_block(block *b, const u8 bytes[1024])
{
FOR (i, 0, 128) {
b->a[i] = load64_le(bytes + i*8);
}
}
static void store_block(u8 bytes[1024], const block *b)
{
FOR (i, 0, 128) {
store64_le(bytes + i*8, b->a[i]);
}
}
static void copy_block(block *o,const block*in){FOR(i,0,128)o->a[i] = in->a[i];}
static void xor_block(block *o,const block*in){FOR(i,0,128)o->a[i]^= in->a[i];}
// Hash with a virtually unlimited digest size.
// Doesn't extract more entropy than the base hash function.
// Mainly used for filling a whole kilobyte block with pseudo-random bytes.
// (One could use a stream cipher with a seed hash as the key, but
// this would introduce another dependency —and point of failure.)
static void extended_hash(u8 *digest, u32 digest_size,
const u8 *input , u32 input_size)
{
crypto_blake2b_ctx ctx;
crypto_blake2b_general_init(&ctx, MIN(digest_size, 64), 0, 0);
blake_update_32 (&ctx, digest_size);
crypto_blake2b_update (&ctx, input, input_size);
crypto_blake2b_final (&ctx, digest);
if (digest_size > 64) {
// the conversion to u64 avoids integer overflow on
// ludicrously big hash sizes.
u32 r = (u32)(((u64)digest_size + 31) >> 5) - 2;
u32 i = 1;
u32 in = 0;
u32 out = 32;
while (i < r) {
// Input and output overlap. This is intentional
crypto_blake2b(digest + out, digest + in, 64);
i += 1;
in += 32;
out += 32;
}
crypto_blake2b_general(digest + out, digest_size - (32 * r),
0, 0, // no key
digest + in , 64);
}
}
#define LSB(x) ((x) & 0xffffffff)
#define G(a, b, c, d) \
a += b + 2 * LSB(a) * LSB(b); d ^= a; d = rotr64(d, 32); \
c += d + 2 * LSB(c) * LSB(d); b ^= c; b = rotr64(b, 24); \
a += b + 2 * LSB(a) * LSB(b); d ^= a; d = rotr64(d, 16); \
c += d + 2 * LSB(c) * LSB(d); b ^= c; b = rotr64(b, 63)
#define ROUND(v0, v1, v2, v3, v4, v5, v6, v7, \
v8, v9, v10, v11, v12, v13, v14, v15) \
G(v0, v4, v8, v12); G(v1, v5, v9, v13); \
G(v2, v6, v10, v14); G(v3, v7, v11, v15); \
G(v0, v5, v10, v15); G(v1, v6, v11, v12); \
G(v2, v7, v8, v13); G(v3, v4, v9, v14)
// Core of the compression function G. Computes Z from R in place.
static void g_rounds(block *work_block)
{
// column rounds (work_block = Q)
for (int i = 0; i < 128; i += 16) {
ROUND(work_block->a[i ], work_block->a[i + 1],
work_block->a[i + 2], work_block->a[i + 3],
work_block->a[i + 4], work_block->a[i + 5],
work_block->a[i + 6], work_block->a[i + 7],
work_block->a[i + 8], work_block->a[i + 9],
work_block->a[i + 10], work_block->a[i + 11],
work_block->a[i + 12], work_block->a[i + 13],
work_block->a[i + 14], work_block->a[i + 15]);
}
// row rounds (work_block = Z)
for (int i = 0; i < 16; i += 2) {
ROUND(work_block->a[i ], work_block->a[i + 1],
work_block->a[i + 16], work_block->a[i + 17],
work_block->a[i + 32], work_block->a[i + 33],
work_block->a[i + 48], work_block->a[i + 49],
work_block->a[i + 64], work_block->a[i + 65],
work_block->a[i + 80], work_block->a[i + 81],
work_block->a[i + 96], work_block->a[i + 97],
work_block->a[i + 112], work_block->a[i + 113]);
}
}
// The compression function G (copy version for the first pass)
static void g_copy(block *result, const block *x, const block *y, block* tmp)
{
copy_block(tmp , x ); // tmp = X
xor_block (tmp , y ); // tmp = X ^ Y = R
copy_block(result, tmp); // result = R (only difference with g_xor)
g_rounds (tmp); // tmp = Z
xor_block (result, tmp); // result = R ^ Z
}
// The compression function G (xor version for subsequent passes)
static void g_xor(block *result, const block *x, const block *y, block *tmp)
{
copy_block(tmp , x ); // tmp = X
xor_block (tmp , y ); // tmp = X ^ Y = R
xor_block (result, tmp); // result = R ^ old (only difference with g_copy)
g_rounds (tmp); // tmp = Z
xor_block (result, tmp); // result = R ^ old ^ Z
}
// Unary version of the compression function.
// The missing argument is implied zero.
// Does the transformation in place.
static void unary_g(block *work_block, block *tmp)
{
// work_block == R
copy_block(tmp, work_block); // tmp = R
g_rounds (work_block); // work_block = Z
xor_block (work_block, tmp); // work_block = Z ^ R
}
// Argon2i uses a kind of stream cipher to determine which reference
// block it will take to synthesise the next block. This context holds
// that stream's state. (It's very similar to Chacha20. The block b
// is analogous to Chacha's own pool)
typedef struct {
block b;
u32 pass_number;
u32 slice_number;
u32 nb_blocks;
u32 nb_iterations;
u32 ctr;
u32 offset;
} gidx_ctx;
// The block in the context will determine array indices. To avoid
// timing attacks, it only depends on public information. No looking
// at a previous block to seed the next. This makes offline attacks
// easier, but timing attacks are the bigger threat in many settings.
static void gidx_refresh(gidx_ctx *ctx)
{
// seed the beginning of the block...
ctx->b.a[0] = ctx->pass_number;
ctx->b.a[1] = 0; // lane number (we have only one)
ctx->b.a[2] = ctx->slice_number;
ctx->b.a[3] = ctx->nb_blocks;
ctx->b.a[4] = ctx->nb_iterations;
ctx->b.a[5] = 1; // type: Argon2i
ctx->b.a[6] = ctx->ctr;
FOR (i, 7, 128) { ctx->b.a[i] = 0; } // ...then zero the rest out
// Shuffle the block thus: ctx->b = G((G(ctx->b, zero)), zero)
// (G "square" function), to get cheap pseudo-random numbers.
block tmp;
unary_g(&ctx->b, &tmp);
unary_g(&ctx->b, &tmp);
wipe_block(&tmp);
}
static void gidx_init(gidx_ctx *ctx,
u32 pass_number, u32 slice_number,
u32 nb_blocks, u32 nb_iterations)
{
ctx->pass_number = pass_number;
ctx->slice_number = slice_number;
ctx->nb_blocks = nb_blocks;
ctx->nb_iterations = nb_iterations;
ctx->ctr = 0;
// Offset from the beginning of the segment. For the first slice
// of the first pass, we start at the *third* block, so the offset
// starts at 2, not 0.
if (pass_number != 0 || slice_number != 0) {
ctx->offset = 0;
} else {
ctx->offset = 2;
ctx->ctr++; // Compensates for missed lazy creation
gidx_refresh(ctx); // at the start of gidx_next()
}
}
static u32 gidx_next(gidx_ctx *ctx)
{
// lazily creates the offset block we need
if ((ctx->offset & 127) == 0) {
ctx->ctr++;
gidx_refresh(ctx);
}
u32 index = ctx->offset & 127; // save index for current call
u32 offset = ctx->offset; // save offset for current call
ctx->offset++; // update offset for next call
// Computes the area size.
// Pass 0 : all already finished segments plus already constructed
// blocks in this segment
// Pass 1+: 3 last segments plus already constructed
// blocks in this segment. THE SPEC SUGGESTS OTHERWISE.
// I CONFORM TO THE REFERENCE IMPLEMENTATION.
int first_pass = ctx->pass_number == 0;
u32 slice_size = ctx->nb_blocks >> 2;
u32 nb_segments = first_pass ? ctx->slice_number : 3;
u32 area_size = nb_segments * slice_size + offset - 1;
// Computes the starting position of the reference area.
// CONTRARY TO WHAT THE SPEC SUGGESTS, IT STARTS AT THE
// NEXT SEGMENT, NOT THE NEXT BLOCK.
u32 next_slice = ((ctx->slice_number + 1) & 3) * slice_size;
u32 start_pos = first_pass ? 0 : next_slice;
// Generate offset from J1 (no need for J2, there's only one lane)
u64 j1 = ctx->b.a[index] & 0xffffffff; // pseudo-random number
u64 x = (j1 * j1) >> 32;
u64 y = (area_size * x) >> 32;
u64 z = (area_size - 1) - y;
return (start_pos + z) % ctx->nb_blocks;
}
// Main algorithm
void crypto_argon2i_general(u8 *hash, u32 hash_size,
void *work_area, u32 nb_blocks,
u32 nb_iterations,
const u8 *password, u32 password_size,
const u8 *salt, u32 salt_size,
const u8 *key, u32 key_size,
const u8 *ad, u32 ad_size)
{
// work area seen as blocks (must be suitably aligned)
block *blocks = (block*)work_area;
{
crypto_blake2b_ctx ctx;
crypto_blake2b_init(&ctx);
blake_update_32 (&ctx, 1 ); // p: number of threads
blake_update_32 (&ctx, hash_size );
blake_update_32 (&ctx, nb_blocks );
blake_update_32 (&ctx, nb_iterations);
blake_update_32 (&ctx, 0x13 ); // v: version number
blake_update_32 (&ctx, 1 ); // y: Argon2i
blake_update_32 (&ctx, password_size);
crypto_blake2b_update(&ctx, password, password_size);
blake_update_32 (&ctx, salt_size);
crypto_blake2b_update(&ctx, salt, salt_size);
blake_update_32 (&ctx, key_size);
crypto_blake2b_update(&ctx, key, key_size);
blake_update_32 (&ctx, ad_size);
crypto_blake2b_update(&ctx, ad, ad_size);
u8 initial_hash[72]; // 64 bytes plus 2 words for future hashes
crypto_blake2b_final(&ctx, initial_hash);
// fill first 2 blocks
block tmp_block;
u8 hash_area[1024];
store32_le(initial_hash + 64, 0); // first additional word
store32_le(initial_hash + 68, 0); // second additional word
extended_hash(hash_area, 1024, initial_hash, 72);
load_block(&tmp_block, hash_area);
copy_block(blocks, &tmp_block);
store32_le(initial_hash + 64, 1); // slight modification
extended_hash(hash_area, 1024, initial_hash, 72);
load_block(&tmp_block, hash_area);
copy_block(blocks + 1, &tmp_block);
WIPE_BUFFER(initial_hash);
WIPE_BUFFER(hash_area);
wipe_block(&tmp_block);
}
// Actual number of blocks
nb_blocks -= nb_blocks & 3; // round down to 4 p (p == 1 thread)
const u32 segment_size = nb_blocks >> 2;
// fill (then re-fill) the rest of the blocks
block tmp;
gidx_ctx ctx; // public information, no need to wipe
FOR_T (u32, pass_number, 0, nb_iterations) {
int first_pass = pass_number == 0;
FOR_T (u32, segment, 0, 4) {
gidx_init(&ctx, pass_number, segment, nb_blocks, nb_iterations);
// On the first segment of the first pass,
// blocks 0 and 1 are already filled.
// We use the offset to skip them.
u32 start_offset = first_pass && segment == 0 ? 2 : 0;
u32 segment_start = segment * segment_size + start_offset;
u32 segment_end = (segment + 1) * segment_size;
FOR_T (u32, current_block, segment_start, segment_end) {
u32 reference_block = gidx_next(&ctx);
u32 previous_block = current_block == 0
? nb_blocks - 1
: current_block - 1;
block *c = blocks + current_block;
block *p = blocks + previous_block;
block *r = blocks + reference_block;
if (first_pass) { g_copy(c, p, r, &tmp); }
else { g_xor (c, p, r, &tmp); }
}
}
}
wipe_block(&tmp);
// hash the very last block with H' into the output hash
u8 final_block[1024];
store_block(final_block, blocks + (nb_blocks - 1));
extended_hash(hash, hash_size, final_block, 1024);
WIPE_BUFFER(final_block);
// wipe work area
volatile u64 *p = (u64*)work_area;
FOR (i, 0, 128 * nb_blocks) {
p[i] = 0;
}
}
void crypto_argon2i(u8 *hash, u32 hash_size,
void *work_area, u32 nb_blocks,
u32 nb_iterations,
const u8 *password, u32 password_size,
const u8 *salt, u32 salt_size)
{
crypto_argon2i_general(hash, hash_size,
work_area, nb_blocks, nb_iterations,
password, password_size,
salt , salt_size,
0, 0, 0, 0);
}
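// Usage sketch (not part of the upstream file, guarded out by default):
// the caller supplies the work area, which must hold nb_blocks blocks of
// 1024 bytes and be aligned for u64 (malloc() satisfies both). The block
// and iteration counts below are placeholders, not recommendations.
#ifdef MONOCYPHER_ARGON2I_EXAMPLE
#include <stdlib.h>
static int argon2i_example(u8 hash[32],
                           const u8 *password, u32 password_size,
                           const u8 salt[16])
{
    const u32 nb_blocks     = 100000;   // roughly 100 megabytes of memory
    const u32 nb_iterations = 3;
    void *work_area = malloc((size_t)nb_blocks * 1024);
    if (work_area == 0) {
        return -1;
    }
    crypto_argon2i(hash, 32, work_area, nb_blocks, nb_iterations,
                   password, password_size, salt, 16);
    free(work_area);
    return 0;
}
#endif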
////////////////////////////////////
/// Arithmetic modulo 2^255 - 19 ///
////////////////////////////////////
// Taken from SUPERCOP's ref10 implementation.
// A bit bigger than TweetNaCl, over 4 times faster.
// field element
typedef i32 fe[10];
static void fe_0(fe h) { FOR(i, 0, 10) h[i] = 0; }
static void fe_1(fe h) { h[0] = 1; FOR(i, 1, 10) h[i] = 0; }
static void fe_copy(fe h,const fe f ){FOR(i,0,10) h[i] = f[i]; }
static void fe_neg (fe h,const fe f ){FOR(i,0,10) h[i] = -f[i]; }
static void fe_add (fe h,const fe f,const fe g){FOR(i,0,10) h[i] = f[i] + g[i];}
static void fe_sub (fe h,const fe f,const fe g){FOR(i,0,10) h[i] = f[i] - g[i];}
static void fe_cswap(fe f, fe g, int b)
{
i32 mask = -b; // -1 = 0xffffffff
FOR (i, 0, 10) {
i32 x = (f[i] ^ g[i]) & mask;
f[i] = f[i] ^ x;
g[i] = g[i] ^ x;
}
}
static void fe_ccopy(fe f, const fe g, int b)
{
i32 mask = -b; // -1 = 0xffffffff
FOR (i, 0, 10) {
i32 x = (f[i] ^ g[i]) & mask;
f[i] = f[i] ^ x;
}
}
#define FE_CARRY \
i64 c0, c1, c2, c3, c4, c5, c6, c7, c8, c9; \
c9 = (t9 + (i64)(1<<24)) >> 25; t0 += c9 * 19; t9 -= c9 * (1 << 25); \
c1 = (t1 + (i64)(1<<24)) >> 25; t2 += c1; t1 -= c1 * (1 << 25); \
c3 = (t3 + (i64)(1<<24)) >> 25; t4 += c3; t3 -= c3 * (1 << 25); \
c5 = (t5 + (i64)(1<<24)) >> 25; t6 += c5; t5 -= c5 * (1 << 25); \
c7 = (t7 + (i64)(1<<24)) >> 25; t8 += c7; t7 -= c7 * (1 << 25); \
c0 = (t0 + (i64)(1<<25)) >> 26; t1 += c0; t0 -= c0 * (1 << 26); \
c2 = (t2 + (i64)(1<<25)) >> 26; t3 += c2; t2 -= c2 * (1 << 26); \
c4 = (t4 + (i64)(1<<25)) >> 26; t5 += c4; t4 -= c4 * (1 << 26); \
c6 = (t6 + (i64)(1<<25)) >> 26; t7 += c6; t6 -= c6 * (1 << 26); \
c8 = (t8 + (i64)(1<<25)) >> 26; t9 += c8; t8 -= c8 * (1 << 26); \
h[0]=(i32)t0; h[1]=(i32)t1; h[2]=(i32)t2; h[3]=(i32)t3; h[4]=(i32)t4; \
h[5]=(i32)t5; h[6]=(i32)t6; h[7]=(i32)t7; h[8]=(i32)t8; h[9]=(i32)t9
static void fe_frombytes(fe h, const u8 s[32])
{
i64 t0 = load32_le(s);
i64 t1 = load24_le(s + 4) << 6;
i64 t2 = load24_le(s + 7) << 5;
i64 t3 = load24_le(s + 10) << 3;
i64 t4 = load24_le(s + 13) << 2;
i64 t5 = load32_le(s + 16);
i64 t6 = load24_le(s + 20) << 7;
i64 t7 = load24_le(s + 23) << 5;
i64 t8 = load24_le(s + 26) << 4;
i64 t9 = (load24_le(s + 29) & 0x7fffff) << 2;
FE_CARRY;
}
// multiply a field element by a signed 32-bit integer
static void fe_mul_small(fe h, const fe f, i32 g)
{
i64 t0 = f[0] * (i64) g; i64 t1 = f[1] * (i64) g;
i64 t2 = f[2] * (i64) g; i64 t3 = f[3] * (i64) g;
i64 t4 = f[4] * (i64) g; i64 t5 = f[5] * (i64) g;
i64 t6 = f[6] * (i64) g; i64 t7 = f[7] * (i64) g;
i64 t8 = f[8] * (i64) g; i64 t9 = f[9] * (i64) g;
FE_CARRY;
}
static void fe_mul121666(fe h, const fe f) { fe_mul_small(h, f, 121666); }
static void fe_mul(fe h, const fe f, const fe g)
{
// Everything is unrolled and put in temporary variables.
// We could roll the loop, but that would make curve25519 twice as slow.
i32 f0 = f[0]; i32 f1 = f[1]; i32 f2 = f[2]; i32 f3 = f[3]; i32 f4 = f[4];
i32 f5 = f[5]; i32 f6 = f[6]; i32 f7 = f[7]; i32 f8 = f[8]; i32 f9 = f[9];
i32 g0 = g[0]; i32 g1 = g[1]; i32 g2 = g[2]; i32 g3 = g[3]; i32 g4 = g[4];
i32 g5 = g[5]; i32 g6 = g[6]; i32 g7 = g[7]; i32 g8 = g[8]; i32 g9 = g[9];
i32 F1 = f1*2; i32 F3 = f3*2; i32 F5 = f5*2; i32 F7 = f7*2; i32 F9 = f9*2;
i32 G1 = g1*19; i32 G2 = g2*19; i32 G3 = g3*19;
i32 G4 = g4*19; i32 G5 = g5*19; i32 G6 = g6*19;
i32 G7 = g7*19; i32 G8 = g8*19; i32 G9 = g9*19;
i64 h0 = f0*(i64)g0 + F1*(i64)G9 + f2*(i64)G8 + F3*(i64)G7 + f4*(i64)G6
+ F5*(i64)G5 + f6*(i64)G4 + F7*(i64)G3 + f8*(i64)G2 + F9*(i64)G1;
i64 h1 = f0*(i64)g1 + f1*(i64)g0 + f2*(i64)G9 + f3*(i64)G8 + f4*(i64)G7
+ f5*(i64)G6 + f6*(i64)G5 + f7*(i64)G4 + f8*(i64)G3 + f9*(i64)G2;
i64 h2 = f0*(i64)g2 + F1*(i64)g1 + f2*(i64)g0 + F3*(i64)G9 + f4*(i64)G8
+ F5*(i64)G7 + f6*(i64)G6 + F7*(i64)G5 + f8*(i64)G4 + F9*(i64)G3;
i64 h3 = f0*(i64)g3 + f1*(i64)g2 + f2*(i64)g1 + f3*(i64)g0 + f4*(i64)G9
+ f5*(i64)G8 + f6*(i64)G7 + f7*(i64)G6 + f8*(i64)G5 + f9*(i64)G4;
i64 h4 = f0*(i64)g4 + F1*(i64)g3 + f2*(i64)g2 + F3*(i64)g1 + f4*(i64)g0
+ F5*(i64)G9 + f6*(i64)G8 + F7*(i64)G7 + f8*(i64)G6 + F9*(i64)G5;
i64 h5 = f0*(i64)g5 + f1*(i64)g4 + f2*(i64)g3 + f3*(i64)g2 + f4*(i64)g1
+ f5*(i64)g0 + f6*(i64)G9 + f7*(i64)G8 + f8*(i64)G7 + f9*(i64)G6;
i64 h6 = f0*(i64)g6 + F1*(i64)g5 + f2*(i64)g4 + F3*(i64)g3 + f4*(i64)g2
+ F5*(i64)g1 + f6*(i64)g0 + F7*(i64)G9 + f8*(i64)G8 + F9*(i64)G7;
i64 h7 = f0*(i64)g7 + f1*(i64)g6 + f2*(i64)g5 + f3*(i64)g4 + f4*(i64)g3
+ f5*(i64)g2 + f6*(i64)g1 + f7*(i64)g0 + f8*(i64)G9 + f9*(i64)G8;
i64 h8 = f0*(i64)g8 + F1*(i64)g7 + f2*(i64)g6 + F3*(i64)g5 + f4*(i64)g4
+ F5*(i64)g3 + f6*(i64)g2 + F7*(i64)g1 + f8*(i64)g0 + F9*(i64)G9;
i64 h9 = f0*(i64)g9 + f1*(i64)g8 + f2*(i64)g7 + f3*(i64)g6 + f4*(i64)g5
+ f5*(i64)g4 + f6*(i64)g3 + f7*(i64)g2 + f8*(i64)g1 + f9*(i64)g0;
#define CARRY \
i64 c0, c1, c2, c3, c4, c5, c6, c7, c8, c9; \
c0 = (h0 + (i64) (1<<25)) >> 26; h1 += c0; h0 -= c0 * (1 << 26); \
c4 = (h4 + (i64) (1<<25)) >> 26; h5 += c4; h4 -= c4 * (1 << 26); \
c1 = (h1 + (i64) (1<<24)) >> 25; h2 += c1; h1 -= c1 * (1 << 25); \
c5 = (h5 + (i64) (1<<24)) >> 25; h6 += c5; h5 -= c5 * (1 << 25); \
c2 = (h2 + (i64) (1<<25)) >> 26; h3 += c2; h2 -= c2 * (1 << 26); \
c6 = (h6 + (i64) (1<<25)) >> 26; h7 += c6; h6 -= c6 * (1 << 26); \
c3 = (h3 + (i64) (1<<24)) >> 25; h4 += c3; h3 -= c3 * (1 << 25); \
c7 = (h7 + (i64) (1<<24)) >> 25; h8 += c7; h7 -= c7 * (1 << 25); \
c4 = (h4 + (i64) (1<<25)) >> 26; h5 += c4; h4 -= c4 * (1 << 26); \
c8 = (h8 + (i64) (1<<25)) >> 26; h9 += c8; h8 -= c8 * (1 << 26); \
c9 = (h9 + (i64) (1<<24)) >> 25; h0 += c9 * 19; h9 -= c9 * (1 << 25); \
c0 = (h0 + (i64) (1<<25)) >> 26; h1 += c0; h0 -= c0 * (1 << 26); \
h[0]=(i32)h0; h[1]=(i32)h1; h[2]=(i32)h2; h[3]=(i32)h3; h[4]=(i32)h4; \
h[5]=(i32)h5; h[6]=(i32)h6; h[7]=(i32)h7; h[8]=(i32)h8; h[9]=(i32)h9
CARRY;
}
// we could use fe_mul() for this, but this is significantly faster
static void fe_sq(fe h, const fe f)
{
i32 f0 = f[0]; i32 f1 = f[1]; i32 f2 = f[2]; i32 f3 = f[3]; i32 f4 = f[4];
i32 f5 = f[5]; i32 f6 = f[6]; i32 f7 = f[7]; i32 f8 = f[8]; i32 f9 = f[9];
i32 f0_2 = f0*2; i32 f1_2 = f1*2; i32 f2_2 = f2*2; i32 f3_2 = f3*2;
i32 f4_2 = f4*2; i32 f5_2 = f5*2; i32 f6_2 = f6*2; i32 f7_2 = f7*2;
i32 f5_38 = f5*38; i32 f6_19 = f6*19; i32 f7_38 = f7*38;
i32 f8_19 = f8*19; i32 f9_38 = f9*38;
i64 h0 = f0 *(i64)f0 + f1_2*(i64)f9_38 + f2_2*(i64)f8_19
+ f3_2*(i64)f7_38 + f4_2*(i64)f6_19 + f5 *(i64)f5_38;
i64 h1 = f0_2*(i64)f1 + f2 *(i64)f9_38 + f3_2*(i64)f8_19
+ f4 *(i64)f7_38 + f5_2*(i64)f6_19;
i64 h2 = f0_2*(i64)f2 + f1_2*(i64)f1 + f3_2*(i64)f9_38
+ f4_2*(i64)f8_19 + f5_2*(i64)f7_38 + f6 *(i64)f6_19;
i64 h3 = f0_2*(i64)f3 + f1_2*(i64)f2 + f4 *(i64)f9_38
+ f5_2*(i64)f8_19 + f6 *(i64)f7_38;
i64 h4 = f0_2*(i64)f4 + f1_2*(i64)f3_2 + f2 *(i64)f2
+ f5_2*(i64)f9_38 + f6_2*(i64)f8_19 + f7 *(i64)f7_38;
i64 h5 = f0_2*(i64)f5 + f1_2*(i64)f4 + f2_2*(i64)f3
+ f6 *(i64)f9_38 + f7_2*(i64)f8_19;
i64 h6 = f0_2*(i64)f6 + f1_2*(i64)f5_2 + f2_2*(i64)f4
+ f3_2*(i64)f3 + f7_2*(i64)f9_38 + f8 *(i64)f8_19;
i64 h7 = f0_2*(i64)f7 + f1_2*(i64)f6 + f2_2*(i64)f5
+ f3_2*(i64)f4 + f8 *(i64)f9_38;
i64 h8 = f0_2*(i64)f8 + f1_2*(i64)f7_2 + f2_2*(i64)f6
+ f3_2*(i64)f5_2 + f4 *(i64)f4 + f9 *(i64)f9_38;
i64 h9 = f0_2*(i64)f9 + f1_2*(i64)f8 + f2_2*(i64)f7
+ f3_2*(i64)f6 + f4 *(i64)f5_2;
CARRY;
}
// h = 2 * (f^2)
static void fe_sq2(fe h, const fe f)
{
fe_sq(h, f);
fe_mul_small(h, h, 2);
}
// This could be simplified, but it would be slower
static void fe_pow22523(fe out, const fe z)
{
fe t0, t1, t2;
fe_sq(t0, z);
fe_sq(t1,t0); fe_sq(t1, t1); fe_mul(t1, z, t1);
fe_mul(t0, t0, t1);
fe_sq(t0, t0); fe_mul(t0, t1, t0);
fe_sq(t1, t0); FOR (i, 1, 5) fe_sq(t1, t1); fe_mul(t0, t1, t0);
fe_sq(t1, t0); FOR (i, 1, 10) fe_sq(t1, t1); fe_mul(t1, t1, t0);
fe_sq(t2, t1); FOR (i, 1, 20) fe_sq(t2, t2); fe_mul(t1, t2, t1);
fe_sq(t1, t1); FOR (i, 1, 10) fe_sq(t1, t1); fe_mul(t0, t1, t0);
fe_sq(t1, t0); FOR (i, 1, 50) fe_sq(t1, t1); fe_mul(t1, t1, t0);
fe_sq(t2, t1); FOR (i, 1, 100) fe_sq(t2, t2); fe_mul(t1, t2, t1);
fe_sq(t1, t1); FOR (i, 1, 50) fe_sq(t1, t1); fe_mul(t0, t1, t0);
fe_sq(t0, t0); FOR (i, 1, 2) fe_sq(t0, t0); fe_mul(out, t0, z);
WIPE_BUFFER(t0);
WIPE_BUFFER(t1);
WIPE_BUFFER(t2);
}
// Inverting means raising to the power 2^255 - 21 (p - 2, with p = 2^255 - 19)
// 2^255 - 21 = (2^252 - 3) * 8 + 3
// So we reuse the multiplication chain of fe_pow22523
static void fe_invert(fe out, const fe z)
{
fe tmp;
fe_pow22523(tmp, z);
    // out = tmp^8 * z^3
fe_sq(tmp, tmp); // 0
fe_sq(tmp, tmp); fe_mul(tmp, tmp, z); // 1
fe_sq(tmp, tmp); fe_mul(out, tmp, z); // 1
WIPE_BUFFER(tmp);
}
static void fe_tobytes(u8 s[32], const fe h)
{
i32 t[10];
FOR (i, 0, 10) {
t[i] = h[i];
}
i32 q = (19 * t[9] + (((i32) 1) << 24)) >> 25;
FOR (i, 0, 5) {
q += t[2*i ]; q >>= 26;
q += t[2*i+1]; q >>= 25;
}
t[0] += 19 * q;
i32 c0 = t[0] >> 26; t[1] += c0; t[0] -= c0 * (1 << 26);
i32 c1 = t[1] >> 25; t[2] += c1; t[1] -= c1 * (1 << 25);
i32 c2 = t[2] >> 26; t[3] += c2; t[2] -= c2 * (1 << 26);
i32 c3 = t[3] >> 25; t[4] += c3; t[3] -= c3 * (1 << 25);
i32 c4 = t[4] >> 26; t[5] += c4; t[4] -= c4 * (1 << 26);
i32 c5 = t[5] >> 25; t[6] += c5; t[5] -= c5 * (1 << 25);
i32 c6 = t[6] >> 26; t[7] += c6; t[6] -= c6 * (1 << 26);
i32 c7 = t[7] >> 25; t[8] += c7; t[7] -= c7 * (1 << 25);
i32 c8 = t[8] >> 26; t[9] += c8; t[8] -= c8 * (1 << 26);
i32 c9 = t[9] >> 25; t[9] -= c9 * (1 << 25);
store32_le(s + 0, ((u32)t[0] >> 0) | ((u32)t[1] << 26));
store32_le(s + 4, ((u32)t[1] >> 6) | ((u32)t[2] << 19));
store32_le(s + 8, ((u32)t[2] >> 13) | ((u32)t[3] << 13));
store32_le(s + 12, ((u32)t[3] >> 19) | ((u32)t[4] << 6));
store32_le(s + 16, ((u32)t[5] >> 0) | ((u32)t[6] << 25));
store32_le(s + 20, ((u32)t[6] >> 7) | ((u32)t[7] << 19));
store32_le(s + 24, ((u32)t[7] >> 13) | ((u32)t[8] << 12));
store32_le(s + 28, ((u32)t[8] >> 20) | ((u32)t[9] << 6));
WIPE_BUFFER(t);
}
// Parity check. Returns 0 if even, 1 if odd
static int fe_isnegative(const fe f)
{
u8 s[32];
fe_tobytes(s, f);
u8 isneg = s[0] & 1;
WIPE_BUFFER(s);
return isneg;
}
static int fe_isnonzero(const fe f)
{
u8 s[32];
fe_tobytes(s, f);
int isnonzero = zerocmp32(s);
WIPE_BUFFER(s);
return isnonzero;
}
// trim a scalar for scalar multiplication
static void trim_scalar(u8 s[32])
{
s[ 0] &= 248;
s[31] &= 127;
s[31] |= 64;
}
// get bit from scalar at position i
static int scalar_bit(const u8 s[32], int i) {
if (i < 0) { return 0; } // handle -1 for sliding windows
return (s[i>>3] >> (i&7)) & 1;
}
///////////////
/// X-25519 /// Taken from SUPERCOP's ref10 implementation.
///////////////
void crypto_x25519(u8 raw_shared_secret[32],
const u8 your_secret_key [32],
const u8 their_public_key [32])
{
// computes the scalar product
fe x1;
fe_frombytes(x1, their_public_key);
// restrict the possible scalar values
u8 e[32];
FOR (i, 0, 32) {
e[i] = your_secret_key[i];
}
trim_scalar(e);
// computes the actual scalar product (the result is in x2 and z2)
fe x2, z2, x3, z3, t0, t1;
// Montgomery ladder
// In projective coordinates, to avoid divisions: x = X / Z
// We don't care about the y coordinate, it's only 1 bit of information
fe_1(x2); fe_0(z2); // "zero" point
fe_copy(x3, x1); fe_1(z3); // "one" point
int swap = 0;
for (int pos = 254; pos >= 0; --pos) {
// constant time conditional swap before ladder step
int b = scalar_bit(e, pos);
swap ^= b; // xor trick avoids swapping at the end of the loop
fe_cswap(x2, x3, swap);
fe_cswap(z2, z3, swap);
swap = b; // anticipates one last swap after the loop
// Montgomery ladder step: replaces (P2, P3) by (P2*2, P2+P3)
// with differential addition
fe_sub(t0, x3, z3); fe_sub(t1, x2, z2); fe_add(x2, x2, z2);
fe_add(z2, x3, z3); fe_mul(z3, t0, x2); fe_mul(z2, z2, t1);
fe_sq (t0, t1 ); fe_sq (t1, x2 ); fe_add(x3, z3, z2);
fe_sub(z2, z3, z2); fe_mul(x2, t1, t0); fe_sub(t1, t1, t0);
fe_sq (z2, z2 ); fe_mul121666(z3, t1); fe_sq (x3, x3 );
fe_add(t0, t0, z3); fe_mul(z3, x1, z2); fe_mul(z2, t1, t0);
}
// last swap is necessary to compensate for the xor trick
// Note: after this swap, P3 == P2 + P1.
fe_cswap(x2, x3, swap);
fe_cswap(z2, z3, swap);
// normalises the coordinates: x == X / Z
fe_invert(z2, z2);
fe_mul(x2, x2, z2);
fe_tobytes(raw_shared_secret, x2);
WIPE_BUFFER(x1); WIPE_BUFFER(e );
WIPE_BUFFER(x2); WIPE_BUFFER(z2);
WIPE_BUFFER(x3); WIPE_BUFFER(z3);
WIPE_BUFFER(t0); WIPE_BUFFER(t1);
}
void crypto_x25519_public_key(u8 public_key[32],
const u8 secret_key[32])
{
static const u8 base_point[32] = {9};
crypto_x25519(public_key, secret_key, base_point);
}
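// Illustrative sketch (not from the original source): how the two X25519
// functions above combine into a key agreement. The buffer names are
// hypothetical placeholders; real secret keys would come from a CSPRNG.
static void example_x25519_agreement(u8 shared[32],
                                     const u8 alice_secret[32],
                                     const u8 bob_secret[32])
{
    u8 alice_public[32];
    u8 bob_public  [32];
    crypto_x25519_public_key(alice_public, alice_secret); // X25519(a, 9)
    crypto_x25519_public_key(bob_public  , bob_secret  ); // X25519(b, 9)
    // both sides derive the same raw secret: X25519(a, X25519(b, 9))
    crypto_x25519(shared, alice_secret, bob_public);
}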
///////////////
/// Ed25519 ///
///////////////
static const i64 L[32] = { 0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10};
// r = x mod L (little-endian)
static void modL(u8 *r, i64 x[64])
{
for (unsigned i = 63; i >= 32; i--) {
i64 carry = 0;
FOR (j, i-32, i-12) {
x[j] += carry - 16 * x[i] * L[j - (i - 32)];
carry = (x[j] + 128) >> 8;
x[j] -= carry * (1 << 8);
}
x[i-12] += carry;
x[i] = 0;
}
i64 carry = 0;
FOR (i, 0, 32) {
x[i] += carry - (x[31] >> 4) * L[i];
carry = x[i] >> 8;
x[i] &= 255;
}
FOR (i, 0, 32) {
x[i] -= carry * L[i];
}
FOR (i, 0, 32) {
x[i+1] += x[i] >> 8;
r[i ] = x[i] & 255;
}
}
// Reduces a 64-byte hash modulo L (little endian)
static void reduce(u8 r[64])
{
i64 x[64];
FOR (i, 0, 64) {
x[i] = (i64)(u64)r[i]; // preserve unsigned
r[i] = 0;
}
modL(r, x);
WIPE_BUFFER(x);
}
// r = (a * b) + c
static void mul_add(u8 r[32], const u8 a[32], const u8 b[32], const u8 c[32])
{
i64 s[64];
FOR (i, 0, 32) { s[i] = (i64)(u64)c[i]; } // preserve unsigned
FOR (i, 32, 64) { s[i] = 0; }
FOR (i, 0, 32) {
FOR (j, 0, 32) {
s[i+j] += a[i] * (u64)b[j];
}
}
modL(r, s);
WIPE_BUFFER(s);
}
// Variable time! a must not be secret!
static int is_above_L(const u8 a[32])
{
for (int i = 31; i >= 0; i--) {
if (a[i] > L[i]) { return 1; }
if (a[i] < L[i]) { return 0; }
}
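    // Reaching this point means a == L, which also counts as "above".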
return 1;
}
// Point (group element, ge) in a twisted Edwards curve,
// in extended projective coordinates.
// x = X/Z, y = Y/Z, T = XY/Z
typedef struct { fe X; fe Y; fe Z; fe T; } ge;
typedef struct { fe Yp; fe Ym; fe Z; fe T2; } ge_cached;
static void ge_zero(ge *p)
{
fe_0(p->X);
fe_1(p->Y);
fe_1(p->Z);
fe_0(p->T);
}
static void ge_tobytes(u8 s[32], const ge *h)
{
fe recip, x, y;
fe_invert(recip, h->Z);
fe_mul(x, h->X, recip);
fe_mul(y, h->Y, recip);
fe_tobytes(s, y);
s[31] ^= fe_isnegative(x) << 7;
WIPE_BUFFER(recip);
WIPE_BUFFER(x);
WIPE_BUFFER(y);
}
// h = -s, where s is a point encoded in 32 bytes
// ge_double_scalarmult_vartime() performs addition, but the algorithm it is
// used for requires subtraction; thus we negate s on load so that we can do
// addition in ge_double_scalarmult_vartime() later.
//
// Variable time! Inputs must not be secret!
// => Use only to *check* signatures.
static int ge_frombytes_neg_vartime(ge *h, const u8 s[32])
{
static const fe d = {
-10913610, 13857413, -15372611, 6949391, 114729,
-8787816, -6275908, -3247719, -18696448, -12055116
};
static const fe sqrtm1 = {
-32595792, -7943725, 9377950, 3500415, 12389472,
-272473, -25146209, -2005654, 326686, 11406482
};
fe u, v, v3; // no secret, no wipe
fe_frombytes(h->Y, s);
fe_1(h->Z);
fe_sq(u, h->Y); // y^2
fe_mul(v, u, d);
fe_sub(u, u, h->Z); // u = y^2-1
fe_add(v, v, h->Z); // v = dy^2+1
fe_sq(v3, v);
fe_mul(v3, v3, v); // v3 = v^3
fe_sq(h->X, v3);
fe_mul(h->X, h->X, v);
fe_mul(h->X, h->X, u); // x = uv^7
fe_pow22523(h->X, h->X); // x = (uv^7)^((q-5)/8)
fe_mul(h->X, h->X, v3);
fe_mul(h->X, h->X, u); // x = uv^3(uv^7)^((q-5)/8)
fe vxx, check; // no secret, no wipe
fe_sq(vxx, h->X);
fe_mul(vxx, vxx, v);
fe_sub(check, vxx, u); // vx^2-u
if (fe_isnonzero(check)) {
fe_add(check, vxx, u); // vx^2+u
if (fe_isnonzero(check)) {
return -1;
}
fe_mul(h->X, h->X, sqrtm1);
}
if (fe_isnegative(h->X) == (s[31] >> 7)) {
fe_neg(h->X, h->X);
}
fe_mul(h->T, h->X, h->Y);
return 0;
}
static void ge_cache(ge_cached *c, const ge *p)
{
static const fe D2 = { // - 2 * 121665 / 121666
-21827239, -5839606, -30745221, 13898782, 229458,
15978800, -12551817, -6495438, 29715968, 9444199
};
fe_add (c->Yp, p->Y, p->X);
fe_sub (c->Ym, p->Y, p->X);
fe_copy(c->Z , p->Z );
fe_mul (c->T2, p->T, D2 );
}
// Internal buffers are not wiped! Inputs must not be secret!
// => Use only to *check* signatures.
static void ge_add(ge *s, const ge *p, const ge_cached *q)
{
fe a, b;
fe_add(a , p->Y, p->X );
fe_sub(b , p->Y, p->X );
fe_mul(a , a , q->Yp);
fe_mul(b , b , q->Ym);
fe_add(s->Y, a , b );
fe_sub(s->X, a , b );
fe_add(s->Z, p->Z, p->Z );
fe_mul(s->Z, s->Z, q->Z );
fe_mul(s->T, p->T, q->T2);
fe_add(a , s->Z, s->T );
fe_sub(b , s->Z, s->T );
fe_mul(s->T, s->X, s->Y);
fe_mul(s->X, s->X, b );
fe_mul(s->Y, s->Y, a );
fe_mul(s->Z, a , b );
}
// Internal buffers are not wiped! Inputs must not be secret!
// => Use only to *check* signatures.
static void ge_sub(ge *s, const ge *p, const ge_cached *q)
{
ge_cached neg;
fe_copy(neg.Ym, q->Yp);
fe_copy(neg.Yp, q->Ym);
fe_copy(neg.Z , q->Z );
fe_neg (neg.T2, q->T2);
ge_add(s, p, &neg);
}
static void ge_madd(ge *s, const ge *p, const fe yp, const fe ym, const fe t2,
fe a, fe b)
{
fe_add(a , p->Y, p->X );
fe_sub(b , p->Y, p->X );
fe_mul(a , a , yp );
fe_mul(b , b , ym );
fe_add(s->Y, a , b );
fe_sub(s->X, a , b );
fe_add(s->Z, p->Z, p->Z );
fe_mul(s->T, p->T, t2 );
fe_add(a , s->Z, s->T );
fe_sub(b , s->Z, s->T );
fe_mul(s->T, s->X, s->Y);
fe_mul(s->X, s->X, b );
fe_mul(s->Y, s->Y, a );
fe_mul(s->Z, a , b );
}
// Internal buffers are not wiped! Inputs must not be secret!
// => Use only to *check* signatures.
static void ge_msub(ge *s, const ge *p, const fe yp, const fe ym, const fe t2,
fe a, fe b)
{
fe n2;
fe_neg(n2, t2);
ge_madd(s, p, ym, yp, n2, a, b);
}
static void ge_double(ge *s, const ge *p, ge *q)
{
fe_sq (q->X, p->X);
fe_sq (q->Y, p->Y);
fe_sq2(q->Z, p->Z);
fe_add(q->T, p->X, p->Y);
fe_sq (s->T, q->T);
fe_add(q->T, q->Y, q->X);
fe_sub(q->Y, q->Y, q->X);
fe_sub(q->X, s->T, q->T);
fe_sub(q->Z, q->Z, q->Y);
fe_mul(s->X, q->X , q->Z);
fe_mul(s->Y, q->T , q->Y);
fe_mul(s->Z, q->Y , q->Z);
fe_mul(s->T, q->X , q->T);
}
static const fe window_Yp[8] = {
{25967493, -14356035, 29566456, 3660896, -12694345,
4014787, 27544626, -11754271, -6079156, 2047605},
{15636291, -9688557, 24204773, -7912398, 616977,
-16685262, 27787600, -14772189, 28944400, -1550024},
{10861363, 11473154, 27284546, 1981175, -30064349,
12577861, 32867885, 14515107, -15438304, 10819380},
{5153746, 9909285, 1723747, -2777874, 30523605,
5516873, 19480852, 5230134, -23952439, -15175766},
{-22518993, -6692182, 14201702, -8745502, -23510406,
8844726, 18474211, -1361450, -13062696, 13821877},
{-25154831, -4185821, 29681144, 7868801, -6854661,
-9423865, -12437364, -663000, -31111463, -16132436},
{-33521811, 3180713, -2394130, 14003687, -16903474,
-16270840, 17238398, 4729455, -18074513, 9256800},
{-3151181, -5046075, 9282714, 6866145, -31907062,
-863023, -18940575, 15033784, 25105118, -7894876},
};
static const fe window_Ym[8] = {
{-12545711, 934262, -2722910, 3049990, -727428,
9406986, 12720692, 5043384, 19500929, -15469378},
{16568933, 4717097, -11556148, -1102322, 15682896,
-11807043, 16354577, -11775962, 7689662, 11199574},
{4708026, 6336745, 20377586, 9066809, -11272109,
6594696, -25653668, 12483688, -12668491, 5581306},
{-30269007, -3463509, 7665486, 10083793, 28475525,
1649722, 20654025, 16520125, 30598449, 7715701},
{-6455177, -7839871, 3374702, -4740862, -27098617,
-10571707, 31655028, -7212327, 18853322, -14220951},
{25576264, -2703214, 7349804, -11814844, 16472782,
9300885, 3844789, 15725684, 171356, 6466918},
{-25182317, -4174131, 32336398, 5036987, -21236817,
11360617, 22616405, 9761698, -19827198, 630305},
{-24326370, 15950226, -31801215, -14592823, -11662737,
-5090925, 1573892, -2625887, 2198790, -15804619},
};
static const fe window_T2[8] = {
{-8738181, 4489570, 9688441, -14785194, 10184609,
-12363380, 29287919, 11864899, -24514362, -4438546},
{30464156, -5976125, -11779434, -15670865, 23220365,
15915852, 7512774, 10017326, -17749093, -9920357},
{19563160, 16186464, -29386857, 4097519, 10237984,
-4348115, 28542350, 13850243, -23678021, -15815942},
{28881845, 14381568, 9657904, 3680757, -20181635,
7843316, -31400660, 1370708, 29794553, -1409300},
{4566830, -12963868, -28974889, -12240689, -7602672,
-2830569, -8514358, -10431137, 2207753, -3209784},
{23103977, 13316479, 9739013, -16149481, 817875,
-15038942, 8965339, -14088058, -30714912, 16193877},
{-13720693, 2639453, -24237460, -7406481, 9494427,
-5774029, -6554551, -15960994, -2449256, -14291300},
{-3099351, 10324967, -2241613, 7453183, -5446979,
-2735503, -13812022, -16236442, -32461234, -12290683},
};
// Incremental sliding windows (left to right)
// Based on Roberto Maria Avanzi[2005]
typedef struct {
i16 next_index; // position of the next signed digit
i8 next_digit; // next signed digit (odd number below 2^window_width)
u8 next_check; // point at which we must check for a new window
} slide_ctx;
static void slide_init(slide_ctx *ctx, const u8 scalar[32])
{
// scalar is guaranteed to be below L, either because we checked (s),
// or because we reduced it modulo L (h_ram). L is under 2^253, so
    // bits 253 to 255 are guaranteed to be zero. No need to test them.
//
// Note however that L is very close to 2^252, so bit 252 is almost
// always zero. If we were to start at bit 251, the tests wouldn't
// catch the off-by-one error (constructing one that does would be
// prohibitively expensive).
//
// We should still check bit 252, though.
int i = 252;
while (i > 0 && scalar_bit(scalar, i) == 0) {
i--;
}
ctx->next_check = (u8)(i + 1);
ctx->next_index = -1;
ctx->next_digit = -1;
}
static int slide_step(slide_ctx *ctx, int width, int i, const u8 scalar[32])
{
if (i == ctx->next_check) {
if (scalar_bit(scalar, i) == scalar_bit(scalar, i - 1)) {
ctx->next_check--;
} else {
// compute digit of next window
int w = MIN(width, i + 1);
int v = -(scalar_bit(scalar, i) << (w-1));
FOR_T (int, j, 0, w-1) {
v += scalar_bit(scalar, i-(w-1)+j) << j;
}
v += scalar_bit(scalar, i-w);
int lsb = v & (~v + 1); // smallest bit of v
int s = ( ((lsb & 0xAA) != 0) // log2(lsb)
| (((lsb & 0xCC) != 0) << 1)
| (((lsb & 0xF0) != 0) << 2));
ctx->next_index = (i16)(i-(w-1)+s);
ctx->next_digit = (i8) (v >> s );
ctx->next_check -= w;
}
}
return i == ctx->next_index ? ctx->next_digit: 0;
}
#define P_W_WIDTH 3 // Affects the size of the stack
#define B_W_WIDTH 5 // Affects the size of the binary
#define P_W_SIZE (1<<(P_W_WIDTH-2))
// P = [b]B + [p]P, where B is the base point
//
// Variable time! Internal buffers are not wiped! Inputs must not be secret!
// => Use only to *check* signatures.
static void ge_double_scalarmult_vartime(ge *P, const u8 p[32], const u8 b[32])
{
// cache P window for addition
ge_cached cP[P_W_SIZE];
{
ge P2, tmp;
ge_double(&P2, P, &tmp);
ge_cache(&cP[0], P);
FOR (i, 0, (P_W_SIZE)-1) {
ge_add(&tmp, &P2, &cP[i]);
ge_cache(&cP[i+1], &tmp);
}
}
// Merged double and add ladder, fused with sliding
slide_ctx p_slide; slide_init(&p_slide, p);
slide_ctx b_slide; slide_init(&b_slide, b);
int i = MAX(p_slide.next_check, b_slide.next_check);
ge *sum = P;
ge_zero(sum);
while (i >= 0) {
ge tmp;
ge_double(sum, sum, &tmp);
int p_digit = slide_step(&p_slide, P_W_WIDTH, i, p);
int b_digit = slide_step(&b_slide, B_W_WIDTH, i, b);
if (p_digit > 0) { ge_add(sum, sum, &cP[ p_digit / 2]); }
if (p_digit < 0) { ge_sub(sum, sum, &cP[-p_digit / 2]); }
fe t1, t2;
if (b_digit > 0) { ge_madd(sum, sum,
window_Yp[ b_digit / 2],
window_Ym[ b_digit / 2],
window_T2[ b_digit / 2], t1, t2); }
if (b_digit < 0) { ge_msub(sum, sum,
window_Yp[-b_digit / 2],
window_Ym[-b_digit / 2],
window_T2[-b_digit / 2], t1, t2); }
i--;
}
}
// 5-bit signed comb in cached format (Niels coordinates, Z=1)
static const fe comb_Yp[16] = {
{2615675, 9989699, 17617367, -13953520, -8802803,
1447286, -8909978, -270892, -12199203, -11617247},
{-1271192, 4785266, -29856067, -6036322, -10435381,
15493337, 20321440, -6036064, 15902131, 13420909},
{-26170888, -12891603, 9568996, -6197816, 26424622,
16308973, -4518568, -3771275, -15522557, 3991142},
{-25875044, 1958396, 19442242, -9809943, -26099408,
-18589, -30794750, -14100910, 4971028, -10535388},
{-13896937, -7357727, -12131124, 617289, -33188817,
10080542, 6402555, 10779157, 1176712, 2472642},
{71503, 12662254, -17008072, -8370006, 23408384,
-12897959, 32287612, 11241906, -16724175, 15336924},
{27397666, 4059848, 23573959, 8868915, -10602416,
-10456346, -22812831, -9666299, 31810345, -2695469},
{-3418193, -694531, 2320482, -11850408, -1981947,
-9606132, 23743894, 3933038, -25004889, -4478918},
{-4448372, 5537982, -4805580, 14016777, 15544316,
16039459, -7143453, -8003716, -21904564, 8443777},
{32495180, 15749868, 2195406, -15542321, -3213890,
-4030779, -2915317, 12751449, -1872493, 11926798},
{26779741, 12553580, -24344000, -4071926, -19447556,
-13464636, 21989468, 7826656, -17344881, 10055954},
{5848288, -1639207, -10452929, -11760637, 6484174,
-5895268, -11561603, 587105, -19220796, 14378222},
{32050187, 12536702, 9206308, -10016828, -13333241,
-4276403, -24225594, 14562479, -31803624, -9967812},
{23536033, -6219361, 199701, 4574817, 30045793,
7163081, -2244033, 883497, 10960746, -14779481},
{-8143354, -11558749, 15772067, 14293390, 5914956,
-16702904, -7410985, 7536196, 6155087, 16571424},
{6211591, -11166015, 24568352, 2768318, -10822221,
11922793, 33211827, 3852290, -13160369, -8855385},
};
static const fe comb_Ym[16] = {
{8873912, 14981221, 13714139, 6923085, 25481101,
4243739, 4646647, -203847, 9015725, -16205935},
{-1827892, 15407265, 2351140, -11810728, 28403158,
-1487103, -15057287, -4656433, -3780118, -1145998},
{-30623162, -11845055, -11327147, -16008347, 17564978,
-1449578, -20580262, 14113978, 29643661, 15580734},
{-15109423, 13348938, -14756006, 14132355, 30481360,
1830723, -240510, 9371801, -13907882, 8024264},
{25119567, 5628696, 10185251, -9279452, 683770,
-14523112, -7982879, -16450545, 1431333, -13253541},
{-8390493, 1276691, 19008763, -12736675, -9249429,
-12526388, 17434195, -13761261, 18962694, -1227728},
{26361856, -12366343, 8941415, 15163068, 7069802,
-7240693, -18656349, 8167008, 31106064, -1670658},
{-5677136, -11012483, -1246680, -6422709, 14772010,
1829629, -11724154, -15914279, -18177362, 1301444},
{937094, 12383516, -22597284, 7580462, -18767748,
13813292, -2323566, 13503298, 11510849, -10561992},
{28028043, 14715827, -6558532, -1773240, 27563607,
-9374554, 3201863, 8865591, -16953001, 7659464},
{13628467, 5701368, 4674031, 11935670, 11461401,
10699118, 31846435, -114971, -8269924, -14777505},
{-22124018, -12859127, 11966893, 1617732, 30972446,
-14350095, -21822286, 8369862, -29443219, -15378798},
{290131, -471434, 8840522, -2654851, 25963762,
-11578288, -7227978, 13847103, 30641797, 6003514},
{-23547482, -11475166, -11913550, 9374455, 22813401,
-5707910, 26635288, 9199956, 20574690, 2061147},
{9715324, 7036821, -17981446, -11505533, 26555178,
-3571571, 5697062, -14128022, 2795223, 9694380},
{14864569, -6319076, -3080, -8151104, 4994948,
-1572144, -41927, 9269803, 13881712, -13439497},
};
static const fe comb_T2[16] = {
{-18494317, 2686822, 18449263, -13905325, 5966562,
-3368714, 2738304, -8583315, 15987143, 12180258},
{-33336513, -13705917, -18473364, -5039204, -4268481,
-4136039, -8192211, -2935105, -19354402, 5995895},
{-19753139, -1729018, 21880604, 13471713, 28315373,
-8530159, -17492688, 11730577, -8790216, 3942124},
{17278020, 3905045, 29577748, 11151940, 18451761,
-6801382, 31480073, -13819665, 26308905, 10868496},
{26937294, 3313561, 28601532, -3497112, -22814130,
11073654, 8956359, -16757370, 13465868, 16623983},
{-5468054, 6059101, -31275300, 2469124, 26532937,
8152142, 6423741, -11427054, -15537747, -10938247},
{-11303505, -9659620, -12354748, -9331434, 19501116,
-9146390, -841918, -5315657, 8903828, 8839982},
{16603354, -215859, 1591180, 3775832, -705596,
-13913449, 26574704, 14963118, 19649719, 6562441},
{33188866, -12232360, -24929148, -6133828, 21818432,
11040754, -3041582, -3524558, -29364727, -10264096},
{-20704194, -12560423, -1235774, -785473, 13240395,
4831780, -472624, -3796899, 25480903, -15422283},
{-2204347, -16313180, -21388048, 7520851, -8697745,
-14460961, 20894017, 12210317, -475249, -2319102},
{-16407882, 4940236, -21194947, 10781753, 22248400,
14425368, 14866511, -7552907, 12148703, -7885797},
{16376744, 15908865, -30663553, 4663134, -30882819,
-10105163, 19294784, -10800440, -33259252, 2563437},
{30208741, 11594088, -15145888, 15073872, 5279309,
-9651774, 8273234, 4796404, -31270809, -13316433},
{-17802574, 14455251, 27149077, -7832700, -29163160,
-7246767, 17498491, -4216079, 31788733, -14027536},
{-25233439, -9389070, -6618212, -3268087, -521386,
-7350198, 21035059, -14970947, 25910190, 11122681},
};
// p = [scalar]B, where B is the base point
static void ge_scalarmult_base(ge *p, const u8 scalar[32])
{
    // 5-bit signed comb, from Mike Hamburg's
// Fast and compact elliptic-curve cryptography (2012)
static const u8 half_mod_L[32] = { // 1 / 2 modulo L
0xf7, 0xe9, 0x7a, 0x2e, 0x8d, 0x31, 0x09, 0x2c,
0x6b, 0xce, 0x7b, 0x51, 0xef, 0x7c, 0x6f, 0x0a,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
};
static const u8 half_ones[32] = { // (2^255 - 1) / 2 modulo L
0x42, 0x9a, 0xa3, 0xba, 0x23, 0xa5, 0xbf, 0xcb,
0x11, 0x5b, 0x9d, 0xc5, 0x74, 0x95, 0xf3, 0xb6,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07,
};
    // "All bits set" form: a set bit encodes +1, a clear bit encodes -1
u8 s_scalar[32];
mul_add(s_scalar, scalar, half_mod_L, half_ones);
// Double and add ladder
fe yp, ym, t2, n2, a; // temporaries for addition
ge dbl; // temporary for doubling
ge_zero(p);
for (int i = 50; i >= 0; i--) {
if (i < 50) {
ge_double(p, p, &dbl);
}
fe_1(yp);
fe_1(ym);
fe_0(t2);
u8 teeth = (u8)((scalar_bit(s_scalar, i) ) +
(scalar_bit(s_scalar, i + 51) << 1) +
(scalar_bit(s_scalar, i + 102) << 2) +
(scalar_bit(s_scalar, i + 153) << 3) +
(scalar_bit(s_scalar, i + 204) << 4));
u8 high = teeth >> 4;
u8 index = (teeth ^ (high - 1)) & 15;
FOR (j, 0, 16) {
i32 select = 1 & (((j ^ index) - 1) >> 8);
fe_ccopy(yp, comb_Yp[j], select);
fe_ccopy(ym, comb_Ym[j], select);
fe_ccopy(t2, comb_T2[j], select);
}
fe_neg(n2, t2);
fe_cswap(t2, n2, high);
fe_cswap(yp, ym, high);
ge_madd(p, p, ym, yp, n2, a, t2); // reuse t2 as temporary
}
WIPE_CTX(&dbl);
WIPE_BUFFER(yp); WIPE_BUFFER(t2); WIPE_BUFFER(a);
WIPE_BUFFER(ym); WIPE_BUFFER(n2);
WIPE_BUFFER(s_scalar);
}
void crypto_sign_public_key_custom_hash(u8 public_key[32],
const u8 secret_key[32],
const crypto_sign_vtable *hash)
{
u8 a[64];
hash->hash(a, secret_key, 32);
trim_scalar(a);
ge A;
ge_scalarmult_base(&A, a);
ge_tobytes(public_key, &A);
WIPE_BUFFER(a);
WIPE_CTX(&A);
}
void crypto_sign_public_key(u8 public_key[32], const u8 secret_key[32])
{
crypto_sign_public_key_custom_hash(public_key, secret_key,
&crypto_blake2b_vtable);
}
void crypto_sign_init_first_pass_custom_hash(crypto_sign_ctx_abstract *ctx,
const u8 secret_key[32],
const u8 public_key[32],
const crypto_sign_vtable *hash)
{
ctx->hash = hash; // set vtable
u8 *a = ctx->buf;
u8 *prefix = ctx->buf + 32;
ctx->hash->hash(a, secret_key, 32);
trim_scalar(a);
if (public_key == 0) {
crypto_sign_public_key_custom_hash(ctx->pk, secret_key, ctx->hash);
} else {
FOR (i, 0, 32) {
ctx->pk[i] = public_key[i];
}
}
// Deterministic part of EdDSA: Construct a nonce by hashing the message
// instead of generating a random number.
// An actual random number would work just fine, and would save us
// the trouble of hashing the message twice. If we did that
    // however, the user could misuse it and reuse the nonce.
ctx->hash->init (ctx);
ctx->hash->update(ctx, prefix , 32);
}
void crypto_sign_init_first_pass(crypto_sign_ctx_abstract *ctx,
const u8 secret_key[32],
const u8 public_key[32])
{
crypto_sign_init_first_pass_custom_hash(ctx, secret_key, public_key,
&crypto_blake2b_vtable);
}
void crypto_sign_update(crypto_sign_ctx_abstract *ctx,
const u8 *msg, size_t msg_size)
{
ctx->hash->update(ctx, msg, msg_size);
}
void crypto_sign_init_second_pass(crypto_sign_ctx_abstract *ctx)
{
u8 *r = ctx->buf + 32;
u8 *half_sig = ctx->buf + 64;
ctx->hash->final(ctx, r);
reduce(r);
// first half of the signature = "random" nonce times the base point
ge R;
ge_scalarmult_base(&R, r);
ge_tobytes(half_sig, &R);
WIPE_CTX(&R);
// Hash R, the public key, and the message together.
// It cannot be done in parallel with the first hash.
ctx->hash->init (ctx);
ctx->hash->update(ctx, half_sig, 32);
ctx->hash->update(ctx, ctx->pk , 32);
}
void crypto_sign_final(crypto_sign_ctx_abstract *ctx, u8 signature[64])
{
u8 *a = ctx->buf;
u8 *r = ctx->buf + 32;
u8 *half_sig = ctx->buf + 64;
u8 h_ram[64];
ctx->hash->final(ctx, h_ram);
reduce(h_ram);
FOR (i, 0, 32) {
signature[i] = half_sig[i];
}
mul_add(signature + 32, h_ram, a, r); // s = h_ram * a + r
WIPE_BUFFER(h_ram);
crypto_wipe(ctx, ctx->hash->ctx_size);
}
void crypto_sign(u8 signature[64],
const u8 secret_key[32],
const u8 public_key[32],
const u8 *message, size_t message_size)
{
crypto_sign_ctx ctx;
crypto_sign_ctx_abstract *actx = (crypto_sign_ctx_abstract*)&ctx;
crypto_sign_init_first_pass (actx, secret_key, public_key);
crypto_sign_update (actx, message, message_size);
crypto_sign_init_second_pass(actx);
crypto_sign_update (actx, message, message_size);
crypto_sign_final (actx, signature);
}
void crypto_check_init_custom_hash(crypto_check_ctx_abstract *ctx,
const u8 signature[64],
const u8 public_key[32],
const crypto_sign_vtable *hash)
{
ctx->hash = hash; // set vtable
FOR (i, 0, 64) { ctx->buf[i] = signature [i]; }
FOR (i, 0, 32) { ctx->pk [i] = public_key[i]; }
ctx->hash->init (ctx);
ctx->hash->update(ctx, signature , 32);
ctx->hash->update(ctx, public_key, 32);
}
void crypto_check_init(crypto_check_ctx_abstract *ctx,
const u8 signature[64],
const u8 public_key[32])
{
crypto_check_init_custom_hash(ctx, signature, public_key,
&crypto_blake2b_vtable);
}
void crypto_check_update(crypto_check_ctx_abstract *ctx,
const u8 *msg, size_t msg_size)
{
ctx->hash->update(ctx, msg, msg_size);
}
int crypto_check_final(crypto_check_ctx_abstract *ctx)
{
ge A;
u8 *h_ram = ctx->pk; // save stack space
u8 *R_check = ctx->pk; // save stack space
u8 *R = ctx->buf; // R
u8 *s = ctx->buf + 32; // s
ge *diff = &A; // -A is overwritten...
if (ge_frombytes_neg_vartime(&A, ctx->pk) ||
is_above_L(s)) { // prevent s malleability
return -1;
}
{
u8 tmp[64];
ctx->hash->final(ctx, tmp);
reduce(tmp);
FOR (i, 0, 32) { // the extra copy saves 32 bytes of stack
h_ram[i] = tmp[i];
}
}
ge_double_scalarmult_vartime(&A, h_ram, s); // ...here
ge_tobytes(R_check, diff); // R_check = s*B - h_ram*A
return crypto_verify32(R, R_check); // R == R_check ? OK : fail
// No secret, no wipe
}
int crypto_check(const u8 signature[64],
const u8 public_key[32],
const u8 *message, size_t message_size)
{
crypto_check_ctx ctx;
crypto_check_ctx_abstract *actx = (crypto_check_ctx_abstract*)&ctx;
crypto_check_init (actx, signature, public_key);
crypto_check_update(actx, message, message_size);
return crypto_check_final(actx);
}
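// Illustrative sketch (not from the original source): a sign/verify round
// trip with the one-shot functions above. The key and message buffers are
// hypothetical placeholders.
static int example_sign_then_check(const u8 secret_key[32],
                                   const u8 *message, size_t message_size)
{
    u8 public_key[32];
    u8 signature [64];
    crypto_sign_public_key(public_key, secret_key);
    crypto_sign(signature, secret_key, public_key, message, message_size);
    return crypto_check(signature, public_key, message, message_size); // 0 if valid
}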
////////////////////
/// Key exchange ///
////////////////////
void crypto_key_exchange(u8 shared_key[32],
const u8 your_secret_key [32],
const u8 their_public_key[32])
{
crypto_x25519(shared_key, your_secret_key, their_public_key);
crypto_hchacha20(shared_key, shared_key, zero);
}
////////////////////////////////
/// Authenticated encryption ///
////////////////////////////////
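// The MAC covers: the associated data, zero padding to a 16-byte boundary,
// the cipher text, more zero padding, then both sizes as little-endian
// 64-bit words -- the same input layout as the RFC 8439 AEAD construction.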
static void lock_auth(u8 mac[16], const u8 auth_key[32],
const u8 *ad , size_t ad_size,
const u8 *cipher_text, size_t text_size)
{
u8 sizes[16]; // Not secret, not wiped
store64_le(sizes + 0, ad_size);
store64_le(sizes + 8, text_size);
crypto_poly1305_ctx poly_ctx; // auto wiped...
crypto_poly1305_init (&poly_ctx, auth_key);
crypto_poly1305_update(&poly_ctx, ad , ad_size);
crypto_poly1305_update(&poly_ctx, zero , ALIGN(ad_size, 16));
crypto_poly1305_update(&poly_ctx, cipher_text, text_size);
crypto_poly1305_update(&poly_ctx, zero , ALIGN(text_size, 16));
crypto_poly1305_update(&poly_ctx, sizes , 16);
crypto_poly1305_final (&poly_ctx, mac); // ...here
}
void crypto_lock_aead(u8 mac[16],
u8 *cipher_text,
const u8 key[32],
const u8 nonce[24],
const u8 *ad , size_t ad_size,
const u8 *plain_text, size_t text_size)
{
u8 sub_key[32];
u8 auth_key[64]; // "Wasting" the whole Chacha block is faster
crypto_hchacha20(sub_key, key, nonce);
crypto_chacha20(auth_key, 0, 64, sub_key, nonce + 16);
crypto_chacha20_ctr(cipher_text, plain_text, text_size,
sub_key, nonce + 16, 1);
lock_auth(mac, auth_key, ad, ad_size, cipher_text, text_size);
WIPE_BUFFER(sub_key);
WIPE_BUFFER(auth_key);
}
int crypto_unlock_aead(u8 *plain_text,
const u8 key[32],
const u8 nonce[24],
const u8 mac[16],
const u8 *ad , size_t ad_size,
const u8 *cipher_text, size_t text_size)
{
u8 sub_key[32];
u8 auth_key[64]; // "Wasting" the whole Chacha block is faster
crypto_hchacha20(sub_key, key, nonce);
crypto_chacha20(auth_key, 0, 64, sub_key, nonce + 16);
u8 real_mac[16];
lock_auth(real_mac, auth_key, ad, ad_size, cipher_text, text_size);
WIPE_BUFFER(auth_key);
if (crypto_verify16(mac, real_mac)) {
WIPE_BUFFER(sub_key);
WIPE_BUFFER(real_mac);
return -1;
}
crypto_chacha20_ctr(plain_text, cipher_text, text_size,
sub_key, nonce + 16, 1);
WIPE_BUFFER(sub_key);
WIPE_BUFFER(real_mac);
return 0;
}
void crypto_lock(u8 mac[16],
u8 *cipher_text,
const u8 key[32],
const u8 nonce[24],
const u8 *plain_text, size_t text_size)
{
crypto_lock_aead(mac, cipher_text, key, nonce, 0, 0, plain_text, text_size);
}
int crypto_unlock(u8 *plain_text,
const u8 key[32],
const u8 nonce[24],
const u8 mac[16],
const u8 *cipher_text, size_t text_size)
{
return crypto_unlock_aead(plain_text, key, nonce, mac, 0, 0,
cipher_text, text_size);
}
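// Illustrative sketch (not from the original source): locking then
// unlocking a message with the functions above. Key, nonce and buffer
// names are hypothetical placeholders; a (key, nonce) pair must never
// be reused.
static int example_lock_unlock(u8 *plain_text, u8 *cipher_text,
                               const u8 key[32], const u8 nonce[24],
                               const u8 *message, size_t size)
{
    u8 mac[16];
    crypto_lock(mac, cipher_text, key, nonce, message, size);
    // returns 0 on success, -1 if the MAC does not match
    return crypto_unlock(plain_text, key, nonce, mac, cipher_text, size);
}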
|
977252.c | /*******************************************************************************
*
* Module Name: dsmthdat - control method arguments and local variables
*
******************************************************************************/
/*
* Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include "acpi.h"
#include "accommon.h"
#include "acdispat.h"
#include "acnamesp.h"
#include "acinterp.h"
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME ("dsmthdat")
/* Local prototypes */
static void
AcpiDsMethodDataDeleteValue (
UINT8 Type,
UINT32 Index,
ACPI_WALK_STATE *WalkState);
static ACPI_STATUS
AcpiDsMethodDataSetValue (
UINT8 Type,
UINT32 Index,
ACPI_OPERAND_OBJECT *Object,
ACPI_WALK_STATE *WalkState);
#ifdef ACPI_OBSOLETE_FUNCTIONS
ACPI_OBJECT_TYPE
AcpiDsMethodDataGetType (
UINT16 Opcode,
UINT32 Index,
ACPI_WALK_STATE *WalkState);
#endif
/*******************************************************************************
*
* FUNCTION: AcpiDsMethodDataInit
*
* PARAMETERS: WalkState - Current walk state object
*
* RETURN: Status
*
* DESCRIPTION: Initialize the data structures that hold the method's arguments
* and locals. The data struct is an array of namespace nodes for
* each - this allows RefOf and DeRefOf to work properly for these
* special data types.
*
* NOTES: WalkState fields are initialized to zero by the
* ACPI_ALLOCATE_ZEROED().
*
* A pseudo-Namespace Node is assigned to each argument and local
* so that RefOf() can return a pointer to the Node.
*
******************************************************************************/
void
AcpiDsMethodDataInit (
ACPI_WALK_STATE *WalkState)
{
UINT32 i;
ACPI_FUNCTION_TRACE (DsMethodDataInit);
/* Init the method arguments */
for (i = 0; i < ACPI_METHOD_NUM_ARGS; i++)
{
ACPI_MOVE_32_TO_32 (&WalkState->Arguments[i].Name,
NAMEOF_ARG_NTE);
WalkState->Arguments[i].Name.Integer |= (i << 24);
WalkState->Arguments[i].DescriptorType = ACPI_DESC_TYPE_NAMED;
WalkState->Arguments[i].Type = ACPI_TYPE_ANY;
WalkState->Arguments[i].Flags = ANOBJ_METHOD_ARG;
}
/* Init the method locals */
for (i = 0; i < ACPI_METHOD_NUM_LOCALS; i++)
{
ACPI_MOVE_32_TO_32 (&WalkState->LocalVariables[i].Name,
NAMEOF_LOCAL_NTE);
WalkState->LocalVariables[i].Name.Integer |= (i << 24);
WalkState->LocalVariables[i].DescriptorType = ACPI_DESC_TYPE_NAMED;
WalkState->LocalVariables[i].Type = ACPI_TYPE_ANY;
WalkState->LocalVariables[i].Flags = ANOBJ_METHOD_LOCAL;
}
return_VOID;
}
/*******************************************************************************
*
* FUNCTION: AcpiDsMethodDataDeleteAll
*
* PARAMETERS: WalkState - Current walk state object
*
* RETURN: None
*
* DESCRIPTION: Delete method locals and arguments. Arguments are only
* deleted if this method was called from another method.
*
******************************************************************************/
void
AcpiDsMethodDataDeleteAll (
ACPI_WALK_STATE *WalkState)
{
UINT32 Index;
ACPI_FUNCTION_TRACE (DsMethodDataDeleteAll);
/* Detach the locals */
for (Index = 0; Index < ACPI_METHOD_NUM_LOCALS; Index++)
{
if (WalkState->LocalVariables[Index].Object)
{
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Deleting Local%u=%p\n",
Index, WalkState->LocalVariables[Index].Object));
/* Detach object (if present) and remove a reference */
AcpiNsDetachObject (&WalkState->LocalVariables[Index]);
}
}
/* Detach the arguments */
for (Index = 0; Index < ACPI_METHOD_NUM_ARGS; Index++)
{
if (WalkState->Arguments[Index].Object)
{
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Deleting Arg%u=%p\n",
Index, WalkState->Arguments[Index].Object));
/* Detach object (if present) and remove a reference */
AcpiNsDetachObject (&WalkState->Arguments[Index]);
}
}
return_VOID;
}
/*******************************************************************************
*
* FUNCTION: AcpiDsMethodDataInitArgs
*
* PARAMETERS: *Params - Pointer to a parameter list for the method
* MaxParamCount - The arg count for this method
* WalkState - Current walk state object
*
* RETURN: Status
*
* DESCRIPTION: Initialize arguments for a method. The parameter list is a list
* of ACPI operand objects, either null terminated or whose length
* is defined by MaxParamCount.
*
******************************************************************************/
ACPI_STATUS
AcpiDsMethodDataInitArgs (
ACPI_OPERAND_OBJECT **Params,
UINT32 MaxParamCount,
ACPI_WALK_STATE *WalkState)
{
ACPI_STATUS Status;
UINT32 Index = 0;
ACPI_FUNCTION_TRACE_PTR (DsMethodDataInitArgs, Params);
if (!Params)
{
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC,
"No parameter list passed to method\n"));
return_ACPI_STATUS (AE_OK);
}
/* Copy passed parameters into the new method stack frame */
while ((Index < ACPI_METHOD_NUM_ARGS) &&
(Index < MaxParamCount) &&
Params[Index])
{
/*
* A valid parameter.
* Store the argument in the method/walk descriptor.
* Do not copy the arg in order to implement call by reference
*/
Status = AcpiDsMethodDataSetValue (
ACPI_REFCLASS_ARG, Index, Params[Index], WalkState);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
Index++;
}
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "%u args passed to method\n", Index));
return_ACPI_STATUS (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: AcpiDsMethodDataGetNode
*
* PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
* ACPI_REFCLASS_ARG
* Index - Which Local or Arg whose type to get
* WalkState - Current walk state object
* Node - Where the node is returned.
*
* RETURN: Status and node
*
* DESCRIPTION: Get the Node associated with a local or arg.
*
******************************************************************************/
ACPI_STATUS
AcpiDsMethodDataGetNode (
UINT8 Type,
UINT32 Index,
ACPI_WALK_STATE *WalkState,
ACPI_NAMESPACE_NODE **Node)
{
ACPI_FUNCTION_TRACE (DsMethodDataGetNode);
/*
* Method Locals and Arguments are supported
*/
switch (Type)
{
case ACPI_REFCLASS_LOCAL:
if (Index > ACPI_METHOD_MAX_LOCAL)
{
ACPI_ERROR ((AE_INFO,
"Local index %u is invalid (max %u)",
Index, ACPI_METHOD_MAX_LOCAL));
return_ACPI_STATUS (AE_AML_INVALID_INDEX);
}
/* Return a pointer to the pseudo-node */
*Node = &WalkState->LocalVariables[Index];
break;
case ACPI_REFCLASS_ARG:
if (Index > ACPI_METHOD_MAX_ARG)
{
ACPI_ERROR ((AE_INFO,
"Arg index %u is invalid (max %u)",
Index, ACPI_METHOD_MAX_ARG));
return_ACPI_STATUS (AE_AML_INVALID_INDEX);
}
/* Return a pointer to the pseudo-node */
*Node = &WalkState->Arguments[Index];
break;
default:
ACPI_ERROR ((AE_INFO, "Type %u is invalid", Type));
return_ACPI_STATUS (AE_TYPE);
}
return_ACPI_STATUS (AE_OK);
}
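/*
 * Example (illustrative, not part of the original source): a caller that
 * needs the pseudo-node backing Local0 would write
 *
 *     Status = AcpiDsMethodDataGetNode (ACPI_REFCLASS_LOCAL, 0, WalkState, &Node);
 *
 * which simply sets Node to &WalkState->LocalVariables[0].
 */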
/*******************************************************************************
*
* FUNCTION: AcpiDsMethodDataSetValue
*
* PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
* ACPI_REFCLASS_ARG
* Index - Which Local or Arg to get
* Object - Object to be inserted into the stack entry
* WalkState - Current walk state object
*
* RETURN: Status
*
* DESCRIPTION: Insert an object onto the method stack at entry Opcode:Index.
* Note: There is no "implicit conversion" for locals.
*
******************************************************************************/
static ACPI_STATUS
AcpiDsMethodDataSetValue (
UINT8 Type,
UINT32 Index,
ACPI_OPERAND_OBJECT *Object,
ACPI_WALK_STATE *WalkState)
{
ACPI_STATUS Status;
ACPI_NAMESPACE_NODE *Node;
ACPI_FUNCTION_TRACE (DsMethodDataSetValue);
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC,
"NewObj %p Type %2.2X, Refs=%u [%s]\n", Object,
Type, Object->Common.ReferenceCount,
AcpiUtGetTypeName (Object->Common.Type)));
/* Get the namespace node for the arg/local */
Status = AcpiDsMethodDataGetNode (Type, Index, WalkState, &Node);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
/*
* Increment ref count so object can't be deleted while installed.
* NOTE: We do not copy the object in order to preserve the call by
* reference semantics of ACPI Control Method invocation.
* (See ACPI Specification 2.0C)
*/
AcpiUtAddReference (Object);
/* Install the object */
Node->Object = Object;
return_ACPI_STATUS (Status);
}
/*******************************************************************************
*
* FUNCTION: AcpiDsMethodDataGetValue
*
* PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
* ACPI_REFCLASS_ARG
* Index - Which localVar or argument to get
* WalkState - Current walk state object
* DestDesc - Where Arg or Local value is returned
*
* RETURN: Status
*
* DESCRIPTION: Retrieve value of selected Arg or Local for this method
* Used only in AcpiExResolveToValue().
*
******************************************************************************/
ACPI_STATUS
AcpiDsMethodDataGetValue (
UINT8 Type,
UINT32 Index,
ACPI_WALK_STATE *WalkState,
ACPI_OPERAND_OBJECT **DestDesc)
{
ACPI_STATUS Status;
ACPI_NAMESPACE_NODE *Node;
ACPI_OPERAND_OBJECT *Object;
ACPI_FUNCTION_TRACE (DsMethodDataGetValue);
/* Validate the object descriptor */
if (!DestDesc)
{
ACPI_ERROR ((AE_INFO, "Null object descriptor pointer"));
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
/* Get the namespace node for the arg/local */
Status = AcpiDsMethodDataGetNode (Type, Index, WalkState, &Node);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
/* Get the object from the node */
Object = Node->Object;
/* Examine the returned object, it must be valid. */
if (!Object)
{
/*
* Index points to uninitialized object.
* This means that either 1) The expected argument was
* not passed to the method, or 2) A local variable
* was referenced by the method (via the ASL)
* before it was initialized. Either case is an error.
*/
/* If slack enabled, init the LocalX/ArgX to an Integer of value zero */
if (AcpiGbl_EnableInterpreterSlack)
{
Object = AcpiUtCreateIntegerObject ((UINT64) 0);
if (!Object)
{
return_ACPI_STATUS (AE_NO_MEMORY);
}
Node->Object = Object;
}
/* Otherwise, return the error */
else switch (Type)
{
case ACPI_REFCLASS_ARG:
ACPI_ERROR ((AE_INFO,
"Uninitialized Arg[%u] at node %p",
Index, Node));
return_ACPI_STATUS (AE_AML_UNINITIALIZED_ARG);
case ACPI_REFCLASS_LOCAL:
/*
* No error message for this case, will be trapped again later to
* detect and ignore cases of Store(LocalX,LocalX)
*/
return_ACPI_STATUS (AE_AML_UNINITIALIZED_LOCAL);
default:
            ACPI_ERROR ((AE_INFO, "Not an Arg/Local opcode: 0x%X", Type));
return_ACPI_STATUS (AE_AML_INTERNAL);
}
}
/*
* The Index points to an initialized and valid object.
* Return an additional reference to the object
*/
*DestDesc = Object;
AcpiUtAddReference (Object);
return_ACPI_STATUS (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: AcpiDsMethodDataDeleteValue
*
* PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
* ACPI_REFCLASS_ARG
* Index - Which localVar or argument to delete
* WalkState - Current walk state object
*
* RETURN: None
*
* DESCRIPTION: Delete the entry at Opcode:Index. Inserts
* a null into the stack slot after the object is deleted.
*
******************************************************************************/
static void
AcpiDsMethodDataDeleteValue (
UINT8 Type,
UINT32 Index,
ACPI_WALK_STATE *WalkState)
{
ACPI_STATUS Status;
ACPI_NAMESPACE_NODE *Node;
ACPI_OPERAND_OBJECT *Object;
ACPI_FUNCTION_TRACE (DsMethodDataDeleteValue);
/* Get the namespace node for the arg/local */
Status = AcpiDsMethodDataGetNode (Type, Index, WalkState, &Node);
if (ACPI_FAILURE (Status))
{
return_VOID;
}
/* Get the associated object */
Object = AcpiNsGetAttachedObject (Node);
/*
* Undefine the Arg or Local by setting its descriptor
* pointer to NULL. Locals/Args can contain both
* ACPI_OPERAND_OBJECTS and ACPI_NAMESPACE_NODEs
*/
Node->Object = NULL;
if ((Object) &&
(ACPI_GET_DESCRIPTOR_TYPE (Object) == ACPI_DESC_TYPE_OPERAND))
{
/*
* There is a valid object.
* Decrement the reference count by one to balance the
* increment when the object was stored.
*/
AcpiUtRemoveReference (Object);
}
return_VOID;
}
/*******************************************************************************
*
* FUNCTION: AcpiDsStoreObjectToLocal
*
* PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
* ACPI_REFCLASS_ARG
* Index - Which Local or Arg to set
* ObjDesc - Value to be stored
* WalkState - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Store a value in an Arg or Local. The ObjDesc is installed
* as the new value for the Arg or Local and the reference count
* for ObjDesc is incremented.
*
******************************************************************************/
ACPI_STATUS
AcpiDsStoreObjectToLocal (
UINT8 Type,
UINT32 Index,
ACPI_OPERAND_OBJECT *ObjDesc,
ACPI_WALK_STATE *WalkState)
{
ACPI_STATUS Status;
ACPI_NAMESPACE_NODE *Node;
ACPI_OPERAND_OBJECT *CurrentObjDesc;
ACPI_OPERAND_OBJECT *NewObjDesc;
ACPI_FUNCTION_TRACE (DsStoreObjectToLocal);
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Type=%2.2X Index=%u Obj=%p\n",
Type, Index, ObjDesc));
/* Parameter validation */
if (!ObjDesc)
{
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
/* Get the namespace node for the arg/local */
Status = AcpiDsMethodDataGetNode (Type, Index, WalkState, &Node);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
CurrentObjDesc = AcpiNsGetAttachedObject (Node);
if (CurrentObjDesc == ObjDesc)
{
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Obj=%p already installed!\n",
ObjDesc));
return_ACPI_STATUS (Status);
}
/*
* If the reference count on the object is more than one, we must
* take a copy of the object before we store. A reference count
* of exactly 1 means that the object was just created during the
* evaluation of an expression, and we can safely use it since it
* is not used anywhere else.
*/
NewObjDesc = ObjDesc;
if (ObjDesc->Common.ReferenceCount > 1)
{
Status = AcpiUtCopyIobjectToIobject (
ObjDesc, &NewObjDesc, WalkState);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
}
/*
* If there is an object already in this slot, we either
* have to delete it, or if this is an argument and there
* is an object reference stored there, we have to do
* an indirect store!
*/
if (CurrentObjDesc)
{
/*
* Check for an indirect store if an argument
         * contains an object reference (stored as a Node).
* We don't allow this automatic dereferencing for
* locals, since a store to a local should overwrite
* anything there, including an object reference.
*
* If both Arg0 and Local0 contain RefOf (Local4):
*
* Store (1, Arg0) - Causes indirect store to local4
* Store (1, Local0) - Stores 1 in local0, overwriting
* the reference to local4
* Store (1, DeRefof (Local0)) - Causes indirect store to local4
*
* Weird, but true.
*/
if (Type == ACPI_REFCLASS_ARG)
{
/*
* If we have a valid reference object that came from RefOf(),
* do the indirect store
*/
if ((ACPI_GET_DESCRIPTOR_TYPE (CurrentObjDesc) ==
ACPI_DESC_TYPE_OPERAND) &&
(CurrentObjDesc->Common.Type ==
ACPI_TYPE_LOCAL_REFERENCE) &&
(CurrentObjDesc->Reference.Class ==
ACPI_REFCLASS_REFOF))
{
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC,
"Arg (%p) is an ObjRef(Node), storing in node %p\n",
NewObjDesc, CurrentObjDesc));
/*
* Store this object to the Node (perform the indirect store)
* NOTE: No implicit conversion is performed, as per the ACPI
* specification rules on storing to Locals/Args.
*/
Status = AcpiExStoreObjectToNode (NewObjDesc,
CurrentObjDesc->Reference.Object, WalkState,
ACPI_NO_IMPLICIT_CONVERSION);
/* Remove local reference if we copied the object above */
if (NewObjDesc != ObjDesc)
{
AcpiUtRemoveReference (NewObjDesc);
}
return_ACPI_STATUS (Status);
}
}
/* Delete the existing object before storing the new one */
AcpiDsMethodDataDeleteValue (Type, Index, WalkState);
}
/*
* Install the Obj descriptor (*NewObjDesc) into
* the descriptor for the Arg or Local.
* (increments the object reference count by one)
*/
Status = AcpiDsMethodDataSetValue (Type, Index, NewObjDesc, WalkState);
/* Remove local reference if we copied the object above */
if (NewObjDesc != ObjDesc)
{
AcpiUtRemoveReference (NewObjDesc);
}
return_ACPI_STATUS (Status);
}
#ifdef ACPI_OBSOLETE_FUNCTIONS
/*******************************************************************************
*
* FUNCTION: AcpiDsMethodDataGetType
*
 * PARAMETERS: Opcode - Either AML_FIRST_LOCAL_OP or
* AML_FIRST_ARG_OP
* Index - Which Local or Arg whose type to get
* WalkState - Current walk state object
*
* RETURN: Data type of current value of the selected Arg or Local
*
* DESCRIPTION: Get the type of the object stored in the Local or Arg
*
******************************************************************************/
ACPI_OBJECT_TYPE
AcpiDsMethodDataGetType (
UINT16 Opcode,
UINT32 Index,
ACPI_WALK_STATE *WalkState)
{
ACPI_STATUS Status;
ACPI_NAMESPACE_NODE *Node;
ACPI_OPERAND_OBJECT *Object;
ACPI_FUNCTION_TRACE (DsMethodDataGetType);
/* Get the namespace node for the arg/local */
Status = AcpiDsMethodDataGetNode (Opcode, Index, WalkState, &Node);
if (ACPI_FAILURE (Status))
{
return_VALUE ((ACPI_TYPE_NOT_FOUND));
}
/* Get the object */
Object = AcpiNsGetAttachedObject (Node);
if (!Object)
{
/* Uninitialized local/arg, return TYPE_ANY */
return_VALUE (ACPI_TYPE_ANY);
}
/* Get the object type */
return_VALUE (Object->Type);
}
#endif
|
950514.c | #include "test.h"
/*
* Test division and remainder. Failure code will look like
* - 0x3d = id 0x3, 'd' for division
* - 0x3e = id 0x3, 'e' for remainder
*/
struct s_divrem {
unsigned int id;
long long a;
long long b;
long long a_div_b; /* a / b */
long long a_rem_b; /* a % b */
} s_cases[] = {
{0x1, 310LL, 100LL, 3LL, 10LL},
{0x2, 310LL, -100LL, -3LL, 10LL},
{0x3, -310LL, 100LL, -3LL, -10LL},
{0x4, -310LL, -100LL, 3LL, -10LL},
{0x5, 3000000000000010LL, 100LL, 30000000000000LL, 10LL},
{0x6, 3000000000000010LL, -100LL, -30000000000000LL, 10LL},
{0x7, -3000000000000010LL, 100LL, -30000000000000LL, -10LL},
{0x8, -3000000000000010LL, -100LL, 30000000000000LL, -10LL},
{0x9, 3000000000000010LL, 1000000000000LL, 3000LL, 10LL},
{0xa, 3000000000000010LL, -1000000000000LL, -3000LL, 10LL},
{0xb, -3000000000000010LL, 1000000000000LL, -3000LL, -10LL},
{0xc, -3000000000000010LL, -1000000000000LL, 3000LL, -10LL},
/*
* In next 3 cases, i386 tries (a / (b >> 13)) >> 13 = 8,
* may need to correct the quotient from 8 to 7.
*/
{0x11, 0x864200000000LL, 0x10c840000000LL, 8LL, 0LL},
{0x12, 0x864200000000LL, 0x10c840000001LL, 7LL, 0x10c83ffffff9LL},
{0x13, 0x864200000000LL, 0x10c840001fffLL, 7LL, 0x10c83fff2007LL},
};
struct u_divrem {
unsigned int id;
unsigned long long a;
unsigned long long b;
unsigned long long a_div_b;
unsigned long long a_rem_b;
} u_cases[] = {
{0x81, 310ULL, 100ULL, 3ULL, 10ULL},
{0x82, 3000000000000010ULL, 100ULL, 30000000000000ULL, 10ULL},
{0x83, 3000000000000010ULL, 1000000000000ULL, 3000ULL, 10ULL},
{0x91, 0x8000000000000000ULL, 3ULL, 0x2aaaaaaaaaaaaaaaULL, 2ULL},
{0x92, 0xffffffffffffffffULL, 3ULL, 0x5555555555555555ULL, 0ULL},
};
#define LEN(ary) (sizeof(ary) / sizeof(ary[0]))
void _m_a_i_n(void) {
int i;
for (i = 0; i < LEN(s_cases); i++) {
struct s_divrem *s = &s_cases[i];
if (s->a / s->b != s->a_div_b)
fail((s->id << 4) | 0xd);
if (s->a % s->b != s->a_rem_b)
fail((s->id << 4) | 0xe);
}
for (i = 0; i < LEN(u_cases); i++) {
struct u_divrem *u = &u_cases[i];
if (u->a / u->b != u->a_div_b)
fail((u->id << 4) | 0xd);
if (u->a % u->b != u->a_rem_b)
fail((u->id << 4) | 0xe);
}
finished();
}
|
859561.c | /*
Copyright (C) 2014, The University of Texas at Austin
This file is part of libflame and is available under the 3-Clause
BSD license, which can be found in the LICENSE file at the top-level
directory, or at http://opensource.org/licenses/BSD-3-Clause
*/
#include "FLAME.h"
#include "FLA_queue_omp.h"
#include "FLA_task_partitioning.h"
#if FLA_MULTITHREADING_MODEL == FLA_OPENMP
#define NUM_THREADS_MAX 128
#define NUM_TAGS_MAX 5
int factors[NUM_THREADS_MAX+1][NUM_TAGS_MAX];
void FLA_Task_partitioning_init()
{
int i;
for( i = 0; i < NUM_THREADS_MAX; ++i )
FLA_Task_partitioning_set( i, -1, -1, -1, -1, -1 );
}
void FLA_Task_partitioning_set( int n_threads, int tag0_val, int tag1_val, int tag2_val, int tag3_val, int tag4_val )
{
factors[n_threads][0] = tag0_val;
factors[n_threads][1] = tag1_val;
factors[n_threads][2] = tag2_val;
factors[n_threads][3] = tag3_val;
factors[n_threads][4] = tag4_val;
}
int FLA_task_get_num_partitions( int n_threads, int tag )
{
return factors[n_threads][tag];
}
int FLA_Task_compute_blocksize( int tag, FLA_Obj A, FLA_Obj A_proc, FLA_Quadrant from )
{
int n_threads = FLA_Queue_get_num_threads();
int A_size, A_proc_size;
int n_part;
int b;
// Determine the sizes of the matrix partitions.
A_size = FLA_task_determine_matrix_size( A, from );
A_proc_size = FLA_task_determine_matrix_size( A_proc, from );
// Determine the raw blocksize value.
n_part = FLA_task_get_num_partitions( n_threads, tag );
// Determine the blocksize based on the sign of the value from
// _get_num_partitions().
if( n_part > 0 )
{
b = FLA_task_determine_absolute_blocksize( A_size,
A_proc_size,
n_part );
}
else if( n_part < 0 )
{
b = FLA_task_determine_relative_blocksize( A_size,
A_proc_size,
f2c_abs(n_part) );
}
else
{
FLA_Print_message( "Detected blocksize of 0!", __FILE__, __LINE__ );
FLA_Abort();
}
return b;
}
int FLA_task_determine_matrix_size( FLA_Obj A, FLA_Quadrant from )
{
int r_val = 0;
// Determine the size of the matrix dimension along which we are moving.
switch( from )
{
case FLA_TOP:
case FLA_BOTTOM:
{
r_val = FLA_Obj_length( A );
break;
}
case FLA_LEFT:
case FLA_RIGHT:
{
r_val = FLA_Obj_width( A );
break;
}
case FLA_TL:
case FLA_TR:
case FLA_BL:
case FLA_BR:
{
// If A happens to be the full object, we need to use min_dim() here
// because the matrix might be rectangular. If A is the processed
// partition, it is very probably square, and min_dim() doesn't hurt.
r_val = FLA_Obj_min_dim( A );
break;
}
default:
FLA_Print_message( "Unexpected default in switch statement!", __FILE__, __LINE__ );
FLA_Abort();
}
return r_val;
}
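// Illustrative example (not in the original source): with A_size = 10 and
// n_part = 4, the base blocksize is b = 10 / 4 = 2 and z = 4 - (10 % 4) = 2,
// so the first two partitions get a blocksize of 2 and the last two get 3
// (2 + 2 + 3 + 3 = 10).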
int FLA_task_determine_relative_blocksize( int A_size, int A_proc_size, int n_part )
{
int b, i, z;
// Return early if the size is zero (scenario (A)).
if( A_size == 0 ) return 0;
// Compute the base blocksize (according to (1) above).
b = A_size / n_part;
// If the base blocksize is zero (because A_size < n_part), then override
// n_part and proceed as if only one partition was requested (scenario (B)).
if( b == 0 )
{
n_part = 1;
b = A_size;
}
// Compute partition index i.
i = A_proc_size / b;
// Compute the index z below which we will use the base blocksize.
z = n_part - (A_size % n_part);
// If the current partition index i is at least z, then increment the
// base blocksize (according to (2) above).
if( z <= i ) b++;
// Return the blocksize.
return b;
}
int FLA_task_determine_absolute_blocksize( int A_size, int A_proc_size, int nb_alg )
{
int A_unproc_size = A_size - A_proc_size;
int b;
b = min( A_unproc_size, nb_alg );
return b;
}
#endif
|
665678.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
#include <riscv_vector.h>
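// Note: the intrinsics exercised below are the RISC-V V widening integer
// multiply-accumulate forms: vwmacc (signed * signed), vwmaccu
// (unsigned * unsigned), and vwmaccsu (signed op1 * unsigned op2). Each
// computes acc[i] += op1[i] * op2[i] for the first vl elements, with the
// product widened to twice the source element width.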
// CHECK-RV32-LABEL: @test_vwmacc_vv_i16mf4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
vint8mf8_t op2, size_t vl) {
return vwmacc_vv_i16mf4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i16mf4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t acc, int8_t op1, vint8mf8_t op2,
size_t vl) {
return vwmacc_vx_i16mf4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i16mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
vint8mf4_t op2, size_t vl) {
return vwmacc_vv_i16mf2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i16mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t acc, int8_t op1, vint8mf4_t op2,
size_t vl) {
return vwmacc_vx_i16mf2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t acc, vint8mf2_t op1, vint8mf2_t op2,
size_t vl) {
return vwmacc_vv_i16m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vx_i16m1(vint16m1_t acc, int8_t op1, vint8mf2_t op2,
size_t vl) {
return vwmacc_vx_i16m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vint8m1_t op2,
size_t vl) {
return vwmacc_vv_i16m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t acc, int8_t op1, vint8m1_t op2,
size_t vl) {
return vwmacc_vx_i16m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vint8m2_t op2,
size_t vl) {
return vwmacc_vv_i16m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t acc, int8_t op1, vint8m2_t op2,
size_t vl) {
return vwmacc_vx_i16m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vint8m4_t op2,
size_t vl) {
return vwmacc_vv_i16m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t acc, int8_t op1, vint8m4_t op2,
size_t vl) {
return vwmacc_vx_i16m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
vint16mf4_t op2, size_t vl) {
return vwmacc_vv_i32mf2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t acc, int16_t op1, vint16mf4_t op2,
size_t vl) {
return vwmacc_vx_i32mf2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
vint16mf2_t op2, size_t vl) {
return vwmacc_vv_i32m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t acc, int16_t op1, vint16mf2_t op2,
size_t vl) {
return vwmacc_vx_i32m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t acc, vint16m1_t op1, vint16m1_t op2,
size_t vl) {
return vwmacc_vv_i32m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t acc, int16_t op1, vint16m1_t op2,
size_t vl) {
return vwmacc_vx_i32m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t acc, vint16m2_t op1, vint16m2_t op2,
size_t vl) {
return vwmacc_vv_i32m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t acc, int16_t op1, vint16m2_t op2,
size_t vl) {
return vwmacc_vx_i32m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t acc, vint16m4_t op1, vint16m4_t op2,
size_t vl) {
return vwmacc_vv_i32m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t acc, int16_t op1, vint16m4_t op2,
size_t vl) {
return vwmacc_vx_i32m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
vint32mf2_t op2, size_t vl) {
return vwmacc_vv_i64m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t acc, int32_t op1, vint32mf2_t op2,
size_t vl) {
return vwmacc_vx_i64m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t acc, vint32m1_t op1, vint32m1_t op2,
size_t vl) {
return vwmacc_vv_i64m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t acc, int32_t op1, vint32m1_t op2,
size_t vl) {
return vwmacc_vx_i64m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t acc, vint32m2_t op1, vint32m2_t op2,
size_t vl) {
return vwmacc_vv_i64m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t acc, int32_t op1, vint32m2_t op2,
size_t vl) {
return vwmacc_vx_i64m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t acc, vint32m4_t op1, vint32m4_t op2,
size_t vl) {
return vwmacc_vv_i64m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t acc, int32_t op1, vint32m4_t op2,
size_t vl) {
return vwmacc_vx_i64m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16mf4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t acc, vuint8mf8_t op1,
vuint8mf8_t op2, size_t vl) {
return vwmaccu_vv_u16mf4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16mf4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t acc, uint8_t op1,
vuint8mf8_t op2, size_t vl) {
return vwmaccu_vx_u16mf4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t acc, vuint8mf4_t op1,
vuint8mf4_t op2, size_t vl) {
return vwmaccu_vv_u16mf2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t acc, uint8_t op1,
vuint8mf4_t op2, size_t vl) {
return vwmaccu_vx_u16mf2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t acc, vuint8mf2_t op1,
vuint8mf2_t op2, size_t vl) {
return vwmaccu_vv_u16m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t acc, uint8_t op1, vuint8mf2_t op2,
size_t vl) {
return vwmaccu_vx_u16m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t acc, vuint8m1_t op1,
vuint8m1_t op2, size_t vl) {
return vwmaccu_vv_u16m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t acc, uint8_t op1, vuint8m1_t op2,
size_t vl) {
return vwmaccu_vx_u16m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t acc, vuint8m2_t op1,
vuint8m2_t op2, size_t vl) {
return vwmaccu_vv_u16m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t acc, uint8_t op1, vuint8m2_t op2,
size_t vl) {
return vwmaccu_vx_u16m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t acc, vuint8m4_t op1,
vuint8m4_t op2, size_t vl) {
return vwmaccu_vv_u16m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t acc, uint8_t op1, vuint8m4_t op2,
size_t vl) {
return vwmaccu_vx_u16m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t acc, vuint16mf4_t op1,
vuint16mf4_t op2, size_t vl) {
return vwmaccu_vv_u32mf2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t acc, uint16_t op1,
vuint16mf4_t op2, size_t vl) {
return vwmaccu_vx_u32mf2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t acc, vuint16mf2_t op1,
vuint16mf2_t op2, size_t vl) {
return vwmaccu_vv_u32m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t acc, uint16_t op1,
vuint16mf2_t op2, size_t vl) {
return vwmaccu_vx_u32m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t acc, vuint16m1_t op1,
vuint16m1_t op2, size_t vl) {
return vwmaccu_vv_u32m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t acc, uint16_t op1,
vuint16m1_t op2, size_t vl) {
return vwmaccu_vx_u32m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t acc, vuint16m2_t op1,
vuint16m2_t op2, size_t vl) {
return vwmaccu_vv_u32m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t acc, uint16_t op1,
vuint16m2_t op2, size_t vl) {
return vwmaccu_vx_u32m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t acc, vuint16m4_t op1,
vuint16m4_t op2, size_t vl) {
return vwmaccu_vv_u32m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t acc, uint16_t op1,
vuint16m4_t op2, size_t vl) {
return vwmaccu_vx_u32m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t acc, vuint32mf2_t op1,
vuint32mf2_t op2, size_t vl) {
return vwmaccu_vv_u64m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t acc, uint32_t op1,
vuint32mf2_t op2, size_t vl) {
return vwmaccu_vx_u64m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t acc, vuint32m1_t op1,
vuint32m1_t op2, size_t vl) {
return vwmaccu_vv_u64m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t acc, uint32_t op1,
vuint32m1_t op2, size_t vl) {
return vwmaccu_vx_u64m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t acc, vuint32m2_t op1,
vuint32m2_t op2, size_t vl) {
return vwmaccu_vv_u64m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t acc, uint32_t op1,
vuint32m2_t op2, size_t vl) {
return vwmaccu_vx_u64m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t acc, vuint32m4_t op1,
vuint32m4_t op2, size_t vl) {
return vwmaccu_vv_u64m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t acc, uint32_t op1,
vuint32m4_t op2, size_t vl) {
return vwmaccu_vx_u64m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16mf4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
vuint8mf8_t op2, size_t vl) {
return vwmaccsu_vv_i16mf4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16mf4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t acc, int8_t op1,
vuint8mf8_t op2, size_t vl) {
return vwmaccsu_vx_i16mf4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
vuint8mf4_t op2, size_t vl) {
return vwmaccsu_vv_i16mf2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t acc, int8_t op1,
vuint8mf4_t op2, size_t vl) {
return vwmaccsu_vx_i16mf2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t acc, vint8mf2_t op1,
vuint8mf2_t op2, size_t vl) {
return vwmaccsu_vv_i16m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t acc, int8_t op1, vuint8mf2_t op2,
size_t vl) {
return vwmaccsu_vx_i16m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vuint8m1_t op2,
size_t vl) {
return vwmaccsu_vv_i16m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t acc, int8_t op1, vuint8m1_t op2,
size_t vl) {
return vwmaccsu_vx_i16m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vuint8m2_t op2,
size_t vl) {
return vwmaccsu_vv_i16m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t acc, int8_t op1, vuint8m2_t op2,
size_t vl) {
return vwmaccsu_vx_i16m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vuint8m4_t op2,
size_t vl) {
return vwmaccsu_vv_i16m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t acc, int8_t op1, vuint8m4_t op2,
size_t vl) {
return vwmaccsu_vx_i16m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
vuint16mf4_t op2, size_t vl) {
return vwmaccsu_vv_i32mf2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t acc, int16_t op1,
vuint16mf4_t op2, size_t vl) {
return vwmaccsu_vx_i32mf2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
vuint16mf2_t op2, size_t vl) {
return vwmaccsu_vv_i32m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t acc, int16_t op1, vuint16mf2_t op2,
size_t vl) {
return vwmaccsu_vx_i32m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t acc, vint16m1_t op1,
vuint16m1_t op2, size_t vl) {
return vwmaccsu_vv_i32m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t acc, int16_t op1, vuint16m1_t op2,
size_t vl) {
return vwmaccsu_vx_i32m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t acc, vint16m2_t op1,
vuint16m2_t op2, size_t vl) {
return vwmaccsu_vv_i32m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t acc, int16_t op1, vuint16m2_t op2,
size_t vl) {
return vwmaccsu_vx_i32m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t acc, vint16m4_t op1,
vuint16m4_t op2, size_t vl) {
return vwmaccsu_vv_i32m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t acc, int16_t op1, vuint16m4_t op2,
size_t vl) {
return vwmaccsu_vx_i32m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
vuint32mf2_t op2, size_t vl) {
return vwmaccsu_vv_i64m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t acc, int32_t op1, vuint32mf2_t op2,
size_t vl) {
return vwmaccsu_vx_i64m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t acc, vint32m1_t op1,
vuint32m1_t op2, size_t vl) {
return vwmaccsu_vv_i64m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t acc, int32_t op1, vuint32m1_t op2,
size_t vl) {
return vwmaccsu_vx_i64m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t acc, vint32m2_t op1,
vuint32m2_t op2, size_t vl) {
return vwmaccsu_vv_i64m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t acc, int32_t op1, vuint32m2_t op2,
size_t vl) {
return vwmaccsu_vx_i64m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t acc, vint32m4_t op1,
vuint32m4_t op2, size_t vl) {
return vwmaccsu_vv_i64m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t acc, int32_t op1, vuint32m4_t op2,
size_t vl) {
return vwmaccsu_vx_i64m8(acc, op1, op2, vl);
}
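// vwmaccus: widening unsigned-signed multiply-accumulate. Here the scalar
// multiplicand (op1) is unsigned and the vector multiplicand (op2) is signed;
// the RVV spec defines only a vector-scalar (vx) form for this operation, so
// no vv tests follow.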
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16mf4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t acc, uint8_t op1,
vint8mf8_t op2, size_t vl) {
return vwmaccus_vx_i16mf4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t acc, uint8_t op1,
vint8mf4_t op2, size_t vl) {
return vwmaccus_vx_i16mf2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t acc, uint8_t op1, vint8mf2_t op2,
size_t vl) {
return vwmaccus_vx_i16m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccus_vx_i16m2(vint16m2_t acc, uint8_t op1, vint8m1_t op2,
size_t vl) {
return vwmaccus_vx_i16m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t acc, uint8_t op1, vint8m2_t op2,
size_t vl) {
return vwmaccus_vx_i16m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t acc, uint8_t op1, vint8m4_t op2,
size_t vl) {
return vwmaccus_vx_i16m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t acc, uint16_t op1,
vint16mf4_t op2, size_t vl) {
return vwmaccus_vx_i32mf2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t acc, uint16_t op1, vint16mf2_t op2,
size_t vl) {
return vwmaccus_vx_i32m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t acc, uint16_t op1, vint16m1_t op2,
size_t vl) {
return vwmaccus_vx_i32m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t acc, uint16_t op1, vint16m2_t op2,
size_t vl) {
return vwmaccus_vx_i32m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccus_vx_i32m8(vint32m8_t acc, uint16_t op1, vint16m4_t op2,
size_t vl) {
return vwmaccus_vx_i32m8(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t acc, uint32_t op1, vint32mf2_t op2,
size_t vl) {
return vwmaccus_vx_i64m1(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t acc, uint32_t op1, vint32m1_t op2,
size_t vl) {
return vwmaccus_vx_i64m2(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t acc, uint32_t op1, vint32m2_t op2,
size_t vl) {
return vwmaccus_vx_i64m4(acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t acc, uint32_t op1, vint32m4_t op2,
size_t vl) {
return vwmaccus_vx_i64m8(acc, op1, op2, vl);
}
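// Masked variants (_m suffix): each intrinsic takes an additional vboolN_t
// mask as its first argument and lowers to the corresponding
// llvm.riscv.vwmacc*.mask intrinsic.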
// CHECK-RV32-LABEL: @test_vwmacc_vv_i16mf4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
return vwmacc_vv_i16mf4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i16mf4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int8_t op1,
vint8mf8_t op2, size_t vl) {
return vwmacc_vx_i16mf4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i16mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
return vwmacc_vv_i16mf2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i16mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int8_t op1,
vint8mf4_t op2, size_t vl) {
return vwmacc_vx_i16mf2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
return vwmacc_vv_i16m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
vint8mf2_t op2, size_t vl) {
return vwmacc_vx_i16m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint8m1_t op1,
vint8m1_t op2, size_t vl) {
return vwmacc_vv_i16m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
vint8m1_t op2, size_t vl) {
return vwmacc_vx_i16m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint8m2_t op1,
vint8m2_t op2, size_t vl) {
return vwmacc_vv_i16m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
vint8m2_t op2, size_t vl) {
return vwmacc_vx_i16m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint8m4_t op1,
vint8m4_t op2, size_t vl) {
return vwmacc_vv_i16m8_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
vint8m4_t op2, size_t vl) {
return vwmacc_vx_i16m8_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
vint16mf4_t op1, vint16mf4_t op2,
size_t vl) {
return vwmacc_vv_i32mf2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
int16_t op1, vint16mf4_t op2, size_t vl) {
return vwmacc_vx_i32mf2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
return vwmacc_vv_i32m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
vint16mf2_t op2, size_t vl) {
return vwmacc_vx_i32m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
vint16m1_t op1, vint16m1_t op2, size_t vl) {
return vwmacc_vv_i32m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
vint16m1_t op2, size_t vl) {
return vwmacc_vx_i32m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint16m2_t op1,
vint16m2_t op2, size_t vl) {
return vwmacc_vv_i32m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
vint16m2_t op2, size_t vl) {
return vwmacc_vx_i32m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint16m4_t op1,
vint16m4_t op2, size_t vl) {
return vwmacc_vv_i32m8_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
vint16m4_t op2, size_t vl) {
return vwmacc_vx_i32m8_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vwmacc_vv_i64m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
vint32mf2_t op2, size_t vl) {
return vwmacc_vx_i64m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
vint32m1_t op1, vint32m1_t op2, size_t vl) {
return vwmacc_vv_i64m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
vint32m1_t op2, size_t vl) {
return vwmacc_vx_i64m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
vint32m2_t op1, vint32m2_t op2, size_t vl) {
return vwmacc_vv_i64m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
vint32m2_t op2, size_t vl) {
return vwmacc_vx_i64m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint32m4_t op1,
vint32m4_t op2, size_t vl) {
return vwmacc_vv_i64m8_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
vint32m4_t op2, size_t vl) {
return vwmacc_vx_i64m8_m(mask, acc, op1, op2, vl);
}
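// Masked unsigned widening multiply-accumulate (vwmaccu) tests follow, using
// unsigned vuintNmK_t element and accumulator types.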
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16mf4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
vuint8mf8_t op1, vuint8mf8_t op2,
size_t vl) {
return vwmaccu_vv_u16mf4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16mf4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
uint8_t op1, vuint8mf8_t op2, size_t vl) {
return vwmaccu_vx_u16mf4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
vuint8mf4_t op1, vuint8mf4_t op2,
size_t vl) {
return vwmaccu_vv_u16mf2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
uint8_t op1, vuint8mf4_t op2, size_t vl) {
return vwmaccu_vx_u16mf2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc,
vuint8mf2_t op1, vuint8mf2_t op2,
size_t vl) {
return vwmaccu_vv_u16m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc,
uint8_t op1, vuint8mf2_t op2, size_t vl) {
return vwmaccu_vx_u16m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc,
vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
return vwmaccu_vv_u16m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint8_t op1,
vuint8m1_t op2, size_t vl) {
return vwmaccu_vx_u16m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc,
vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
return vwmaccu_vv_u16m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint8_t op1,
vuint8m2_t op2, size_t vl) {
return vwmaccu_vx_u16m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc,
vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
return vwmaccu_vv_u16m8_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint8_t op1,
vuint8m4_t op2, size_t vl) {
return vwmaccu_vx_u16m8_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
vuint16mf4_t op1, vuint16mf4_t op2,
size_t vl) {
return vwmaccu_vv_u32mf2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
uint16_t op1, vuint16mf4_t op2,
size_t vl) {
return vwmaccu_vx_u32mf2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc,
vuint16mf2_t op1, vuint16mf2_t op2,
size_t vl) {
return vwmaccu_vv_u32m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc,
uint16_t op1, vuint16mf2_t op2, size_t vl) {
return vwmaccu_vx_u32m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc,
vuint16m1_t op1, vuint16m1_t op2,
size_t vl) {
return vwmaccu_vv_u32m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc,
uint16_t op1, vuint16m1_t op2, size_t vl) {
return vwmaccu_vx_u32m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc,
vuint16m2_t op1, vuint16m2_t op2,
size_t vl) {
return vwmaccu_vv_u32m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc,
uint16_t op1, vuint16m2_t op2, size_t vl) {
return vwmaccu_vx_u32m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc,
vuint16m4_t op1, vuint16m4_t op2,
size_t vl) {
return vwmaccu_vv_u32m8_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc,
uint16_t op1, vuint16m4_t op2, size_t vl) {
return vwmaccu_vx_u32m8_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc,
vuint32mf2_t op1, vuint32mf2_t op2,
size_t vl) {
return vwmaccu_vv_u64m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc,
uint32_t op1, vuint32mf2_t op2, size_t vl) {
return vwmaccu_vx_u64m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc,
vuint32m1_t op1, vuint32m1_t op2,
size_t vl) {
return vwmaccu_vv_u64m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc,
uint32_t op1, vuint32m1_t op2, size_t vl) {
return vwmaccu_vx_u64m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc,
vuint32m2_t op1, vuint32m2_t op2,
size_t vl) {
return vwmaccu_vv_u64m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc,
uint32_t op1, vuint32m2_t op2, size_t vl) {
return vwmaccu_vx_u64m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc,
vuint32m4_t op1, vuint32m4_t op2,
size_t vl) {
return vwmaccu_vv_u64m8_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc,
uint32_t op1, vuint32m4_t op2, size_t vl) {
return vwmaccu_vx_u64m8_m(mask, acc, op1, op2, vl);
}
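//
// The tests below exercise vwmaccsu (signed-unsigned widening
// multiply-accumulate): op1 is signed, op2 is unsigned, and the product is
// accumulated into a 2*SEW-wide signed accumulator. The masked _vv_ and _vx_
// forms follow the same pattern as the vwmaccu tests above.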
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16mf4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
vint8mf8_t op1, vuint8mf8_t op2,
size_t vl) {
return vwmaccsu_vv_i16mf4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16mf4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
int8_t op1, vuint8mf8_t op2, size_t vl) {
return vwmaccsu_vx_i16mf4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
vint8mf4_t op1, vuint8mf4_t op2,
size_t vl) {
return vwmaccsu_vv_i16mf2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
int8_t op1, vuint8mf4_t op2, size_t vl) {
return vwmaccsu_vx_i16mf2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
vint8mf2_t op1, vuint8mf2_t op2,
size_t vl) {
return vwmaccsu_vv_i16m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
vuint8mf2_t op2, size_t vl) {
return vwmaccsu_vx_i16m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t acc,
vint8m1_t op1, vuint8m1_t op2, size_t vl) {
return vwmaccsu_vv_i16m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
vuint8m1_t op2, size_t vl) {
return vwmaccsu_vx_i16m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t acc,
vint8m2_t op1, vuint8m2_t op2, size_t vl) {
return vwmaccsu_vv_i16m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
vuint8m2_t op2, size_t vl) {
return vwmaccsu_vx_i16m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t acc,
vint8m4_t op1, vuint8m4_t op2, size_t vl) {
return vwmaccsu_vv_i16m8_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
vuint8m4_t op2, size_t vl) {
return vwmaccsu_vx_i16m8_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
vint16mf4_t op1, vuint16mf4_t op2,
size_t vl) {
return vwmaccsu_vv_i32mf2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
int16_t op1, vuint16mf4_t op2,
size_t vl) {
return vwmaccsu_vx_i32mf2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
vint16mf2_t op1, vuint16mf2_t op2,
size_t vl) {
return vwmaccsu_vv_i32m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
vuint16mf2_t op2, size_t vl) {
return vwmaccsu_vx_i32m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
vint16m1_t op1, vuint16m1_t op2,
size_t vl) {
return vwmaccsu_vv_i32m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
vuint16m1_t op2, size_t vl) {
return vwmaccsu_vx_i32m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t acc,
vint16m2_t op1, vuint16m2_t op2,
size_t vl) {
return vwmaccsu_vv_i32m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
vuint16m2_t op2, size_t vl) {
return vwmaccsu_vx_i32m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t acc,
vint16m4_t op1, vuint16m4_t op2,
size_t vl) {
return vwmaccsu_vv_i32m8_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
vuint16m4_t op2, size_t vl) {
return vwmaccsu_vx_i32m8_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
vint32mf2_t op1, vuint32mf2_t op2,
size_t vl) {
return vwmaccsu_vv_i64m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
vuint32mf2_t op2, size_t vl) {
return vwmaccsu_vx_i64m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
vint32m1_t op1, vuint32m1_t op2,
size_t vl) {
return vwmaccsu_vv_i64m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
vuint32m1_t op2, size_t vl) {
return vwmaccsu_vx_i64m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
vint32m2_t op1, vuint32m2_t op2,
size_t vl) {
return vwmaccsu_vv_i64m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
vuint32m2_t op2, size_t vl) {
return vwmaccsu_vx_i64m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t acc,
vint32m4_t op1, vuint32m4_t op2,
size_t vl) {
return vwmaccsu_vv_i64m8_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
vuint32m4_t op2, size_t vl) {
return vwmaccsu_vx_i64m8_m(mask, acc, op1, op2, vl);
}
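//
// The tests below exercise vwmaccus (unsigned-signed widening
// multiply-accumulate): the scalar op1 is unsigned and the vector op2 is
// signed. Only the vector-scalar (_vx_) form is provided for vwmaccus, so no
// _vv_ tests appear in this group.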
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16mf4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
uint8_t op1, vint8mf8_t op2, size_t vl) {
return vwmaccus_vx_i16mf4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
uint8_t op1, vint8mf4_t op2, size_t vl) {
return vwmaccus_vx_i16mf2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, uint8_t op1,
vint8mf2_t op2, size_t vl) {
return vwmaccus_vx_i16m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, uint8_t op1,
vint8m1_t op2, size_t vl) {
return vwmaccus_vx_i16m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, uint8_t op1,
vint8m2_t op2, size_t vl) {
return vwmaccus_vx_i16m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, uint8_t op1,
vint8m4_t op2, size_t vl) {
return vwmaccus_vx_i16m8_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
uint16_t op1, vint16mf4_t op2,
size_t vl) {
return vwmaccus_vx_i32mf2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t acc,
uint16_t op1, vint16mf2_t op2, size_t vl) {
return vwmaccus_vx_i32m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t acc,
uint16_t op1, vint16m1_t op2, size_t vl) {
return vwmaccus_vx_i32m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, uint16_t op1,
vint16m2_t op2, size_t vl) {
return vwmaccus_vx_i32m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, uint16_t op1,
vint16m4_t op2, size_t vl) {
return vwmaccus_vx_i32m8_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t acc,
uint32_t op1, vint32mf2_t op2, size_t vl) {
return vwmaccus_vx_i64m1_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t acc,
uint32_t op1, vint32m1_t op2, size_t vl) {
return vwmaccus_vx_i64m2_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t acc,
uint32_t op1, vint32m2_t op2, size_t vl) {
return vwmaccus_vx_i64m4_m(mask, acc, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, uint32_t op1,
vint32m4_t op2, size_t vl) {
return vwmaccus_vx_i64m8_m(mask, acc, op1, op2, vl);
}
|
997714.c | /*
* Copyright (c) 2019 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
*
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#include <sys/errno.h>
#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>
#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <vm/memory_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pageout.h>
#include <vm/vm_shared_region.h>
/*
* SHARED REGION MEMORY PAGER
*
* This external memory manager (EMM) handles mappings of a dyld shared cache
* in shared regions, applying any necessary modifications (sliding,
* pointer signing, ...).
*
* It mostly handles page-in requests (from memory_object_data_request()) by
* getting the original data from its backing VM object, itself backed by
* the dyld shared cache file, modifying it if needed and providing it to VM.
*
* The modified pages will never be dirtied, so the memory manager doesn't
* need to handle page-out requests (from memory_object_data_return()). The
* pages need to be mapped copy-on-write, so that the originals stay clean.
*
* We don't expect to have to handle a large number of shared cache files,
* so the data structures are very simple (simple linked list) for now.
*/
/* forward declarations */
void shared_region_pager_reference(memory_object_t mem_obj);
void shared_region_pager_deallocate(memory_object_t mem_obj);
kern_return_t shared_region_pager_init(memory_object_t mem_obj,
memory_object_control_t control,
memory_object_cluster_size_t pg_size);
kern_return_t shared_region_pager_terminate(memory_object_t mem_obj);
kern_return_t shared_region_pager_data_request(memory_object_t mem_obj,
memory_object_offset_t offset,
memory_object_cluster_size_t length,
vm_prot_t protection_required,
memory_object_fault_info_t fault_info);
kern_return_t shared_region_pager_data_return(memory_object_t mem_obj,
memory_object_offset_t offset,
memory_object_cluster_size_t data_cnt,
memory_object_offset_t *resid_offset,
int *io_error,
boolean_t dirty,
boolean_t kernel_copy,
int upl_flags);
kern_return_t shared_region_pager_data_initialize(memory_object_t mem_obj,
memory_object_offset_t offset,
memory_object_cluster_size_t data_cnt);
kern_return_t shared_region_pager_data_unlock(memory_object_t mem_obj,
memory_object_offset_t offset,
memory_object_size_t size,
vm_prot_t desired_access);
kern_return_t shared_region_pager_synchronize(memory_object_t mem_obj,
memory_object_offset_t offset,
memory_object_size_t length,
vm_sync_t sync_flags);
kern_return_t shared_region_pager_map(memory_object_t mem_obj,
vm_prot_t prot);
kern_return_t shared_region_pager_last_unmap(memory_object_t mem_obj);
/*
* Vector of VM operations for this EMM.
* These routines are invoked by VM via the memory_object_*() interfaces.
*/
const struct memory_object_pager_ops shared_region_pager_ops = {
.memory_object_reference = shared_region_pager_reference,
.memory_object_deallocate = shared_region_pager_deallocate,
.memory_object_init = shared_region_pager_init,
.memory_object_terminate = shared_region_pager_terminate,
.memory_object_data_request = shared_region_pager_data_request,
.memory_object_data_return = shared_region_pager_data_return,
.memory_object_data_initialize = shared_region_pager_data_initialize,
.memory_object_data_unlock = shared_region_pager_data_unlock,
.memory_object_synchronize = shared_region_pager_synchronize,
.memory_object_map = shared_region_pager_map,
.memory_object_last_unmap = shared_region_pager_last_unmap,
.memory_object_data_reclaim = NULL,
.memory_object_pager_name = "shared_region"
};
/*
* The "shared_region_pager" describes a memory object backed by
* the "shared_region" EMM.
*/
typedef struct shared_region_pager {
/* mandatory generic header */
struct memory_object sc_pgr_hdr;
/* pager-specific data */
queue_chain_t pager_queue; /* next & prev pagers */
unsigned int ref_count; /* reference count */
boolean_t is_ready; /* is this pager ready ? */
boolean_t is_mapped; /* is this mem_obj mapped ? */
vm_object_t backing_object; /* VM obj for shared cache */
vm_object_offset_t backing_offset;
struct vm_shared_region_slide_info *scp_slide_info;
} *shared_region_pager_t;
#define SHARED_REGION_PAGER_NULL ((shared_region_pager_t) NULL)
/*
* List of memory objects managed by this EMM.
* The list is protected by the "shared_region_pager_lock" lock.
*/
int shared_region_pager_count = 0; /* number of pagers */
int shared_region_pager_count_mapped = 0; /* number of mapped pagers */
queue_head_t shared_region_pager_queue;
decl_lck_mtx_data(, shared_region_pager_lock);
/*
* Maximum number of unmapped pagers we're willing to keep around.
*/
int shared_region_pager_cache_limit = 0;
/*
* Statistics & counters.
*/
int shared_region_pager_count_max = 0;
int shared_region_pager_count_unmapped_max = 0;
int shared_region_pager_num_trim_max = 0;
int shared_region_pager_num_trim_total = 0;
lck_grp_t shared_region_pager_lck_grp;
lck_grp_attr_t shared_region_pager_lck_grp_attr;
lck_attr_t shared_region_pager_lck_attr;
uint64_t shared_region_pager_copied = 0;
uint64_t shared_region_pager_slid = 0;
uint64_t shared_region_pager_slid_error = 0;
uint64_t shared_region_pager_reclaimed = 0;
/* internal prototypes */
shared_region_pager_t shared_region_pager_create(
vm_object_t backing_object,
vm_object_offset_t backing_offset,
struct vm_shared_region_slide_info *slide_info);
shared_region_pager_t shared_region_pager_lookup(memory_object_t mem_obj);
void shared_region_pager_dequeue(shared_region_pager_t pager);
void shared_region_pager_deallocate_internal(shared_region_pager_t pager,
boolean_t locked);
void shared_region_pager_terminate_internal(shared_region_pager_t pager);
void shared_region_pager_trim(void);
#if DEBUG
int shared_region_pagerdebug = 0;
#define PAGER_ALL 0xffffffff
#define PAGER_INIT 0x00000001
#define PAGER_PAGEIN 0x00000002
#define PAGER_DEBUG(LEVEL, A) \
MACRO_BEGIN \
if ((shared_region_pagerdebug & (LEVEL)) == (LEVEL)) { \
printf A; \
} \
MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
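/*
 * shared_region_pager_bootstrap()
 *
 * One-time initialization of the lock group, lock and queue used to
 * manage the list of shared region pagers.
 */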
void
shared_region_pager_bootstrap(void)
{
lck_grp_attr_setdefault(&shared_region_pager_lck_grp_attr);
lck_grp_init(&shared_region_pager_lck_grp, "shared_region", &shared_region_pager_lck_grp_attr);
lck_attr_setdefault(&shared_region_pager_lck_attr);
lck_mtx_init(&shared_region_pager_lock, &shared_region_pager_lck_grp, &shared_region_pager_lck_attr);
queue_init(&shared_region_pager_queue);
}
/*
* shared_region_pager_init()
*
 * Initializes the memory object and makes it ready to be used and mapped.
*/
kern_return_t
shared_region_pager_init(
memory_object_t mem_obj,
memory_object_control_t control,
#if !DEBUG
__unused
#endif
memory_object_cluster_size_t pg_size)
{
shared_region_pager_t pager;
kern_return_t kr;
memory_object_attr_info_data_t attributes;
PAGER_DEBUG(PAGER_ALL,
("shared_region_pager_init: %p, %p, %x\n",
mem_obj, control, pg_size));
if (control == MEMORY_OBJECT_CONTROL_NULL) {
return KERN_INVALID_ARGUMENT;
}
pager = shared_region_pager_lookup(mem_obj);
memory_object_control_reference(control);
pager->sc_pgr_hdr.mo_control = control;
attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
attributes.cluster_size = (1 << (PAGE_SHIFT));
attributes.may_cache_object = FALSE;
attributes.temporary = TRUE;
kr = memory_object_change_attributes(
control,
MEMORY_OBJECT_ATTRIBUTE_INFO,
(memory_object_info_t) &attributes,
MEMORY_OBJECT_ATTR_INFO_COUNT);
if (kr != KERN_SUCCESS) {
panic("shared_region_pager_init: "
"memory_object_change_attributes() failed");
}
#if CONFIG_SECLUDED_MEMORY
if (secluded_for_filecache) {
#if 00
/*
* XXX FBDP do we want this in the secluded pool?
* Ideally, we'd want the shared region used by Camera to
* NOT be in the secluded pool, but all other shared regions
* in the secluded pool...
*/
memory_object_mark_eligible_for_secluded(control, TRUE);
#endif /* 00 */
}
#endif /* CONFIG_SECLUDED_MEMORY */
return KERN_SUCCESS;
}
/*
 * shared_region_pager_data_return()
*
* Handles page-out requests from VM. This should never happen since
* the pages provided by this EMM are not supposed to be dirty or dirtied
* and VM should simply discard the contents and reclaim the pages if it
* needs to.
*/
kern_return_t
shared_region_pager_data_return(
__unused memory_object_t mem_obj,
__unused memory_object_offset_t offset,
__unused memory_object_cluster_size_t data_cnt,
__unused memory_object_offset_t *resid_offset,
__unused int *io_error,
__unused boolean_t dirty,
__unused boolean_t kernel_copy,
__unused int upl_flags)
{
panic("shared_region_pager_data_return: should never get called");
return KERN_FAILURE;
}
kern_return_t
shared_region_pager_data_initialize(
__unused memory_object_t mem_obj,
__unused memory_object_offset_t offset,
__unused memory_object_cluster_size_t data_cnt)
{
panic("shared_region_pager_data_initialize: should never get called");
return KERN_FAILURE;
}
kern_return_t
shared_region_pager_data_unlock(
__unused memory_object_t mem_obj,
__unused memory_object_offset_t offset,
__unused memory_object_size_t size,
__unused vm_prot_t desired_access)
{
return KERN_FAILURE;
}
/*
* shared_region_pager_data_request()
*
* Handles page-in requests from VM.
*/
int shared_region_pager_data_request_debug = 0;
kern_return_t
shared_region_pager_data_request(
memory_object_t mem_obj,
memory_object_offset_t offset,
memory_object_cluster_size_t length,
#if !DEBUG
__unused
#endif
vm_prot_t protection_required,
memory_object_fault_info_t mo_fault_info)
{
shared_region_pager_t pager;
memory_object_control_t mo_control;
upl_t upl;
int upl_flags;
upl_size_t upl_size;
upl_page_info_t *upl_pl;
unsigned int pl_count;
vm_object_t src_top_object, src_page_object, dst_object;
kern_return_t kr, retval;
vm_offset_t src_vaddr, dst_vaddr;
vm_offset_t cur_offset;
vm_offset_t offset_in_page;
kern_return_t error_code;
vm_prot_t prot;
vm_page_t src_page, top_page;
int interruptible;
struct vm_object_fault_info fault_info;
mach_vm_offset_t slide_start_address;
PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));
retval = KERN_SUCCESS;
src_top_object = VM_OBJECT_NULL;
src_page_object = VM_OBJECT_NULL;
upl = NULL;
upl_pl = NULL;
fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
fault_info.stealth = TRUE;
fault_info.io_sync = FALSE;
fault_info.mark_zf_absent = FALSE;
fault_info.batch_pmap_op = FALSE;
interruptible = fault_info.interruptible;
pager = shared_region_pager_lookup(mem_obj);
assert(pager->is_ready);
assert(pager->ref_count > 1); /* pager is alive and mapped */
PAGER_DEBUG(PAGER_PAGEIN, ("shared_region_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));
/*
* Gather in a UPL all the VM pages requested by VM.
*/
mo_control = pager->sc_pgr_hdr.mo_control;
upl_size = length;
upl_flags =
UPL_RET_ONLY_ABSENT |
UPL_SET_LITE |
UPL_NO_SYNC |
UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */
UPL_SET_INTERNAL;
pl_count = 0;
kr = memory_object_upl_request(mo_control,
offset, upl_size,
&upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
if (kr != KERN_SUCCESS) {
retval = kr;
goto done;
}
dst_object = mo_control->moc_object;
assert(dst_object != VM_OBJECT_NULL);
/*
* We'll map the original data in the kernel address space from the
* backing VM object (itself backed by the shared cache file via
* the vnode pager).
*/
src_top_object = pager->backing_object;
assert(src_top_object != VM_OBJECT_NULL);
vm_object_reference(src_top_object); /* keep the source object alive */
slide_start_address = pager->scp_slide_info->slid_address;
fault_info.lo_offset += pager->backing_offset;
fault_info.hi_offset += pager->backing_offset;
/*
* Fill in the contents of the pages requested by VM.
*/
upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
pl_count = length / PAGE_SIZE;
for (cur_offset = 0;
retval == KERN_SUCCESS && cur_offset < length;
cur_offset += PAGE_SIZE) {
ppnum_t dst_pnum;
if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
/* this page is not in the UPL: skip it */
continue;
}
/*
* Map the source (dyld shared cache) page in the kernel's
* virtual address space.
* We already hold a reference on the src_top_object.
*/
retry_src_fault:
vm_object_lock(src_top_object);
vm_object_paging_begin(src_top_object);
error_code = 0;
prot = VM_PROT_READ;
src_page = VM_PAGE_NULL;
kr = vm_fault_page(src_top_object,
pager->backing_offset + offset + cur_offset,
VM_PROT_READ,
FALSE,
FALSE, /* src_page not looked up */
&prot,
&src_page,
&top_page,
NULL,
&error_code,
FALSE,
FALSE,
&fault_info);
switch (kr) {
case VM_FAULT_SUCCESS:
break;
case VM_FAULT_RETRY:
goto retry_src_fault;
case VM_FAULT_MEMORY_SHORTAGE:
if (vm_page_wait(interruptible)) {
goto retry_src_fault;
}
/* fall thru */
case VM_FAULT_INTERRUPTED:
retval = MACH_SEND_INTERRUPTED;
goto done;
case VM_FAULT_SUCCESS_NO_VM_PAGE:
/* success but no VM page: fail */
vm_object_paging_end(src_top_object);
vm_object_unlock(src_top_object);
/*FALLTHROUGH*/
case VM_FAULT_MEMORY_ERROR:
/* the page is not there ! */
if (error_code) {
retval = error_code;
} else {
retval = KERN_MEMORY_ERROR;
}
goto done;
default:
panic("shared_region_pager_data_request: "
"vm_fault_page() unexpected error 0x%x\n",
kr);
}
assert(src_page != VM_PAGE_NULL);
assert(src_page->vmp_busy);
if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
vm_page_lockspin_queues();
if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
vm_page_speculate(src_page, FALSE);
}
vm_page_unlock_queues();
}
/*
* Establish pointers to the source
* and destination physical pages.
*/
dst_pnum = (ppnum_t)
upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
assert(dst_pnum != 0);
src_vaddr = (vm_map_offset_t)
phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
<< PAGE_SHIFT);
dst_vaddr = (vm_map_offset_t)
phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
src_page_object = VM_PAGE_OBJECT(src_page);
/*
* Validate the original page...
*/
if (src_page_object->code_signed) {
vm_page_validate_cs_mapped(
src_page,
(const void *) src_vaddr);
}
/*
* ... and transfer the results to the destination page.
*/
UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
src_page->vmp_cs_validated);
UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
src_page->vmp_cs_tainted);
UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
src_page->vmp_cs_nx);
/*
* The page provider might access a mapped file, so let's
* release the object lock for the source page to avoid a
* potential deadlock.
* The source page is kept busy and we have a
* "paging_in_progress" reference on its object, so it's safe
* to unlock the object here.
*/
assert(src_page->vmp_busy);
assert(src_page_object->paging_in_progress > 0);
vm_object_unlock(src_page_object);
/*
* Process the original contents of the source page
* into the destination page.
*/
for (offset_in_page = 0;
offset_in_page < PAGE_SIZE;
offset_in_page += PAGE_SIZE_FOR_SR_SLIDE) {
vm_object_offset_t chunk_offset;
vm_object_offset_t offset_in_backing_object;
vm_object_offset_t offset_in_sliding_range;
chunk_offset = offset + cur_offset + offset_in_page;
bcopy((const char *)(src_vaddr +
offset_in_page),
(char *)(dst_vaddr + offset_in_page),
PAGE_SIZE_FOR_SR_SLIDE);
offset_in_backing_object = (chunk_offset +
pager->backing_offset);
if ((offset_in_backing_object < pager->scp_slide_info->start) ||
(offset_in_backing_object >= pager->scp_slide_info->end)) {
/* chunk is outside of sliding range: done */
shared_region_pager_copied++;
continue;
}
offset_in_sliding_range =
(offset_in_backing_object -
pager->scp_slide_info->start);
kr = vm_shared_region_slide_page(
pager->scp_slide_info,
dst_vaddr + offset_in_page,
(mach_vm_offset_t) (offset_in_sliding_range +
slide_start_address),
(uint32_t) (offset_in_sliding_range /
PAGE_SIZE_FOR_SR_SLIDE));
if (shared_region_pager_data_request_debug) {
printf("shared_region_data_request"
"(%p,0x%llx+0x%llx+0x%04llx): 0x%llx "
"in sliding range [0x%llx:0x%llx]: "
"SLIDE offset 0x%llx="
"(0x%llx+0x%llx+0x%llx+0x%04llx)"
"[0x%016llx 0x%016llx] "
"code_signed=%d "
"cs_validated=%d "
"cs_tainted=%d "
"cs_nx=%d "
"kr=0x%x\n",
pager,
offset,
(uint64_t) cur_offset,
(uint64_t) offset_in_page,
chunk_offset,
pager->scp_slide_info->start,
pager->scp_slide_info->end,
(pager->backing_offset +
offset +
cur_offset +
offset_in_page),
pager->backing_offset,
offset,
(uint64_t) cur_offset,
(uint64_t) offset_in_page,
*(uint64_t *)(dst_vaddr + offset_in_page),
*(uint64_t *)(dst_vaddr + offset_in_page + 8),
src_page_object->code_signed,
src_page->vmp_cs_validated,
src_page->vmp_cs_tainted,
src_page->vmp_cs_nx,
kr);
}
if (kr != KERN_SUCCESS) {
shared_region_pager_slid_error++;
break;
}
shared_region_pager_slid++;
}
assert(VM_PAGE_OBJECT(src_page) == src_page_object);
assert(src_page->vmp_busy);
assert(src_page_object->paging_in_progress > 0);
vm_object_lock(src_page_object);
/*
* Cleanup the result of vm_fault_page() of the source page.
*/
PAGE_WAKEUP_DONE(src_page);
src_page = VM_PAGE_NULL;
vm_object_paging_end(src_page_object);
vm_object_unlock(src_page_object);
if (top_page != VM_PAGE_NULL) {
assert(VM_PAGE_OBJECT(top_page) == src_top_object);
vm_object_lock(src_top_object);
VM_PAGE_FREE(top_page);
vm_object_paging_end(src_top_object);
vm_object_unlock(src_top_object);
}
}
done:
if (upl != NULL) {
/* clean up the UPL */
/*
* The pages are currently dirty because we've just been
* writing on them, but as far as we're concerned, they're
* clean since they contain their "original" contents as
* provided by us, the pager.
* Tell the UPL to mark them "clean".
*/
upl_clear_dirty(upl, TRUE);
/* abort or commit the UPL */
if (retval != KERN_SUCCESS) {
upl_abort(upl, 0);
} else {
boolean_t empty;
upl_commit_range(upl, 0, upl->size,
UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
upl_pl, pl_count, &empty);
}
/* and deallocate the UPL */
upl_deallocate(upl);
upl = NULL;
}
if (src_top_object != VM_OBJECT_NULL) {
vm_object_deallocate(src_top_object);
}
return retval;
}
/*
* shared_region_pager_reference()
*
* Get a reference on this memory object.
* For external usage only. Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
*/
void
shared_region_pager_reference(
memory_object_t mem_obj)
{
shared_region_pager_t pager;
pager = shared_region_pager_lookup(mem_obj);
lck_mtx_lock(&shared_region_pager_lock);
assert(pager->ref_count > 0);
pager->ref_count++;
lck_mtx_unlock(&shared_region_pager_lock);
}
/*
* shared_region_pager_dequeue:
*
* Removes a pager from the list of pagers.
*
* The caller must hold "shared_region_pager_lock".
*/
void
shared_region_pager_dequeue(
shared_region_pager_t pager)
{
assert(!pager->is_mapped);
queue_remove(&shared_region_pager_queue,
pager,
shared_region_pager_t,
pager_queue);
pager->pager_queue.next = NULL;
pager->pager_queue.prev = NULL;
shared_region_pager_count--;
}
/*
* shared_region_pager_terminate_internal:
*
* Trigger the asynchronous termination of the memory object associated
* with this pager.
* When the memory object is terminated, there will be one more call
* to memory_object_deallocate() (i.e. shared_region_pager_deallocate())
* to finish the clean up.
*
* "shared_region_pager_lock" should not be held by the caller.
* We don't need the lock because the pager has already been removed from
* the pagers' list and is now ours exclusively.
*/
void
shared_region_pager_terminate_internal(
shared_region_pager_t pager)
{
assert(pager->is_ready);
assert(!pager->is_mapped);
if (pager->backing_object != VM_OBJECT_NULL) {
vm_object_deallocate(pager->backing_object);
pager->backing_object = VM_OBJECT_NULL;
}
/* trigger the destruction of the memory object */
memory_object_destroy(pager->sc_pgr_hdr.mo_control, 0);
}
/*
* shared_region_pager_deallocate_internal()
*
* Release a reference on this pager and free it when the last
* reference goes away.
* Can be called with shared_region_pager_lock held or not but always returns
* with it unlocked.
*/
void
shared_region_pager_deallocate_internal(
shared_region_pager_t pager,
boolean_t locked)
{
boolean_t needs_trimming;
int count_unmapped;
if (!locked) {
lck_mtx_lock(&shared_region_pager_lock);
}
count_unmapped = (shared_region_pager_count -
shared_region_pager_count_mapped);
if (count_unmapped > shared_region_pager_cache_limit) {
/* we have too many unmapped pagers: trim some */
needs_trimming = TRUE;
} else {
needs_trimming = FALSE;
}
/* drop a reference on this pager */
pager->ref_count--;
if (pager->ref_count == 1) {
/*
* Only the "named" reference is left, which means that
* no one is really holding on to this pager anymore.
* Terminate it.
*/
shared_region_pager_dequeue(pager);
/* the pager is all ours: no need for the lock now */
lck_mtx_unlock(&shared_region_pager_lock);
shared_region_pager_terminate_internal(pager);
} else if (pager->ref_count == 0) {
/*
* Dropped the existence reference; the memory object has
* been terminated. Do some final cleanup and release the
* pager structure.
*/
lck_mtx_unlock(&shared_region_pager_lock);
if (pager->sc_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
memory_object_control_deallocate(pager->sc_pgr_hdr.mo_control);
pager->sc_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
}
kfree(pager, sizeof(*pager));
pager = SHARED_REGION_PAGER_NULL;
} else {
/* there are still plenty of references: keep going... */
lck_mtx_unlock(&shared_region_pager_lock);
}
if (needs_trimming) {
shared_region_pager_trim();
}
/* caution: lock is not held on return... */
}
/*
* shared_region_pager_deallocate()
*
* Release a reference on this pager and free it when the last
* reference goes away.
*/
void
shared_region_pager_deallocate(
memory_object_t mem_obj)
{
shared_region_pager_t pager;
PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_deallocate: %p\n", mem_obj));
pager = shared_region_pager_lookup(mem_obj);
shared_region_pager_deallocate_internal(pager, FALSE);
}
/*
 * shared_region_pager_terminate()
 *
 * Nothing to do here: the real cleanup happens when the last reference
 * is dropped in shared_region_pager_deallocate().
 */
kern_return_t
shared_region_pager_terminate(
#if !DEBUG
__unused
#endif
memory_object_t mem_obj)
{
PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_terminate: %p\n", mem_obj));
return KERN_SUCCESS;
}
/*
 * shared_region_pager_synchronize()
 *
 * memory_object_synchronize() is no longer supported, so this routine
 * should never be called.
 */
kern_return_t
shared_region_pager_synchronize(
__unused memory_object_t mem_obj,
__unused memory_object_offset_t offset,
__unused memory_object_size_t length,
__unused vm_sync_t sync_flags)
{
panic("shared_region_pager_synchronize: memory_object_synchronize no longer supported\n");
return KERN_FAILURE;
}
/*
* shared_region_pager_map()
*
* This allows VM to let us, the EMM, know that this memory object
* is currently mapped one or more times. This is called by VM each time
* the memory object gets mapped and we take one extra reference on the
* memory object to account for all its mappings.
*/
kern_return_t
shared_region_pager_map(
memory_object_t mem_obj,
__unused vm_prot_t prot)
{
shared_region_pager_t pager;
PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_map: %p\n", mem_obj));
pager = shared_region_pager_lookup(mem_obj);
lck_mtx_lock(&shared_region_pager_lock);
assert(pager->is_ready);
assert(pager->ref_count > 0); /* pager is alive */
if (pager->is_mapped == FALSE) {
/*
* First mapping of this pager: take an extra reference
* that will remain until all the mappings of this pager
* are removed.
*/
pager->is_mapped = TRUE;
pager->ref_count++;
shared_region_pager_count_mapped++;
}
lck_mtx_unlock(&shared_region_pager_lock);
return KERN_SUCCESS;
}
/*
* shared_region_pager_last_unmap()
*
* This is called by VM when this memory object is no longer mapped anywhere.
*/
kern_return_t
shared_region_pager_last_unmap(
memory_object_t mem_obj)
{
shared_region_pager_t pager;
int count_unmapped;
PAGER_DEBUG(PAGER_ALL,
("shared_region_pager_last_unmap: %p\n", mem_obj));
pager = shared_region_pager_lookup(mem_obj);
lck_mtx_lock(&shared_region_pager_lock);
if (pager->is_mapped) {
/*
* All the mappings are gone, so let go of the one extra
* reference that represents all the mappings of this pager.
*/
shared_region_pager_count_mapped--;
count_unmapped = (shared_region_pager_count -
shared_region_pager_count_mapped);
if (count_unmapped > shared_region_pager_count_unmapped_max) {
shared_region_pager_count_unmapped_max = count_unmapped;
}
pager->is_mapped = FALSE;
shared_region_pager_deallocate_internal(pager, TRUE);
/* caution: deallocate_internal() released the lock ! */
} else {
lck_mtx_unlock(&shared_region_pager_lock);
}
return KERN_SUCCESS;
}
/*
 * shared_region_pager_lookup()
 *
 * Convert a memory object back into its pager structure, asserting
 * that it is indeed managed by this EMM.
 */
shared_region_pager_t
shared_region_pager_lookup(
memory_object_t mem_obj)
{
shared_region_pager_t pager;
assert(mem_obj->mo_pager_ops == &shared_region_pager_ops);
pager = (shared_region_pager_t)(uintptr_t) mem_obj;
assert(pager->ref_count > 0);
return pager;
}
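/*
 * shared_region_pager_create()
 *
 * Allocate and initialize a new pager for the given backing VM object,
 * offset and slide info, enter it at the head of the global list of
 * pagers and obtain a named memory object for it from VM.
 */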
shared_region_pager_t
shared_region_pager_create(
vm_object_t backing_object,
vm_object_offset_t backing_offset,
struct vm_shared_region_slide_info *slide_info)
{
shared_region_pager_t pager;
memory_object_control_t control;
kern_return_t kr;
vm_object_t object;
pager = (shared_region_pager_t) kalloc(sizeof(*pager));
if (pager == SHARED_REGION_PAGER_NULL) {
return SHARED_REGION_PAGER_NULL;
}
/*
* The vm_map call takes both named entry ports and raw memory
* objects in the same parameter. We need to make sure that
* vm_map does not see this object as a named entry port. So,
* we reserve the first word in the object for a fake ip_kotype
* setting - that will tell vm_map to use it as a memory object.
*/
pager->sc_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
pager->sc_pgr_hdr.mo_pager_ops = &shared_region_pager_ops;
pager->sc_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
pager->is_ready = FALSE; /* not ready until it has a "name" */
pager->ref_count = 1; /* existence reference (for the cache) */
pager->ref_count++; /* for the caller */
pager->is_mapped = FALSE;
pager->backing_object = backing_object;
pager->backing_offset = backing_offset;
pager->scp_slide_info = slide_info;
vm_object_reference(backing_object);
lck_mtx_lock(&shared_region_pager_lock);
/* enter new pager at the head of our list of pagers */
queue_enter_first(&shared_region_pager_queue,
pager,
shared_region_pager_t,
pager_queue);
shared_region_pager_count++;
if (shared_region_pager_count > shared_region_pager_count_max) {
shared_region_pager_count_max = shared_region_pager_count;
}
lck_mtx_unlock(&shared_region_pager_lock);
kr = memory_object_create_named((memory_object_t) pager,
0,
&control);
assert(kr == KERN_SUCCESS);
memory_object_mark_trusted(control);
lck_mtx_lock(&shared_region_pager_lock);
/* the new pager is now ready to be used */
pager->is_ready = TRUE;
object = memory_object_to_vm_object((memory_object_t) pager);
assert(object);
/*
* No one knows about this object and so we get away without the object lock.
* This object is _eventually_ backed by the dyld shared cache and so we want
* to benefit from the lock priority boosting.
*/
object->object_is_shared_cache = TRUE;
lck_mtx_unlock(&shared_region_pager_lock);
/* wakeup anyone waiting for this pager to be ready */
thread_wakeup(&pager->is_ready);
return pager;
}
/*
* shared_region_pager_setup()
*
* Provide the caller with a memory object backed by the provided
* "backing_object" VM object.
*/
memory_object_t
shared_region_pager_setup(
vm_object_t backing_object,
vm_object_offset_t backing_offset,
struct vm_shared_region_slide_info *slide_info)
{
shared_region_pager_t pager;
/* create new pager */
pager = shared_region_pager_create(
backing_object,
backing_offset,
slide_info);
if (pager == SHARED_REGION_PAGER_NULL) {
/* could not create a new pager */
return MEMORY_OBJECT_NULL;
}
lck_mtx_lock(&shared_region_pager_lock);
while (!pager->is_ready) {
lck_mtx_sleep(&shared_region_pager_lock,
LCK_SLEEP_DEFAULT,
&pager->is_ready,
THREAD_UNINT);
}
lck_mtx_unlock(&shared_region_pager_lock);
return (memory_object_t) pager;
}
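/*
 * shared_region_pager_trim()
 *
 * Terminate pagers that are ready, unmapped and no longer referenced by
 * any mapping, until the number of unmapped pagers drops back under the
 * cache limit.
 */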
void
shared_region_pager_trim(void)
{
shared_region_pager_t pager, prev_pager;
queue_head_t trim_queue;
int num_trim;
int count_unmapped;
lck_mtx_lock(&shared_region_pager_lock);
/*
* We have too many pagers, try and trim some unused ones,
* starting with the oldest pager at the end of the queue.
*/
queue_init(&trim_queue);
num_trim = 0;
for (pager = (shared_region_pager_t)
queue_last(&shared_region_pager_queue);
!queue_end(&shared_region_pager_queue,
(queue_entry_t) pager);
pager = prev_pager) {
/* get prev elt before we dequeue */
prev_pager = (shared_region_pager_t)
queue_prev(&pager->pager_queue);
if (pager->ref_count == 2 &&
pager->is_ready &&
!pager->is_mapped) {
/* this pager can be trimmed */
num_trim++;
/* remove this pager from the main list ... */
shared_region_pager_dequeue(pager);
/* ... and add it to our trim queue */
queue_enter_first(&trim_queue,
pager,
shared_region_pager_t,
pager_queue);
count_unmapped = (shared_region_pager_count -
shared_region_pager_count_mapped);
if (count_unmapped <= shared_region_pager_cache_limit) {
/* we have enough pagers to trim */
break;
}
}
}
if (num_trim > shared_region_pager_num_trim_max) {
shared_region_pager_num_trim_max = num_trim;
}
shared_region_pager_num_trim_total += num_trim;
lck_mtx_unlock(&shared_region_pager_lock);
/* terminate the trimmed pagers */
while (!queue_empty(&trim_queue)) {
queue_remove_first(&trim_queue,
pager,
shared_region_pager_t,
pager_queue);
pager->pager_queue.next = NULL;
pager->pager_queue.prev = NULL;
assert(pager->ref_count == 2);
/*
* We can't call deallocate_internal() because the pager
* has already been dequeued, but we still need to remove
* a reference.
*/
pager->ref_count--;
shared_region_pager_terminate_internal(pager);
}
}
|
204412.c | /* Copyright JS Foundation and other contributors, http://js.foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ecma-alloc.h"
#include "ecma-builtins.h"
#include "ecma-conversion.h"
#include "ecma-exceptions.h"
#include "ecma-gc.h"
#include "ecma-globals.h"
#include "ecma-helpers.h"
#include "ecma-builtin-helpers.h"
#include "ecma-objects.h"
#include "ecma-function-object.h"
#include "jrt.h"
#include "jcontext.h"
#if ENABLED (JERRY_BUILTIN_ERRORS)
#define ECMA_BUILTINS_INTERNAL
#include "ecma-builtins-internal.h"
#define BUILTIN_INC_HEADER_NAME "ecma-builtin-urierror.inc.h"
#define BUILTIN_UNDERSCORED_ID uri_error
#include "ecma-builtin-internal-routines-template.inc.h"
/** \addtogroup ecma ECMA
* @{
*
* \addtogroup ecmabuiltins
* @{
*
* \addtogroup urierror ECMA UriError object built-in
* @{
*/
/**
* Handle calling [[Call]] of built-in UriError object
*
* @return ecma value
*/
ecma_value_t
ecma_builtin_uri_error_dispatch_call (const ecma_value_t *arguments_list_p, /**< arguments list */
uint32_t arguments_list_len) /**< number of arguments */
{
return ecma_builtin_helper_error_dispatch_call (ECMA_ERROR_URI, arguments_list_p, arguments_list_len);
} /* ecma_builtin_uri_error_dispatch_call */
/**
* Handle calling [[Construct]] of built-in UriError object
*
* @return ecma value
*/
ecma_value_t
ecma_builtin_uri_error_dispatch_construct (const ecma_value_t *arguments_list_p, /**< arguments list */
uint32_t arguments_list_len) /**< number of arguments */
{
#if !ENABLED (JERRY_ESNEXT)
return ecma_builtin_uri_error_dispatch_call (arguments_list_p, arguments_list_len);
#else /* ENABLED (JERRY_ESNEXT) */
ecma_object_t *proto_p = ecma_op_get_prototype_from_constructor (JERRY_CONTEXT (current_new_target_p),
ECMA_BUILTIN_ID_URI_ERROR_PROTOTYPE);
if (proto_p == NULL)
{
return ECMA_VALUE_ERROR;
}
ecma_value_t result = ecma_builtin_uri_error_dispatch_call (arguments_list_p, arguments_list_len);
if (!ECMA_IS_VALUE_ERROR (result))
{
ecma_object_t *object_p = ecma_get_object_from_value (result);
ECMA_SET_NON_NULL_POINTER (object_p->u2.prototype_cp, proto_p);
}
ecma_deref_object (proto_p);
return result;
#endif /* ENABLED (JERRY_ESNEXT) */
} /* ecma_builtin_uri_error_dispatch_construct */
/**
* @}
* @}
* @}
*/
#endif /* ENABLED (JERRY_BUILTIN_ERRORS) */
|
722881.c | #pragma bank 255
// Scene: Path to Sample Town
// Sprites
#include "gbs_types.h"
#include "data/spritesheet_13.h"
#include "data/spritesheet_28.h"
#include "data/spritesheet_18.h"
BANKREF(scene_10_sprites)
const far_ptr_t scene_10_sprites[] = {
TO_FAR_PTR_T(spritesheet_13),
TO_FAR_PTR_T(spritesheet_28),
TO_FAR_PTR_T(spritesheet_18)
};
|
816865.c | /* The IGEN simulator generator for GDB, the GNU Debugger.
Copyright 2002-2021 Free Software Foundation, Inc.
Contributed by Andrew Cagney.
This file is part of GDB.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
#include "misc.h"
#include "lf.h"
#include "table.h"
#include "filter.h"
#include "igen.h"
#include "ld-insn.h"
#include "ld-decode.h"
#include "gen.h"
#include "gen-semantics.h"
#include "gen-support.h"
static void
print_support_function_name (lf *file,
function_entry * function,
int is_function_definition)
{
if (function->is_internal)
{
lf_print__function_type_function (file, print_semantic_function_type,
"INLINE_SUPPORT",
(is_function_definition ? "\n" :
" "));
print_function_name (file, function->name, NULL, NULL, NULL,
function_name_prefix_semantics);
lf_printf (file, "\n(");
lf_indent (file, +1);
print_semantic_function_formal (file, 0);
lf_indent (file, -1);
lf_printf (file, ")");
if (!is_function_definition)
lf_printf (file, ";");
lf_printf (file, "\n");
}
else
{
/* map the name onto a globally valid name */
if (!is_function_definition
&& strcmp (options.module.support.prefix.l, "") != 0)
{
lf_indent_suppress (file);
lf_printf (file, "#define %s %s%s\n",
function->name,
options.module.support.prefix.l, function->name);
}
lf_print__function_type (file,
function->type,
"INLINE_SUPPORT",
(is_function_definition ? "\n" : " "));
lf_printf (file, "%s%s\n(",
options.module.support.prefix.l, function->name);
if (options.gen.smp)
lf_printf (file,
"sim_cpu *cpu, %sinstruction_address cia, int MY_INDEX",
options.module.support.prefix.l);
else
lf_printf (file,
"SIM_DESC sd, %sinstruction_address cia, int MY_INDEX",
options.module.support.prefix.l);
if (function->param != NULL && strlen (function->param) > 0)
lf_printf (file, ", %s", function->param);
lf_printf (file, ")%s", (is_function_definition ? "\n" : ";\n"));
}
}
static void
support_h_function (lf *file, function_entry * function, void *data)
{
ASSERT (function->type != NULL);
print_support_function_name (file, function, 0 /*!is_definition */ );
lf_printf (file, "\n");
}
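/* Generate the support.h header: the SD/CPU/CIA shorthand macros, name
   mappings for the idecode module and a declaration for every support
   function.  */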
extern void
gen_support_h (lf *file, insn_table *table)
{
/* output the definition of `SD_' */
if (options.gen.smp)
{
lf_printf (file, "#define SD CPU_STATE (cpu)\n");
lf_printf (file, "#define CPU cpu\n");
lf_printf (file, "#define CPU_ cpu\n");
}
else
{
lf_printf (file, "#define SD sd\n");
lf_printf (file, "#define CPU (STATE_CPU (sd, 0))\n");
lf_printf (file, "#define CPU_ sd\n");
}
lf_printf (file, "#define CIA_ cia\n");
if (options.gen.delayed_branch)
{
lf_printf (file, "#define CIA cia.ip\n");
lf_printf (file,
"/* #define NIA nia.dp -- do not define, ambigious */\n");
}
else
{
lf_printf (file, "#define CIA cia\n");
lf_printf (file, "#define NIA nia\n");
}
lf_printf (file, "\n");
lf_printf (file, "#define SD_ CPU_, CIA_, MY_INDEX\n");
lf_printf (file, "#define _SD SD_ /* deprecated */\n");
lf_printf (file, "\n");
/* Map <PREFIX>_xxxx onto the shorter xxxx for the following names:
instruction_word
idecode_issue
semantic_illegal
Map defined here as name space problems are created when the name is
defined in idecode.h */
if (strcmp (options.module.idecode.prefix.l, "") != 0)
{
lf_indent_suppress (file);
lf_printf (file, "#define %s %s%s\n",
"instruction_word",
options.module.idecode.prefix.l, "instruction_word");
lf_printf (file, "\n");
lf_indent_suppress (file);
lf_printf (file, "#define %s %s%s\n",
"idecode_issue",
options.module.idecode.prefix.l, "idecode_issue");
lf_printf (file, "\n");
lf_indent_suppress (file);
lf_printf (file, "#define %s %s%s\n",
"semantic_illegal",
options.module.idecode.prefix.l, "semantic_illegal");
lf_printf (file, "\n");
}
/* output a declaration for all functions */
function_entry_traverse (file, table->functions, support_h_function, NULL);
lf_printf (file, "\n");
lf_printf (file, "#if defined(SUPPORT_INLINE)\n");
lf_printf (file, "# if ((SUPPORT_INLINE & INCLUDE_MODULE)\\\n");
lf_printf (file, " && (SUPPORT_INLINE & INCLUDED_BY_MODULE))\n");
lf_printf (file, "# include \"%ssupport.c\"\n",
options.module.support.prefix.l);
lf_printf (file, "# endif\n");
lf_printf (file, "#endif\n");
}
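/* Output the definition (body) of one support function into the
   generated support.c file.  */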
static void
support_c_function (lf *file, function_entry * function, void *data)
{
ASSERT (function->type != NULL);
print_support_function_name (file, function, 1 /*!is_definition */ );
lf_printf (file, "{\n");
lf_indent (file, +2);
if (function->code == NULL)
error (function->line, "Function without body (or null statement)");
lf_print__line_ref (file, function->code->line);
table_print_code (file, function->code);
if (function->is_internal)
{
lf_printf (file,
"sim_engine_abort (SD, CPU, cia, \"Internal function must longjump\\n\");\n");
lf_printf (file, "return cia;\n");
}
lf_indent (file, -2);
lf_printf (file, "}\n");
lf_print__internal_ref (file);
lf_printf (file, "\n");
}
void
gen_support_c (lf *file, insn_table *table)
{
lf_printf (file, "#include \"sim-main.h\"\n");
lf_printf (file, "#include \"%sidecode.h\"\n",
options.module.idecode.prefix.l);
lf_printf (file, "#include \"%sitable.h\"\n",
options.module.itable.prefix.l);
lf_printf (file, "#include \"%ssupport.h\"\n",
options.module.support.prefix.l);
lf_printf (file, "\n");
/* output a definition (c-code) for all functions */
function_entry_traverse (file, table->functions, support_c_function, NULL);
}
|
223354.c | // REQUIRES: system-darwin
// RUN: %clang -target x86_64-apple-darwin10 -fsyntax-only -std=c11 -isysroot %S/Inputs %s
#include <tgmath.h>
// Test the #include_next of tgmath.h works on Darwin.
#ifndef SYS_TGMATH_H
#error "SYS_TGMATH_H not defined"
#endif
#ifndef __CLANG_TGMATH_H
#error "__CLANG_TGMATH_H not defined"
#endif
|
3275.c |
/*
* Copyright (C) Igor Sysoev
* Copyright (C) Nginx, Inc.
*/
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_event.h>
#if (NGX_TEST_BUILD_EPOLL)
/* epoll declarations */
#define EPOLLIN 0x001
#define EPOLLPRI 0x002
#define EPOLLOUT 0x004
#define EPOLLRDNORM 0x040
#define EPOLLRDBAND 0x080
#define EPOLLWRNORM 0x100
#define EPOLLWRBAND 0x200
#define EPOLLMSG 0x400
#define EPOLLERR 0x008
#define EPOLLHUP 0x010
#define EPOLLRDHUP 0x2000
#define EPOLLET 0x80000000
#define EPOLLONESHOT 0x40000000
#define EPOLL_CTL_ADD 1
#define EPOLL_CTL_DEL 2
#define EPOLL_CTL_MOD 3
typedef union epoll_data {
void *ptr;
int fd;
uint32_t u32;
uint64_t u64;
} epoll_data_t;
struct epoll_event {
uint32_t events;
epoll_data_t data;
};
int epoll_create(int size);
int epoll_create(int size)
{
return -1;
}
int epoll_ctl(int epfd, int op, int fd, struct epoll_event *event);
int epoll_ctl(int epfd, int op, int fd, struct epoll_event *event)
{
return -1;
}
int epoll_wait(int epfd, struct epoll_event *events, int nevents, int timeout);
int epoll_wait(int epfd, struct epoll_event *events, int nevents, int timeout)
{
return -1;
}
#if (NGX_HAVE_FILE_AIO)
#define SYS_io_setup 245
#define SYS_io_destroy 246
#define SYS_io_getevents 247
#define SYS_eventfd 323
typedef u_int aio_context_t;
struct io_event {
uint64_t data; /* the data field from the iocb */
uint64_t obj; /* what iocb this event came from */
int64_t res; /* result code for this event */
int64_t res2; /* secondary result */
};
#endif
#endif
typedef struct {
ngx_uint_t events;
ngx_uint_t aio_requests;
} ngx_epoll_conf_t;
static ngx_int_t ngx_epoll_init(ngx_cycle_t *cycle, ngx_msec_t timer);
static void ngx_epoll_done(ngx_cycle_t *cycle);
static ngx_int_t ngx_epoll_add_event(ngx_event_t *ev, ngx_int_t event,
ngx_uint_t flags);
static ngx_int_t ngx_epoll_del_event(ngx_event_t *ev, ngx_int_t event,
ngx_uint_t flags);
static ngx_int_t ngx_epoll_add_connection(ngx_connection_t *c);
static ngx_int_t ngx_epoll_del_connection(ngx_connection_t *c,
ngx_uint_t flags);
static ngx_int_t ngx_epoll_process_events(ngx_cycle_t *cycle, ngx_msec_t timer,
ngx_uint_t flags);
#if (NGX_HAVE_FILE_AIO)
static void ngx_epoll_eventfd_handler(ngx_event_t *ev);
#endif
static void *ngx_epoll_create_conf(ngx_cycle_t *cycle);
static char *ngx_epoll_init_conf(ngx_cycle_t *cycle, void *conf);
static int ep = -1;
static struct epoll_event *event_list;
static ngx_uint_t nevents;
#if (NGX_HAVE_FILE_AIO)
int ngx_eventfd = -1;
aio_context_t ngx_aio_ctx = 0;
static ngx_event_t ngx_eventfd_event;
static ngx_connection_t ngx_eventfd_conn;
#endif
static ngx_str_t epoll_name = ngx_string("epoll");
static ngx_command_t ngx_epoll_commands[] = {
{ ngx_string("epoll_events"),
NGX_EVENT_CONF|NGX_CONF_TAKE1,
ngx_conf_set_num_slot,
0,
offsetof(ngx_epoll_conf_t, events),
NULL },
{ ngx_string("worker_aio_requests"),
NGX_EVENT_CONF|NGX_CONF_TAKE1,
ngx_conf_set_num_slot,
0,
offsetof(ngx_epoll_conf_t, aio_requests),
NULL },
ngx_null_command
};
ngx_event_module_t ngx_epoll_module_ctx = {
&epoll_name,
ngx_epoll_create_conf, /* create configuration */
ngx_epoll_init_conf, /* init configuration */
{
ngx_epoll_add_event, /* add an event */
ngx_epoll_del_event, /* delete an event */
ngx_epoll_add_event, /* enable an event */
ngx_epoll_del_event, /* disable an event */
        ngx_epoll_add_connection, /* add a connection */
        ngx_epoll_del_connection, /* delete a connection */
NULL, /* process the changes */
ngx_epoll_process_events, /* process the events */
ngx_epoll_init, /* init the events */
ngx_epoll_done, /* done the events */
}
};
ngx_module_t ngx_epoll_module = {
NGX_MODULE_V1,
&ngx_epoll_module_ctx, /* module context */
ngx_epoll_commands, /* module directives */
NGX_EVENT_MODULE, /* module type */
NULL, /* init master */
NULL, /* init module */
NULL, /* init process */
NULL, /* init thread */
NULL, /* exit thread */
NULL, /* exit process */
NULL, /* exit master */
NGX_MODULE_V1_PADDING
};
#if (NGX_HAVE_FILE_AIO)
/*
 * We call io_setup(), io_destroy(), io_submit(), and io_getevents() directly
 * as syscalls instead of using libaio, because the library header file
 * supports eventfd() only since version 0.3.107.
 *
 * Also, we do not use eventfd() from glibc, because glibc has supported it
 * only since version 2.8 and maps the two syscalls eventfd() and eventfd2()
 * into a single eventfd() function with a different number of parameters.
*/
static int
io_setup(u_int nr_reqs, aio_context_t *ctx)
{
return syscall(SYS_io_setup, nr_reqs, ctx);
}
static int
io_destroy(aio_context_t ctx)
{
return syscall(SYS_io_destroy, ctx);
}
static int
io_getevents(aio_context_t ctx, long min_nr, long nr, struct io_event *events,
struct timespec *tmo)
{
return syscall(SYS_io_getevents, ctx, min_nr, nr, events, tmo);
}
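/*
 * Set up the eventfd and AIO context used for file AIO completion
 * notifications, and register the eventfd with epoll.
 */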
static void
ngx_epoll_aio_init(ngx_cycle_t *cycle, ngx_epoll_conf_t *epcf)
{
int n;
struct epoll_event ee;
ngx_eventfd = syscall(SYS_eventfd, 0);
if (ngx_eventfd == -1) {
ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno,
"eventfd() failed");
ngx_file_aio = 0;
return;
}
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"eventfd: %d", ngx_eventfd);
n = 1;
if (ioctl(ngx_eventfd, FIONBIO, &n) == -1) {
ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno,
"ioctl(eventfd, FIONBIO) failed");
goto failed;
}
if (io_setup(epcf->aio_requests, &ngx_aio_ctx) == -1) {
ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno,
"io_setup() failed");
goto failed;
}
ngx_eventfd_event.data = &ngx_eventfd_conn;
ngx_eventfd_event.handler = ngx_epoll_eventfd_handler;
ngx_eventfd_event.log = cycle->log;
ngx_eventfd_event.active = 1;
ngx_eventfd_conn.fd = ngx_eventfd;
ngx_eventfd_conn.read = &ngx_eventfd_event;
ngx_eventfd_conn.log = cycle->log;
ee.events = EPOLLIN|EPOLLET;
ee.data.ptr = &ngx_eventfd_conn;
if (epoll_ctl(ep, EPOLL_CTL_ADD, ngx_eventfd, &ee) != -1) {
return;
}
ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno,
"epoll_ctl(EPOLL_CTL_ADD, eventfd) failed");
if (io_destroy(ngx_aio_ctx) == -1) {
ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
"io_destroy() failed");
}
failed:
if (close(ngx_eventfd) == -1) {
ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
"eventfd close() failed");
}
ngx_eventfd = -1;
ngx_aio_ctx = 0;
ngx_file_aio = 0;
}
#endif
static ngx_int_t
ngx_epoll_init(ngx_cycle_t *cycle, ngx_msec_t timer)
{
ngx_epoll_conf_t *epcf;
epcf = ngx_event_get_conf(cycle->conf_ctx, ngx_epoll_module);
if (ep == -1) {
ep = epoll_create(cycle->connection_n / 2);
if (ep == -1) {
ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno,
"epoll_create() failed");
return NGX_ERROR;
}
#if (NGX_HAVE_FILE_AIO)
ngx_epoll_aio_init(cycle, epcf);
#endif
}
if (nevents < epcf->events) {
if (event_list) {
ngx_free(event_list);
}
event_list = ngx_alloc(sizeof(struct epoll_event) * epcf->events,
cycle->log);
if (event_list == NULL) {
return NGX_ERROR;
}
}
nevents = epcf->events;
ngx_io = ngx_os_io;
ngx_event_actions = ngx_epoll_module_ctx.actions;
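 /* prefer edge-triggered ("clear") events when EPOLLET support was detected at
 * configure time; otherwise fall back to level-triggered mode */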
#if (NGX_HAVE_CLEAR_EVENT)
ngx_event_flags = NGX_USE_CLEAR_EVENT
#else
ngx_event_flags = NGX_USE_LEVEL_EVENT
#endif
|NGX_USE_GREEDY_EVENT
|NGX_USE_EPOLL_EVENT;
return NGX_OK;
}
static void
ngx_epoll_done(ngx_cycle_t *cycle)
{
if (close(ep) == -1) {
ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
"epoll close() failed");
}
ep = -1;
#if (NGX_HAVE_FILE_AIO)
if (ngx_eventfd != -1) {
if (io_destroy(ngx_aio_ctx) == -1) {
ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
"io_destroy() failed");
}
if (close(ngx_eventfd) == -1) {
ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
"eventfd close() failed");
}
ngx_eventfd = -1;
}
ngx_aio_ctx = 0;
#endif
ngx_free(event_list);
event_list = NULL;
nevents = 0;
}
static ngx_int_t
ngx_epoll_add_event(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags)
{
int op;
uint32_t events, prev;
ngx_event_t *e;
ngx_connection_t *c;
struct epoll_event ee;
c = ev->data;
events = (uint32_t) event;
if (event == NGX_READ_EVENT) {
e = c->write;
prev = EPOLLOUT;
#if (NGX_READ_EVENT != EPOLLIN|EPOLLRDHUP)
events = EPOLLIN|EPOLLRDHUP;
#endif
} else {
e = c->read;
prev = EPOLLIN|EPOLLRDHUP;
#if (NGX_WRITE_EVENT != EPOLLOUT)
events = EPOLLOUT;
#endif
}
if (e->active) {
op = EPOLL_CTL_MOD;
events |= prev;
} else {
op = EPOLL_CTL_ADD;
}
ee.events = events | (uint32_t) flags;
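 /* ngx_connection_t pointers are at least two-byte aligned, so the low bit is
 * free; the event's instance flag is stored there and checked later in
 * ngx_epoll_process_events() to filter out stale events */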
ee.data.ptr = (void *) ((uintptr_t) c | ev->instance);
ngx_log_debug3(NGX_LOG_DEBUG_EVENT, ev->log, 0,
"epoll add event: fd:%d op:%d ev:%08XD",
c->fd, op, ee.events);
if (epoll_ctl(ep, op, c->fd, &ee) == -1) {
ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_errno,
"epoll_ctl(%d, %d) failed", op, c->fd);
return NGX_ERROR;
}
ev->active = 1;
#if 0
ev->oneshot = (flags & NGX_ONESHOT_EVENT) ? 1 : 0;
#endif
return NGX_OK;
}
static ngx_int_t
ngx_epoll_del_event(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags)
{
int op;
uint32_t prev;
ngx_event_t *e;
ngx_connection_t *c;
struct epoll_event ee;
 /*
 * when the file descriptor is closed, epoll automatically removes it
 * from its interest list, so there is no need to explicitly delete the
 * event before closing the file descriptor
 */
if (flags & NGX_CLOSE_EVENT) {
ev->active = 0;
return NGX_OK;
}
c = ev->data;
if (event == NGX_READ_EVENT) {
e = c->write;
prev = EPOLLOUT;
} else {
e = c->read;
prev = EPOLLIN|EPOLLRDHUP;
}
if (e->active) {
op = EPOLL_CTL_MOD;
ee.events = prev | (uint32_t) flags;
ee.data.ptr = (void *) ((uintptr_t) c | ev->instance);
} else {
op = EPOLL_CTL_DEL;
ee.events = 0;
ee.data.ptr = NULL;
}
ngx_log_debug3(NGX_LOG_DEBUG_EVENT, ev->log, 0,
"epoll del event: fd:%d op:%d ev:%08XD",
c->fd, op, ee.events);
if (epoll_ctl(ep, op, c->fd, &ee) == -1) {
ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_errno,
"epoll_ctl(%d, %d) failed", op, c->fd);
return NGX_ERROR;
}
ev->active = 0;
return NGX_OK;
}
static ngx_int_t
ngx_epoll_add_connection(ngx_connection_t *c)
{
struct epoll_event ee;
ee.events = EPOLLIN|EPOLLOUT|EPOLLET|EPOLLRDHUP;
ee.data.ptr = (void *) ((uintptr_t) c | c->read->instance);
ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0,
"epoll add connection: fd:%d ev:%08XD", c->fd, ee.events);
if (epoll_ctl(ep, EPOLL_CTL_ADD, c->fd, &ee) == -1) {
ngx_log_error(NGX_LOG_ALERT, c->log, ngx_errno,
"epoll_ctl(EPOLL_CTL_ADD, %d) failed", c->fd);
return NGX_ERROR;
}
c->read->active = 1;
c->write->active = 1;
return NGX_OK;
}
static ngx_int_t
ngx_epoll_del_connection(ngx_connection_t *c, ngx_uint_t flags)
{
int op;
struct epoll_event ee;
 /*
 * when the file descriptor is closed, epoll automatically removes it
 * from its interest list, so there is no need to explicitly delete the
 * event before closing the file descriptor
 */
if (flags & NGX_CLOSE_EVENT) {
c->read->active = 0;
c->write->active = 0;
return NGX_OK;
}
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0,
"epoll del connection: fd:%d", c->fd);
op = EPOLL_CTL_DEL;
ee.events = 0;
ee.data.ptr = NULL;
if (epoll_ctl(ep, op, c->fd, &ee) == -1) {
ngx_log_error(NGX_LOG_ALERT, c->log, ngx_errno,
"epoll_ctl(%d, %d) failed", op, c->fd);
return NGX_ERROR;
}
c->read->active = 0;
c->write->active = 0;
return NGX_OK;
}
static ngx_int_t
ngx_epoll_process_events(ngx_cycle_t *cycle, ngx_msec_t timer, ngx_uint_t flags)
{
int events;
uint32_t revents;
ngx_int_t instance, i;
ngx_uint_t level;
ngx_err_t err;
ngx_event_t *rev, *wev, **queue;
ngx_connection_t *c;
/* NGX_TIMER_INFINITE == INFTIM */
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"epoll timer: %M", timer);
events = epoll_wait(ep, event_list, (int) nevents, timer);
err = (events == -1) ? ngx_errno : 0;
if (flags & NGX_UPDATE_TIME || ngx_event_timer_alarm) {
ngx_time_update();
}
if (err) {
if (err == NGX_EINTR) {
if (ngx_event_timer_alarm) {
ngx_event_timer_alarm = 0;
return NGX_OK;
}
level = NGX_LOG_INFO;
} else {
level = NGX_LOG_ALERT;
}
ngx_log_error(level, cycle->log, err, "epoll_wait() failed");
return NGX_ERROR;
}
if (events == 0) {
if (timer != NGX_TIMER_INFINITE) {
return NGX_OK;
}
ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
"epoll_wait() returned no events without timeout");
return NGX_ERROR;
}
ngx_mutex_lock(ngx_posted_events_mutex);
for (i = 0; i < events; i++) {
c = event_list[i].data.ptr;
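 /* the low pointer bit carries the instance flag stored by the add
 * functions; recover it, then mask it off to get the connection */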
instance = (uintptr_t) c & 1;
c = (ngx_connection_t *) ((uintptr_t) c & (uintptr_t) ~1);
rev = c->read;
if (c->fd == -1 || rev->instance != instance) {
/*
* the stale event from a file descriptor
* that was just closed in this iteration
*/
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"epoll: stale event %p", c);
continue;
}
revents = event_list[i].events;
ngx_log_debug3(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"epoll: fd:%d ev:%04XD d:%p",
c->fd, revents, event_list[i].data.ptr);
if (revents & (EPOLLERR|EPOLLHUP)) {
ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"epoll_wait() error on fd:%d ev:%04XD",
c->fd, revents);
}
#if 0
if (revents & ~(EPOLLIN|EPOLLOUT|EPOLLERR|EPOLLHUP)) {
ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
"strange epoll_wait() events fd:%d ev:%04XD",
c->fd, revents);
}
#endif
if ((revents & (EPOLLERR|EPOLLHUP))
&& (revents & (EPOLLIN|EPOLLOUT)) == 0)
{
 /*
 * if the error events were returned without EPOLLIN or EPOLLOUT,
 * then add these flags so that the events are handled in at least
 * one active handler
 */
revents |= EPOLLIN|EPOLLOUT;
}
if ((revents & EPOLLIN) && rev->active) {
#if (NGX_HAVE_EPOLLRDHUP)
if (revents & EPOLLRDHUP) {
rev->pending_eof = 1;
}
#endif
if ((flags & NGX_POST_THREAD_EVENTS) && !rev->accept) {
rev->posted_ready = 1;
} else {
rev->ready = 1;
}
if (flags & NGX_POST_EVENTS) {
queue = (ngx_event_t **) (rev->accept ?
&ngx_posted_accept_events : &ngx_posted_events);
ngx_locked_post_event(rev, queue);
} else {
rev->handler(rev);
}
}
wev = c->write;
if ((revents & EPOLLOUT) && wev->active) {
if (c->fd == -1 || wev->instance != instance) {
/*
* the stale event from a file descriptor
* that was just closed in this iteration
*/
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"epoll: stale event %p", c);
continue;
}
if (flags & NGX_POST_THREAD_EVENTS) {
wev->posted_ready = 1;
} else {
wev->ready = 1;
}
if (flags & NGX_POST_EVENTS) {
ngx_locked_post_event(wev, &ngx_posted_events);
} else {
wev->handler(wev);
}
}
}
ngx_mutex_unlock(ngx_posted_events_mutex);
return NGX_OK;
}
#if (NGX_HAVE_FILE_AIO)
static void
ngx_epoll_eventfd_handler(ngx_event_t *ev)
{
int n, events;
long i;
uint64_t ready;
ngx_err_t err;
ngx_event_t *e;
ngx_event_aio_t *aio;
struct io_event event[64];
struct timespec ts;
ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, 0, "eventfd handler");
n = read(ngx_eventfd, &ready, 8);
err = ngx_errno;
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, ev->log, 0, "eventfd: %d", n);
if (n != 8) {
if (n == -1) {
if (err == NGX_EAGAIN) {
return;
}
ngx_log_error(NGX_LOG_ALERT, ev->log, err, "read(eventfd) failed");
return;
}
ngx_log_error(NGX_LOG_ALERT, ev->log, 0,
"read(eventfd) returned only %d bytes", n);
return;
}
ts.tv_sec = 0;
ts.tv_nsec = 0;
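 /* ready holds the eventfd counter, i.e. the number of completed AIO
 * operations; drain them in batches of up to 64 until all have been reaped */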
while (ready) {
events = io_getevents(ngx_aio_ctx, 1, 64, event, &ts);
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, ev->log, 0,
"io_getevents: %l", events);
if (events > 0) {
ready -= events;
for (i = 0; i < events; i++) {
ngx_log_debug4(NGX_LOG_DEBUG_EVENT, ev->log, 0,
"io_event: %uXL %uXL %L %L",
event[i].data, event[i].obj,
event[i].res, event[i].res2);
e = (ngx_event_t *) (uintptr_t) event[i].data;
e->complete = 1;
e->active = 0;
e->ready = 1;
aio = e->data;
aio->res = event[i].res;
ngx_post_event(e, &ngx_posted_events);
}
continue;
}
if (events == 0) {
return;
}
/* events == -1 */
ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_errno,
"io_getevents() failed");
return;
}
}
#endif
static void *
ngx_epoll_create_conf(ngx_cycle_t *cycle)
{
ngx_epoll_conf_t *epcf;
epcf = ngx_palloc(cycle->pool, sizeof(ngx_epoll_conf_t));
if (epcf == NULL) {
return NULL;
}
epcf->events = NGX_CONF_UNSET;
epcf->aio_requests = NGX_CONF_UNSET;
return epcf;
}
static char *
ngx_epoll_init_conf(ngx_cycle_t *cycle, void *conf)
{
ngx_epoll_conf_t *epcf = conf;
ngx_conf_init_uint_value(epcf->events, 512);
ngx_conf_init_uint_value(epcf->aio_requests, 32);
return NGX_CONF_OK;
}
|
249445.c | /*
* Copyright (c) 2015, Freescale Semiconductor, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* o Redistributions of source code must retain the above copyright notice, this list
* of conditions and the following disclaimer.
*
* o Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* o Neither the name of Freescale Semiconductor, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
///////////////////////////////////////////////////////////////////////////////
// Includes
///////////////////////////////////////////////////////////////////////////////
// Standard C Included Files
#include <stdio.h>
#include <stdint.h>
// SDK Included Files
#include "fsl_power_manager.h"
#include "fsl_clock_manager.h"
#if FSL_FEATURE_SOC_VREF_COUNT
power_manager_error_code_t vref_pm_callback(power_manager_notify_struct_t * notify,
power_manager_callback_data_t * dataPtr)
{
power_manager_error_code_t result = kPowerManagerSuccess;
switch (notify->notifyType)
{
case kPowerManagerNotifyRecover:
/* TODO */
/* Add code here. */
break;
case kPowerManagerNotifyBefore:
/* TODO */
/* Add code here. */
break;
case kPowerManagerNotifyAfter:
/* TODO */
/* Add code here. */
break;
default:
result = kPowerManagerError;
break;
}
return result;
}
clock_manager_error_code_t vref_cm_callback(clock_notify_struct_t *notify,
void* dataPtr)
{
clock_manager_error_code_t result = kClockManagerSuccess;
switch (notify->notifyType)
{
case kClockManagerNotifyBefore:
/* TODO */
/* Add code here. */
break;
case kClockManagerNotifyRecover:
/* TODO */
/* Add code here. */
break;
case kClockManagerNotifyAfter:
/* TODO */
/* Add code here. */
break;
default:
result = kClockManagerError;
break;
}
return result;
}
#endif
|
561408.c | /*-
* Copyright (c) 2017 Jonathan Anderson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#define _POSIX_C_SOURCE 200809L
#include <sys/types.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#include <err.h>
#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>
#include <libconfparse.h>
int main(int argc, char *argv[])
{
if (argc < 3)
{
fprintf(stderr, "Usage: do_stuff <config dir> <scratch dir>\n");
return (1);
}
// Open read-only config directory
int config_dir = open(argv[1], O_DIRECTORY);
if (config_dir < 0)
{
err(-1, "error opening config dir '%s'", argv[1]);
}
// Open writable scratch directory
int scratch_dir = open(argv[2], O_DIRECTORY);
if (scratch_dir < 0)
{
err(-1, "error opening scratch dir '%s'", argv[1]);
}
// Enter sandbox!
// (the following was derived from https://eigenstate.org/notes/seccomp)
#define ArchField offsetof(struct seccomp_data, arch)
#define Allow(syscall) \
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, SYS_##syscall, 0, 1), \
BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW)
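 // Each Allow(name) expands to a compare-and-return pair: if the loaded
 // syscall number equals SYS_name, fall through to the SECCOMP_RET_ALLOW
 // statement; otherwise skip it and test the next entry in the filter.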
static const size_t SYSCALL_NUM_OFFSET =
offsetof(struct seccomp_data, nr);
struct sock_filter filter[] = {
// Check architecture: syscall numbers arch-dependent!
BPF_STMT(BPF_LD+BPF_W+BPF_ABS, ArchField),
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, AUDIT_ARCH_X86_64, 1, 0),
BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL),
// Check syscall:
BPF_STMT(BPF_LD+BPF_W+BPF_ABS, SYSCALL_NUM_OFFSET),
Allow(brk), // allow stack extension
Allow(close), // allow closing files!
Allow(exit_group), // called on exit(3)
Allow(fstat), // we need to check file sizes
Allow(mmap), // we map config files when reading
Allow(munmap), // we also unmap things
Allow(openat), // to permit openat(config_dir), etc.
Allow(write), // we write(2) to stdout
BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_TRAP), // or die!
};
struct sock_fprog filterprog = {
.len = sizeof(filter)/sizeof(filter[0]),
.filter = filter
};
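 // NO_NEW_PRIVS must be set before installing the filter when the process
 // lacks CAP_SYS_ADMIN; otherwise the PR_SET_SECCOMP call below fails.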
 if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
 perror("Could not start seccomp");
 return (1);
 }
 if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &filterprog) == -1) {
 perror("Could not start seccomp");
 return (1);
 }
// Parse config file(s)
const struct config *conf = parse_config(config_dir);
if (conf == NULL)
{
errx(-1, "error parsing configuration file(s)");
}
// Interpret "configuration"
if (!interpret_config(conf, scratch_dir))
{
errx(-1, "error interpreting configuration file");
}
return 0;
}
|
16478.c | /*-
* COPYRIGHT (C) 1986 Gary S. Brown. You may use this program, or
* code or tables extracted from it, as desired without restriction.
*
* First, the polynomial itself and its table of feedback terms. The
* polynomial is
* X^32+X^26+X^23+X^22+X^16+X^12+X^11+X^10+X^8+X^7+X^5+X^4+X^2+X^1+X^0
*
* Note that we take it "backwards" and put the highest-order term in
* the lowest-order bit. The X^32 term is "implied"; the LSB is the
* X^31 term, etc. The X^0 term (usually shown as "+1") results in
* the MSB being 1
*
* Note that the usual hardware shift register implementation, which
* is what we're using (we're merely optimizing it by doing eight-bit
* chunks at a time) shifts bits into the lowest-order term. In our
* implementation, that means shifting towards the right. Why do we
* do it this way? Because the calculated CRC must be transmitted in
* order from highest-order term to lowest-order term. UARTs transmit
* characters in order from LSB to MSB. By storing the CRC this way
* we hand it to the UART in the order low-byte to high-byte; the UART
 * sends each low-bit to high-bit; and the result is transmission bit
* by bit from highest- to lowest-order term without requiring any bit
* shuffling on our part. Reception works similarly
*
* The feedback terms table consists of 256, 32-bit entries. Notes
*
* The table can be generated at runtime if desired; code to do so
* is shown later. It might not be obvious, but the feedback
 * terms simply represent the results of eight shift/xor operations
 * for all combinations of data and CRC register values
*
 * The values must be right-shifted by eight bits by the "updcrc"
 * logic; the shift must be unsigned (bring in zeroes). On some
* hardware you could probably optimize the shift in assembler by
* using byte-swap instructions
* polynomial $edb88320
*/
/*
Implementation of CRC32 hashing for generic data.
This file was further modified by the author of the matilda software for
distribution with a compatible license. No permission was asked or given by the
original authors. For licensing enquiries contact the authors of matilda.
*/
#include "config.h"
#include "types.h"
static u32 crc32_tab[] = {
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
};
/*
Generate a CRC32 hash of the data specified.
RETURNS CRC32 hash
*/
u32 crc32(
const void * buf,
u32 size
) {
 u32 crc = 0xedb88320; /* seeded with the CRC-32 polynomial ("magic number");
 combined with the ^ ~0U below this gives a non-standard
 initial register, so results differ from plain CRC-32 */
const u8 * p = buf;
crc = crc ^ ~0U;
while (size--) {
crc = crc32_tab[(crc ^ *p++) & 0xFF] ^ (crc >> 8);
}
return crc ^ ~0U;
}
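/*
 * Editor's illustrative sketch (not part of the original file; the helper
 * name is hypothetical): the feedback table above can also be generated at
 * runtime from the reflected polynomial 0xedb88320, as the header comment
 * notes. Guarded out so the precomputed crc32_tab stays authoritative.
 */
#if 0
static void
crc32_make_table(u32 tab[256]) {
    u32 c;
    int n, k;
    for (n = 0; n < 256; n++) {
        c = (u32)n;
        for (k = 0; k < 8; k++)
            c = (c & 1) ? (0xedb88320 ^ (c >> 1)) : (c >> 1);
        tab[n] = c; /* equals crc32_tab[n] above */
    }
}
#endif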
|
340430.c | /*
* Squeezelite - lightweight headless squeezebox emulator
*
* (c) Adrian Smith 2012-2015, [email protected]
* Ralph Irving 2015-2017, [email protected]
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
// decode thread
#include "squeezelite.h"
log_level loglevel;
extern struct buffer *streambuf;
extern struct buffer *outputbuf;
extern struct streamstate stream;
extern struct outputstate output;
extern struct processstate process;
struct decodestate decode;
struct codec *codecs[MAX_CODECS];
struct codec *codec;
static bool running = true;
#define LOCK_S mutex_lock(streambuf->mutex)
#define UNLOCK_S mutex_unlock(streambuf->mutex)
#define LOCK_O mutex_lock(outputbuf->mutex)
#define UNLOCK_O mutex_unlock(outputbuf->mutex)
#define LOCK_D mutex_lock(decode.mutex);
#define UNLOCK_D mutex_unlock(decode.mutex);
#if PROCESS
#define IF_DIRECT(x) if (decode.direct) { x }
#define IF_PROCESS(x) if (!decode.direct) { x }
#define MAY_PROCESS(x) { x }
#else
#define IF_DIRECT(x) { x }
#define IF_PROCESS(x)
#define MAY_PROCESS(x)
#endif
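/* When built with PROCESS support, decode.direct selects at runtime between
 * feeding decoded samples straight to outputbuf (IF_DIRECT) and routing them
 * through the processing pipeline (IF_PROCESS); without PROCESS only the
 * direct path is compiled in. */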
static void *decode_thread() {
while (running) {
size_t bytes, space, min_space;
bool toend;
bool ran = false;
LOCK_S;
bytes = _buf_used(streambuf);
toend = (stream.state <= DISCONNECT);
UNLOCK_S;
LOCK_O;
space = _buf_space(outputbuf);
UNLOCK_O;
LOCK_D;
if (decode.state == DECODE_RUNNING && codec) {
LOG_SDEBUG("streambuf bytes: %u outputbuf space: %u", bytes, space);
IF_DIRECT(
min_space = codec->min_space;
);
IF_PROCESS(
min_space = process.max_out_frames * BYTES_PER_FRAME;
);
if (space > min_space && (bytes > codec->min_read_bytes || toend)) {
decode.state = codec->decode();
IF_PROCESS(
if (process.in_frames) {
process_samples();
}
if (decode.state == DECODE_COMPLETE) {
process_drain();
}
);
if (decode.state != DECODE_RUNNING) {
LOG_INFO("decode %s", decode.state == DECODE_COMPLETE ? "complete" : "error");
LOCK_O;
if (output.fade_mode) _checkfade(false);
UNLOCK_O;
wake_controller();
}
ran = true;
}
}
UNLOCK_D;
if (!ran) {
usleep(100000);
}
}
return 0;
}
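/* Insert ptr into codecs[] keeping the array ordered by ascending priority;
 * entries with a larger priority value are shifted one slot towards the end. */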
static void sort_codecs(int pry, struct codec* ptr) {
static int priority[MAX_CODECS];
int i, tpry;
struct codec* tptr;
for (i = 0; i < MAX_CODECS; i++) {
if (!codecs[i]) {
codecs[i] = ptr;
priority[i] = pry;
return;
}
if (pry < priority[i]) {
tptr = codecs[i];
codecs[i] = ptr;
ptr = tptr;
tpry = priority[i];
priority[i] = pry;
pry = tpry;
}
}
}
static thread_type thread;
void decode_init(log_level level, const char *include_codecs, const char *exclude_codecs) {
int i;
char* order_codecs;
loglevel = level;
LOG_INFO("init decode");
// register codecs
// dsf,dff,alc,wma,wmap,wmal,aac,spt,ogg,ogf,flc,aif,pcm,mp3
i = 0;
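 // Each codec is registered unless named in exclude_codecs. When
 // include_codecs is given, only the codecs it names are registered, and
 // each codec's offset within that string becomes its sort priority, so the
 // order of the listed names decides codec preference.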
#if DSD
if (!strstr(exclude_codecs, "dsd") && (!include_codecs || (order_codecs = strstr(include_codecs, "dsd"))))
sort_codecs((include_codecs ? order_codecs - include_codecs : i), register_dsd());
#endif
#if ALAC
if (!strstr(exclude_codecs, "alac") && (!include_codecs || (order_codecs = strstr(include_codecs, "alac"))))
sort_codecs((include_codecs ? order_codecs - include_codecs : i), register_alac());
#elif FFMPEG
if (!strstr(exclude_codecs, "alac") && (!include_codecs || (order_codecs = strstr(include_codecs, "alac"))))
sort_codecs((include_codecs ? order_codecs - include_codecs : i), register_ff("alc"));
if (!strstr(exclude_codecs, "wma") && (!include_codecs || (order_codecs = strstr(include_codecs, "wma"))))
sort_codecs((include_codecs ? order_codecs - include_codecs : i), register_ff("wma"));
#endif
#ifndef NO_FAAD
if (!strstr(exclude_codecs, "aac") && (!include_codecs || (order_codecs = strstr(include_codecs, "aac"))))
sort_codecs((include_codecs ? order_codecs - include_codecs : i), register_faad());
#endif
if (!strstr(exclude_codecs, "ogg") && (!include_codecs || (order_codecs = strstr(include_codecs, "ogg"))))
sort_codecs((include_codecs ? order_codecs - include_codecs : i), register_vorbis());
#if OPUS
if (!strstr(exclude_codecs, "ops") && (!include_codecs || (order_codecs = strstr(include_codecs, "ops"))))
sort_codecs((include_codecs ? order_codecs - include_codecs : i), register_opus());
#endif
if (!strstr(exclude_codecs, "flac") && (!include_codecs || (order_codecs = strstr(include_codecs, "flac"))))
sort_codecs((include_codecs ? order_codecs - include_codecs : i), register_flac());
if (!strstr(exclude_codecs, "pcm") && (!include_codecs || (order_codecs = strstr(include_codecs, "pcm"))))
sort_codecs((include_codecs ? order_codecs - include_codecs : i), register_pcm());
// try mad then mpg for mp3 unless command line option passed
if (!(strstr(exclude_codecs, "mp3") || strstr(exclude_codecs, "mad")) &&
(!include_codecs || (order_codecs = strstr(include_codecs, "mp3")) || (order_codecs = strstr(include_codecs, "mad"))))
sort_codecs((include_codecs ? order_codecs - include_codecs : i), register_mad());
else if (!(strstr(exclude_codecs, "mp3") || strstr(exclude_codecs, "mpg")) &&
(!include_codecs || (order_codecs = strstr(include_codecs, "mp3")) || (order_codecs = strstr(include_codecs, "mpg"))))
sort_codecs((include_codecs ? order_codecs - include_codecs : i), register_mpg());
LOG_DEBUG("include codecs: %s exclude codecs: %s", include_codecs ? include_codecs : "", exclude_codecs);
mutex_create(decode.mutex);
#if LINUX || OSX || FREEBSD
pthread_attr_t attr;
pthread_attr_init(&attr);
#ifdef PTHREAD_STACK_MIN
pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN + DECODE_THREAD_STACK_SIZE);
#endif
pthread_create(&thread, &attr, decode_thread, NULL);
pthread_attr_destroy(&attr);
#endif
#if WIN
thread = CreateThread(NULL, DECODE_THREAD_STACK_SIZE, (LPTHREAD_START_ROUTINE)&decode_thread, NULL, 0, NULL);
#endif
decode.new_stream = true;
decode.state = DECODE_STOPPED;
MAY_PROCESS(
decode.direct = true;
decode.process = false;
);
}
void decode_close(void) {
LOG_INFO("close decode");
LOCK_D;
if (codec) {
codec->close();
codec = NULL;
}
running = false;
UNLOCK_D;
#if LINUX || OSX || FREEBSD
pthread_join(thread, NULL);
#endif
mutex_destroy(decode.mutex);
}
void decode_flush(void) {
LOG_INFO("decode flush");
LOCK_D;
decode.state = DECODE_STOPPED;
IF_PROCESS(
process_flush();
);
UNLOCK_D;
}
unsigned decode_newstream(unsigned sample_rate, unsigned supported_rates[]) {
// called with O locked to get sample rate for potentially processed output stream
// release O mutex during process_newstream as it can take some time
MAY_PROCESS(
if (decode.process) {
UNLOCK_O;
sample_rate = process_newstream(&decode.direct, sample_rate, supported_rates);
LOCK_O;
}
);
return sample_rate;
}
void codec_open(u8_t format, u8_t sample_size, u8_t sample_rate, u8_t channels, u8_t endianness) {
int i;
LOG_INFO("codec open: '%c'", format);
LOCK_D;
decode.new_stream = true;
decode.state = DECODE_STOPPED;
MAY_PROCESS(
decode.direct = true; // potentially changed within codec when processing enabled
);
// find the required codec
for (i = 0; i < MAX_CODECS; ++i) {
if (codecs[i] && codecs[i]->id == format) {
if (codec && codec != codecs[i]) {
LOG_INFO("closing codec: '%c'", codec->id);
codec->close();
}
codec = codecs[i];
codec->open(sample_size, sample_rate, channels, endianness);
decode.state = DECODE_READY;
UNLOCK_D;
return;
}
}
UNLOCK_D;
LOG_ERROR("codec not found");
}
|
952847.c | /*
* Copyright (c) 2012-2016 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Test kernel mutex APIs
*
*
* This module demonstrates the kernel's priority inheritance algorithm.
* A thread that owns a mutex is promoted to the priority level of the
* highest-priority thread attempting to lock the mutex.
*
* In addition, recursive locking capabilities and the use of a private mutex
* are also tested.
*
* This module tests the following mutex routines:
*
* sys_mutex_lock
* sys_mutex_unlock
*
* Timeline for priority inheritance testing:
* - 0.0 sec: thread_05, thread_06, thread_07, thread_08, thread_09, sleep
* : main thread takes mutex_1 then sleeps
* - 0.0 sec: thread_11 sleeps
* - 0.5 sec: thread_09 wakes and waits on mutex_1
* - 1.0 sec: main thread (@ priority 9) takes mutex_2 then sleeps
* - 1.5 sec: thread_08 wakes and waits on mutex_2
* - 2.0 sec: main thread (@ priority 8) takes mutex_3 then sleeps
* - 2.5 sec: thread_07 wakes and waits on mutex_3
* - 3.0 sec: main thread (@ priority 7) takes mutex_4 then sleeps
* - 3.5 sec: thread_05 wakes and waits on mutex_4
* - 3.5 sec: thread_11 wakes and waits on mutex_3
* - 3.75 sec: thread_06 wakes and waits on mutex_4
* - 4.0 sec: main thread wakes (@ priority 5) then sleeps
* - 4.5 sec: thread_05 times out
* - 5.0 sec: main thread wakes (@ priority 6) then gives mutex_4
* : main thread (@ priority 7) sleeps
* - 5.5 sec: thread_07 times out on mutex_3
* - 6.0 sec: main thread (@ priority 8) gives mutex_3
* : main thread (@ priority 8) gives mutex_2
* : main thread (@ priority 9) gives mutex_1
* : main thread (@ priority 10) sleeps
*/
#include <tc_util.h>
#include <zephyr.h>
#include <ztest.h>
#include <sys/mutex.h>
#define STACKSIZE (512 + CONFIG_TEST_EXTRA_STACKSIZE)
static ZTEST_DMEM int tc_rc = TC_PASS; /* test case return code */
ZTEST_BMEM SYS_MUTEX_DEFINE(private_mutex);
ZTEST_BMEM SYS_MUTEX_DEFINE(mutex_1);
ZTEST_BMEM SYS_MUTEX_DEFINE(mutex_2);
ZTEST_BMEM SYS_MUTEX_DEFINE(mutex_3);
ZTEST_BMEM SYS_MUTEX_DEFINE(mutex_4);
#ifdef CONFIG_USERSPACE
static SYS_MUTEX_DEFINE(no_access_mutex);
#endif
static ZTEST_BMEM SYS_MUTEX_DEFINE(not_my_mutex);
static ZTEST_BMEM SYS_MUTEX_DEFINE(bad_count_mutex);
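/* not_my_mutex is locked by the main thread in test_main() so that
 * test_supervisor_access() can verify the -EPERM unlock-by-non-owner case;
 * bad_count_mutex is never locked, so unlocking it must return -EINVAL. */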
/**
*
* thread_05 -
*
* @return N/A
*/
void thread_05(void)
{
int rv;
k_sleep(K_MSEC(3500));
/* Wait and boost owner priority to 5 */
rv = sys_mutex_lock(&mutex_4, K_SECONDS(1));
if (rv != -EAGAIN) {
tc_rc = TC_FAIL;
TC_ERROR("Failed to timeout on mutex 0x%x\n",
(u32_t)&mutex_4);
return;
}
}
/**
*
* thread_06 -
*
* @return N/A
*/
void thread_06(void)
{
int rv;
k_sleep(K_MSEC(3750));
/*
* Wait for the mutex. There is a higher priority level thread waiting
* on the mutex, so request will not immediately contribute to raising
* the priority of the owning thread (main thread). When thread_05
* times out this thread will become the highest priority waiting
* thread. The priority of the owning thread (main thread) will not
* drop back to 7, but will instead drop to 6.
*/
rv = sys_mutex_lock(&mutex_4, K_SECONDS(2));
if (rv != 0) {
tc_rc = TC_FAIL;
TC_ERROR("Failed to take mutex 0x%x\n", (u32_t)&mutex_4);
return;
}
sys_mutex_unlock(&mutex_4);
}
/**
*
* thread_07 -
*
* @return N/A
*/
void thread_07(void)
{
int rv;
k_sleep(K_MSEC(2500));
/*
* Wait and boost owner priority to 7. While waiting, another thread of
* a very low priority level will also wait for the mutex. thread_07 is
* expected to time out around the 5.5 second mark. When it times out,
* thread_11 will become the only waiting thread for this mutex and the
* priority of the owning main thread will drop to 8.
*/
rv = sys_mutex_lock(&mutex_3, K_SECONDS(3));
if (rv != -EAGAIN) {
tc_rc = TC_FAIL;
TC_ERROR("Failed to timeout on mutex 0x%x\n",
(u32_t)&mutex_3);
return;
}
}
/**
*
* thread_08 -
*
* @return N/A
*/
void thread_08(void)
{
int rv;
k_sleep(K_MSEC(1500));
/* Wait and boost owner priority to 8 */
rv = sys_mutex_lock(&mutex_2, K_FOREVER);
if (rv != 0) {
tc_rc = TC_FAIL;
TC_ERROR("Failed to take mutex 0x%x\n", (u32_t)&mutex_2);
return;
}
sys_mutex_unlock(&mutex_2);
}
/**
*
* thread_09 -
*
* @return N/A
*/
void thread_09(void)
{
int rv;
k_sleep(K_MSEC(500)); /* Allow lower priority thread to run */
 /* <mutex_1> is already locked. */
rv = sys_mutex_lock(&mutex_1, K_NO_WAIT);
if (rv != -EBUSY) { /* This attempt to lock the mutex */
/* should not succeed. */
tc_rc = TC_FAIL;
TC_ERROR("Failed to NOT take locked mutex 0x%x\n",
(u32_t)&mutex_1);
return;
}
/* Wait and boost owner priority to 9 */
rv = sys_mutex_lock(&mutex_1, K_FOREVER);
if (rv != 0) {
tc_rc = TC_FAIL;
TC_ERROR("Failed to take mutex 0x%x\n", (u32_t)&mutex_1);
return;
}
sys_mutex_unlock(&mutex_1);
}
/**
*
* thread_11 -
*
* @return N/A
*/
void thread_11(void)
{
int rv;
k_sleep(K_MSEC(3500));
rv = sys_mutex_lock(&mutex_3, K_FOREVER);
if (rv != 0) {
tc_rc = TC_FAIL;
TC_ERROR("Failed to take mutex 0x%x\n", (u32_t)&mutex_2);
return;
}
sys_mutex_unlock(&mutex_3);
}
K_THREAD_STACK_DEFINE(thread_12_stack_area, STACKSIZE);
struct k_thread thread_12_thread_data;
extern void thread_12(void);
/**
*
* @brief Main thread to test thread_mutex_xxx interfaces
*
* This thread will lock on mutex_1, mutex_2, mutex_3 and mutex_4. It later
* recursively locks private_mutex, releases it, then re-locks it.
*
* @return N/A
*/
void test_mutex(void)
{
/*
* Main thread(test_main) priority was 10 but ztest thread runs at
* priority -1. To run the test smoothly make both main and ztest
* threads run at same priority level.
*/
k_thread_priority_set(k_current_get(), 10);
int rv;
int i;
struct sys_mutex *mutexes[4] = { &mutex_1, &mutex_2, &mutex_3,
&mutex_4 };
struct sys_mutex *givemutex[3] = { &mutex_3, &mutex_2, &mutex_1 };
int priority[4] = { 9, 8, 7, 5 };
int droppri[3] = { 8, 8, 9 };
#ifdef CONFIG_USERSPACE
int thread_flags = K_USER | K_INHERIT_PERMS;
#else
int thread_flags = 0;
#endif
TC_START("Test kernel Mutex API");
PRINT_LINE;
/*
* 1st iteration: Take mutex_1; thread_09 waits on mutex_1
* 2nd iteration: Take mutex_2: thread_08 waits on mutex_2
* 3rd iteration: Take mutex_3; thread_07 waits on mutex_3
* 4th iteration: Take mutex_4; thread_05 waits on mutex_4
*/
for (i = 0; i < 4; i++) {
rv = sys_mutex_lock(mutexes[i], K_NO_WAIT);
zassert_equal(rv, 0, "Failed to lock mutex 0x%x\n",
(u32_t)mutexes[i]);
k_sleep(K_SECONDS(1));
rv = k_thread_priority_get(k_current_get());
zassert_equal(rv, priority[i], "expected priority %d, not %d\n",
priority[i], rv);
/* Catch any errors from other threads */
zassert_equal(tc_rc, TC_PASS, NULL);
}
/* ~ 4 seconds have passed */
TC_PRINT("Done LOCKING! Current priority = %d\n",
k_thread_priority_get(k_current_get()));
k_sleep(K_SECONDS(1)); /* thread_05 should time out */
/* ~ 5 seconds have passed */
rv = k_thread_priority_get(k_current_get());
zassert_equal(rv, 6, "%s timed out and out priority should drop.\n",
"thread_05");
zassert_equal(rv, 6, "Expected priority %d, not %d\n", 6, rv);
sys_mutex_unlock(&mutex_4);
rv = k_thread_priority_get(k_current_get());
zassert_equal(rv, 7, "Gave %s and priority should drop.\n", "mutex_4");
zassert_equal(rv, 7, "Expected priority %d, not %d\n", 7, rv);
k_sleep(K_SECONDS(1)); /* thread_07 should time out */
/* ~ 6 seconds have passed */
for (i = 0; i < 3; i++) {
rv = k_thread_priority_get(k_current_get());
zassert_equal(rv, droppri[i], "Expected priority %d, not %d\n",
droppri[i], rv);
sys_mutex_unlock(givemutex[i]);
zassert_equal(tc_rc, TC_PASS, NULL);
}
rv = k_thread_priority_get(k_current_get());
zassert_equal(rv, 10, "Expected priority %d, not %d\n", 10, rv);
k_sleep(K_SECONDS(1)); /* Give thread_11 time to run */
zassert_equal(tc_rc, TC_PASS, NULL);
/* test recursive locking using a private mutex */
TC_PRINT("Testing recursive locking\n");
rv = sys_mutex_lock(&private_mutex, K_NO_WAIT);
zassert_equal(rv, 0, "Failed to lock private mutex");
rv = sys_mutex_lock(&private_mutex, K_NO_WAIT);
zassert_equal(rv, 0, "Failed to recursively lock private mutex");
/* Start thread */
k_thread_create(&thread_12_thread_data, thread_12_stack_area, STACKSIZE,
(k_thread_entry_t)thread_12, NULL, NULL, NULL,
K_PRIO_PREEMPT(12), thread_flags, K_NO_WAIT);
k_sleep(1); /* Give thread_12 a chance to block on the mutex */
sys_mutex_unlock(&private_mutex);
sys_mutex_unlock(&private_mutex); /* thread_12 should now have lock */
rv = sys_mutex_lock(&private_mutex, K_NO_WAIT);
zassert_equal(rv, -EBUSY, "Unexpectedly got lock on private mutex");
rv = sys_mutex_lock(&private_mutex, K_SECONDS(1));
zassert_equal(rv, 0, "Failed to re-obtain lock on private mutex");
sys_mutex_unlock(&private_mutex);
TC_PRINT("Recursive locking tests successful\n");
}
void test_supervisor_access(void)
{
int rv;
#ifdef CONFIG_USERSPACE
/* coverage for get_k_mutex checks */
rv = sys_mutex_lock((struct sys_mutex *)NULL, K_NO_WAIT);
zassert_true(rv == -EINVAL, "accepted bad mutex pointer");
rv = sys_mutex_lock((struct sys_mutex *)k_current_get(), K_NO_WAIT);
zassert_true(rv == -EINVAL, "accepted object that was not a mutex");
rv = sys_mutex_unlock((struct sys_mutex *)NULL);
zassert_true(rv == -EINVAL, "accepted bad mutex pointer");
rv = sys_mutex_unlock((struct sys_mutex *)k_current_get());
zassert_true(rv == -EINVAL, "accepted object that was not a mutex");
#endif /* CONFIG_USERSPACE */
rv = sys_mutex_unlock(¬_my_mutex);
zassert_true(rv == -EPERM, "unlocked a mutex that wasn't owner");
rv = sys_mutex_unlock(&bad_count_mutex);
zassert_true(rv == -EINVAL, "mutex wasn't locked");
}
void test_user_access(void)
{
#ifdef CONFIG_USERSPACE
int rv;
rv = sys_mutex_lock(&no_access_mutex, K_NO_WAIT);
zassert_true(rv == -EACCES, "accessed mutex not in memory domain");
rv = sys_mutex_unlock(&no_access_mutex);
zassert_true(rv == -EACCES, "accessed mutex not in memory domain");
#else
ztest_test_skip();
#endif /* CONFIG_USERSPACE */
}
K_THREAD_DEFINE(THREAD_05, STACKSIZE, thread_05, NULL, NULL, NULL,
5, K_USER, K_NO_WAIT);
K_THREAD_DEFINE(THREAD_06, STACKSIZE, thread_06, NULL, NULL, NULL,
6, K_USER, K_NO_WAIT);
K_THREAD_DEFINE(THREAD_07, STACKSIZE, thread_07, NULL, NULL, NULL,
7, K_USER, K_NO_WAIT);
K_THREAD_DEFINE(THREAD_08, STACKSIZE, thread_08, NULL, NULL, NULL,
8, K_USER, K_NO_WAIT);
K_THREAD_DEFINE(THREAD_09, STACKSIZE, thread_09, NULL, NULL, NULL,
9, K_USER, K_NO_WAIT);
K_THREAD_DEFINE(THREAD_11, STACKSIZE, thread_11, NULL, NULL, NULL,
11, K_USER, K_NO_WAIT);
/*test case main entry*/
void test_main(void)
{
#ifdef CONFIG_USERSPACE
k_thread_access_grant(k_current_get(),
&thread_12_thread_data, &thread_12_stack_area);
k_mem_domain_add_thread(&ztest_mem_domain, THREAD_05);
k_mem_domain_add_thread(&ztest_mem_domain, THREAD_06);
k_mem_domain_add_thread(&ztest_mem_domain, THREAD_07);
k_mem_domain_add_thread(&ztest_mem_domain, THREAD_08);
k_mem_domain_add_thread(&ztest_mem_domain, THREAD_09);
k_mem_domain_add_thread(&ztest_mem_domain, THREAD_11);
#endif
sys_mutex_lock(¬_my_mutex, K_NO_WAIT);
/* We deliberately disable userspace, even on platforms that
* support it, so that the alternate implementation of sys_mutex
* (which is just a very thin wrapper to k_mutex) is exercised.
* This requires us to not attempt to start the tests in user
* mode, as this will otherwise fail an assertion in the thread code.
*/
#ifdef CONFIG_USERSPACE
ztest_test_suite(mutex_complex,
ztest_user_unit_test(test_mutex),
ztest_user_unit_test(test_user_access),
ztest_unit_test(test_supervisor_access));
ztest_run_test_suite(mutex_complex);
#else
ztest_test_suite(mutex_complex,
ztest_unit_test(test_mutex),
ztest_unit_test(test_user_access),
ztest_unit_test(test_supervisor_access));
ztest_run_test_suite(mutex_complex);
#endif
}
|
367376.c | /*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2001-2002 by NaN Holding BV.
* All rights reserved.
*/
/** \file
* \ingroup edtransform
*/
#include <stdlib.h>
#include "BLI_math.h"
#include "BLI_string.h"
#include "BKE_context.h"
#include "ED_screen.h"
#include "UI_interface.h"
#include "BLT_translation.h"
#include "transform.h"
#include "transform_mode.h"
/* -------------------------------------------------------------------- */
/* Transform (Mirror) */
/** \name Transform Mirror
* \{ */
static void applyMirror(TransInfo *t, const int UNUSED(mval[2]))
{
float size[3], mat[3][3];
int i;
char str[UI_MAX_DRAW_STR];
copy_v3_v3(t->values_final, t->values);
/* OPTIMIZATION:
* This still recalculates transformation on mouse move
* while it should only recalculate on constraint change. */
/* if an axis has been selected */
if (t->con.mode & CON_APPLY) {
size[0] = size[1] = size[2] = -1;
size_to_mat3(mat, size);
if (t->con.applySize) {
t->con.applySize(t, NULL, NULL, mat);
}
BLI_snprintf(str, sizeof(str), TIP_("Mirror%s"), t->con.text);
FOREACH_TRANS_DATA_CONTAINER (t, tc) {
TransData *td = tc->data;
for (i = 0; i < tc->data_len; i++, td++) {
if (td->flag & TD_NOACTION) {
break;
}
if (td->flag & TD_SKIP) {
continue;
}
ElementResize(t, tc, td, mat);
}
}
recalcData(t);
ED_area_status_text(t->area, str);
}
else {
size[0] = size[1] = size[2] = 1;
size_to_mat3(mat, size);
FOREACH_TRANS_DATA_CONTAINER (t, tc) {
TransData *td = tc->data;
for (i = 0; i < tc->data_len; i++, td++) {
if (td->flag & TD_NOACTION) {
break;
}
if (td->flag & TD_SKIP) {
continue;
}
ElementResize(t, tc, td, mat);
}
}
recalcData(t);
if (t->flag & T_2D_EDIT) {
ED_area_status_text(t->area, TIP_("Select a mirror axis (X, Y)"));
}
else {
ED_area_status_text(t->area, TIP_("Select a mirror axis (X, Y, Z)"));
}
}
}
void initMirror(TransInfo *t)
{
t->transform = applyMirror;
initMouseInputMode(t, &t->mouse, INPUT_NONE);
t->flag |= T_NULL_ONE;
if ((t->flag & T_EDIT) == 0) {
t->flag |= T_NO_ZERO;
}
}
/** \} */
|
188966.c | /* $OpenBSD: certhash.c,v 1.18 2021/08/28 08:16:39 tb Exp $ */
/*
* Copyright (c) 2014, 2015 Joel Sing <[email protected]>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include <dirent.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <openssl/bio.h>
#include <openssl/evp.h>
#include <openssl/pem.h>
#include <openssl/x509.h>
#include "apps.h"
static struct {
int dryrun;
int verbose;
} certhash_config;
static const struct option certhash_options[] = {
{
.name = "n",
.desc = "Perform a dry-run - do not make any changes",
.type = OPTION_FLAG,
.opt.flag = &certhash_config.dryrun,
},
{
.name = "v",
.desc = "Verbose",
.type = OPTION_FLAG,
.opt.flag = &certhash_config.verbose,
},
{ NULL },
};
struct hashinfo {
char *filename;
char *target;
unsigned long hash;
unsigned int index;
unsigned char fingerprint[EVP_MAX_MD_SIZE];
int is_crl;
int is_dup;
int exists;
int changed;
struct hashinfo *reference;
struct hashinfo *next;
};
static struct hashinfo *
hashinfo(const char *filename, unsigned long hash, unsigned char *fingerprint)
{
struct hashinfo *hi;
if ((hi = calloc(1, sizeof(*hi))) == NULL)
return (NULL);
if (filename != NULL) {
if ((hi->filename = strdup(filename)) == NULL) {
free(hi);
return (NULL);
}
}
hi->hash = hash;
if (fingerprint != NULL)
memcpy(hi->fingerprint, fingerprint, sizeof(hi->fingerprint));
return (hi);
}
static void
hashinfo_free(struct hashinfo *hi)
{
if (hi == NULL)
return;
free(hi->filename);
free(hi->target);
free(hi);
}
#ifdef DEBUG
static void
hashinfo_print(struct hashinfo *hi)
{
int i;
printf("hashinfo %s %08lx %u %i\n", hi->filename, hi->hash,
hi->index, hi->is_crl);
for (i = 0; i < (int)EVP_MAX_MD_SIZE; i++) {
printf("%02X%c", hi->fingerprint[i],
(i + 1 == (int)EVP_MAX_MD_SIZE) ? '\n' : ':');
}
}
#endif
static int
hashinfo_compare(const void *a, const void *b)
{
struct hashinfo *hia = *(struct hashinfo **)a;
struct hashinfo *hib = *(struct hashinfo **)b;
int rv;
rv = hia->hash < hib->hash ? -1 : hia->hash > hib->hash;
if (rv != 0)
return (rv);
rv = memcmp(hia->fingerprint, hib->fingerprint,
sizeof(hia->fingerprint));
if (rv != 0)
return (rv);
return strcmp(hia->filename, hib->filename);
}
static struct hashinfo *
hashinfo_chain(struct hashinfo *head, struct hashinfo *entry)
{
struct hashinfo *hi = head;
if (hi == NULL)
return (entry);
while (hi->next != NULL)
hi = hi->next;
hi->next = entry;
return (head);
}
static void
hashinfo_chain_free(struct hashinfo *hi)
{
struct hashinfo *next;
while (hi != NULL) {
next = hi->next;
hashinfo_free(hi);
hi = next;
}
}
static size_t
hashinfo_chain_length(struct hashinfo *hi)
{
int len = 0;
while (hi != NULL) {
len++;
hi = hi->next;
}
return (len);
}
static int
hashinfo_chain_sort(struct hashinfo **head)
{
struct hashinfo **list, *entry;
size_t len;
int i;
if (*head == NULL)
return (0);
len = hashinfo_chain_length(*head);
if ((list = reallocarray(NULL, len, sizeof(struct hashinfo *))) == NULL)
return (-1);
for (entry = *head, i = 0; entry != NULL; entry = entry->next, i++)
list[i] = entry;
qsort(list, len, sizeof(struct hashinfo *), hashinfo_compare);
*head = entry = list[0];
for (i = 1; i < len; i++) {
entry->next = list[i];
entry = list[i];
}
entry->next = NULL;
free(list);
return (0);
}
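/* Hash link names follow the same convention as OpenSSL's c_rehash: eight
 * lowercase hex digits of the subject (or, for CRLs, issuer) name hash, a
 * dot, an optional "r" for CRLs, and a collision index, e.g. "5ed36f99.0"
 * or "5ed36f99.r0" (hash values here are illustrative only). */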
static char *
hashinfo_linkname(struct hashinfo *hi)
{
char *filename;
if (asprintf(&filename, "%08lx.%s%u", hi->hash,
(hi->is_crl ? "r" : ""), hi->index) == -1)
return (NULL);
return (filename);
}
static int
filename_is_hash(const char *filename)
{
const char *p = filename;
while ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f'))
p++;
if (*p++ != '.')
return (0);
if (*p == 'r') /* CRL format. */
p++;
while (*p >= '0' && *p <= '9')
p++;
if (*p != '\0')
return (0);
return (1);
}
static int
filename_is_pem(const char *filename)
{
const char *q, *p = filename;
if ((q = strchr(p, '\0')) == NULL)
return (0);
if ((q - p) < 4)
return (0);
if (strncmp((q - 4), ".pem", 4) != 0)
return (0);
return (1);
}
static struct hashinfo *
hashinfo_from_linkname(const char *linkname, const char *target)
{
struct hashinfo *hi = NULL;
const char *errstr;
char *l, *p, *ep;
long long val;
if ((l = strdup(linkname)) == NULL)
goto err;
if ((p = strchr(l, '.')) == NULL)
goto err;
*p++ = '\0';
if ((hi = hashinfo(linkname, 0, NULL)) == NULL)
goto err;
if ((hi->target = strdup(target)) == NULL)
goto err;
errno = 0;
val = strtoll(l, &ep, 16);
if (l[0] == '\0' || *ep != '\0')
goto err;
if (errno == ERANGE && (val == LLONG_MAX || val == LLONG_MIN))
goto err;
if (val < 0 || val > ULONG_MAX)
goto err;
hi->hash = (unsigned long)val;
if (*p == 'r') {
hi->is_crl = 1;
p++;
}
val = strtonum(p, 0, 0xffffffff, &errstr);
if (errstr != NULL)
goto err;
hi->index = (unsigned int)val;
goto done;
err:
hashinfo_free(hi);
hi = NULL;
done:
free(l);
return (hi);
}
static struct hashinfo *
certhash_cert(BIO *bio, const char *filename)
{
unsigned char fingerprint[EVP_MAX_MD_SIZE];
struct hashinfo *hi = NULL;
const EVP_MD *digest;
X509 *cert = NULL;
unsigned long hash;
unsigned int len;
if ((cert = PEM_read_bio_X509(bio, NULL, NULL, NULL)) == NULL)
goto err;
hash = X509_subject_name_hash(cert);
digest = EVP_sha256();
if (X509_digest(cert, digest, fingerprint, &len) != 1) {
fprintf(stderr, "out of memory\n");
goto err;
}
hi = hashinfo(filename, hash, fingerprint);
err:
X509_free(cert);
return (hi);
}
static struct hashinfo *
certhash_crl(BIO *bio, const char *filename)
{
unsigned char fingerprint[EVP_MAX_MD_SIZE];
struct hashinfo *hi = NULL;
const EVP_MD *digest;
X509_CRL *crl = NULL;
unsigned long hash;
unsigned int len;
if ((crl = PEM_read_bio_X509_CRL(bio, NULL, NULL, NULL)) == NULL)
return (NULL);
hash = X509_NAME_hash(X509_CRL_get_issuer(crl));
digest = EVP_sha256();
if (X509_CRL_digest(crl, digest, fingerprint, &len) != 1) {
fprintf(stderr, "out of memory\n");
goto err;
}
hi = hashinfo(filename, hash, fingerprint);
err:
X509_CRL_free(crl);
return (hi);
}
static int
certhash_addlink(struct hashinfo **links, struct hashinfo *hi)
{
struct hashinfo *link = NULL;
if ((link = hashinfo(NULL, hi->hash, hi->fingerprint)) == NULL)
goto err;
if ((link->filename = hashinfo_linkname(hi)) == NULL)
goto err;
link->reference = hi;
link->changed = 1;
*links = hashinfo_chain(*links, link);
hi->reference = link;
return (0);
err:
hashinfo_free(link);
return (-1);
}
static void
certhash_findlink(struct hashinfo *links, struct hashinfo *hi)
{
struct hashinfo *link;
for (link = links; link != NULL; link = link->next) {
if (link->is_crl == hi->is_crl &&
link->hash == hi->hash &&
link->index == hi->index &&
link->reference == NULL) {
link->reference = hi;
if (link->target == NULL ||
strcmp(link->target, hi->filename) != 0)
link->changed = 1;
hi->reference = link;
break;
}
}
}
static void
certhash_index(struct hashinfo *head, const char *name)
{
struct hashinfo *last, *entry;
int index = 0;
last = NULL;
for (entry = head; entry != NULL; entry = entry->next) {
if (last != NULL) {
if (entry->hash == last->hash) {
if (memcmp(entry->fingerprint,
last->fingerprint,
sizeof(entry->fingerprint)) == 0) {
fprintf(stderr, "WARNING: duplicate %s "
"in %s (using %s), ignoring...\n",
name, entry->filename,
last->filename);
entry->is_dup = 1;
continue;
}
index++;
} else {
index = 0;
}
}
entry->index = index;
last = entry;
}
}
static int
certhash_merge(struct hashinfo **links, struct hashinfo **certs,
struct hashinfo **crls)
{
struct hashinfo *cert, *crl;
/* Pass 1 - sort and index entries. */
if (hashinfo_chain_sort(certs) == -1)
return (-1);
if (hashinfo_chain_sort(crls) == -1)
return (-1);
certhash_index(*certs, "certificate");
certhash_index(*crls, "CRL");
/* Pass 2 - map to existing links. */
for (cert = *certs; cert != NULL; cert = cert->next) {
if (cert->is_dup == 1)
continue;
certhash_findlink(*links, cert);
}
for (crl = *crls; crl != NULL; crl = crl->next) {
if (crl->is_dup == 1)
continue;
certhash_findlink(*links, crl);
}
/* Pass 3 - determine missing links. */
for (cert = *certs; cert != NULL; cert = cert->next) {
if (cert->is_dup == 1 || cert->reference != NULL)
continue;
if (certhash_addlink(links, cert) == -1)
return (-1);
}
for (crl = *crls; crl != NULL; crl = crl->next) {
if (crl->is_dup == 1 || crl->reference != NULL)
continue;
if (certhash_addlink(links, crl) == -1)
return (-1);
}
return (0);
}
static int
certhash_link(struct dirent *dep, struct hashinfo **links)
{
struct hashinfo *hi = NULL;
char target[PATH_MAX];
struct stat sb;
int n;
if (lstat(dep->d_name, &sb) == -1) {
fprintf(stderr, "failed to stat %s\n", dep->d_name);
return (-1);
}
if (!S_ISLNK(sb.st_mode))
return (0);
n = readlink(dep->d_name, target, sizeof(target) - 1);
if (n == -1) {
fprintf(stderr, "failed to readlink %s\n", dep->d_name);
return (-1);
}
target[n] = '\0';
hi = hashinfo_from_linkname(dep->d_name, target);
if (hi == NULL) {
fprintf(stderr, "failed to get hash info %s\n", dep->d_name);
return (-1);
}
hi->exists = 1;
*links = hashinfo_chain(*links, hi);
return (0);
}
static int
certhash_file(struct dirent *dep, struct hashinfo **certs,
struct hashinfo **crls)
{
struct hashinfo *hi = NULL;
int has_cert, has_crl;
int ret = -1;
BIO *bio = NULL;
FILE *f;
has_cert = has_crl = 0;
if ((f = fopen(dep->d_name, "r")) == NULL) {
fprintf(stderr, "failed to fopen %s\n", dep->d_name);
goto err;
}
if ((bio = BIO_new_fp(f, BIO_CLOSE)) == NULL) {
fprintf(stderr, "failed to create bio\n");
fclose(f);
goto err;
}
if ((hi = certhash_cert(bio, dep->d_name)) != NULL) {
has_cert = 1;
*certs = hashinfo_chain(*certs, hi);
}
if (BIO_reset(bio) != 0) {
fprintf(stderr, "BIO_reset failed\n");
goto err;
}
if ((hi = certhash_crl(bio, dep->d_name)) != NULL) {
has_crl = hi->is_crl = 1;
*crls = hashinfo_chain(*crls, hi);
}
if (!has_cert && !has_crl)
fprintf(stderr, "PEM file %s does not contain a certificate "
"or CRL, ignoring...\n", dep->d_name);
ret = 0;
err:
BIO_free(bio);
return (ret);
}
static int
certhash_directory(const char *path)
{
struct hashinfo *links = NULL, *certs = NULL, *crls = NULL, *link;
int ret = 0;
struct dirent *dep;
DIR *dip = NULL;
if ((dip = opendir(".")) == NULL) {
fprintf(stderr, "failed to open directory %s\n", path);
goto err;
}
if (certhash_config.verbose)
fprintf(stdout, "scanning directory %s\n", path);
/* Create lists of existing hash links, certs and CRLs. */
while ((dep = readdir(dip)) != NULL) {
if (filename_is_hash(dep->d_name)) {
if (certhash_link(dep, &links) == -1)
goto err;
}
if (filename_is_pem(dep->d_name)) {
if (certhash_file(dep, &certs, &crls) == -1)
goto err;
}
}
if (certhash_merge(&links, &certs, &crls) == -1) {
fprintf(stderr, "certhash merge failed\n");
goto err;
}
/* Remove spurious links. */
for (link = links; link != NULL; link = link->next) {
if (link->exists == 0 ||
(link->reference != NULL && link->changed == 0))
continue;
if (certhash_config.verbose)
fprintf(stdout, "%s link %s -> %s\n",
(certhash_config.dryrun ? "would remove" :
"removing"), link->filename, link->target);
if (certhash_config.dryrun)
continue;
if (unlink(link->filename) == -1) {
fprintf(stderr, "failed to remove link %s\n",
link->filename);
goto err;
}
}
/* Create missing links. */
for (link = links; link != NULL; link = link->next) {
if (link->exists == 1 && link->changed == 0)
continue;
if (certhash_config.verbose)
fprintf(stdout, "%s link %s -> %s\n",
(certhash_config.dryrun ? "would create" :
"creating"), link->filename,
link->reference->filename);
if (certhash_config.dryrun)
continue;
if (symlink(link->reference->filename, link->filename) == -1) {
fprintf(stderr, "failed to create link %s -> %s\n",
link->filename, link->reference->filename);
goto err;
}
}
goto done;
err:
ret = 1;
done:
hashinfo_chain_free(certs);
hashinfo_chain_free(crls);
hashinfo_chain_free(links);
if (dip != NULL)
closedir(dip);
return (ret);
}
static void
certhash_usage(void)
{
fprintf(stderr, "usage: certhash [-nv] dir ...\n");
options_usage(certhash_options);
}
int
certhash_main(int argc, char **argv)
{
int argsused;
int i, cwdfd, ret = 0;
if (single_execution) {
if (pledge("stdio cpath wpath rpath", NULL) == -1) {
perror("pledge");
exit(1);
}
}
memset(&certhash_config, 0, sizeof(certhash_config));
if (options_parse(argc, argv, certhash_options, NULL, &argsused) != 0) {
certhash_usage();
return (1);
}
if ((cwdfd = open(".", O_RDONLY)) == -1) {
perror("failed to open current directory");
return (1);
}
for (i = argsused; i < argc; i++) {
if (chdir(argv[i]) == -1) {
fprintf(stderr,
"failed to change to directory %s: %s\n",
argv[i], strerror(errno));
ret = 1;
continue;
}
ret |= certhash_directory(argv[i]);
if (fchdir(cwdfd) == -1) {
perror("failed to restore current directory");
ret = 1;
break; /* can't continue safely */
}
}
close(cwdfd);
return (ret);
}
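/*
 * Editor's sketch, not part of certhash.c: the links handled above appear
 * to follow the usual c_rehash naming convention, "HHHHHHHH.N" for
 * certificates and "HHHHHHHH.rN" for CRLs, where HHHHHHHH is the lowercase
 * hex subject-name hash that filename_is_hash()/hashinfo_from_linkname()
 * parse. A minimal illustration of building such a name, guarded out so it
 * does not affect the original build (the helper name is hypothetical and
 * error handling is omitted):
 */
#if 0
static int
example_cert_link_name(X509 *cert, int idx, char *buf, size_t buflen)
{
	unsigned long hash;

	hash = X509_NAME_hash(X509_get_subject_name(cert));
	return snprintf(buf, buflen, "%08lx.%d", hash, idx);
}
#endif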
|
127124.c | #include "triforce.h"
static uint32_t frames = 0;
static uint32_t render_triforce_flag = 0;
#define FRAMES_PER_CYCLE 2
#define TRIFORCE_SPRITE_FRAMES 16
#define TRIFORCE_FRAMES_VISIBLE 100 // 20 Frames seems to be about 1 second
#define TRIFORCE_FRAMES_FADE_AWAY 80
#define TRIFORCE_FRAMES_FADE_INTO 5
void set_triforce_render() {
render_triforce_flag = 1;
frames = frames > TRIFORCE_FRAMES_FADE_INTO ? TRIFORCE_FRAMES_FADE_INTO : frames;
}
void draw_triforce_count(z64_disp_buf_t *db) {
// Must be triforce hunt and triforce should be drawable, and we should either be on the pause screen or the render triforce flag should be set
if (!(TRIFORCE_HUNT_ENABLED && CAN_DRAW_TRIFORCE && (render_triforce_flag == 1 || z64_game.pause_ctxt.state == 6))) {
return;
}
uint8_t alpha;
// In the pause screen always draw
if (z64_game.pause_ctxt.state == 6) {
alpha = 255;
frames = frames % (TRIFORCE_SPRITE_FRAMES * FRAMES_PER_CYCLE);
} else {
// Do a fade in/out effect if not in pause screen
if ( frames <= TRIFORCE_FRAMES_FADE_INTO ) {
alpha = frames * 255 / TRIFORCE_FRAMES_FADE_INTO;
} else if (frames <= TRIFORCE_FRAMES_FADE_INTO + TRIFORCE_FRAMES_VISIBLE ) {
alpha = 255;
} else if (frames <= TRIFORCE_FRAMES_FADE_INTO + TRIFORCE_FRAMES_VISIBLE + TRIFORCE_FRAMES_FADE_AWAY) {
alpha = (frames - TRIFORCE_FRAMES_FADE_INTO - TRIFORCE_FRAMES_VISIBLE) * 255 / TRIFORCE_FRAMES_FADE_AWAY;
alpha = 255 - alpha;
} else {
render_triforce_flag = 0;
frames = 0;
return;
}
}
frames++;
int pieces = z64_file.scene_flags[0x48].unk_00_; //Unused word in scene x48.
// Get length of string to draw
// There's probably a better way to do this; log10 wasn't working, though
int pieces_digits = 0;
int pieces_copy = pieces;
while(pieces_copy >= 1) {
pieces_digits++;
pieces_copy /= 10;
}
pieces_digits = pieces_digits == 0 ? 1 : pieces_digits;
int required_digits = 0;
int required_copy = TRIFORCE_PIECES_REQUIRED;
while(required_copy >= 1) {
required_digits++;
required_copy /= 10;
}
required_digits = required_digits == 0 ? 1 : required_digits;
// Setup draw location
int str_len = required_digits + pieces_digits + 1;
int total_w = str_len * font_sprite.tile_w + triforce_sprite.tile_w;
int draw_x = Z64_SCREEN_WIDTH / 2 - total_w / 2;
int draw_y_text = Z64_SCREEN_HEIGHT - (font_sprite.tile_h * 1.5) + 1;
int draw_y_triforce = Z64_SCREEN_HEIGHT - (triforce_sprite.tile_h * 1.5) + 3 + 1;
// Create collected/required string
char text[str_len + 1];
text[str_len] = 0;
pieces_copy = pieces;
for(int i = pieces_digits - 1; i >= 0; i--) {
text[i] = (pieces_copy % 10) + '0';
pieces_copy /= 10;
}
text[pieces_digits] = 0x2F; // writes a slash (/)
required_copy = TRIFORCE_PIECES_REQUIRED;
for(int i = str_len - 1; i > pieces_digits; i--) {
text[i] = (required_copy % 10) + '0';
required_copy /= 10;
}
// Call setup display list
gSPDisplayList(db->p++, &setup_db);
gDPPipeSync(db->p++);
gDPSetCombineMode(db->p++, G_CC_MODULATEIA_PRIM, G_CC_MODULATEIA_PRIM);
gDPSetPrimColor(db->p++, 0, 0, 0xDA, 0xD3, 0x0B, alpha);
text_print(text , draw_x, draw_y_text);
draw_x += str_len * font_sprite.tile_w;
gDPSetPrimColor(db->p++, 0, 0, 0xF4, 0xEC, 0x30, alpha);
// Draw triforce
int sprite = (frames / FRAMES_PER_CYCLE) % TRIFORCE_SPRITE_FRAMES;
sprite_load(db, &triforce_sprite, sprite, 1);
sprite_draw(db, &triforce_sprite, 0, draw_x, draw_y_triforce, triforce_sprite.tile_w, triforce_sprite.tile_h);
text_flush(db);
gDPFullSync(db->p++);
gSPEndDisplayList(db->p++);
}
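/*
 * Editor's sketch (hypothetical helper, not referenced above): the two
 * digit-counting loops in draw_triforce_count() could be factored into one
 * routine, which also avoids the floating-point log10 approach the comment
 * mentions.
 */
static int decimal_digits(int value) {
    int digits = 1; // zero still prints one character
    while (value >= 10) {
        digits++;
        value /= 10;
    }
    return digits;
}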
|
100662.c | /* -*- c-file-style: "ruby"; indent-tabs-mode: nil -*- */
/**********************************************************************
rbglib_i18n.c -
$Author: ktou $
$Date: 2006/02/08 14:54:51 $
Copyright (C) 2006 Kouhei Sutou
**********************************************************************/
#include "rbgprivate.h"
#include "rbglib.h"
#undef _
#include <glib/gi18n.h>
#if GLIB_CHECK_VERSION(2,6,0)
static VALUE
rbglib_m_language_names(VALUE self)
{
const gchar * const *languages;
const gchar *language;
VALUE rb_languages = rb_ary_new();
languages = g_get_language_names();
for (language = *languages; *languages; language = *(++languages)) {
rb_ary_push(rb_languages, CSTR2RVAL(language));
}
return rb_languages;
}
#endif
void
Init_glib_i18n()
{
/* glib/gi18n.h */
#if GLIB_CHECK_VERSION(2,6,0)
rb_define_module_function(mGLib, "language_names",
rbglib_m_language_names, 0);
#endif
}
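/*
 * Editor's sketch of the underlying GLib call wrapped above, written as a
 * stand-alone C program and guarded out so the extension builds exactly as
 * before (assumes GLib >= 2.6 and the usual pkg-config glib-2.0 flags).
 * g_get_language_names() returns a NULL-terminated array ordered from most
 * to least preferred locale.
 */
#if 0
#include <glib.h>
#include <stdio.h>

int
main(void)
{
    const gchar * const *names = g_get_language_names();

    for (; *names != NULL; names++)
        printf("%s\n", *names);
    return 0;
}
#endif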
|
560780.c | #include <stdio.h>
#include <stdlib.h>
int main(void){
	int c;
	FILE *in, *out;
	/* Fail cleanly if either file cannot be opened, instead of passing NULL to fgetc()/fputc(). */
	if ((in = fopen("file.in", "r")) == NULL) {
		perror("file.in");
		return 1;
	}
	if ((out = fopen("file.out", "w")) == NULL) {
		perror("file.out");
		fclose(in);
		return 1;
	}
	while ((c = fgetc(in)) != EOF)
		fputc(c, out);
	fclose(out);
	fclose(in);
	return 0;
}
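/*
 * Editor's sketch of the same copy done with a buffer (hypothetical
 * variant, guarded out): fread/fwrite move the data in blocks instead of
 * one character per library call.
 */
#if 0
static int copy_buffered(FILE *in, FILE *out)
{
	char buf[4096];
	size_t n;

	while ((n = fread(buf, 1, sizeof(buf), in)) > 0) {
		if (fwrite(buf, 1, n, out) != n)
			return -1;
	}
	return ferror(in) ? -1 : 0;
}
#endif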
|
206941.c | /* -*- c-basic-offset: 2 -*- */
/*
Copyright(C) 2014-2015 Brazil
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License version 2.1 as published by the Free Software Foundation.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "../grn_ctx_impl.h"
#ifdef GRN_WITH_MRUBY
#include <mruby.h>
#include <mruby/class.h>
#include <mruby/data.h>
#include <mruby/string.h>
#include "mrb_ctx.h"
#include "mrb_hash_table.h"
#include "mrb_options.h"
static struct mrb_data_type mrb_grn_hash_table_type = {
"Groonga::HashTable",
NULL
};
static mrb_value
mrb_grn_hash_table_class_create(mrb_state *mrb, mrb_value klass)
{
grn_ctx *ctx = (grn_ctx *)mrb->ud;
mrb_value mrb_options = mrb_nil_value();
const char *name = NULL;
unsigned int name_size = 0;
const char *path = NULL;
grn_obj_flags flags = GRN_OBJ_TABLE_HASH_KEY;
grn_obj *key_type = NULL;
grn_obj *value_type = NULL;
grn_obj *table;
mrb_get_args(mrb, "|H", &mrb_options);
if (!mrb_nil_p(mrb_options)) {
mrb_value mrb_name;
mrb_value mrb_flags;
mrb_value mrb_key_type;
mrb_value mrb_value_type;
mrb_name = grn_mrb_options_get_lit(mrb, mrb_options, "name");
if (!mrb_nil_p(mrb_name)) {
name = RSTRING_PTR(mrb_name);
name_size = RSTRING_LEN(mrb_name);
}
mrb_flags = grn_mrb_options_get_lit(mrb, mrb_options, "flags");
if (!mrb_nil_p(mrb_flags)) {
flags |= mrb_fixnum(mrb_flags);
}
mrb_key_type = grn_mrb_options_get_lit(mrb, mrb_options, "key_type");
if (!mrb_nil_p(mrb_key_type)) {
key_type = DATA_PTR(mrb_key_type);
}
mrb_value_type = grn_mrb_options_get_lit(mrb, mrb_options, "value_type");
if (!mrb_nil_p(mrb_value_type)) {
value_type = DATA_PTR(mrb_value_type);
}
}
table = grn_table_create(ctx, name, name_size, path, flags,
key_type, value_type);
grn_mrb_ctx_check(mrb);
return mrb_funcall(mrb, klass, "new", 1, mrb_cptr_value(mrb, table));
}
static mrb_value
mrb_grn_hash_table_initialize(mrb_state *mrb, mrb_value self)
{
mrb_value mrb_hash_table_ptr;
mrb_get_args(mrb, "o", &mrb_hash_table_ptr);
DATA_TYPE(self) = &mrb_grn_hash_table_type;
DATA_PTR(self) = mrb_cptr(mrb_hash_table_ptr);
return self;
}
void
grn_mrb_hash_table_init(grn_ctx *ctx)
{
grn_mrb_data *data = &(ctx->impl->mrb);
mrb_state *mrb = data->state;
struct RClass *module = data->module;
struct RClass *table_class;
struct RClass *klass;
table_class = mrb_class_get_under(mrb, module, "Table");
klass = mrb_define_class_under(mrb, module, "HashTable", table_class);
MRB_SET_INSTANCE_TT(klass, MRB_TT_DATA);
mrb_define_class_method(mrb, klass, "create",
mrb_grn_hash_table_class_create,
MRB_ARGS_OPT(1));
mrb_define_method(mrb, klass, "initialize",
mrb_grn_hash_table_initialize, MRB_ARGS_REQ(1));
}
#endif
|
833270.c | /* zgemv.f -- translated by f2c (version 19991025). You must link the resulting object file with the libraries: -lf2c -lm (in that order) */
#include "FLA_f2c.h"
/* Subroutine */
int zgemv_(char *trans, integer *m, integer *n, doublecomplex *alpha, doublecomplex *a, integer *lda, doublecomplex * x, integer *incx, doublecomplex *beta, doublecomplex *y, integer * incy)
{
/* System generated locals */
integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
doublecomplex z__1, z__2, z__3;
/* Builtin functions */
void d_cnjg(doublecomplex *, doublecomplex *);
/* Local variables */
integer info;
doublecomplex temp;
integer lenx, leny, i__, j;
extern logical lsame_(char *, char *);
integer ix, iy, jx, jy, kx, ky;
extern /* Subroutine */
int xerbla_(char *, integer *);
logical noconj;
/* .. Scalar Arguments .. */
/* .. Array Arguments .. */
/* .. */
/* Purpose */
/* ======= */
/* ZGEMV performs one of the matrix-vector operations */
/* y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, or */
/* y := alpha*conjg( A' )*x + beta*y, */
/* where alpha and beta are scalars, x and y are vectors and A is an */
/* m by n matrix. */
/* Parameters */
/* ========== */
/* TRANS - CHARACTER*1. */
/* On entry, TRANS specifies the operation to be performed as */
/* follows: */
/* TRANS = 'N' or 'n' y := alpha*A*x + beta*y. */
/* TRANS = 'T' or 't' y := alpha*A'*x + beta*y. */
/* TRANS = 'C' or 'c' y := alpha*conjg( A' )*x + beta*y. */
/* Unchanged on exit. */
/* M - INTEGER. */
/* On entry, M specifies the number of rows of the matrix A. */
/* M must be at least zero. */
/* Unchanged on exit. */
/* N - INTEGER. */
/* On entry, N specifies the number of columns of the matrix A. */
/* N must be at least zero. */
/* Unchanged on exit. */
/* ALPHA - COMPLEX*16 . */
/* On entry, ALPHA specifies the scalar alpha. */
/* Unchanged on exit. */
/* A - COMPLEX*16 array of DIMENSION ( LDA, n ). */
/* Before entry, the leading m by n part of the array A must */
/* contain the matrix of coefficients. */
/* Unchanged on exit. */
/* LDA - INTEGER. */
/* On entry, LDA specifies the first dimension of A as declared */
/* in the calling (sub) program. LDA must be at least */
/* max( 1, m ). */
/* Unchanged on exit. */
/* X - COMPLEX*16 array of DIMENSION at least */
/* ( 1 + ( n - 1 )*f2c_abs( INCX ) ) when TRANS = 'N' or 'n' */
/* and at least */
/* ( 1 + ( m - 1 )*f2c_abs( INCX ) ) otherwise. */
/* Before entry, the incremented array X must contain the */
/* vector x. */
/* Unchanged on exit. */
/* INCX - INTEGER. */
/* On entry, INCX specifies the increment for the elements of */
/* X. INCX must not be zero. */
/* Unchanged on exit. */
/* BETA - COMPLEX*16 . */
/* On entry, BETA specifies the scalar beta. When BETA is */
/* supplied as zero then Y need not be set on input. */
/* Unchanged on exit. */
/* Y - COMPLEX*16 array of DIMENSION at least */
/* ( 1 + ( m - 1 )*f2c_abs( INCY ) ) when TRANS = 'N' or 'n' */
/* and at least */
/* ( 1 + ( n - 1 )*f2c_abs( INCY ) ) otherwise. */
/* Before entry with BETA non-zero, the incremented array Y */
/* must contain the vector y. On exit, Y is overwritten by the */
/* updated vector y. */
/* INCY - INTEGER. */
/* On entry, INCY specifies the increment for the elements of */
/* Y. INCY must not be zero. */
/* Unchanged on exit. */
/* Level 2 Blas routine. */
/* -- Written on 22-October-1986. */
/* Jack Dongarra, Argonne National Lab. */
/* Jeremy Du Croz, Nag Central Office. */
/* Sven Hammarling, Nag Central Office. */
/* Richard Hanson, Sandia National Labs. */
/* .. Parameters .. */
/* .. Local Scalars .. */
/* .. External Functions .. */
/* .. External Subroutines .. */
/* .. Intrinsic Functions .. */
/* .. */
/* .. Executable Statements .. */
/* Test the input parameters. */
/* Parameter adjustments */
a_dim1 = *lda;
a_offset = 1 + a_dim1 * 1;
a -= a_offset;
--x;
--y;
/* Function Body */
info = 0;
if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C") )
{
info = 1;
}
else if (*m < 0)
{
info = 2;
}
else if (*n < 0)
{
info = 3;
}
else if (*lda < max(1,*m))
{
info = 6;
}
else if (*incx == 0)
{
info = 8;
}
else if (*incy == 0)
{
info = 11;
}
if (info != 0)
{
xerbla_("ZGEMV ", &info);
return 0;
}
/* Quick return if possible. */
if (*m == 0 || *n == 0 || alpha->r == 0. && alpha->i == 0. && (beta->r == 1. && beta->i == 0.))
{
return 0;
}
noconj = lsame_(trans, "T");
/* Set LENX and LENY, the lengths of the vectors x and y, and set */
/* up the start points in X and Y. */
if (lsame_(trans, "N"))
{
lenx = *n;
leny = *m;
}
else
{
lenx = *m;
leny = *n;
}
if (*incx > 0)
{
kx = 1;
}
else
{
kx = 1 - (lenx - 1) * *incx;
}
if (*incy > 0)
{
ky = 1;
}
else
{
ky = 1 - (leny - 1) * *incy;
}
/* Start the operations. In this version the elements of A are */
/* accessed sequentially with one pass through A. */
/* First form y := beta*y. */
if (beta->r != 1. || beta->i != 0.)
{
if (*incy == 1)
{
if (beta->r == 0. && beta->i == 0.)
{
i__1 = leny;
for (i__ = 1;
i__ <= i__1;
++i__)
{
i__2 = i__;
y[i__2].r = 0., y[i__2].i = 0.;
/* L10: */
}
}
else
{
i__1 = leny;
for (i__ = 1;
i__ <= i__1;
++i__)
{
i__2 = i__;
i__3 = i__;
z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i, z__1.i = beta->r * y[i__3].i + beta->i * y[i__3] .r;
y[i__2].r = z__1.r, y[i__2].i = z__1.i;
/* L20: */
}
}
}
else
{
iy = ky;
if (beta->r == 0. && beta->i == 0.)
{
i__1 = leny;
for (i__ = 1;
i__ <= i__1;
++i__)
{
i__2 = iy;
y[i__2].r = 0., y[i__2].i = 0.;
iy += *incy;
/* L30: */
}
}
else
{
i__1 = leny;
for (i__ = 1;
i__ <= i__1;
++i__)
{
i__2 = iy;
i__3 = iy;
z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i, z__1.i = beta->r * y[i__3].i + beta->i * y[i__3] .r;
y[i__2].r = z__1.r, y[i__2].i = z__1.i;
iy += *incy;
/* L40: */
}
}
}
}
if (alpha->r == 0. && alpha->i == 0.)
{
return 0;
}
if (lsame_(trans, "N"))
{
/* Form y := alpha*A*x + y. */
jx = kx;
if (*incy == 1)
{
i__1 = *n;
for (j = 1;
j <= i__1;
++j)
{
i__2 = jx;
if (x[i__2].r != 0. || x[i__2].i != 0.)
{
i__2 = jx;
z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2] .r;
temp.r = z__1.r, temp.i = z__1.i;
i__2 = *m;
for (i__ = 1;
i__ <= i__2;
++i__)
{
i__3 = i__;
i__4 = i__;
i__5 = i__ + j * a_dim1;
z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i, z__2.i = temp.r * a[i__5].i + temp.i * a[i__5] .r;
z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
y[i__3].r = z__1.r, y[i__3].i = z__1.i;
/* L50: */
}
}
jx += *incx;
/* L60: */
}
}
else
{
i__1 = *n;
for (j = 1;
j <= i__1;
++j)
{
i__2 = jx;
if (x[i__2].r != 0. || x[i__2].i != 0.)
{
i__2 = jx;
z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2] .r;
temp.r = z__1.r, temp.i = z__1.i;
iy = ky;
i__2 = *m;
for (i__ = 1;
i__ <= i__2;
++i__)
{
i__3 = iy;
i__4 = iy;
i__5 = i__ + j * a_dim1;
z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i, z__2.i = temp.r * a[i__5].i + temp.i * a[i__5] .r;
z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
y[i__3].r = z__1.r, y[i__3].i = z__1.i;
iy += *incy;
/* L70: */
}
}
jx += *incx;
/* L80: */
}
}
}
else
{
/* Form y := alpha*A'*x + y or y := alpha*conjg( A' )*x + y. */
jy = ky;
if (*incx == 1)
{
i__1 = *n;
for (j = 1;
j <= i__1;
++j)
{
temp.r = 0., temp.i = 0.;
if (noconj)
{
i__2 = *m;
for (i__ = 1;
i__ <= i__2;
++i__)
{
i__3 = i__ + j * a_dim1;
i__4 = i__;
z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4] .i, z__2.i = a[i__3].r * x[i__4].i + a[i__3] .i * x[i__4].r;
z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
temp.r = z__1.r, temp.i = z__1.i;
/* L90: */
}
}
else
{
i__2 = *m;
for (i__ = 1;
i__ <= i__2;
++i__)
{
d_cnjg(&z__3, &a[i__ + j * a_dim1]);
i__3 = i__;
z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i = z__3.r * x[i__3].i + z__3.i * x[i__3] .r;
z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
temp.r = z__1.r, temp.i = z__1.i;
/* L100: */
}
}
i__2 = jy;
i__3 = jy;
z__2.r = alpha->r * temp.r - alpha->i * temp.i, z__2.i = alpha->r * temp.i + alpha->i * temp.r;
z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
y[i__2].r = z__1.r, y[i__2].i = z__1.i;
jy += *incy;
/* L110: */
}
}
else
{
i__1 = *n;
for (j = 1;
j <= i__1;
++j)
{
temp.r = 0., temp.i = 0.;
ix = kx;
if (noconj)
{
i__2 = *m;
for (i__ = 1;
i__ <= i__2;
++i__)
{
i__3 = i__ + j * a_dim1;
i__4 = ix;
z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4] .i, z__2.i = a[i__3].r * x[i__4].i + a[i__3] .i * x[i__4].r;
z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
temp.r = z__1.r, temp.i = z__1.i;
ix += *incx;
/* L120: */
}
}
else
{
i__2 = *m;
for (i__ = 1;
i__ <= i__2;
++i__)
{
d_cnjg(&z__3, &a[i__ + j * a_dim1]);
i__3 = ix;
z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i = z__3.r * x[i__3].i + z__3.i * x[i__3] .r;
z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
temp.r = z__1.r, temp.i = z__1.i;
ix += *incx;
/* L130: */
}
}
i__2 = jy;
i__3 = jy;
z__2.r = alpha->r * temp.r - alpha->i * temp.i, z__2.i = alpha->r * temp.i + alpha->i * temp.r;
z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
y[i__2].r = z__1.r, y[i__2].i = z__1.i;
jy += *incy;
/* L140: */
}
}
}
return 0;
/* End of ZGEMV . */
}
/* zgemv_ */
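/*
 * Editor's sketch of a direct call to zgemv_ above, guarded out so the
 * translated file builds exactly as before. It assumes the usual f2c
 * typedefs from FLA_f2c.h: integer, and doublecomplex with .r/.i members
 * as used throughout the routine itself.
 */
#if 0
static void
zgemv_usage_example(void)
{
    /* 2x2 identity in column-major order, so y := alpha*A*x = x. */
    doublecomplex a[4] = { {1., 0.}, {0., 0.}, {0., 0.}, {1., 0.} };
    doublecomplex x[2] = { {1., 0.}, {2., 0.} };
    doublecomplex y[2] = { {0., 0.}, {0., 0.} };
    doublecomplex alpha = {1., 0.}, beta = {0., 0.};
    integer m = 2, n = 2, lda = 2, incx = 1, incy = 1;

    zgemv_("N", &m, &n, &alpha, a, &lda, x, &incx, &beta, y, &incy);
    /* y now holds {1+0i, 2+0i}. */
}
#endif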
|
707294.c | #include <third_party/base64.h>
#include "unit.h"
#include "trivia/util.h"
#include <string.h>
static void
base64_test(const char *str, int options, const char *no_symbols,
int no_symbols_len)
{
plan(3 + no_symbols_len);
int len = strlen(str);
int base64_buflen = base64_bufsize(len + 1, options);
char *base64_buf = malloc(base64_buflen);
char *strbuf = malloc(len + 1);
int rc = base64_encode(str, len + 1, base64_buf, base64_buflen,
options);
ok(rc <= base64_buflen, "length");
for (int i = 0; i < no_symbols_len; ++i) {
char c = no_symbols[i];
if (c == '\n') {
is(memchr(base64_buf, no_symbols[i], base64_buflen),
NULL, "no \\n symbols");
} else {
is(memchr(base64_buf, no_symbols[i], base64_buflen),
NULL, "no %c symbols", no_symbols[i]);
}
}
is(base64_decode(base64_buf, rc, strbuf, len + 1), len + 1,
"decode length ok");
is(strcmp(str, strbuf), 0, "encode/decode");
free(base64_buf);
free(strbuf);
check_plan();
}
static void
base64_urlsafe_test(const char *str)
{
const char symbols[] = { '\n', '+', '=' };
base64_test(str, BASE64_URLSAFE, symbols, lengthof(symbols));
}
static void
base64_nopad_test(const char *str)
{
const char symbols[] = { '=' };
base64_test(str, BASE64_NOPAD, symbols, lengthof(symbols));
}
static void
base64_nowrap_test(const char *str)
{
const char symbols[] = { '\n' };
base64_test(str, BASE64_NOWRAP, symbols, lengthof(symbols));
}
static void
base64_invalid_chars_test(void)
{
plan(1);
/* Upper bit must be cleared */
const char invalid_data[] = { '\x7b', '\x7c', '\x7d', '\x7e' };
char outbuf[8];
/* Invalid chars should be ignored, not decoded into garbage */
is(base64_decode(invalid_data, sizeof(invalid_data),
outbuf, sizeof(outbuf)),
0, "ignoring invalid chars");
check_plan();
}
static void
base64_no_space_test(void)
{
plan(1);
const char *const in = "sIIpHw==";
const int in_len = strlen(in);
const int rc = base64_decode(in, in_len, NULL, 0);
is(rc, 0, "no space in out buffer");
check_plan();
}
int main(int argc, char *argv[])
{
plan(30);
header();
const char *option_tests[] = {
"", "a", "123", "1234567", "12345678",
"\001\002\003\004\005\006\253\254\255",
"Test +/+/+/ test test test test test test test test test "\
"test test test test test test test test test test test test "\
"test test test test test test test test test test test test "\
"test test test test test test test test test test\n\n"
};
for (size_t i = 0; i < lengthof(option_tests); ++i) {
base64_test(option_tests[i], 0, NULL, 0);
base64_urlsafe_test(option_tests[i]);
base64_nopad_test(option_tests[i]);
base64_nowrap_test(option_tests[i]);
}
base64_invalid_chars_test();
base64_no_space_test();
footer();
return check_plan();
}
|
814536.c | /*
* Intel Medfield MSIC GPIO driver
* Copyright (c) 2011, Intel Corporation.
*
* Author: Mathias Nyman <[email protected]>
* Based on intel_pmic_gpio.c
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/mfd/intel_msic.h>
/* the offset for the mapping of global gpio pin to irq */
#define MSIC_GPIO_IRQ_OFFSET 0x100
#define MSIC_GPIO_DIR_IN 0
#define MSIC_GPIO_DIR_OUT BIT(5)
#define MSIC_GPIO_TRIG_FALL BIT(1)
#define MSIC_GPIO_TRIG_RISE BIT(2)
/* masks for msic gpio output GPIOxxxxCTLO registers */
#define MSIC_GPIO_DIR_MASK BIT(5)
#define MSIC_GPIO_DRV_MASK BIT(4)
#define MSIC_GPIO_REN_MASK BIT(3)
#define MSIC_GPIO_RVAL_MASK (BIT(2) | BIT(1))
#define MSIC_GPIO_DOUT_MASK BIT(0)
/* masks for msic gpio input GPIOxxxxCTLI registers */
#define MSIC_GPIO_GLBYP_MASK BIT(5)
#define MSIC_GPIO_DBNC_MASK (BIT(4) | BIT(3))
#define MSIC_GPIO_INTCNT_MASK (BIT(2) | BIT(1))
#define MSIC_GPIO_DIN_MASK BIT(0)
#define MSIC_NUM_GPIO 24
struct msic_gpio {
struct platform_device *pdev;
struct mutex buslock;
struct gpio_chip chip;
int irq;
unsigned irq_base;
unsigned long trig_change_mask;
unsigned trig_type;
};
/*
* MSIC has 24 gpios, 16 low voltage (1.2-1.8v) and 8 high voltage (3v).
* Both the high and low voltage gpios are divided in two banks.
* GPIOs are numbered with GPIO0LV0 as gpio_base in the following order:
* GPIO0LV0..GPIO0LV7: low voltage, bank 0, gpio_base
* GPIO1LV0..GPIO1LV7: low voltage, bank 1, gpio_base + 8
* GPIO0HV0..GPIO0HV3: high voltage, bank 0, gpio_base + 16
* GPIO1HV0..GPIO1HV3: high voltage, bank 1, gpio_base + 20
*/
static int msic_gpio_to_ireg(unsigned offset)
{
if (offset >= MSIC_NUM_GPIO)
return -EINVAL;
if (offset < 8)
return INTEL_MSIC_GPIO0LV0CTLI - offset;
if (offset < 16)
return INTEL_MSIC_GPIO1LV0CTLI - offset + 8;
if (offset < 20)
return INTEL_MSIC_GPIO0HV0CTLI - offset + 16;
return INTEL_MSIC_GPIO1HV0CTLI - offset + 20;
}
static int msic_gpio_to_oreg(unsigned offset)
{
if (offset >= MSIC_NUM_GPIO)
return -EINVAL;
if (offset < 8)
return INTEL_MSIC_GPIO0LV0CTLO - offset;
if (offset < 16)
return INTEL_MSIC_GPIO1LV0CTLO - offset + 8;
if (offset < 20)
return INTEL_MSIC_GPIO0HV0CTLO - offset + 16;
return INTEL_MSIC_GPIO1HV0CTLO - offset + 20;
}
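/*
 * Editor's note, a worked example of the mapping above: offset 18 is
 * GPIO0HV2 per the numbering comment, and msic_gpio_to_ireg(18) returns
 * INTEL_MSIC_GPIO0HV0CTLI - 18 + 16, i.e. two registers below the bank-0
 * high-voltage input-control base; the CTLI/CTLO register addresses run
 * downward as the GPIO index within a bank rises.
 */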
static int msic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
int reg;
reg = msic_gpio_to_oreg(offset);
if (reg < 0)
return reg;
return intel_msic_reg_update(reg, MSIC_GPIO_DIR_IN, MSIC_GPIO_DIR_MASK);
}
static int msic_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
int reg;
unsigned mask;
value = (!!value) | MSIC_GPIO_DIR_OUT;
mask = MSIC_GPIO_DIR_MASK | MSIC_GPIO_DOUT_MASK;
reg = msic_gpio_to_oreg(offset);
if (reg < 0)
return reg;
return intel_msic_reg_update(reg, value, mask);
}
static int msic_gpio_get(struct gpio_chip *chip, unsigned offset)
{
u8 r;
int ret;
int reg;
reg = msic_gpio_to_ireg(offset);
if (reg < 0)
return reg;
ret = intel_msic_reg_read(reg, &r);
if (ret < 0)
return ret;
return r & MSIC_GPIO_DIN_MASK;
}
static void msic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
int reg;
reg = msic_gpio_to_oreg(offset);
if (reg < 0)
return;
intel_msic_reg_update(reg, !!value , MSIC_GPIO_DOUT_MASK);
}
/*
* This is called from genirq with mg->buslock locked and
* irq_desc->lock held. We can not access the scu bus here, so we
* store the change and update in the bus_sync_unlock() function below
*/
static int msic_irq_type(struct irq_data *data, unsigned type)
{
struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
u32 gpio = data->irq - mg->irq_base;
if (gpio >= mg->chip.ngpio)
return -EINVAL;
/* mark for which gpio the trigger changed, protected by buslock */
mg->trig_change_mask |= (1 << gpio);
mg->trig_type = type;
return 0;
}
static int msic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct msic_gpio *mg = container_of(chip, struct msic_gpio, chip);
return mg->irq_base + offset;
}
static void msic_bus_lock(struct irq_data *data)
{
struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
mutex_lock(&mg->buslock);
}
static void msic_bus_sync_unlock(struct irq_data *data)
{
struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
int offset;
int reg;
u8 trig = 0;
/* We can only get one change at a time as the buslock covers the
entire transaction. The irq_desc->lock is dropped before we are
called but that is fine */
if (mg->trig_change_mask) {
offset = __ffs(mg->trig_change_mask);
reg = msic_gpio_to_ireg(offset);
if (reg < 0)
goto out;
if (mg->trig_type & IRQ_TYPE_EDGE_RISING)
trig |= MSIC_GPIO_TRIG_RISE;
if (mg->trig_type & IRQ_TYPE_EDGE_FALLING)
trig |= MSIC_GPIO_TRIG_FALL;
intel_msic_reg_update(reg, trig, MSIC_GPIO_INTCNT_MASK);
mg->trig_change_mask = 0;
}
out:
mutex_unlock(&mg->buslock);
}
/* Firmware does all the masking and unmasking for us, no masking here. */
static void msic_irq_unmask(struct irq_data *data) { }
static void msic_irq_mask(struct irq_data *data) { }
static struct irq_chip msic_irqchip = {
.name = "MSIC-GPIO",
.irq_mask = msic_irq_mask,
.irq_unmask = msic_irq_unmask,
.irq_set_type = msic_irq_type,
.irq_bus_lock = msic_bus_lock,
.irq_bus_sync_unlock = msic_bus_sync_unlock,
};
static void msic_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
struct msic_gpio *mg = irq_data_get_irq_handler_data(data);
struct irq_chip *chip = irq_data_get_irq_chip(data);
struct intel_msic *msic = pdev_to_intel_msic(mg->pdev);
int i;
int bitnr;
u8 pin;
unsigned long pending = 0;
for (i = 0; i < (mg->chip.ngpio / BITS_PER_BYTE); i++) {
intel_msic_irq_read(msic, INTEL_MSIC_GPIO0LVIRQ + i, &pin);
pending = pin;
if (pending) {
for_each_set_bit(bitnr, &pending, BITS_PER_BYTE)
generic_handle_irq(mg->irq_base +
(i * BITS_PER_BYTE) + bitnr);
}
}
chip->irq_eoi(data);
}
static int platform_msic_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct intel_msic_gpio_pdata *pdata = dev_get_platdata(dev);
struct msic_gpio *mg;
int irq = platform_get_irq(pdev, 0);
int retval;
int i;
if (irq < 0) {
dev_err(dev, "no IRQ line\n");
return -EINVAL;
}
if (!pdata || !pdata->gpio_base) {
dev_err(dev, "incorrect or missing platform data\n");
return -EINVAL;
}
mg = kzalloc(sizeof(*mg), GFP_KERNEL);
if (!mg)
return -ENOMEM;
dev_set_drvdata(dev, mg);
mg->pdev = pdev;
mg->irq = irq;
mg->irq_base = pdata->gpio_base + MSIC_GPIO_IRQ_OFFSET;
mg->chip.label = "msic_gpio";
mg->chip.direction_input = msic_gpio_direction_input;
mg->chip.direction_output = msic_gpio_direction_output;
mg->chip.get = msic_gpio_get;
mg->chip.set = msic_gpio_set;
mg->chip.to_irq = msic_gpio_to_irq;
mg->chip.base = pdata->gpio_base;
mg->chip.ngpio = MSIC_NUM_GPIO;
mg->chip.can_sleep = true;
mg->chip.dev = dev;
mutex_init(&mg->buslock);
retval = gpiochip_add(&mg->chip);
if (retval) {
dev_err(dev, "Adding MSIC gpio chip failed\n");
goto err;
}
for (i = 0; i < mg->chip.ngpio; i++) {
irq_set_chip_data(i + mg->irq_base, mg);
irq_set_chip_and_handler(i + mg->irq_base,
&msic_irqchip,
handle_simple_irq);
}
irq_set_chained_handler(mg->irq, msic_gpio_irq_handler);
irq_set_handler_data(mg->irq, mg);
return 0;
err:
kfree(mg);
return retval;
}
static struct platform_driver platform_msic_gpio_driver = {
.driver = {
.name = "msic_gpio",
},
.probe = platform_msic_gpio_probe,
};
static int __init platform_msic_gpio_init(void)
{
return platform_driver_register(&platform_msic_gpio_driver);
}
subsys_initcall(platform_msic_gpio_init);
MODULE_AUTHOR("Mathias Nyman <[email protected]>");
MODULE_DESCRIPTION("Intel Medfield MSIC GPIO driver");
MODULE_LICENSE("GPL v2");
|
387630.c | /*
* Copyright (c) 2020 Endian Technologies AB
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <lvgl.h>
#include "gfx.h"
#include "log.h"
#include "version.h"
#include <string.h>
/* ********** Macros and constants ********** */
LV_FONT_DECLARE(rubik_regular_68);
LV_FONT_DECLARE(rubik_regular_34);
#define BAT_LABEL_MARGIN 3
/* ********** ******* ********** */
/* ********** Variables ********** */
static lv_obj_t *battery_label;
static lv_obj_t *bt_label;
//lv_obj_t *time_label2;
lv_obj_t *time_label;
static lv_obj_t *date_label;
static lv_obj_t *info_label;
//lv_obj_t *hello_world_label; //jj
//static lv_style_t style;
//static lv_style_t style_time;
//static lv_style_t style_date;
/* ********** Functions ********** */
void gfx_init(void)
{
/* Create styles for time, date and the rest */
// lv_style_copy(&style, &lv_style_plain);
// lv_style_copy(&style_time, &lv_style_plain);
// lv_style_copy(&style_date, &lv_style_plain);
/* Default style */
// style.body.main_color = LV_COLOR_BLACK;
// style.body.grad_color = LV_COLOR_BLACK;
// style.text.color = LV_COLOR_WHITE;
// style.text.font = &lv_font_roboto_22;
// lv_obj_set_style(lv_scr_act(), &style);
// jj test
time_label = lv_label_create(lv_scr_act(), NULL);
// lv_label_set_style(time_label, LV_LABEL_STYLE_MAIN, &style_time);
lv_label_set_text(time_label, "00:00");
lv_obj_align(time_label, NULL, LV_ALIGN_CENTER, 0, 0); //jj
// hello_world_label = lv_label_create(lv_scr_act(), NULL);
//lv_label_set_text(hello_world_label, "tijd is nu 12:23");
// lv_obj_align(hello_world_label, NULL, LV_ALIGN_CENTER, 0, 8);
/* Battery label */
// battery_label = lv_label_create(lv_scr_act(), NULL);
// lv_label_set_style(battery_label, LV_LABEL_STYLE_MAIN, &style);
// lv_label_set_text(battery_label, "");
/* Bluetooth label */
bt_label = lv_label_create(lv_scr_act(), NULL);
lv_obj_align(bt_label, NULL, LV_ALIGN_CENTER, 10, -8);
// lv_label_set_style(bt_label, LV_LABEL_STYLE_MAIN, &style);
lv_label_set_text(bt_label, LV_SYMBOL_WIFI);
/* Time label and style */
// style_time.body.main_color = LV_COLOR_BLACK;
// style_time.body.grad_color = LV_COLOR_BLACK;
// style_time.text.font = &rubik_regular_68;
// style_time.text.color = LV_COLOR_WHITE;
// style_time.text.color = LV_COLOR_WHITE;
// lv_obj_set_style(lv_scr_act(), &style_time);
// time_label = lv_label_create(lv_scr_act(), NULL);
// lv_label_set_style(time_label, LV_LABEL_STYLE_MAIN, &style_time);
// lv_label_set_text(time_label, "00:00");
// lv_obj_align(time_label, NULL, LV_ALIGN_CENTER, 0, 0); //jj
/*
info_label = lv_label_create(lv_scr_act(), NULL);
// lv_label_set_style(info_label, LV_LABEL_STYLE_MAIN, &style);
if (strlen(FW_VERSION) < 10) {
lv_label_set_text(info_label, "Hypnos " FW_VERSION "\n\n"
"This is Free Software" "\n"
"without any warranty." "\n\n"
"https://github.com/" "\n"
"endian-albin/" "\n"
"pinetime-hypnos");
} else {
lv_label_set_text(info_label, "Hypnos" "\n"
FW_VERSION "\n\n"
"This is Free Software" "\n"
"without any warranty." "\n"
"https://github.com/" "\n"
"endian-albin/" "\n"
"pinetime-hypnos");
}
lv_obj_set_hidden(info_label, true);
*/
/* Date label and style */
// style_date.body.main_color = LV_COLOR_BLACK;
// style_date.body.grad_color = LV_COLOR_BLACK;
// style_date.text.font = &rubik_regular_34;
// style_date.text.color = LV_COLOR_YELLOW;
// lv_obj_set_style(lv_scr_act(), &style_date);
date_label = lv_label_create(lv_scr_act(), NULL);
// lv_label_set_style(date_label, LV_LABEL_STYLE_MAIN, &style_date);
lv_label_set_text(date_label, "Mon 10 Jan");
lv_obj_align(date_label, NULL, LV_ALIGN_CENTER, 0, 2);
LOG_INF("Graphics init: Done");
}
void gfx_update(void)
{
lv_task_handler();
}
void gfx_time_set_label(char *str)
{
lv_label_set_text(time_label, str);
lv_obj_align(time_label, NULL, LV_ALIGN_CENTER, 0, 0);
}
void gfx_date_set_label(char *str)
{
lv_label_set_text(date_label, str);
lv_obj_align(date_label, NULL, LV_ALIGN_CENTER, 0, 8);
}
void gfx_bt_set_label(enum bt_symbol s)
{
switch (s) {
case BT_ADVERTISING_ON:
lv_label_set_text(bt_label, LV_SYMBOL_WIFI);
break;
case BT_CONNECTED:
lv_label_set_text(bt_label, LV_SYMBOL_BLUETOOTH);
break;
default:
lv_label_set_text(bt_label, LV_SYMBOL_BLUETOOTH); //jj
//lv_label_set_text(bt_label, "");
}
}
void gfx_battery_set_label(enum battery_symbol s)
{
switch (s) {
case BAT_CHARGE:
lv_label_set_text(battery_label, LV_SYMBOL_CHARGE);
lv_obj_align(battery_label, NULL, LV_ALIGN_IN_TOP_RIGHT, -BAT_LABEL_MARGIN, BAT_LABEL_MARGIN);
break;
case BAT_FULL:
lv_label_set_text(battery_label, LV_SYMBOL_BATTERY_FULL);
break;
case BAT_3:
lv_label_set_text(battery_label, LV_SYMBOL_BATTERY_3);
break;
case BAT_2:
lv_label_set_text(battery_label, LV_SYMBOL_BATTERY_2);
break;
case BAT_1:
lv_label_set_text(battery_label, LV_SYMBOL_BATTERY_1);
break;
default:
lv_label_set_text(battery_label, LV_SYMBOL_BATTERY_EMPTY);
}
lv_obj_align(battery_label, NULL, LV_ALIGN_IN_TOP_RIGHT, -BAT_LABEL_MARGIN, 0);
}
void gfx_show_info(void)
{
lv_obj_set_hidden(time_label, true);
lv_obj_set_hidden(date_label, true);
lv_obj_set_hidden(bt_label, true);
lv_obj_set_hidden(info_label, false);
}
void gfx_show_watch(void)
{
lv_obj_set_hidden(time_label, false);
lv_obj_set_hidden(date_label, false);
lv_obj_set_hidden(bt_label, false);
lv_obj_set_hidden(info_label, true);
}
|
427979.c | /**
* bs.c
* A simple binary search library.
*
* Authors: <YOUR NAMES HERE>
*/
// +---------+-------------------------------------------------------
// | Headers |
// +---------+
#include "bs.h"
// +---------------+-------------------------------------------------
// | Local Helpers |
// +---------------+
int
binary_search_ints_helper (int v, int a[], int lb, int ub)
{
return -1; // STUB
} // binary_search_ints_helper
// +--------------------+--------------------------------------------
// | Exported Functions |
// +--------------------+
int
binary_search_ints (int v, int a[], int n)
{
return -1; // STUB
} // binary_search_ints
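/*
 * Editor's sketch of one possible implementation, guarded out so it does
 * not clash with the stubs above (which are presumably left for the
 * assignment's authors to fill in).
 */
#if 0
int
binary_search_ints_helper (int v, int a[], int lb, int ub)
{
  while (lb <= ub)
    {
      int mid = lb + (ub - lb) / 2;
      if (a[mid] == v)
        return mid;
      else if (a[mid] < v)
        lb = mid + 1;
      else
        ub = mid - 1;
    }
  return -1;
} // binary_search_ints_helper

int
binary_search_ints (int v, int a[], int n)
{
  return binary_search_ints_helper (v, a, 0, n - 1);
} // binary_search_ints
#endif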
|
290481.c | /* Copyright (C) 2019 Sara Berman.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject
to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
*/
#include "hextoval.h"
unsigned char hextoval(char in)
{
if((in>='0')&&(in<='9')) return in-'0';
else if((in>='A')&&(in<='F')) return in+10-'A';
else if((in>='a')&&(in<='f')) return in+10-'a';
else return 0;
}
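/*
 * Editor's sketch of typical usage (the helper name is hypothetical and it
 * is not part of the original library): decode a hex string such as "1a2b"
 * into bytes, two characters per byte.
 */
static unsigned int hex_decode_example(const char *in, unsigned char *out, unsigned int outlen)
{
	unsigned int n = 0;
	while (in[0] != '\0' && in[1] != '\0' && n < outlen) {
		out[n++] = (unsigned char)((hextoval(in[0]) << 4) | hextoval(in[1]));
		in += 2;
	}
	return n; /* number of bytes written */
}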
|
719050.c | /*
* Copyright (C) 2008-2014 Netronome Systems, Inc. All rights reserved.
*/
#ifndef __NFP_CLS_REFLECT_C__
#define __NFP_CLS_REFLECT_C__
#ifndef NFP_LIB_ANY_NFCC_VERSION
#if (!defined(__NFP_TOOL_NFCC) || \
(__NFP_TOOL_NFCC < NFP_SW_VERSION(5, 0, 0, 0)) || \
(__NFP_TOOL_NFCC > NFP_SW_VERSION(6, 0, 1, 255)))
#error "This standard library is not supported for the version of the SDK currently in use."
#endif
#endif
/*
* NOTE: When writing inline-asm, it's important not to use reserved words
* for variables. Common ones are 'a','b', 'csr', 'state', and
* 'inp_state'.
*/
#include <nfp.h>
#include <nfp_intrinsic.h>
#include <nfp_cls.h>
#pragma diag_suppress 279 /* ignore while(0) etc in macros */
// 4 bits allowed for ME
#define _CLS_REFLECT_ME_RANGE_CHECK() \
if (__is_ct_const(remote_ME)) \
{ \
CT_ASSERT((remote_ME & (~0xf))==0); \
}
// 6 bits allowed for island number
#define _CLS_REFLECT_ISLAND_RANGE_CHECK() \
if(__is_ct_const(remote_island)) \
{ \
CT_ASSERT((remote_island&(~0x3f))==0); \
}
/*
* Implement the CLS 32-bit pointer reflect command with sig_done or ctx swap
* Address encoding formula for cls reflect
* bits[11:0] = xfer register address (reg number)
* bits[15:12] = FPC/ME
* bits[23:16] = 8b0
* bits[27:24] = Signal number
* bits[30:28] = ctx
* bit[31] = Signal reference (If set, signal number comes from bits [27:24]
*/
#define _CLS_IMPLEMENT_REFLECT_COMMAND(command, is_read) \
if (is_read == 1) { \
CT_ASSERT(__is_read_reg(data)); \
} \
else \
{ \
CT_ASSERT(__is_write_reg(data)); \
} \
CT_ASSERT(__is_ct_const(sync)); \
CT_ASSERT(sync == sig_done || sync == ctx_swap); \
_CLS_REFLECT_ME_RANGE_CHECK() \
\
{ \
unsigned int address = ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff); \
if (__is_ct_const(count) && count<=8) \
{ \
CT_ASSERT(count!=0); \
if (sync == sig_done) \
{ \
__asm cls[command, *data, address, 0, __ct_const_val(count)], sig_done[*sig_ptr] \
} \
else \
{ \
__asm cls[command, *data, address, 0, __ct_const_val(count)], ctx_swap[*sig_ptr] \
} \
} \
else \
{ \
if (__is_ct_const(count)) \
{ \
CT_ASSERT(count<=16); \
} \
else \
{ \
CT_QPERFINFO_INDIRECT_REF(command); \
} \
{ \
generic_ind_t ind; \
_INTRINSIC_OVERRIDE_LENGTH(ind, (count - 1)); \
if (sync == sig_done) \
{ \
__asm alu[--, --, B, ALU_INDIRECT_TYPE(ind)] \
__asm cls[command, *data, address, 0, __ct_const_val(count)], sig_done[*sig_ptr], indirect_ref \
} \
else \
{ \
__asm alu[--, --, B, ALU_INDIRECT_TYPE(ind)] \
__asm cls[command, *data, address, 0, __ct_const_val(count)], ctx_swap[*sig_ptr], indirect_ref \
} \
} \
} \
}
/*
* Implement the CLS 40-bit pointer reflect command with sig_done or ctx swap
* Address encoding formula for cls reflect
* bits[11:0] = xfer register address (reg number)
* bits[15:12] = FPC/ME
* bits[23:16] = 8b0
* bits[27:24] = Signal number
* bits[30:28] = ctx
* bit[31] = Signal reference (If set, signal number comes from bits [27:24]
* bit[33:32] = 2b0
* bit[39:34] = island number
*/
#define _CLS_IMPLEMENT_REFLECT_COMMAND_PTR40(command, is_read) \
if (is_read == 1) { \
CT_ASSERT(__is_read_reg(data)); \
} \
else \
{ \
CT_ASSERT(__is_write_reg(data)); \
} \
CT_ASSERT(__is_ct_const(sync)); \
CT_ASSERT(sync == sig_done || sync == ctx_swap); \
_CLS_REFLECT_ME_RANGE_CHECK() \
_CLS_REFLECT_ISLAND_RANGE_CHECK() \
{ \
unsigned int low_addr = ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff); \
unsigned int hi_addr = (remote_island & 0x3f) << 26; \
if (__is_ct_const(count) && count<=8) \
{ \
CT_ASSERT(count!=0); \
if (sync == sig_done) \
{ \
__asm cls[command, *data, hi_addr, << 8, low_addr, __ct_const_val(count)], sig_done[*sig_ptr] \
} \
else \
{ \
__asm cls[command, *data, hi_addr, << 8, low_addr, __ct_const_val(count)], ctx_swap[*sig_ptr] \
} \
} \
else \
{ \
if (__is_ct_const(count)) \
{ \
CT_ASSERT(count<=16); \
} \
else \
{ \
CT_QPERFINFO_INDIRECT_REF(command); \
} \
{ \
generic_ind_t ind; \
_INTRINSIC_OVERRIDE_LENGTH(ind, (count - 1)); \
if (sync == sig_done) \
{ \
__asm alu[--, --, B, ALU_INDIRECT_TYPE(ind)] \
__asm cls[command, *data, hi_addr, << 8, low_addr, __ct_const_val(count)], sig_done[*sig_ptr], indirect_ref \
} \
else \
{ \
__asm alu[--, --, B, ALU_INDIRECT_TYPE(ind)] \
__asm cls[command, *data, hi_addr, << 8, low_addr, __ct_const_val(count)], ctx_swap[*sig_ptr], indirect_ref \
} \
} \
} \
}
/* Local Scratch reflect from signal src */
__intrinsic
void cls_reflect_write_sig_local(
__xwrite void *data, /* data to reflect */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
unsigned int count, /* number of longwords to reflect */
sync_t sync, /* type of synchronization to use */
volatile SIGNAL *sig_ptr /* signal to raise upon completion */
)
{
_INTRINSIC_BEGIN;
{
_CLS_IMPLEMENT_REFLECT_COMMAND(reflect_write_sig_local, 0);
}
_INTRINSIC_END;
}
__intrinsic
void cls_reflect_write_sig_local_ptr40(
__xwrite void *data, /* data to reflect */
unsigned int remote_island, /* remote island number */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
unsigned int count, /* number of longwords to reflect */
sync_t sync, /* type of synchronization to use */
volatile SIGNAL *sig_ptr /* signal to raise upon completion */
)
{
_INTRINSIC_BEGIN;
{
_CLS_IMPLEMENT_REFLECT_COMMAND_PTR40(reflect_write_sig_local, 0);
}
_INTRINSIC_END;
}
/* Local Scratch reflect from signal dest */
void cls_reflect_write_sig_remote(
__xwrite void *data, /* data to reflect */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
int sig, /* triggered signal number*/
unsigned int count /* number of longwords to reflect */
)
{
_INTRINSIC_BEGIN;
{
CT_ASSERT(__is_write_reg(data));
CT_ASSERT(__is_ct_const(sig) && sig<16);
_CLS_REFLECT_ME_RANGE_CHECK();
{
/* Address encoding formula for cls reflect
bits[11:0] = xfer register address (reg number)
bits[15:12] = FPC/ME
bits[23:16] = 8b0
bits[27:24] = Signal number
bits[30:28] = ctx
bit[31] = Signal reference (If set, signal number comes from bits [27:24]
*/
unsigned int address = (1 << 31) | (sig << 24) | ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff);
if (__is_ct_const(count) && count<=8)
{
CT_ASSERT(count!=0);
__asm cls[reflect_write_sig_remote, *data, address, 0, __ct_const_val(count)]
} else
{
if (__is_ct_const(count))
{
CT_ASSERT(count<=16);
}
else
{
CT_QPERFINFO_INDIRECT_REF(reflect_write_sig_remote);
}
{
generic_ind_t ind;
_INTRINSIC_OVERRIDE_LENGTH(ind, count - 1);
__asm
{
alu[--, --, B, ALU_INDIRECT_TYPE(ind)]
cls[reflect_write_sig_remote, *data, address, 0, __ct_const_val(count)], indirect_ref
}
}
}
}
}
_INTRINSIC_END;
}
void cls_reflect_write_sig_remote_ptr40(
__xwrite void *data, /* data to reflect */
unsigned int remote_island, /* remote island number */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
int sig, /* triggered signal number*/
unsigned int count /* number of longwords to reflect */
)
{
_INTRINSIC_BEGIN;
{
CT_ASSERT(__is_write_reg(data));
CT_ASSERT(__is_ct_const(sig) && sig<16);
_CLS_REFLECT_ME_RANGE_CHECK();
_CLS_REFLECT_ISLAND_RANGE_CHECK();
{
/* Address encoding formula for cls reflect
bits[11:0] = xfer register address (reg number)
bits[15:12] = FPC/ME
bits[23:16] = 8b0
bits[27:24] = Signal number
bits[30:28] = ctx
bit[31] = Signal reference (If set, signal number comes from bits [27:24]
*/
unsigned int low_addr = (1 << 31) | (sig << 24) | ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff);
unsigned int hi_addr = (remote_island & 0x3f) << 26;
if (__is_ct_const(count) && count<=8)
{
CT_ASSERT(count!=0);
__asm cls[reflect_write_sig_remote, *data, hi_addr, << 8, low_addr, __ct_const_val(count)]
} else
{
if (__is_ct_const(count))
{
CT_ASSERT(count<=16);
}
else
{
CT_QPERFINFO_INDIRECT_REF(reflect_write_sig_remote);
}
{
generic_ind_t ind;
_INTRINSIC_OVERRIDE_LENGTH(ind, count - 1);
__asm
{
alu[--, --, B, ALU_INDIRECT_TYPE(ind)]
cls[reflect_write_sig_remote, *data, hi_addr, << 8, low_addr, __ct_const_val(count)], indirect_ref
}
}
}
}
}
_INTRINSIC_END;
}
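/*
 * Editor's illustration (hypothetical helper, not used by the library):
 * the 32-bit "low" reflect address built by the routines above, packed as
 * plain C so the bit layout described in the comments can be checked.
 */
static unsigned int
cls_reflect_pack_low_addr_example(unsigned int sig, unsigned int remote_ME,
                                  unsigned int remote_xfer_reg_number)
{
    /* bit 31 = signal reference, bits[27:24] = signal number,
     * bits[15:12] = FPC/ME, bits[11:0] = xfer register number */
    return (1u << 31) | ((sig & 0xf) << 24) |
           ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff);
}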
/* Local Scratch reflect from signal both */
void cls_reflect_write_sig_both(
__xwrite void *data, /* data to reflect */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
unsigned int count, /* number of longwords to reflect */
sync_t sync, /* type of synchronization to use */
volatile SIGNAL *sig_ptr /* signal to raise upon completion */
)
{
_INTRINSIC_BEGIN;
{
_CLS_IMPLEMENT_REFLECT_COMMAND(reflect_write_sig_both, 0);
}
_INTRINSIC_END;
}
void cls_reflect_write_sig_both_ptr40(
__xwrite void *data, /* data to reflect */
unsigned int remote_island, /* remote island number */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
unsigned int count, /* number of longwords to reflect */
sync_t sync, /* type of synchronization to use */
volatile SIGNAL *sig_ptr /* signal to raise upon completion */
)
{
_INTRINSIC_BEGIN;
{
_CLS_IMPLEMENT_REFLECT_COMMAND_PTR40(reflect_write_sig_both, 0);
}
_INTRINSIC_END;
}
/* Local Scratch reflect to signal src */
void cls_reflect_read_sig_remote(
__xread void *data, /* data from reflect */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
int sig, /* triggered signal number*/
unsigned int count /* number of longwords to reflect */
)
{
_INTRINSIC_BEGIN;
{
CT_ASSERT(__is_read_reg(data));
CT_ASSERT(__is_ct_const(sig) && sig<16);
_CLS_REFLECT_ME_RANGE_CHECK();
{
/* Address encoding formula for cls reflect
bits[11:0] = xfer register address (reg number)
bits[15:12] = FPC/ME
bits[23:16] = 8b0
bits[27:24] = Signal number
bits[30:28] = ctx
bit[31] = Signal reference (If set, signal number comes from bits [27:24]
*/
unsigned int address = (1 << 31) | (sig << 24) | ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff);
if (__is_ct_const(count) && count<=8)
{
CT_ASSERT(count!=0);
__asm cls[reflect_read_sig_remote, *data, address, 0, __ct_const_val(count)]
} else
{
if (__is_ct_const(count))
{
CT_ASSERT(count<=16);
}
else
{
CT_QPERFINFO_INDIRECT_REF(reflect_read_sig_remote);
}
{
generic_ind_t ind;
_INTRINSIC_OVERRIDE_LENGTH(ind, count - 1);
__asm
{
alu[--, --, B, ALU_INDIRECT_TYPE(ind)]
cls[reflect_read_sig_remote, *data, address, 0, __ct_const_val(count)], indirect_ref
}
}
}
}
}
_INTRINSIC_END;
}
void cls_reflect_read_sig_remote_ptr40(
__xread void *data, /* data from reflect */
unsigned int remote_island, /* remote island number */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
int sig, /* triggered signal number*/
unsigned int count /* number of longwords to reflect */
)
{
_INTRINSIC_BEGIN;
{
CT_ASSERT(__is_read_reg(data));
CT_ASSERT(__is_ct_const(sig) && sig<16);
_CLS_REFLECT_ME_RANGE_CHECK();
_CLS_REFLECT_ISLAND_RANGE_CHECK();
{
/* Address encoding formula for cls reflect
bits[11:0] = xfer register address (reg number)
bits[15:12] = FPC/ME
bits[23:16] = 8b0
bits[27:24] = Signal number
bits[30:28] = ctx
bit[31] = Signal reference (If set, signal number comes from bits [27:24]
bit[33:32] = 2b0
bit[39:34] = island number
*/
unsigned int low_addr = (1 << 31) | (sig << 24) | ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff);
unsigned int hi_addr = (remote_island & 0x3f) << 26;
if (__is_ct_const(count) && count<=8)
{
CT_ASSERT(count!=0);
__asm cls[reflect_read_sig_remote, *data, hi_addr, << 8, low_addr, __ct_const_val(count)]
} else
{
if (__is_ct_const(count))
{
CT_ASSERT(count<=16);
}
else
{
CT_QPERFINFO_INDIRECT_REF(reflect_read_sig_remote);
}
{
generic_ind_t ind;
_INTRINSIC_OVERRIDE_LENGTH(ind, count - 1);
__asm
{
alu[--, --, B, ALU_INDIRECT_TYPE(ind)]
cls[reflect_read_sig_remote, *data, hi_addr, << 8, low_addr, __ct_const_val(count)], indirect_ref
}
}
}
}
}
_INTRINSIC_END;
}
/* Local Scratch reflect to signal dest */
void cls_reflect_read_sig_local(
__xread void *data, /* data from reflect */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
unsigned int count, /* number of longwords to reflect */
sync_t sync, /* type of synchronization to use */
volatile SIGNAL *sig_ptr /* signal to raise upon completion */
)
{
_INTRINSIC_BEGIN;
{
_CLS_IMPLEMENT_REFLECT_COMMAND(reflect_read_sig_local, 1);
}
_INTRINSIC_END;
}
/* Local Scratch reflect to signal dest */
void cls_reflect_read_sig_local_ptr40(
__xread void *data, /* data from reflect */
unsigned int remote_island, /* remote island number */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
unsigned int count, /* number of longwords to reflect */
sync_t sync, /* type of synchronization to use */
volatile SIGNAL *sig_ptr /* signal to raise upon completion */
)
{
_INTRINSIC_BEGIN;
{
_CLS_IMPLEMENT_REFLECT_COMMAND_PTR40(reflect_read_sig_local, 1);
}
_INTRINSIC_END;
}
/* Local Scratch reflect to signal both */
void cls_reflect_read_sig_both(
__xread void *data, /* data from reflect */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
unsigned int count, /* number of longwords to reflect */
sync_t sync, /* type of synchronization to use */
volatile SIGNAL *sig_ptr /* signal to raise upon completion */
)
{
_INTRINSIC_BEGIN;
{
_CLS_IMPLEMENT_REFLECT_COMMAND(reflect_read_sig_both, 1);
}
_INTRINSIC_END;
}
void cls_reflect_read_sig_both_ptr40(
__xread void *data, /* data from reflect */
unsigned int remote_island, /* remote island number */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
unsigned int count, /* number of longwords to reflect */
sync_t sync, /* type of synchronization to use */
volatile SIGNAL *sig_ptr /* signal to raise upon completion */
)
{
_INTRINSIC_BEGIN;
{
_CLS_IMPLEMENT_REFLECT_COMMAND_PTR40(reflect_read_sig_both, 1);
}
_INTRINSIC_END;
}
#ifdef __PTR40
__intrinsic
void cls_reflect_write_sig_local_ind_ptr40(
__xwrite void *data, /* data to reflect */
unsigned int remote_island, /* remote island number */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
unsigned int max_nn, /* max number of longwords to reflect */
generic_ind_t ind, /* indirect */
sync_t sync, /* type of synchronization to use */
volatile SIGNAL *sig_ptr /* signal to raise upon completion */
)
{
_INTRINSIC_BEGIN;
{
CT_ASSERT(__is_write_reg(data));
CT_ASSERT(__is_ct_const(max_nn) && max_nn <= 16);
CT_ASSERT(__is_ct_const(sync));
CT_ASSERT(sync == sig_done || sync == ctx_swap);
_CLS_REFLECT_ME_RANGE_CHECK();
_CLS_REFLECT_ISLAND_RANGE_CHECK();
{
/* Address encoding formula for cls reflect
* bits[11:0] = xfer register address (reg number)
* bits[15:12] = FPC/ME
* bits[23:16] = 8b0
* bits[27:24] = Signal number
* bits[30:28] = ctx
* bit[31] = Signal reference (If set, signal number comes from bits [27:24]
* bit[33:32] = 2b0
* bit[39:34] = island number
*/
unsigned int low_addr = ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff);
unsigned int hi_addr = (remote_island & 0x3f) << 26;
#ifdef __NFP_INDIRECT_REF_FORMAT_NFP6000
__asm local_csr_wr[CMD_INDIRECT_REF_0, CSR_INDIRECT_TYPE(ind)]
#endif
__asm alu[--, --, B, ALU_INDIRECT_TYPE(ind)]
if (sync == sig_done)
{
__asm cls[reflect_write_sig_local, *data, hi_addr, <<8, low_addr, __ct_const_val(max_nn)], sig_done[*sig_ptr], indirect_ref
}
else
{
__asm cls[reflect_write_sig_local, *data, hi_addr, <<8, low_addr, __ct_const_val(max_nn)], ctx_swap[*sig_ptr], indirect_ref
}
}
}
_INTRINSIC_END;
}
#endif
__intrinsic
void cls_reflect_write_sig_local_ind(
__xwrite void *data, /* data to reflect */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
unsigned int max_nn, /* max number of longwords to reflect */
generic_ind_t ind, /* indirect */
sync_t sync, /* type of synchronization to use */
volatile SIGNAL *sig_ptr /* signal to raise upon completion */
)
{
_INTRINSIC_BEGIN;
{
CT_ASSERT(__is_write_reg(data));
CT_ASSERT(__is_ct_const(max_nn) && max_nn <= 16);
CT_ASSERT(__is_ct_const(sync));
CT_ASSERT(sync == sig_done || sync == ctx_swap);
_CLS_REFLECT_ME_RANGE_CHECK();
{
/* Address encoding formula
* Address encoding formula for cls reflect
* bits[11:0] = xfer register address (reg number)
* bits[15:12] = FPC/ME
* bits[23:16] = 8b0
* bits[27:24] = Signal number
* bits[30:28] = ctx
* bit[31] = Signal reference (If set, signal number comes from bits [27:24]
*/
unsigned int address = ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff);
#ifdef __NFP_INDIRECT_REF_FORMAT_NFP6000
__asm local_csr_wr[CMD_INDIRECT_REF_0, CSR_INDIRECT_TYPE(ind)]
#endif
__asm alu[--, --, B, ALU_INDIRECT_TYPE(ind)]
if (sync == sig_done)
{
__asm cls[reflect_write_sig_local, *data, address, 0, __ct_const_val(max_nn)], sig_done[*sig_ptr], indirect_ref
}
else
{
__asm cls[reflect_write_sig_local, *data, address, 0, __ct_const_val(max_nn)], ctx_swap[*sig_ptr], indirect_ref
}
}
}
_INTRINSIC_END;
}
#ifdef __PTR40
__intrinsic
void cls_reflect_write_sig_remote_ind_ptr40(
__xwrite void *data, /* data to reflect */
unsigned int remote_island, /* remote island number */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
int sig, /* triggered signal number*/
unsigned int max_nn, /* max number of longwords to reflect */
generic_ind_t ind /* indirect */
)
{
_INTRINSIC_BEGIN;
{
CT_ASSERT(__is_write_reg(data));
CT_ASSERT(__is_ct_const(sig) && sig < 16);
CT_ASSERT(__is_ct_const(max_nn) && max_nn <= 16);
_CLS_REFLECT_ME_RANGE_CHECK();
_CLS_REFLECT_ISLAND_RANGE_CHECK();
{
/* Address encoding formula for cls reflect
* bits[11:0] = xfer register address (reg number)
* bits[15:12] = FPC/ME
* bits[23:16] = 8b0
* bits[27:24] = Signal number
* bits[30:28] = ctx
* bit[31] = Signal reference (If set, signal number comes from bits [27:24]
* bit[33:32] = 2b0
* bit[39:34] = island number
*/
unsigned int low_addr = (1 << 31) | (sig << 24) | ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff);
unsigned int hi_addr = (remote_island & 0x3f) << 26;
#ifdef __NFP_INDIRECT_REF_FORMAT_NFP6000
__asm local_csr_wr[CMD_INDIRECT_REF_0, CSR_INDIRECT_TYPE(ind)]
#endif
__asm alu[--, --, B, ALU_INDIRECT_TYPE(ind)]
__asm cls[reflect_write_sig_remote, *data, hi_addr, <<8, low_addr, __ct_const_val(max_nn)], indirect_ref
}
}
_INTRINSIC_END;
}
#endif
__intrinsic
void cls_reflect_write_sig_remote_ind(
__xwrite void *data, /* data to reflect */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
int sig, /* triggered signal number*/
unsigned int max_nn, /* max number of longwords to reflect */
generic_ind_t ind /* indirect */
)
{
_INTRINSIC_BEGIN;
{
CT_ASSERT(__is_write_reg(data));
CT_ASSERT(__is_ct_const(sig) && sig < 16);
CT_ASSERT(__is_ct_const(max_nn) && max_nn <= 16);
if(__is_ct_const(remote_ME))
_CLS_REFLECT_ME_RANGE_CHECK();
{
/* Address encoding formula
* Address encoding formula for cls reflect
* bits[11:0] = xfer register address (reg number)
* bits[15:12] = FPC/ME
* bits[23:16] = 8b0
* bits[27:24] = Signal number
* bits[30:28] = ctx
* bit[31] = Signal reference (If set, signal number comes from bits [27:24]
*/
unsigned int address = (1 << 31) | (sig << 24) | ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff);
#ifdef __NFP_INDIRECT_REF_FORMAT_NFP6000
__asm local_csr_wr[CMD_INDIRECT_REF_0, CSR_INDIRECT_TYPE(ind)]
#endif
__asm alu[--, --, B, ALU_INDIRECT_TYPE(ind)]
__asm cls[reflect_write_sig_remote, *data, address, 0, __ct_const_val(max_nn)], indirect_ref
}
}
_INTRINSIC_END;
}
#ifdef __PTR40
__intrinsic
void cls_reflect_write_sig_both_ind_ptr40(
__xwrite void *data, /* data to reflect */
unsigned int remote_island, /* remote island number */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
unsigned int max_nn, /* max number of longwords to reflect */
generic_ind_t ind, /* indirect */
sync_t sync, /* type of synchronization to use */
volatile SIGNAL *sig_ptr /* signal to raise upon completion */
)
{
_INTRINSIC_BEGIN;
{
CT_ASSERT(__is_write_reg(data));
CT_ASSERT(__is_ct_const(max_nn) && max_nn <= 16);
CT_ASSERT(__is_ct_const(sync));
CT_ASSERT(sync == sig_done || sync == ctx_swap);
_CLS_REFLECT_ME_RANGE_CHECK();
_CLS_REFLECT_ISLAND_RANGE_CHECK();
{
/* Address encoding formula for cls reflect
* bits[11:0] = xfer register address (reg number)
* bits[15:12] = FPC/ME
* bits[23:16] = 8b0
* bits[27:24] = Signal number
* bits[30:28] = ctx
             * bit[31] = Signal reference (If set, signal number comes from bits [27:24])
* bit[33:32] = 2b0
* bit[39:34] = island number
*/
unsigned int low_addr = ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff);
unsigned int hi_addr = (remote_island & 0x3f) << 26;
#ifdef __NFP_INDIRECT_REF_FORMAT_NFP6000
__asm local_csr_wr[CMD_INDIRECT_REF_0, CSR_INDIRECT_TYPE(ind)]
#endif
__asm alu[--, --, B, ALU_INDIRECT_TYPE(ind)]
if (sync == sig_done)
{
__asm cls[reflect_write_sig_both, *data, hi_addr, <<8, low_addr, __ct_const_val(max_nn)], sig_done[*sig_ptr], indirect_ref
}
else
{
__asm cls[reflect_write_sig_both, *data, hi_addr, <<8, low_addr, __ct_const_val(max_nn)], ctx_swap[*sig_ptr], indirect_ref
}
}
}
_INTRINSIC_END;
}
#endif
__intrinsic
void cls_reflect_write_sig_both_ind(
__xwrite void *data, /* data to reflect */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
unsigned int max_nn, /* max number of longwords to reflect */
generic_ind_t ind, /* indirect */
sync_t sync, /* type of synchronization to use */
volatile SIGNAL *sig_ptr /* signal to raise upon completion */
)
{
_INTRINSIC_BEGIN;
{
CT_ASSERT(__is_write_reg(data));
CT_ASSERT(__is_ct_const(max_nn) && max_nn <= 16);
CT_ASSERT(__is_ct_const(sync));
CT_ASSERT(sync == sig_done || sync == ctx_swap);
_CLS_REFLECT_ME_RANGE_CHECK();
{
/* Address encoding formula for reflect
* bits[11:0] = xfer register address (reg number)
* bits[15:12] = FPC/ME
* bits[23:16] = 8b0
* bits[27:24] = Signal number
* bits[30:28] = ctx
             * bit[31] = Signal reference (If set, signal number comes from bits [27:24])
*/
unsigned int address = ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff);
#ifdef __NFP_INDIRECT_REF_FORMAT_NFP6000
__asm local_csr_wr[CMD_INDIRECT_REF_0, CSR_INDIRECT_TYPE(ind)]
#endif
__asm alu[--, --, B, ALU_INDIRECT_TYPE(ind)]
if (sync == sig_done)
{
__asm cls[reflect_write_sig_both, *data, address, 0, __ct_const_val(max_nn)], sig_done[*sig_ptr], indirect_ref
}
else
{
__asm cls[reflect_write_sig_both, *data, address, 0, __ct_const_val(max_nn)], ctx_swap[*sig_ptr], indirect_ref
}
}
}
_INTRINSIC_END;
}
#ifdef __PTR40
__intrinsic
void cls_reflect_read_sig_remote_ind_ptr40(
__xread void *data, /* data from reflect */
unsigned int remote_island, /* remote island number */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
int sig, /* triggered signal number*/
unsigned int max_nn, /* max number of longwords to reflect */
generic_ind_t ind /* indirect */
)
{
_INTRINSIC_BEGIN;
{
CT_ASSERT(__is_read_reg(data));
CT_ASSERT(__is_ct_const(sig) && sig < 16);
CT_ASSERT(__is_ct_const(max_nn) && max_nn <= 16);
_CLS_REFLECT_ME_RANGE_CHECK();
_CLS_REFLECT_ISLAND_RANGE_CHECK();
{
/* Address encoding formula for cls reflect
* bits[11:0] = xfer register address (reg number)
* bits[15:12] = FPC/ME
* bits[23:16] = 8b0
* bits[27:24] = Signal number
* bits[30:28] = ctx
             * bit[31] = Signal reference (If set, signal number comes from bits [27:24])
* bit[33:32] = 2b0
* bit[39:34] = island number
*/
unsigned int low_addr = (1 << 31) | (sig << 24) | ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff);
unsigned int hi_addr = (remote_island & 0x3f) << 26;
#ifdef __NFP_INDIRECT_REF_FORMAT_NFP6000
__asm local_csr_wr[CMD_INDIRECT_REF_0, CSR_INDIRECT_TYPE(ind)]
#endif
__asm alu[--, --, B, ALU_INDIRECT_TYPE(ind)]
__asm cls[reflect_read_sig_remote, *data, hi_addr, <<8, low_addr, __ct_const_val(max_nn)], indirect_ref
}
}
_INTRINSIC_END;
}
#endif
__intrinsic
void cls_reflect_read_sig_remote_ind(
__xread void *data, /* data from reflect */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
int sig, /* triggered signal number*/
unsigned int max_nn, /* max number of longwords to reflect */
generic_ind_t ind /* indirect */
)
{
_INTRINSIC_BEGIN;
{
CT_ASSERT(__is_read_reg(data));
CT_ASSERT(__is_ct_const(sig) && sig < 16);
CT_ASSERT(__is_ct_const(max_nn) && max_nn <= 16);
_CLS_REFLECT_ME_RANGE_CHECK();
{
            /* Address encoding formula for cls reflect
             * bits[11:0] = xfer register address (reg number)
             * bits[15:12] = FPC/ME
             * bits[23:16] = 8b0
             * bits[27:24] = Signal number
             * bits[30:28] = ctx
             * bit[31] = Signal reference (If set, signal number comes from bits [27:24])
*/
unsigned int address = (1 << 31) | (sig << 24) | ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff);
#ifdef __NFP_INDIRECT_REF_FORMAT_NFP6000
__asm local_csr_wr[CMD_INDIRECT_REF_0, CSR_INDIRECT_TYPE(ind)]
#endif
__asm alu[--, --, B, ALU_INDIRECT_TYPE(ind)]
__asm cls[reflect_read_sig_remote, *data, address, 0, __ct_const_val(max_nn)], indirect_ref
}
}
_INTRINSIC_END;
}
#ifdef __PTR40
__intrinsic
void cls_reflect_read_sig_local_ind_ptr40(
__xread void *data, /* data from reflect */
unsigned int remote_island, /* remote island number */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
unsigned int max_nn, /* max number of longwords to reflect */
generic_ind_t ind, /* indirect */
sync_t sync, /* type of synchronization to use */
volatile SIGNAL *sig_ptr /* signal to raise upon completion */
)
{
_INTRINSIC_BEGIN;
{
CT_ASSERT(__is_read_reg(data));
CT_ASSERT(__is_ct_const(max_nn) && max_nn <= 16);
CT_ASSERT(__is_ct_const(sync));
CT_ASSERT(sync == sig_done || sync == ctx_swap);
_CLS_REFLECT_ME_RANGE_CHECK();
_CLS_REFLECT_ISLAND_RANGE_CHECK();
{
/* Address encoding formula for cls reflect
* bits[11:0] = xfer register address (reg number)
* bits[15:12] = FPC/ME
* bits[23:16] = 8b0
* bits[27:24] = Signal number
* bits[30:28] = ctx
             * bit[31] = Signal reference (If set, signal number comes from bits [27:24])
* bit[33:32] = 2b0
* bit[39:34] = island number
*/
unsigned int low_addr = ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff);
unsigned int hi_addr = (remote_island & 0x3f) << 26;
#ifdef __NFP_INDIRECT_REF_FORMAT_NFP6000
__asm local_csr_wr[CMD_INDIRECT_REF_0, CSR_INDIRECT_TYPE(ind)]
#endif
__asm alu[--, --, B, ALU_INDIRECT_TYPE(ind)]
if (sync == sig_done)
{
__asm cls[reflect_read_sig_local, *data, hi_addr, <<8, low_addr, __ct_const_val(max_nn)], sig_done[*sig_ptr], indirect_ref
}
else
{
__asm cls[reflect_read_sig_local, *data, hi_addr, <<8, low_addr, __ct_const_val(max_nn)], ctx_swap[*sig_ptr], indirect_ref
}
}
}
_INTRINSIC_END;
}
#endif
__intrinsic
void cls_reflect_read_sig_local_ind(
__xread void *data, /* data from reflect */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
unsigned int max_nn, /* max number of longwords to reflect */
generic_ind_t ind, /* indirect */
sync_t sync, /* type of synchronization to use */
volatile SIGNAL *sig_ptr /* signal to raise upon completion */
)
{
_INTRINSIC_BEGIN;
{
CT_ASSERT(__is_read_reg(data));
CT_ASSERT(__is_ct_const(max_nn) && max_nn <= 16);
CT_ASSERT(__is_ct_const(sync));
CT_ASSERT(sync == sig_done || sync == ctx_swap);
if(__is_ct_const(remote_ME))
_CLS_REFLECT_ME_RANGE_CHECK();
{
            /* Address encoding formula for cls reflect
             * bits[11:0] = xfer register address (reg number)
             * bits[15:12] = FPC/ME
             * bits[23:16] = 8b0
             * bits[27:24] = Signal number
             * bits[30:28] = ctx
             * bit[31] = Signal reference (If set, signal number comes from bits [27:24])
*/
unsigned int address = ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff);
#ifdef __NFP_INDIRECT_REF_FORMAT_NFP6000
__asm local_csr_wr[CMD_INDIRECT_REF_0, CSR_INDIRECT_TYPE(ind)]
#endif
__asm alu[--, --, B, ALU_INDIRECT_TYPE(ind)]
if (sync == sig_done)
{
__asm cls[reflect_read_sig_local, *data, address, 0, __ct_const_val(max_nn)], sig_done[*sig_ptr], indirect_ref
}
else
{
__asm cls[reflect_read_sig_local, *data, address, 0, __ct_const_val(max_nn)], ctx_swap[*sig_ptr], indirect_ref
}
}
}
_INTRINSIC_END;
}
#ifdef __PTR40
__intrinsic
void cls_reflect_read_sig_both_ind_ptr40(
__xread void *data, /* data from reflect */
unsigned int remote_island, /* remote island number */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
unsigned int max_nn, /* max number of longwords to reflect */
generic_ind_t ind, /* indirect */
sync_t sync, /* type of synchronization to use */
volatile SIGNAL *sig_ptr /* signal to raise upon completion */
)
{
_INTRINSIC_BEGIN;
{
CT_ASSERT(__is_read_reg(data));
CT_ASSERT(__is_ct_const(max_nn) && max_nn <= 16);
CT_ASSERT(__is_ct_const(sync));
CT_ASSERT(sync == sig_done || sync == ctx_swap);
_CLS_REFLECT_ME_RANGE_CHECK();
_CLS_REFLECT_ISLAND_RANGE_CHECK();
{
/* Address encoding formula for cls reflect
* bits[11:0] = xfer register address (reg number)
* bits[15:12] = FPC/ME
* bits[23:16] = 8b0
* bits[27:24] = Signal number
* bits[30:28] = ctx
             * bit[31] = Signal reference (If set, signal number comes from bits [27:24])
* bit[33:32] = 2b0
* bit[39:34] = island number
*/
unsigned int low_addr = ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff);
unsigned int hi_addr = (remote_island & 0x3f) << 26;
#ifdef __NFP_INDIRECT_REF_FORMAT_NFP6000
__asm local_csr_wr[CMD_INDIRECT_REF_0, CSR_INDIRECT_TYPE(ind)]
#endif
__asm alu[--, --, B, ALU_INDIRECT_TYPE(ind)]
if (sync == sig_done)
{
__asm cls[reflect_read_sig_both, *data, hi_addr, <<8, low_addr, __ct_const_val(max_nn)], sig_done[*sig_ptr], indirect_ref
}
else
{
__asm cls[reflect_read_sig_both, *data, hi_addr, <<8, low_addr, __ct_const_val(max_nn)], ctx_swap[*sig_ptr], indirect_ref
}
}
}
_INTRINSIC_END;
}
#endif
__intrinsic
void cls_reflect_read_sig_both_ind(
__xread void *data, /* data from reflect */
unsigned int remote_ME, /* remote ME number */
unsigned int remote_xfer_reg_number, /* remote xfer register number */
unsigned int max_nn, /* max number of longwords to reflect */
generic_ind_t ind, /* indirect */
sync_t sync, /* type of synchronization to use */
volatile SIGNAL *sig_ptr /* signal to raise upon completion */
)
{
_INTRINSIC_BEGIN;
{
CT_ASSERT(__is_read_reg(data));
CT_ASSERT(__is_ct_const(max_nn) && max_nn <= 16);
CT_ASSERT(__is_ct_const(sync));
CT_ASSERT(sync == sig_done || sync == ctx_swap);
_CLS_REFLECT_ME_RANGE_CHECK();
{
            /* Address encoding formula for cls reflect
             * bits[11:0] = xfer register address (reg number)
             * bits[15:12] = FPC/ME
             * bits[23:16] = 8b0
             * bits[27:24] = Signal number
             * bits[30:28] = ctx
             * bit[31] = Signal reference (If set, signal number comes from bits [27:24])
*/
unsigned int address = ((remote_ME & 0xf) << 12) | (remote_xfer_reg_number & 0xfff);
#ifdef __NFP_INDIRECT_REF_FORMAT_NFP6000
__asm local_csr_wr[CMD_INDIRECT_REF_0, CSR_INDIRECT_TYPE(ind)]
#endif
__asm alu[--, --, B, ALU_INDIRECT_TYPE(ind)]
if (sync == sig_done)
{
__asm cls[reflect_read_sig_both, *data, address, 0, __ct_const_val(max_nn)], sig_done[*sig_ptr], indirect_ref
}
else
{
__asm cls[reflect_read_sig_both, *data, address, 0, __ct_const_val(max_nn)], ctx_swap[*sig_ptr], indirect_ref
}
}
}
_INTRINSIC_END;
}
#pragma diag_default 279 /* suppressed above */
#endif /* __NFP_CLS_REFLECT_C__ */
|
559202.c |
#include <windows.h>
#include <process.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#define BUF_SIZE 10
#define NUM_THREADS 7
DWORD WINAPI Producer(LPVOID param);
DWORD WINAPI Consumer(LPVOID param);
HANDLE mutex;
HANDLE not_full;
HANDLE not_empty;
int buffer[BUF_SIZE];
int size = 0;
int front = 0;
int back = 0;
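/*
 * Shared state for the bounded-buffer producer/consumer demo: buffer is a
 * circular queue of BUF_SIZE ints, front is the producer's write index,
 * back is the consumer's read index, and size counts the occupied slots.
 * The binary semaphore `mutex` guards these counters, while the auto-reset
 * events not_full and not_empty wake a producer or consumer that found the
 * buffer full or empty, respectively.
 */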
int main()
{
DWORD ThreadId;
    HANDLE threads[NUM_THREADS];
int params[] = { 0, 1, 2, 3, 4, 5, 6};
mutex = CreateSemaphore(0, 1, 1, 0);
not_full = CreateEvent(NULL, FALSE, FALSE, NULL);
not_empty = CreateEvent(NULL, FALSE, FALSE, NULL);
for (int i = 0; i < 3; i++)
{
        threads[i] = CreateThread(0, 0, Producer, &params[i], 0, &ThreadId);
}
for (int i = 3; i < NUM_THREADS; i++)
{
        threads[i] = CreateThread(0, 0, Consumer, &params[i], 0, &ThreadId);
}
for (int i = 0; i < NUM_THREADS; i++)
WaitForSingleObject(threads[i], INFINITE);
}
DWORD WINAPI Producer(LPVOID param)
{
    int id = *(int *)param;
srand(time(0) + id);
for (;;)
{
int prod = rand() % 3000;
Sleep(prod);
WaitForSingleObject(mutex, INFINITE);
if (size != BUF_SIZE)
{
buffer[front] = prod;
front = (front + 1) % BUF_SIZE;
size++;
printf("%d producer %d %d\n", size, id, prod);
ReleaseSemaphore(mutex, 1, 0);
SetEvent(not_empty);
}
else
{
printf("producer %d sleep\n", id);
ReleaseSemaphore(mutex, 1, 0);
WaitForSingleObject(not_full, INFINITE);
printf("producer %d wake\n", id);
}
}
return 0;
}
DWORD WINAPI Consumer(LPVOID param)
{
    int id = *(int *)param;
srand(time(0) + id);
for (;;)
{
int consume;
Sleep(rand() % 2000);
WaitForSingleObject(mutex, INFINITE);
if (size != 0)
{
consume = buffer[back];
back = (back + 1) % BUF_SIZE;
size--;
printf("%d consumer %d %d\n", size, id, consume);
ReleaseSemaphore(mutex, 1, 0);
SetEvent(not_full);
}
else
{
printf("consumer %d sleep\n", id);
ReleaseSemaphore(mutex, 1, 0);
WaitForSingleObject(not_empty, INFINITE);
printf("consumer %d wake\n", id);
}
}
return 0;
}
|
256509.c | /*
* tclCompExpr.c --
*
* This file contains the code to parse and compile Tcl expressions and
* implementations of the Tcl commands corresponding to expression
* operators, such as the command ::tcl::mathop::+ .
*
* Contributions from Don Porter, NIST, 2006-2007. (not subject to US copyright)
*
* See the file "license.terms" for information on usage and redistribution of
* this file, and for a DISCLAIMER OF ALL WARRANTIES.
*/
#include "tclInt.h"
#include "tclCompile.h" /* CompileEnv */
/*
* Expression parsing takes place in the routine ParseExpr(). It takes a
* string as input, parses that string, and generates a representation of the
* expression in the form of a tree of operators, a list of literals, a list
* of function names, and an array of Tcl_Token's within a Tcl_Parse struct.
* The tree is composed of OpNodes.
*/
typedef struct {
int left; /* "Pointer" to the left operand. */
int right; /* "Pointer" to the right operand. */
union {
int parent; /* "Pointer" to the parent operand. */
int prev; /* "Pointer" joining incomplete tree stack */
} p;
unsigned char lexeme; /* Code that identifies the operator. */
unsigned char precedence; /* Precedence of the operator */
unsigned char mark; /* Mark used to control traversal. */
unsigned char constant; /* Flag marking constant subexpressions. */
} OpNode;
/*
 * The storage for the tree is a dynamically allocated array of OpNodes. The
* array is grown as parsing needs dictate according to a scheme similar to
* Tcl's string growth algorithm, so that the resizing costs are O(N) and so
* that we use at least half the memory allocated as expressions get large.
*
* Each OpNode in the tree represents an operator in the expression, either
* unary or binary. When parsing is completed successfully, a binary operator
* OpNode will have its left and right fields filled with "pointers" to its
* left and right operands. A unary operator OpNode will have its right field
* filled with a pointer to its single operand. When an operand is a
* subexpression the "pointer" takes the form of the index -- a non-negative
* integer -- into the OpNode storage array where the root of that
* subexpression parse tree is found.
*
* Non-operator elements of the expression do not get stored in the OpNode
* tree. They are stored in the other structures according to their type.
* Literal values get appended to the literal list. Elements that denote forms
* of quoting or substitution known to the Tcl parser get stored as
* Tcl_Tokens. These non-operator elements of the expression are the leaves of
* the completed parse tree. When an operand of an OpNode is one of these leaf
* elements, the following negative integer codes are used to indicate which
* kind of elements it is.
*/
enum OperandTypes {
OT_LITERAL = -3, /* Operand is a literal in the literal list */
OT_TOKENS = -2, /* Operand is sequence of Tcl_Tokens */
OT_EMPTY = -1 /* "Operand" is an empty string. This is a special
* case used only to represent the EMPTY lexeme. See
* below. */
};
/*
* Readable macros to test whether a "pointer" value points to an operator.
* They operate on the "non-negative integer -> operator; negative integer ->
* a non-operator OperandType" distinction.
*/
#define IsOperator(l) ((l) >= 0)
#define NotOperator(l) ((l) < 0)
/*
* Note that it is sufficient to store in the tree just the type of leaf
* operand, without any explicit pointer to which leaf. This is true because
* the traversals of the completed tree we perform are known to visit the
* leaves in the same order as the original parse.
*
* In a completed parse tree, those OpNodes that are themselves (roots of
* subexpression trees that are) operands of some operator store in their
* p.parent field a "pointer" to the OpNode of that operator. The p.parent
* field permits a traversal of the tree within a non-recursive routine
* (ConvertTreeToTokens() and CompileExprTree()). This means that even
* expression trees of great depth pose no risk of blowing the C stack.
*
* While the parse tree is being constructed, the same memory space is used to
* hold the p.prev field which chains together a stack of incomplete trees
* awaiting their right operands.
*
* The lexeme field is filled in with the lexeme of the operator that is
* returned by the ParseLexeme() routine. Only lexemes for unary and binary
 * operators get stored in an OpNode. Other lexemes get different treatment.
*
* The precedence field provides a place to store the precedence of the
* operator, so it need not be looked up again and again.
*
 * The mark field is used to control the traversal of the tree, so that it can
* be done non-recursively. The mark values are:
*/
enum Marks {
MARK_LEFT, /* Next step of traversal is to visit left subtree */
MARK_RIGHT, /* Next step of traversal is to visit right subtree */
MARK_PARENT /* Next step of traversal is to return to parent */
};
/*
* The constant field is a boolean flag marking which subexpressions are
* completely known at compile time, and are eligible for computing then
* rather than waiting until run time.
*/
/*
* Each lexeme belongs to one of four categories, which determine its place in
* the parse tree. We use the two high bits of the (unsigned char) value to
* store a NODE_TYPE code.
*/
#define NODE_TYPE 0xC0
/*
* The four category values are LEAF, UNARY, and BINARY, explained below, and
* "uncategorized", which is used either temporarily, until context determines
* which of the other three categories is correct, or for lexemes like
* INVALID, which aren't really lexemes at all, but indicators of a parsing
* error. Note that the codes must be distinct to distinguish categories, but
* need not take the form of a bit array.
*/
#define BINARY 0x40 /* This lexeme is a binary operator. An OpNode
* representing it should go into the parse
* tree, and two operands should be parsed for
* it in the expression. */
#define UNARY 0x80 /* This lexeme is a unary operator. An OpNode
* representing it should go into the parse
* tree, and one operand should be parsed for
* it in the expression. */
#define LEAF 0xC0 /* This lexeme is a leaf operand in the parse
* tree. No OpNode will be placed in the tree
* for it. Either a literal value will be
* appended to the list of literals in this
* expression, or appropriate Tcl_Tokens will
* be appended in a Tcl_Parse struct to
* represent those leaves that require some
* form of substitution. */
/* Uncategorized lexemes */
#define PLUS 1 /* Ambiguous. Resolves to UNARY_PLUS or
* BINARY_PLUS according to context. */
#define MINUS 2 /* Ambiguous. Resolves to UNARY_MINUS or
* BINARY_MINUS according to context. */
#define BAREWORD 3 /* Ambiguous. Resolves to BOOLEAN or to
* FUNCTION or a parse error according to
* context and value. */
#define INCOMPLETE 4 /* A parse error. Used only when the single
* "=" is encountered. */
#define INVALID 5 /* A parse error. Used when any punctuation
* appears that's not a supported operator. */
/* Leaf lexemes */
#define NUMBER (LEAF | 1)
/* For literal numbers */
#define SCRIPT (LEAF | 2)
/* Script substitution; [foo] */
#define BOOLEAN (LEAF | BAREWORD)
/* For literal booleans */
#define BRACED (LEAF | 4)
/* Braced string; {foo bar} */
#define VARIABLE (LEAF | 5)
/* Variable substitution; $x */
#define QUOTED (LEAF | 6)
/* Quoted string; "foo $bar [soom]" */
#define EMPTY (LEAF | 7)
/* Used only for an empty argument list to a
* function. Represents the empty string
* within parens in the expression: rand() */
/* Unary operator lexemes */
#define UNARY_PLUS (UNARY | PLUS)
#define UNARY_MINUS (UNARY | MINUS)
#define FUNCTION (UNARY | BAREWORD)
/* This is a bit of "creative interpretation"
* on the part of the parser. A function call
* is parsed into the parse tree according to
* the perspective that the function name is a
* unary operator and its argument list,
* enclosed in parens, is its operand. The
* additional requirements not implied
* generally by treatment as a unary operator
* -- for example, the requirement that the
* operand be enclosed in parens -- are hard
* coded in the relevant portions of
* ParseExpr(). We trade off the need to
* include such exceptional handling in the
* code against the need we would otherwise
* have for more lexeme categories. */
#define START (UNARY | 4)
/* This lexeme isn't parsed from the
* expression text at all. It represents the
* start of the expression and sits at the
* root of the parse tree where it serves as
* the start/end point of traversals. */
#define OPEN_PAREN (UNARY | 5)
/* Another bit of creative interpretation,
* where we treat "(" as a unary operator with
* the sub-expression between it and its
* matching ")" as its operand. See
* CLOSE_PAREN below. */
#define NOT (UNARY | 6)
#define BIT_NOT (UNARY | 7)
/* Binary operator lexemes */
#define BINARY_PLUS (BINARY | PLUS)
#define BINARY_MINUS (BINARY | MINUS)
#define COMMA (BINARY | 3)
/* The "," operator is a low precedence binary
* operator that separates the arguments in a
* function call. The additional constraint
* that this operator can only legally appear
* at the right places within a function call
* argument list are hard coded within
* ParseExpr(). */
#define MULT (BINARY | 4)
#define DIVIDE (BINARY | 5)
#define MOD (BINARY | 6)
#define LESS (BINARY | 7)
#define GREATER (BINARY | 8)
#define BIT_AND (BINARY | 9)
#define BIT_XOR (BINARY | 10)
#define BIT_OR (BINARY | 11)
#define QUESTION (BINARY | 12)
/* These two lexemes make up the */
#define COLON (BINARY | 13)
/* ternary conditional operator, $x ? $y : $z.
* We treat them as two binary operators to
* avoid another lexeme category, and code the
* additional constraints directly in
* ParseExpr(). For instance, the right
* operand of a "?" operator must be a ":"
* operator. */
#define LEFT_SHIFT (BINARY | 14)
#define RIGHT_SHIFT (BINARY | 15)
#define LEQ (BINARY | 16)
#define GEQ (BINARY | 17)
#define EQUAL (BINARY | 18)
#define NEQ (BINARY | 19)
#define AND (BINARY | 20)
#define OR (BINARY | 21)
#define STREQ (BINARY | 22)
#define STRNEQ (BINARY | 23)
#define EXPON (BINARY | 24)
/* Unlike the other binary operators, EXPON is
* right associative and this distinction is
* coded directly in ParseExpr(). */
#define IN_LIST (BINARY | 25)
#define NOT_IN_LIST (BINARY | 26)
#define CLOSE_PAREN (BINARY | 27)
/* By categorizing the CLOSE_PAREN lexeme as a
* BINARY operator, the normal parsing rules
* for binary operators assure that a close
* paren will not directly follow another
* operator, and the machinery already in
* place to connect operands to operators
* according to precedence performs most of
* the work of matching open and close parens
* for us. In the end though, a close paren is
* not really a binary operator, and some
 * special coding in ParseExpr() makes sure we
* never put an actual CLOSE_PAREN node in the
* parse tree. The sub-expression between
* parens becomes the single argument of the
* matching OPEN_PAREN unary operator. */
#define STR_LT (BINARY | 28)
#define STR_GT (BINARY | 29)
#define STR_LEQ (BINARY | 30)
#define STR_GEQ (BINARY | 31)
#define END (BINARY | 32)
/* This lexeme represents the end of the
* string being parsed. Treating it as a
* binary operator follows the same logic as
* the CLOSE_PAREN lexeme and END pairs with
* START, in the same way that CLOSE_PAREN
* pairs with OPEN_PAREN. */
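/*
 * To make the encoding concrete: BINARY_PLUS is (BINARY | PLUS), that is
 * 0x40 | 1 == 0x41, so (NODE_TYPE & BINARY_PLUS) == BINARY recovers its
 * category while the low bits distinguish it from, say, BINARY_MINUS (0x42).
 * The same masking underlies the parser's "(NODE_TYPE & lexeme) == 0" test,
 * which detects the uncategorized lexemes PLUS, MINUS, BAREWORD, INCOMPLETE
 * and INVALID, whose values have no category bits set.
 */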
/*
* When ParseExpr() builds the parse tree it must choose which operands to
* connect to which operators. This is done according to operator precedence.
* The greater an operator's precedence the greater claim it has to link to an
* available operand. The Precedence enumeration lists the precedence values
* used by Tcl expression operators, from lowest to highest claim. Each
* precedence level is commented with the operators that hold that precedence.
*/
enum Precedence {
PREC_END = 1, /* END */
PREC_START, /* START */
PREC_CLOSE_PAREN, /* ")" */
PREC_OPEN_PAREN, /* "(" */
PREC_COMMA, /* "," */
PREC_CONDITIONAL, /* "?", ":" */
PREC_OR, /* "||" */
PREC_AND, /* "&&" */
PREC_BIT_OR, /* "|" */
PREC_BIT_XOR, /* "^" */
PREC_BIT_AND, /* "&" */
PREC_EQUAL, /* "==", "!=", "eq", "ne", "in", "ni" */
PREC_COMPARE, /* "<", ">", "<=", ">=" */
PREC_SHIFT, /* "<<", ">>" */
PREC_ADD, /* "+", "-" */
PREC_MULT, /* "*", "/", "%" */
PREC_EXPON, /* "**" */
PREC_UNARY /* "+", "-", FUNCTION, "!", "~" */
};
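/*
 * For example, in the expression "1+2*3" the "*" operator carries PREC_MULT
 * and the "+" carries PREC_ADD. Since PREC_MULT > PREC_ADD, the "*" wins the
 * claim to the operands "2" and "3", and the resulting subtree then becomes
 * the right operand of "+". The main parsing loop below performs exactly
 * this comparison when deciding whether to complete an incomplete tree.
 */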
/*
* Here the same information contained in the comments above is stored in
* inverted form, so that given a lexeme, one can quickly look up its
* precedence value.
*/
static const unsigned char prec[] = {
/* Non-operator lexemes */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0,
/* Binary operator lexemes */
PREC_ADD, /* BINARY_PLUS */
PREC_ADD, /* BINARY_MINUS */
PREC_COMMA, /* COMMA */
PREC_MULT, /* MULT */
PREC_MULT, /* DIVIDE */
PREC_MULT, /* MOD */
PREC_COMPARE, /* LESS */
PREC_COMPARE, /* GREATER */
PREC_BIT_AND, /* BIT_AND */
PREC_BIT_XOR, /* BIT_XOR */
PREC_BIT_OR, /* BIT_OR */
PREC_CONDITIONAL, /* QUESTION */
PREC_CONDITIONAL, /* COLON */
PREC_SHIFT, /* LEFT_SHIFT */
PREC_SHIFT, /* RIGHT_SHIFT */
PREC_COMPARE, /* LEQ */
PREC_COMPARE, /* GEQ */
PREC_EQUAL, /* EQUAL */
PREC_EQUAL, /* NEQ */
PREC_AND, /* AND */
PREC_OR, /* OR */
PREC_EQUAL, /* STREQ */
PREC_EQUAL, /* STRNEQ */
PREC_EXPON, /* EXPON */
PREC_EQUAL, /* IN_LIST */
PREC_EQUAL, /* NOT_IN_LIST */
PREC_CLOSE_PAREN, /* CLOSE_PAREN */
PREC_COMPARE, /* STR_LT */
PREC_COMPARE, /* STR_GT */
PREC_COMPARE, /* STR_LEQ */
PREC_COMPARE, /* STR_GEQ */
PREC_END, /* END */
/* Expansion room for more binary operators */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* Unary operator lexemes */
PREC_UNARY, /* UNARY_PLUS */
PREC_UNARY, /* UNARY_MINUS */
PREC_UNARY, /* FUNCTION */
PREC_START, /* START */
PREC_OPEN_PAREN, /* OPEN_PAREN */
PREC_UNARY, /* NOT*/
PREC_UNARY, /* BIT_NOT*/
};
/*
* A table mapping lexemes to bytecode instructions, used by CompileExprTree().
*/
static const unsigned char instruction[] = {
/* Non-operator lexemes */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0,
/* Binary operator lexemes */
INST_ADD, /* BINARY_PLUS */
INST_SUB, /* BINARY_MINUS */
0, /* COMMA */
INST_MULT, /* MULT */
INST_DIV, /* DIVIDE */
INST_MOD, /* MOD */
INST_LT, /* LESS */
INST_GT, /* GREATER */
INST_BITAND, /* BIT_AND */
INST_BITXOR, /* BIT_XOR */
INST_BITOR, /* BIT_OR */
0, /* QUESTION */
0, /* COLON */
INST_LSHIFT, /* LEFT_SHIFT */
INST_RSHIFT, /* RIGHT_SHIFT */
INST_LE, /* LEQ */
INST_GE, /* GEQ */
INST_EQ, /* EQUAL */
INST_NEQ, /* NEQ */
0, /* AND */
0, /* OR */
INST_STR_EQ, /* STREQ */
INST_STR_NEQ, /* STRNEQ */
INST_EXPON, /* EXPON */
INST_LIST_IN, /* IN_LIST */
INST_LIST_NOT_IN, /* NOT_IN_LIST */
0, /* CLOSE_PAREN */
INST_STR_LT, /* STR_LT */
INST_STR_GT, /* STR_GT */
INST_STR_LE, /* STR_LEQ */
INST_STR_GE, /* STR_GEQ */
0, /* END */
/* Expansion room for more binary operators */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* Unary operator lexemes */
INST_UPLUS, /* UNARY_PLUS */
INST_UMINUS, /* UNARY_MINUS */
0, /* FUNCTION */
0, /* START */
0, /* OPEN_PAREN */
INST_LNOT, /* NOT*/
INST_BITNOT, /* BIT_NOT*/
};
/*
* A table mapping a byte value to the corresponding lexeme for use by
* ParseLexeme().
*/
static const unsigned char Lexeme[] = {
INVALID /* NUL */, INVALID /* SOH */,
INVALID /* STX */, INVALID /* ETX */,
INVALID /* EOT */, INVALID /* ENQ */,
INVALID /* ACK */, INVALID /* BEL */,
INVALID /* BS */, INVALID /* HT */,
INVALID /* LF */, INVALID /* VT */,
INVALID /* FF */, INVALID /* CR */,
INVALID /* SO */, INVALID /* SI */,
INVALID /* DLE */, INVALID /* DC1 */,
INVALID /* DC2 */, INVALID /* DC3 */,
INVALID /* DC4 */, INVALID /* NAK */,
INVALID /* SYN */, INVALID /* ETB */,
INVALID /* CAN */, INVALID /* EM */,
INVALID /* SUB */, INVALID /* ESC */,
INVALID /* FS */, INVALID /* GS */,
INVALID /* RS */, INVALID /* US */,
INVALID /* SPACE */, 0 /* ! or != */,
QUOTED /* " */, INVALID /* # */,
VARIABLE /* $ */, MOD /* % */,
0 /* & or && */, INVALID /* ' */,
OPEN_PAREN /* ( */, CLOSE_PAREN /* ) */,
0 /* * or ** */, PLUS /* + */,
COMMA /* , */, MINUS /* - */,
0 /* . */, DIVIDE /* / */,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0-9 */
COLON /* : */, INVALID /* ; */,
0 /* < or << or <= */,
0 /* == or INVALID */,
0 /* > or >> or >= */,
QUESTION /* ? */, INVALID /* @ */,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* A-M */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* N-Z */
SCRIPT /* [ */, INVALID /* \ */,
INVALID /* ] */, BIT_XOR /* ^ */,
INVALID /* _ */, INVALID /* ` */,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a-m */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* n-z */
BRACED /* { */, 0 /* | or || */,
INVALID /* } */, BIT_NOT /* ~ */,
INVALID /* DEL */
};
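/*
 * A few sample lookups into the table above: Lexeme['$'] is VARIABLE,
 * Lexeme['('] is OPEN_PAREN, and Lexeme['~'] is BIT_NOT. The entries that
 * hold 0 (for example '<', '!', '*', the digits and the letters) mark bytes
 * for which one character is not enough to decide; ParseLexeme() must look
 * at the following bytes to tell, for instance, "<" from "<<" and "<=".
 */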
/*
* The JumpList struct is used to create a stack of data needed for the
* TclEmitForwardJump() and TclFixupForwardJump() calls that are performed
* when compiling the short-circuiting operators QUESTION/COLON, AND, and OR.
* Keeping a stack permits the CompileExprTree() routine to be non-recursive.
*/
typedef struct JumpList {
JumpFixup jump; /* Pass this argument to matching calls of
* TclEmitForwardJump() and
* TclFixupForwardJump(). */
struct JumpList *next; /* Point to next item on the stack */
} JumpList;
/*
* Declarations for local functions to this file:
*/
static void CompileExprTree(Tcl_Interp *interp, OpNode *nodes,
int index, Tcl_Obj *const **litObjvPtr,
Tcl_Obj *const *funcObjv, Tcl_Token *tokenPtr,
CompileEnv *envPtr, int optimize);
static void ConvertTreeToTokens(const char *start, size_t numBytes,
OpNode *nodes, Tcl_Token *tokenPtr,
Tcl_Parse *parsePtr);
static int ExecConstantExprTree(Tcl_Interp *interp, OpNode *nodes,
int index, Tcl_Obj * const **litObjvPtr);
static int ParseExpr(Tcl_Interp *interp, const char *start,
size_t numBytes, OpNode **opTreePtr,
Tcl_Obj *litList, Tcl_Obj *funcList,
Tcl_Parse *parsePtr, int parseOnly);
static size_t ParseLexeme(const char *start, size_t numBytes,
unsigned char *lexemePtr, Tcl_Obj **literalPtr);
/*
*----------------------------------------------------------------------
*
* ParseExpr --
*
* Given a string, the numBytes bytes starting at start, this function
* parses it as a Tcl expression and constructs a tree representing the
* structure of the expression. The caller must pass in empty lists as
* the funcList and litList arguments. The elements of the parsed
* expression are returned to the caller as that tree, a list of literal
* values, a list of function names, and in Tcl_Tokens added to a
* Tcl_Parse struct passed in by the caller.
*
* Results:
* If the string is successfully parsed as a valid Tcl expression, TCL_OK
* is returned, and data about the expression structure is written to the
* last four arguments. If the string cannot be parsed as a valid Tcl
* expression, TCL_ERROR is returned, and if interp is non-NULL, an error
* message is written to interp.
*
* Side effects:
* Memory will be allocated. If TCL_OK is returned, the caller must clean
* up the returned data structures. The (OpNode *) value written to
* opTreePtr should be passed to Tcl_Free() and the parsePtr argument
* should be passed to Tcl_FreeParse(). The elements appended to the
* litList and funcList will automatically be freed whenever the refcount
* on those lists indicates they can be freed.
*
*----------------------------------------------------------------------
*/
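/*
 * As an illustrative sketch of the outputs (not produced by running the
 * code): parsing "1+2*3" appends the literals 1, 2 and 3 to litList, leaves
 * funcList empty, and builds an OpNode tree rooted at START whose right
 * operand is a BINARY_PLUS node with an OT_LITERAL left operand and a MULT
 * subtree (two OT_LITERAL operands) as its right operand.
 */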
static int
ParseExpr(
Tcl_Interp *interp, /* Used for error reporting. */
const char *start, /* Start of source string to parse. */
size_t numBytes, /* Number of bytes in string. */
OpNode **opTreePtr, /* Points to space where a pointer to the
* allocated OpNode tree should go. */
Tcl_Obj *litList, /* List to append literals to. */
Tcl_Obj *funcList, /* List to append function names to. */
Tcl_Parse *parsePtr, /* Structure to fill with tokens representing
* those operands that require run time
* substitutions. */
int parseOnly) /* A boolean indicating whether the caller's
* aim is just a parse, or whether it will go
* on to compile the expression. Different
* optimizations are appropriate for the two
* scenarios. */
{
OpNode *nodes = NULL; /* Pointer to the OpNode storage array where
* we build the parse tree. */
    unsigned int nodesAvailable = 64; /* Initial size of the storage array. This
* value establishes a minimum tree memory
* cost of only about 1 kibyte, and is large
* enough for most expressions to parse with
* no need for array growth and
* reallocation. */
    unsigned int nodesUsed = 0; /* Number of OpNodes filled. */
    size_t scanned = 0; /* Capture number of bytes scanned by parsing
* routines. */
int lastParsed; /* Stores info about what the lexeme parsed
* the previous pass through the parsing loop
* was. If it was an operator, lastParsed is
* the index of the OpNode for that operator.
* If it was not an operator, lastParsed holds
* an OperandTypes value encoding what we need
* to know about it. */
int incomplete; /* Index of the most recent incomplete tree in
* the OpNode array. Heads a stack of
* incomplete trees linked by p.prev. */
int complete = OT_EMPTY; /* "Index" of the complete tree (that is, a
* complete subexpression) determined at the
* moment. OT_EMPTY is a nonsense value used
* only to silence compiler warnings. During a
* parse, complete will always hold an index
* or an OperandTypes value pointing to an
* actual leaf at the time the complete tree
* is needed. */
/*
* These variables control generation of the error message.
*/
Tcl_Obj *msg = NULL; /* The error message. */
Tcl_Obj *post = NULL; /* In a few cases, an additional postscript
* for the error message, supplying more
* information after the error msg and
* location have been reported. */
const char *errCode = NULL; /* The detail word of the errorCode list, or
* NULL to indicate that no changes to the
* errorCode are to be done. */
const char *subErrCode = NULL;
/* Extra information for use in generating the
* errorCode. */
const char *mark = "_@_"; /* In the portion of the complete error
* message where the error location is
* reported, this "mark" substring is inserted
* into the string being parsed to aid in
* pinpointing the location of the syntax
* error in the expression. */
int insertMark = 0; /* A boolean controlling whether the "mark"
* should be inserted. */
const unsigned limit = 25; /* Portions of the error message are
* constructed out of substrings of the
* original expression. In order to keep the
* error message readable, we impose this
* limit on the substring size we extract. */
TclParseInit(interp, start, numBytes, parsePtr);
nodes = (OpNode *)Tcl_AttemptAlloc(nodesAvailable * sizeof(OpNode));
if (nodes == NULL) {
TclNewLiteralStringObj(msg, "not enough memory to parse expression");
errCode = "NOMEM";
goto error;
}
/*
* Initialize the parse tree with the special "START" node.
*/
nodes->lexeme = START;
nodes->precedence = prec[START];
nodes->mark = MARK_RIGHT;
nodes->constant = 1;
incomplete = lastParsed = nodesUsed;
nodesUsed++;
/*
* Main parsing loop parses one lexeme per iteration. We exit the loop
* only when there's a syntax error with a "goto error" which takes us to
* the error handling code following the loop, or when we've successfully
* completed the parse and we return to the caller.
*/
while (1) {
OpNode *nodePtr; /* Points to the OpNode we may fill this pass
* through the loop. */
unsigned char lexeme; /* The lexeme we parse this iteration. */
Tcl_Obj *literal; /* Filled by the ParseLexeme() call when a
* literal is parsed that has a Tcl_Obj rep
* worth preserving. */
/*
* Each pass through this loop adds up to one more OpNode. Allocate
* space for one if required.
*/
if (nodesUsed >= nodesAvailable) {
            unsigned int size = nodesUsed * 2;
OpNode *newPtr = NULL;
do {
if (size <= UINT_MAX/sizeof(OpNode)) {
newPtr = (OpNode *)Tcl_AttemptRealloc(nodes, size * sizeof(OpNode));
}
} while ((newPtr == NULL)
&& ((size -= (size - nodesUsed) / 2) > nodesUsed));
if (newPtr == NULL) {
TclNewLiteralStringObj(msg,
"not enough memory to parse expression");
errCode = "NOMEM";
goto error;
}
nodesAvailable = size;
nodes = newPtr;
}
nodePtr = nodes + nodesUsed;
/*
* Skip white space between lexemes.
*/
scanned = TclParseAllWhiteSpace(start, numBytes);
start += scanned;
numBytes -= scanned;
scanned = ParseLexeme(start, numBytes, &lexeme, &literal);
/*
* Use context to categorize the lexemes that are ambiguous.
*/
if ((NODE_TYPE & lexeme) == 0) {
int b;
switch (lexeme) {
case INVALID:
msg = Tcl_ObjPrintf("invalid character \"%.*s\"",
(int)scanned, start);
errCode = "BADCHAR";
goto error;
case INCOMPLETE:
msg = Tcl_ObjPrintf("incomplete operator \"%.*s\"",
(int)scanned, start);
errCode = "PARTOP";
goto error;
case BAREWORD:
/*
* Most barewords in an expression are a syntax error. The
* exceptions are that when a bareword is followed by an open
* paren, it might be a function call, and when the bareword
* is a legal literal boolean value, we accept that as well.
*/
if (start[scanned+TclParseAllWhiteSpace(
start+scanned, numBytes-scanned)] == '(') {
lexeme = FUNCTION;
/*
* When we compile the expression we'll need the function
* name, and there's no place in the parse tree to store
* it, so we keep a separate list of all the function
* names we've parsed in the order we found them.
*/
Tcl_ListObjAppendElement(NULL, funcList, literal);
} else if (Tcl_GetBooleanFromObj(NULL,literal,&b) == TCL_OK) {
lexeme = BOOLEAN;
} else {
Tcl_DecrRefCount(literal);
msg = Tcl_ObjPrintf("invalid bareword \"%.*s%s\"",
(scanned < limit) ? (int)scanned : (int)limit - 3, start,
(scanned < limit) ? "" : "...");
post = Tcl_ObjPrintf(
"should be \"$%.*s%s\" or \"{%.*s%s}\"",
(scanned < limit) ? (int)scanned : (int)limit - 3,
start, (scanned < limit) ? "" : "...",
(scanned < limit) ? (int)scanned : (int)limit - 3,
start, (scanned < limit) ? "" : "...");
Tcl_AppendPrintfToObj(post, " or \"%.*s%s(...)\" or ...",
(scanned < limit) ? (int)scanned : (int)limit - 3,
start, (scanned < limit) ? "" : "...");
errCode = "BAREWORD";
if (start[0] == '0') {
const char *stop;
TclParseNumber(NULL, NULL, NULL, start, scanned,
&stop, TCL_PARSE_NO_WHITESPACE);
if (isdigit(UCHAR(*stop)) || (stop == start + 1)) {
switch (start[1]) {
case 'b':
Tcl_AppendToObj(post,
" (invalid binary number?)", -1);
parsePtr->errorType = TCL_PARSE_BAD_NUMBER;
errCode = "BADNUMBER";
subErrCode = "BINARY";
break;
case 'o':
Tcl_AppendToObj(post,
" (invalid octal number?)", -1);
parsePtr->errorType = TCL_PARSE_BAD_NUMBER;
errCode = "BADNUMBER";
subErrCode = "OCTAL";
break;
default:
if (isdigit(UCHAR(start[1]))) {
Tcl_AppendToObj(post,
" (invalid octal number?)", -1);
parsePtr->errorType = TCL_PARSE_BAD_NUMBER;
errCode = "BADNUMBER";
subErrCode = "OCTAL";
}
break;
}
}
}
goto error;
}
break;
case PLUS:
case MINUS:
if (IsOperator(lastParsed)) {
/*
* A "+" or "-" coming just after another operator must be
* interpreted as a unary operator.
*/
lexeme |= UNARY;
} else {
lexeme |= BINARY;
}
}
} /* Uncategorized lexemes */
/*
* Handle lexeme based on its category.
*/
switch (NODE_TYPE & lexeme) {
case LEAF: {
/*
* Each LEAF results in either a literal getting appended to the
* litList, or a sequence of Tcl_Tokens representing a Tcl word
* getting appended to the parsePtr->tokens. No OpNode is filled
* for this lexeme.
*/
Tcl_Token *tokenPtr;
const char *end = start;
int wordIndex;
int code = TCL_OK;
/*
* A leaf operand appearing just after something that's not an
* operator is a syntax error.
*/
if (NotOperator(lastParsed)) {
msg = Tcl_ObjPrintf("missing operator at %s", mark);
errCode = "MISSING";
scanned = 0;
insertMark = 1;
/*
* Free any literal to avoid a memleak.
*/
if ((lexeme == NUMBER) || (lexeme == BOOLEAN)) {
Tcl_DecrRefCount(literal);
}
goto error;
}
switch (lexeme) {
case NUMBER:
case BOOLEAN:
/*
* TODO: Consider using a dict or hash to collapse all
* duplicate literals into a single representative value.
* (Like what is done with [split $s {}]).
* Pro: ~75% memory saving on expressions like
* {1+1+1+1+1+.....+1} (Convert "pointer + Tcl_Obj" cost
* to "pointer" cost only)
* Con: Cost of the dict store/retrieve on every literal in
* every expression when expressions like the above tend
* to be uncommon.
* The memory savings is temporary; Compiling to bytecode
* will collapse things as literals are registered
* anyway, so the savings applies only to the time
* between parsing and compiling. Possibly important due
* to high-water mark nature of memory allocation.
*/
Tcl_ListObjAppendElement(NULL, litList, literal);
complete = lastParsed = OT_LITERAL;
start += scanned;
numBytes -= scanned;
continue;
default:
break;
}
/*
* Remaining LEAF cases may involve filling Tcl_Tokens, so make
* room for at least 2 more tokens.
*/
TclGrowParseTokenArray(parsePtr, 2);
wordIndex = parsePtr->numTokens;
tokenPtr = parsePtr->tokenPtr + wordIndex;
tokenPtr->type = TCL_TOKEN_WORD;
tokenPtr->start = start;
parsePtr->numTokens++;
switch (lexeme) {
case QUOTED:
code = Tcl_ParseQuotedString(NULL, start, numBytes,
parsePtr, 1, &end);
scanned = end - start;
break;
case BRACED:
code = Tcl_ParseBraces(NULL, start, numBytes,
parsePtr, 1, &end);
scanned = end - start;
break;
case VARIABLE:
code = Tcl_ParseVarName(NULL, start, numBytes, parsePtr, 1);
/*
* Handle the quirk that Tcl_ParseVarName reports a successful
* parse even when it gets only a "$" with no variable name.
*/
tokenPtr = parsePtr->tokenPtr + wordIndex + 1;
if (code == TCL_OK && tokenPtr->type != TCL_TOKEN_VARIABLE) {
TclNewLiteralStringObj(msg, "invalid character \"$\"");
errCode = "BADCHAR";
goto error;
}
scanned = tokenPtr->size;
break;
case SCRIPT: {
Tcl_Parse *nestedPtr = (Tcl_Parse *)
TclStackAlloc(interp, sizeof(Tcl_Parse));
tokenPtr = parsePtr->tokenPtr + parsePtr->numTokens;
tokenPtr->type = TCL_TOKEN_COMMAND;
tokenPtr->start = start;
tokenPtr->numComponents = 0;
end = start + numBytes;
start++;
while (1) {
code = Tcl_ParseCommand(interp, start, end - start, 1,
nestedPtr);
if (code != TCL_OK) {
parsePtr->term = nestedPtr->term;
parsePtr->errorType = nestedPtr->errorType;
parsePtr->incomplete = nestedPtr->incomplete;
break;
}
start = nestedPtr->commandStart + nestedPtr->commandSize;
Tcl_FreeParse(nestedPtr);
if ((nestedPtr->term < end) && (nestedPtr->term[0] == ']')
&& !nestedPtr->incomplete) {
break;
}
if (start == end) {
TclNewLiteralStringObj(msg, "missing close-bracket");
parsePtr->term = tokenPtr->start;
parsePtr->errorType = TCL_PARSE_MISSING_BRACKET;
parsePtr->incomplete = 1;
code = TCL_ERROR;
errCode = "UNBALANCED";
break;
}
}
TclStackFree(interp, nestedPtr);
end = start;
start = tokenPtr->start;
scanned = end - start;
tokenPtr->size = scanned;
parsePtr->numTokens++;
break;
} /* SCRIPT case */
}
if (code != TCL_OK) {
/*
* Here we handle all the syntax errors generated by the
* Tcl_Token generating parsing routines called in the switch
* just above. If the value of parsePtr->incomplete is 1, then
* the error was an unbalanced '[', '(', '{', or '"' and
* parsePtr->term is pointing to that unbalanced character. If
* the value of parsePtr->incomplete is 0, then the error is
* one of lacking whitespace following a quoted word, for
* example: expr {[an error {foo}bar]}, and parsePtr->term
* points to where the whitespace is missing. We reset our
* values of start and scanned so that when our error message
* is constructed, the location of the syntax error is sure to
* appear in it, even if the quoted expression is truncated.
*/
start = parsePtr->term;
scanned = parsePtr->incomplete;
if (parsePtr->incomplete) {
errCode = "UNBALANCED";
}
goto error;
}
tokenPtr = parsePtr->tokenPtr + wordIndex;
tokenPtr->size = scanned;
tokenPtr->numComponents = parsePtr->numTokens - wordIndex - 1;
if (!parseOnly && ((lexeme == QUOTED) || (lexeme == BRACED))) {
/*
* When this expression is destined to be compiled, and a
* braced or quoted word within an expression is known at
* compile time (no runtime substitutions in it), we can store
* it as a literal rather than in its tokenized form. This is
* an advantage since the compiled bytecode is going to need
* the argument in Tcl_Obj form eventually, so it's just as
* well to get there now. Another advantage is that with this
* conversion, larger constant expressions might be grown and
* optimized.
*
* On the contrary, if the end goal of this parse is to fill a
* Tcl_Parse for a caller of Tcl_ParseExpr(), then it's
* wasteful to convert to a literal only to convert back again
* later.
*/
TclNewObj(literal);
if (TclWordKnownAtCompileTime(tokenPtr, literal)) {
Tcl_ListObjAppendElement(NULL, litList, literal);
complete = lastParsed = OT_LITERAL;
parsePtr->numTokens = wordIndex;
break;
}
Tcl_DecrRefCount(literal);
}
complete = lastParsed = OT_TOKENS;
break;
} /* case LEAF */
case UNARY:
/*
* A unary operator appearing just after something that's not an
* operator is a syntax error -- something trying to be the left
* operand of an operator that doesn't take one.
*/
if (NotOperator(lastParsed)) {
msg = Tcl_ObjPrintf("missing operator at %s", mark);
scanned = 0;
insertMark = 1;
errCode = "MISSING";
goto error;
}
/*
* Create an OpNode for the unary operator.
*/
nodePtr->lexeme = lexeme;
nodePtr->precedence = prec[lexeme];
nodePtr->mark = MARK_RIGHT;
/*
* A FUNCTION cannot be a constant expression, because Tcl allows
* functions to return variable results with the same arguments;
* for example, rand(). Other unary operators can root a constant
* expression, so long as the argument is a constant expression.
*/
nodePtr->constant = (lexeme != FUNCTION);
/*
* This unary operator is a new incomplete tree, so push it onto
* our stack of incomplete trees. Also remember it as the last
* lexeme we parsed.
*/
nodePtr->p.prev = incomplete;
incomplete = lastParsed = nodesUsed;
nodesUsed++;
break;
case BINARY: {
OpNode *incompletePtr;
unsigned char precedence = prec[lexeme];
/*
* A binary operator appearing just after another operator is a
* syntax error -- one of the two operators is missing an operand.
*/
if (IsOperator(lastParsed)) {
if ((lexeme == CLOSE_PAREN)
&& (nodePtr[-1].lexeme == OPEN_PAREN)) {
if (nodePtr[-2].lexeme == FUNCTION) {
/*
* Normally, "()" is a syntax error, but as a special
* case accept it as an argument list for a function.
* Treat this as a special LEAF lexeme, and restart
* the parsing loop with zero characters scanned. We
* will parse the ")" again the next time through, but
* with the OT_EMPTY leaf as the subexpression between
* the parens.
*/
scanned = 0;
complete = lastParsed = OT_EMPTY;
break;
}
msg = Tcl_ObjPrintf("empty subexpression at %s", mark);
scanned = 0;
insertMark = 1;
errCode = "EMPTY";
goto error;
}
if (nodePtr[-1].precedence > precedence) {
if (nodePtr[-1].lexeme == OPEN_PAREN) {
TclNewLiteralStringObj(msg, "unbalanced open paren");
parsePtr->errorType = TCL_PARSE_MISSING_PAREN;
errCode = "UNBALANCED";
} else if (nodePtr[-1].lexeme == COMMA) {
msg = Tcl_ObjPrintf(
"missing function argument at %s", mark);
scanned = 0;
insertMark = 1;
errCode = "MISSING";
} else if (nodePtr[-1].lexeme == START) {
TclNewLiteralStringObj(msg, "empty expression");
errCode = "EMPTY";
}
} else if (lexeme == CLOSE_PAREN) {
TclNewLiteralStringObj(msg, "unbalanced close paren");
errCode = "UNBALANCED";
} else if ((lexeme == COMMA)
&& (nodePtr[-1].lexeme == OPEN_PAREN)
&& (nodePtr[-2].lexeme == FUNCTION)) {
msg = Tcl_ObjPrintf("missing function argument at %s",
mark);
scanned = 0;
insertMark = 1;
errCode = "UNBALANCED";
}
if (msg == NULL) {
msg = Tcl_ObjPrintf("missing operand at %s", mark);
scanned = 0;
insertMark = 1;
errCode = "MISSING";
}
goto error;
}
/*
* Here is where the tree comes together. At this point, we have a
* stack of incomplete trees corresponding to substrings that are
* incomplete expressions, followed by a complete tree
* corresponding to a substring that is itself a complete
* expression, followed by the binary operator we have just
* parsed. The incomplete trees can each be completed by adding a
* right operand.
*
* To illustrate with an example, when we parse the expression
* "1+2*3-4" and we reach this point having just parsed the "-"
* operator, we have these incomplete trees: START, "1+", and
* "2*". Next we have the complete subexpression "3". Last is the
* "-" we've just parsed.
*
* The next step is to join our complete tree to an operator. The
* choice is governed by the precedence and associativity of the
* competing operators. If we connect it as the right operand of
* our most recent incomplete tree, we get a new complete tree,
* and we can repeat the process. The while loop following repeats
* this until precedence indicates it is time to join the complete
* tree as the left operand of the just parsed binary operator.
*
* Continuing the example, the first pass through the loop will
* join "3" to "2*"; the next pass will join "2*3" to "1+". Then
* we'll exit the loop and join "1+2*3" to "-". When we return to
* parse another lexeme, our stack of incomplete trees is START
* and "1+2*3-".
*/
while (1) {
incompletePtr = nodes + incomplete;
if (incompletePtr->precedence < precedence) {
break;
}
if (incompletePtr->precedence == precedence) {
/*
* Right association rules for exponentiation.
*/
if (lexeme == EXPON) {
break;
}
/*
* Special association rules for the conditional
* operators. The "?" and ":" operators have equal
* precedence, but must be linked up in sensible pairs.
*/
if ((incompletePtr->lexeme == QUESTION)
&& (NotOperator(complete)
|| (nodes[complete].lexeme != COLON))) {
break;
}
if ((incompletePtr->lexeme == COLON)
&& (lexeme == QUESTION)) {
break;
}
}
/*
* Some special syntax checks...
*/
/* Parens must balance */
if ((incompletePtr->lexeme == OPEN_PAREN)
&& (lexeme != CLOSE_PAREN)) {
TclNewLiteralStringObj(msg, "unbalanced open paren");
parsePtr->errorType = TCL_PARSE_MISSING_PAREN;
errCode = "UNBALANCED";
goto error;
}
/* Right operand of "?" must be ":" */
if ((incompletePtr->lexeme == QUESTION)
&& (NotOperator(complete)
|| (nodes[complete].lexeme != COLON))) {
msg = Tcl_ObjPrintf("missing operator \":\" at %s", mark);
scanned = 0;
insertMark = 1;
errCode = "MISSING";
goto error;
}
/* Operator ":" may only be right operand of "?" */
if (IsOperator(complete)
&& (nodes[complete].lexeme == COLON)
&& (incompletePtr->lexeme != QUESTION)) {
TclNewLiteralStringObj(msg,
"unexpected operator \":\" "
"without preceding \"?\"");
errCode = "SURPRISE";
goto error;
}
/*
* Attach complete tree as right operand of most recent
* incomplete tree.
*/
incompletePtr->right = complete;
if (IsOperator(complete)) {
nodes[complete].p.parent = incomplete;
incompletePtr->constant = incompletePtr->constant
&& nodes[complete].constant;
} else {
incompletePtr->constant = incompletePtr->constant
&& (complete == OT_LITERAL);
}
/*
* The QUESTION/COLON and FUNCTION/OPEN_PAREN combinations
* each make up a single operator. Force them to agree whether
* they have a constant expression.
*/
if ((incompletePtr->lexeme == QUESTION)
|| (incompletePtr->lexeme == FUNCTION)) {
nodes[complete].constant = incompletePtr->constant;
}
if (incompletePtr->lexeme == START) {
/*
* Completing the START tree indicates we're done.
* Transfer the parse tree to the caller and return.
*/
*opTreePtr = nodes;
return TCL_OK;
}
/*
* With a right operand attached, last incomplete tree has
* become the complete tree. Pop it from the incomplete tree
* stack.
*/
complete = incomplete;
incomplete = incompletePtr->p.prev;
/* CLOSE_PAREN can only close one OPEN_PAREN. */
if (incompletePtr->lexeme == OPEN_PAREN) {
break;
}
}
/*
* More syntax checks...
*/
/* Parens must balance. */
if (lexeme == CLOSE_PAREN) {
if (incompletePtr->lexeme != OPEN_PAREN) {
TclNewLiteralStringObj(msg, "unbalanced close paren");
errCode = "UNBALANCED";
goto error;
}
}
/* Commas must appear only in function argument lists. */
if (lexeme == COMMA) {
if ((incompletePtr->lexeme != OPEN_PAREN)
|| (incompletePtr[-1].lexeme != FUNCTION)) {
TclNewLiteralStringObj(msg,
"unexpected \",\" outside function argument list");
errCode = "SURPRISE";
goto error;
}
}
/* Operator ":" may only be right operand of "?" */
if (IsOperator(complete) && (nodes[complete].lexeme == COLON)) {
TclNewLiteralStringObj(msg,
"unexpected operator \":\" without preceding \"?\"");
errCode = "SURPRISE";
goto error;
}
/*
* Create no node for a CLOSE_PAREN lexeme.
*/
if (lexeme == CLOSE_PAREN) {
break;
}
/*
* Link complete tree as left operand of new node.
*/
nodePtr->lexeme = lexeme;
nodePtr->precedence = precedence;
nodePtr->mark = MARK_LEFT;
nodePtr->left = complete;
/*
* The COMMA operator cannot be optimized, since the function
* needs all of its arguments, and optimization would reduce the
* number. Other binary operators root constant expressions when
* both arguments are constant expressions.
*/
nodePtr->constant = (lexeme != COMMA);
if (IsOperator(complete)) {
nodes[complete].p.parent = nodesUsed;
nodePtr->constant = nodePtr->constant
&& nodes[complete].constant;
} else {
nodePtr->constant = nodePtr->constant
&& (complete == OT_LITERAL);
}
/*
* With a left operand attached and a right operand missing, the
* just-parsed binary operator is root of a new incomplete tree.
* Push it onto the stack of incomplete trees.
*/
nodePtr->p.prev = incomplete;
incomplete = lastParsed = nodesUsed;
nodesUsed++;
break;
} /* case BINARY */
} /* lexeme handler */
/* Advance past the just-parsed lexeme */
start += scanned;
numBytes -= scanned;
} /* main parsing loop */
/*
* We only get here if there's been an error. Any errors that didn't get a
* suitable parsePtr->errorType, get recorded as syntax errors.
*/
error:
if (parsePtr->errorType == TCL_PARSE_SUCCESS) {
parsePtr->errorType = TCL_PARSE_SYNTAX;
}
/*
* Free any partial parse tree we've built.
*/
if (nodes != NULL) {
Tcl_Free(nodes);
}
if (interp == NULL) {
/*
* Nowhere to report an error message, so just free it.
*/
if (msg) {
Tcl_DecrRefCount(msg);
}
} else {
/*
* Construct the complete error message. Start with the simple error
* message, pulled from the interp result if necessary...
*/
if (msg == NULL) {
msg = Tcl_GetObjResult(interp);
}
/*
* Add a detailed quote from the bad expression, displaying and
* sometimes marking the precise location of the syntax error.
*/
Tcl_AppendPrintfToObj(msg, "\nin expression \"%s%.*s%.*s%s%s%.*s%s\"",
((start - limit) < parsePtr->string) ? "" : "...",
((start - limit) < parsePtr->string)
? (int) (start - parsePtr->string) : (int)limit - 3,
((start - limit) < parsePtr->string)
? parsePtr->string : start - limit + 3,
(scanned < limit) ? (int)scanned : (int)limit - 3, start,
(scanned < limit) ? "" : "...", insertMark ? mark : "",
(start + scanned + limit > parsePtr->end)
? (int) (parsePtr->end - start) - (int)scanned : (int)limit-3,
start + scanned,
(start + scanned + limit > parsePtr->end) ? "" : "...");
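	/*
	 * Reading note on the format string above: it quotes a window of at
	 * most roughly "limit" bytes on each side of the failing lexeme,
	 * substituting "..." for any trimmed context and splicing in the
	 * marker string when insertMark is set, so the reported expression
	 * stays short even when the source script is long.
	 */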
/*
* Next, append any postscript message.
*/
if (post != NULL) {
Tcl_AppendToObj(msg, ";\n", -1);
Tcl_AppendObjToObj(msg, post);
Tcl_DecrRefCount(post);
}
Tcl_SetObjResult(interp, msg);
/*
* Finally, place context information in the errorInfo.
*/
numBytes = parsePtr->end - parsePtr->string;
Tcl_AppendObjToErrorInfo(interp, Tcl_ObjPrintf(
"\n (parsing expression \"%.*s%s\")",
(numBytes < limit) ? (int)numBytes : (int)limit - 3,
parsePtr->string, (numBytes < limit) ? "" : "..."));
if (errCode) {
Tcl_SetErrorCode(interp, "TCL", "PARSE", "EXPR", errCode,
subErrCode, NULL);
}
}
return TCL_ERROR;
}
/*
*----------------------------------------------------------------------
*
* ConvertTreeToTokens --
*
* Given a string, the numBytes bytes starting at start, and an OpNode
* tree and Tcl_Token array created by passing that same string to
* ParseExpr(), this function writes into *parsePtr the sequence of
 *	Tcl_Tokens needed to satisfy the historical interface provided by
* Tcl_ParseExpr(). Note that this routine exists only for the sake of
* the public Tcl_ParseExpr() routine. It is not used by Tcl itself at
* all.
*
* Results:
* None.
*
* Side effects:
* The Tcl_Parse *parsePtr is filled with Tcl_Tokens representing the
* parsed expression.
*
*----------------------------------------------------------------------
*/
static void
ConvertTreeToTokens(
const char *start,
size_t numBytes,
OpNode *nodes,
Tcl_Token *tokenPtr,
Tcl_Parse *parsePtr)
{
int subExprTokenIdx = 0;
OpNode *nodePtr = nodes;
int next = nodePtr->right;
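    /*
     * The loop below is an iterative walk of the OpNode tree.  Each node's
     * mark counts how many times it has been visited: the first visit
     * descends to the left operand, the second records where the operator
     * text lies in the source and descends to the right operand, and the
     * third fills in the subexpression token's size and component count
     * before climbing back to the parent.
     */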
while (1) {
Tcl_Token *subExprTokenPtr;
int scanned, parentIdx;
unsigned char lexeme;
/*
* Advance the mark so the next exit from this node won't retrace
* steps over ground already covered.
*/
nodePtr->mark++;
/*
* Handle next child node or leaf.
*/
switch (next) {
case OT_EMPTY:
/* No tokens and no characters for the OT_EMPTY leaf. */
break;
case OT_LITERAL:
/*
* Skip any white space that comes before the literal.
*/
scanned = TclParseAllWhiteSpace(start, numBytes);
start += scanned;
numBytes -= scanned;
/*
* Reparse the literal to get pointers into source string.
*/
scanned = ParseLexeme(start, numBytes, &lexeme, NULL);
TclGrowParseTokenArray(parsePtr, 2);
subExprTokenPtr = parsePtr->tokenPtr + parsePtr->numTokens;
subExprTokenPtr->type = TCL_TOKEN_SUB_EXPR;
subExprTokenPtr->start = start;
subExprTokenPtr->size = scanned;
subExprTokenPtr->numComponents = 1;
subExprTokenPtr[1].type = TCL_TOKEN_TEXT;
subExprTokenPtr[1].start = start;
subExprTokenPtr[1].size = scanned;
subExprTokenPtr[1].numComponents = 0;
parsePtr->numTokens += 2;
start += scanned;
numBytes -= scanned;
break;
case OT_TOKENS: {
/*
* tokenPtr points to a token sequence that came from parsing a
* Tcl word. A Tcl word is made up of a sequence of one or more
* elements. When the word is only a single element, it's been the
* historical practice to replace the TCL_TOKEN_WORD token
* directly with a TCL_TOKEN_SUB_EXPR token. However, when the
* word has multiple elements, a TCL_TOKEN_WORD token is kept as a
* grouping device so that TCL_TOKEN_SUB_EXPR always has only one
* element. Wise or not, these are the rules the Tcl expr parser
* has followed, and for the sake of those few callers of
* Tcl_ParseExpr() we do not change them now. Internally, we can
* do better.
*/
int toCopy = tokenPtr->numComponents + 1;
if (tokenPtr->numComponents == tokenPtr[1].numComponents + 1) {
/*
* Single element word. Copy tokens and convert the leading
* token to TCL_TOKEN_SUB_EXPR.
*/
TclGrowParseTokenArray(parsePtr, toCopy);
subExprTokenPtr = parsePtr->tokenPtr + parsePtr->numTokens;
memcpy(subExprTokenPtr, tokenPtr,
toCopy * sizeof(Tcl_Token));
subExprTokenPtr->type = TCL_TOKEN_SUB_EXPR;
parsePtr->numTokens += toCopy;
} else {
/*
* Multiple element word. Create a TCL_TOKEN_SUB_EXPR token to
* lead, with fields initialized from the leading token, then
* copy entire set of word tokens.
*/
TclGrowParseTokenArray(parsePtr, toCopy+1);
subExprTokenPtr = parsePtr->tokenPtr + parsePtr->numTokens;
*subExprTokenPtr = *tokenPtr;
subExprTokenPtr->type = TCL_TOKEN_SUB_EXPR;
subExprTokenPtr->numComponents++;
subExprTokenPtr++;
memcpy(subExprTokenPtr, tokenPtr,
toCopy * sizeof(Tcl_Token));
parsePtr->numTokens += toCopy + 1;
}
scanned = tokenPtr->start + tokenPtr->size - start;
start += scanned;
numBytes -= scanned;
tokenPtr += toCopy;
break;
}
default:
/*
* Advance to the child node, which is an operator.
*/
nodePtr = nodes + next;
/*
* Skip any white space that comes before the subexpression.
*/
scanned = TclParseAllWhiteSpace(start, numBytes);
start += scanned;
numBytes -= scanned;
/*
* Generate tokens for the operator / subexpression...
*/
switch (nodePtr->lexeme) {
case OPEN_PAREN:
case COMMA:
case COLON:
/*
* Historical practice has been to have no Tcl_Tokens for
* these operators.
*/
break;
default: {
/*
* Remember the index of the last subexpression we were
* working on -- that of our parent. We'll stack it later.
*/
parentIdx = subExprTokenIdx;
/*
* Verify space for the two leading Tcl_Tokens representing
* the subexpression rooted by this operator. The first
* Tcl_Token will be of type TCL_TOKEN_SUB_EXPR; the second of
* type TCL_TOKEN_OPERATOR.
*/
TclGrowParseTokenArray(parsePtr, 2);
subExprTokenIdx = parsePtr->numTokens;
subExprTokenPtr = parsePtr->tokenPtr + subExprTokenIdx;
parsePtr->numTokens += 2;
subExprTokenPtr->type = TCL_TOKEN_SUB_EXPR;
subExprTokenPtr[1].type = TCL_TOKEN_OPERATOR;
/*
* Our current position scanning the string is the starting
* point for this subexpression.
*/
subExprTokenPtr->start = start;
/*
* Eventually, we know that the numComponents field of the
* Tcl_Token of type TCL_TOKEN_OPERATOR will be 0. This means
* we can make other use of this field for now to track the
* stack of subexpressions we have pending.
*/
subExprTokenPtr[1].numComponents = parentIdx;
break;
}
}
break;
}
/* Determine which way to exit the node on this pass. */
router:
switch (nodePtr->mark) {
case MARK_LEFT:
next = nodePtr->left;
break;
case MARK_RIGHT:
next = nodePtr->right;
/*
* Skip any white space that comes before the operator.
*/
scanned = TclParseAllWhiteSpace(start, numBytes);
start += scanned;
numBytes -= scanned;
/*
* Here we scan from the string the operator corresponding to
* nodePtr->lexeme.
*/
scanned = ParseLexeme(start, numBytes, &lexeme, NULL);
switch(nodePtr->lexeme) {
case OPEN_PAREN:
case COMMA:
case COLON:
/*
* No tokens for these lexemes -> nothing to do.
*/
break;
default:
/*
* Record in the TCL_TOKEN_OPERATOR token the pointers into
* the string marking where the operator is.
*/
subExprTokenPtr = parsePtr->tokenPtr + subExprTokenIdx;
subExprTokenPtr[1].start = start;
subExprTokenPtr[1].size = scanned;
break;
}
start += scanned;
numBytes -= scanned;
break;
case MARK_PARENT:
switch (nodePtr->lexeme) {
case START:
/* When we get back to the START node, we're done. */
return;
case COMMA:
case COLON:
/* No tokens for these lexemes -> nothing to do. */
break;
case OPEN_PAREN:
/*
* Skip past matching close paren.
*/
scanned = TclParseAllWhiteSpace(start, numBytes);
start += scanned;
numBytes -= scanned;
scanned = ParseLexeme(start, numBytes, &lexeme, NULL);
start += scanned;
numBytes -= scanned;
break;
default:
/*
* Before we leave this node/operator/subexpression for the
* last time, finish up its tokens....
*
* Our current position scanning the string is where the
* substring for the subexpression ends.
*/
subExprTokenPtr = parsePtr->tokenPtr + subExprTokenIdx;
subExprTokenPtr->size = start - subExprTokenPtr->start;
/*
* All the Tcl_Tokens allocated and filled belong to
* this subexpression. The first token is the leading
* TCL_TOKEN_SUB_EXPR token, and all the rest (one fewer)
* are its components.
*/
subExprTokenPtr->numComponents =
(parsePtr->numTokens - subExprTokenIdx) - 1;
/*
* Finally, as we return up the tree to our parent, pop the
* parent subexpression off our subexpression stack, and
* fill in the zero numComponents for the operator Tcl_Token.
*/
parentIdx = subExprTokenPtr[1].numComponents;
subExprTokenPtr[1].numComponents = 0;
subExprTokenIdx = parentIdx;
break;
}
/*
* Since we're returning to parent, skip child handling code.
*/
nodePtr = nodes + nodePtr->p.parent;
goto router;
}
}
}
/*
*----------------------------------------------------------------------
*
* Tcl_ParseExpr --
*
* Given a string, the numBytes bytes starting at start, this function
* parses it as a Tcl expression and stores information about the
* structure of the expression in the Tcl_Parse struct indicated by the
* caller.
*
* Results:
* If the string is successfully parsed as a valid Tcl expression, TCL_OK
* is returned, and data about the expression structure is written to
* *parsePtr. If the string cannot be parsed as a valid Tcl expression,
* TCL_ERROR is returned, and if interp is non-NULL, an error message is
* written to interp.
*
* Side effects:
* If there is insufficient space in parsePtr to hold all the information
* about the expression, then additional space is malloc-ed. If the
* function returns TCL_OK then the caller must eventually invoke
* Tcl_FreeParse to release any additional space that was allocated.
*
*----------------------------------------------------------------------
*/
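/*
 * Typical caller sketch (illustrative only; "exprString" stands for whatever
 * expression text the caller holds):
 *
 *	Tcl_Parse parse;
 *
 *	if (Tcl_ParseExpr(interp, exprString, TCL_INDEX_NONE, &parse) == TCL_OK) {
 *	    ... walk parse.tokenPtr[0 .. parse.numTokens-1] ...
 *	    Tcl_FreeParse(&parse);
 *	}
 */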
int
Tcl_ParseExpr(
Tcl_Interp *interp, /* Used for error reporting. */
const char *start, /* Start of source string to parse. */
size_t numBytes, /* Number of bytes in string. If -1, the
* string consists of all bytes up to the
* first null character. */
Tcl_Parse *parsePtr) /* Structure to fill with information about
* the parsed expression; any previous
* information in the structure is ignored. */
{
int code;
OpNode *opTree = NULL; /* Will point to the tree of operators. */
Tcl_Obj *litList; /* List to hold the literals. */
    Tcl_Obj *funcList;		/* List to hold the function names. */
Tcl_Parse *exprParsePtr = (Tcl_Parse *)TclStackAlloc(interp, sizeof(Tcl_Parse));
/* Holds the Tcl_Tokens of substitutions. */
TclNewObj(litList);
TclNewObj(funcList);
if (numBytes == TCL_INDEX_NONE) {
numBytes = (start ? strlen(start) : 0);
}
code = ParseExpr(interp, start, numBytes, &opTree, litList, funcList,
exprParsePtr, 1 /* parseOnly */);
Tcl_DecrRefCount(funcList);
Tcl_DecrRefCount(litList);
TclParseInit(interp, start, numBytes, parsePtr);
if (code == TCL_OK) {
ConvertTreeToTokens(start, numBytes,
opTree, exprParsePtr->tokenPtr, parsePtr);
} else {
parsePtr->term = exprParsePtr->term;
parsePtr->errorType = exprParsePtr->errorType;
}
Tcl_FreeParse(exprParsePtr);
TclStackFree(interp, exprParsePtr);
Tcl_Free(opTree);
return code;
}
/*
*----------------------------------------------------------------------
*
* ParseLexeme --
*
* Parse a single lexeme from the start of a string, scanning no more
* than numBytes bytes.
*
* Results:
* Returns the number of bytes scanned to produce the lexeme.
*
* Side effects:
 *	The code identifying the parsed lexeme is written to *lexemePtr.
*
*----------------------------------------------------------------------
*/
static size_t
ParseLexeme(
const char *start, /* Start of lexeme to parse. */
size_t numBytes, /* Number of bytes in string. */
unsigned char *lexemePtr, /* Write code of parsed lexeme to this
* storage. */
Tcl_Obj **literalPtr) /* Write corresponding literal value to this
storage, if non-NULL. */
{
const char *end;
Tcl_UniChar ch = 0;
Tcl_Obj *literal = NULL;
unsigned char byte;
if (numBytes == 0) {
*lexemePtr = END;
return 0;
}
byte = UCHAR(*start);
if (byte < sizeof(Lexeme) && Lexeme[byte] != 0) {
*lexemePtr = Lexeme[byte];
return 1;
}
switch (byte) {
case '*':
if ((numBytes > 1) && (start[1] == '*')) {
*lexemePtr = EXPON;
return 2;
}
*lexemePtr = MULT;
return 1;
case '=':
if ((numBytes > 1) && (start[1] == '=')) {
*lexemePtr = EQUAL;
return 2;
}
*lexemePtr = INCOMPLETE;
return 1;
case '!':
if ((numBytes > 1) && (start[1] == '=')) {
*lexemePtr = NEQ;
return 2;
}
*lexemePtr = NOT;
return 1;
case '&':
if ((numBytes > 1) && (start[1] == '&')) {
*lexemePtr = AND;
return 2;
}
*lexemePtr = BIT_AND;
return 1;
case '|':
if ((numBytes > 1) && (start[1] == '|')) {
*lexemePtr = OR;
return 2;
}
*lexemePtr = BIT_OR;
return 1;
case '<':
if (numBytes > 1) {
switch (start[1]) {
case '<':
*lexemePtr = LEFT_SHIFT;
return 2;
case '=':
*lexemePtr = LEQ;
return 2;
}
}
*lexemePtr = LESS;
return 1;
case '>':
if (numBytes > 1) {
switch (start[1]) {
case '>':
*lexemePtr = RIGHT_SHIFT;
return 2;
case '=':
*lexemePtr = GEQ;
return 2;
}
}
*lexemePtr = GREATER;
return 1;
case 'i':
if ((numBytes > 1) && (start[1] == 'n')
&& ((numBytes == 2) || start[2] & 0x80 || !isalpha(UCHAR(start[2])))) {
/*
* Must make this check so we can tell the difference between the
* "in" operator and the "int" function name and the "infinity"
* numeric value.
*/
*lexemePtr = IN_LIST;
return 2;
}
break;
case 'e':
if ((numBytes > 1) && (start[1] == 'q')
&& ((numBytes == 2) || start[2] & 0x80 || !isalpha(UCHAR(start[2])))) {
*lexemePtr = STREQ;
return 2;
}
break;
case 'n':
if ((numBytes > 1)
&& ((numBytes == 2) || start[2] & 0x80 || !isalpha(UCHAR(start[2])))) {
switch (start[1]) {
case 'e':
*lexemePtr = STRNEQ;
return 2;
case 'i':
*lexemePtr = NOT_IN_LIST;
return 2;
}
}
break;
case 'l':
if ((numBytes > 1)
&& ((numBytes == 2) || start[2] & 0x80 || !isalpha(UCHAR(start[2])))) {
switch (start[1]) {
case 't':
*lexemePtr = STR_LT;
return 2;
case 'e':
*lexemePtr = STR_LEQ;
return 2;
}
}
break;
case 'g':
if ((numBytes > 1)
&& ((numBytes == 2) || start[2] & 0x80 || !isalpha(UCHAR(start[2])))) {
switch (start[1]) {
case 't':
*lexemePtr = STR_GT;
return 2;
case 'e':
*lexemePtr = STR_GEQ;
return 2;
}
}
break;
}
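    /*
     * Not one of the fixed operators above: try to parse a numeric literal
     * first; if that fails, or the digits run straight into bareword
     * characters, fall back to the bareword / INVALID handling below.
     */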
TclNewObj(literal);
if (TclParseNumber(NULL, literal, NULL, start, numBytes, &end,
TCL_PARSE_NO_WHITESPACE) == TCL_OK) {
if (end < start + numBytes && !TclIsBareword(*end)) {
number:
TclInitStringRep(literal, start, end-start);
*lexemePtr = NUMBER;
if (literalPtr) {
*literalPtr = literal;
} else {
Tcl_DecrRefCount(literal);
}
return (end-start);
} else {
unsigned char lexeme;
/*
* We have a number followed directly by bareword characters
* (alpha, digit, underscore). Is this a number followed by
* bareword syntax error? Or should we join into one bareword?
* Example: Inf + luence + () becomes a valid function call.
* [Bug 3401704]
*/
            if (TclHasIntRep(literal, &tclDoubleType)) {
const char *p = start;
while (p < end) {
if (!TclIsBareword(*p++)) {
/*
* The number has non-bareword characters, so we
* must treat it as a number.
*/
goto number;
}
}
}
ParseLexeme(end, numBytes-(end-start), &lexeme, NULL);
if ((NODE_TYPE & lexeme) == BINARY) {
/*
* The bareword characters following the number take the
* form of an operator (eq, ne, in, ni, ...) so we treat
* as number + operator.
*/
goto number;
}
/*
* Otherwise, fall through and parse the whole as a bareword.
*/
}
}
/*
 * We reject leading underscores in barewords.  No sensible reason why;
 * it might be inspired by reserved identifier rules in C, which of course
* have no direct relevance here.
*/
if (!TclIsBareword(*start) || *start == '_') {
        size_t scanned;

        if (Tcl_UtfCharComplete(start, numBytes)) {
            scanned = TclUtfToUniChar(start, &ch);
        } else {
            char utfBytes[4];

            memcpy(utfBytes, start, numBytes);
            utfBytes[numBytes] = '\0';
            scanned = TclUtfToUniChar(utfBytes, &ch);
        }
        *lexemePtr = INVALID;
        Tcl_DecrRefCount(literal);
        return scanned;
    }
end = start;
while (numBytes && TclIsBareword(*end)) {
end += 1;
numBytes -= 1;
}
*lexemePtr = BAREWORD;
if (literalPtr) {
Tcl_SetStringObj(literal, start, (int) (end-start));
*literalPtr = literal;
} else {
Tcl_DecrRefCount(literal);
}
return (end-start);
}
/*
*----------------------------------------------------------------------
*
* TclCompileExpr --
*
* This procedure compiles a string containing a Tcl expression into Tcl
* bytecodes.
*
* Results:
* None.
*
* Side effects:
* Adds instructions to envPtr to evaluate the expression at runtime.
*
*----------------------------------------------------------------------
*/
void
TclCompileExpr(
Tcl_Interp *interp, /* Used for error reporting. */
const char *script, /* The source script to compile. */
size_t numBytes, /* Number of bytes in script. */
CompileEnv *envPtr, /* Holds resulting instructions. */
int optimize) /* 0 for one-off expressions. */
{
OpNode *opTree = NULL; /* Will point to the tree of operators */
Tcl_Obj *litList; /* List to hold the literals */
    Tcl_Obj *funcList;		/* List to hold the function names. */
Tcl_Parse *parsePtr = (Tcl_Parse *)TclStackAlloc(interp, sizeof(Tcl_Parse));
/* Holds the Tcl_Tokens of substitutions */
int code;
TclNewObj(litList);
TclNewObj(funcList);
code = ParseExpr(interp, script, numBytes, &opTree, litList,
funcList, parsePtr, 0 /* parseOnly */);
if (code == TCL_OK) {
/*
* Valid parse; compile the tree.
*/
int objc;
Tcl_Obj *const *litObjv;
Tcl_Obj **funcObjv;
/* TIP #280 : Track Lines within the expression */
TclAdvanceLines(&envPtr->line, script,
script + TclParseAllWhiteSpace(script, numBytes));
TclListObjGetElements(NULL, litList, &objc, (Tcl_Obj ***)&litObjv);
TclListObjGetElements(NULL, funcList, &objc, &funcObjv);
CompileExprTree(interp, opTree, 0, &litObjv, funcObjv,
parsePtr->tokenPtr, envPtr, optimize);
} else {
TclCompileSyntaxError(interp, envPtr);
}
Tcl_FreeParse(parsePtr);
TclStackFree(interp, parsePtr);
Tcl_DecrRefCount(funcList);
Tcl_DecrRefCount(litList);
Tcl_Free(opTree);
}
/*
*----------------------------------------------------------------------
*
* ExecConstantExprTree --
* Compiles and executes bytecode for the subexpression tree at index
* in the nodes array. This subexpression must be constant, made up
* of only constant operators (not functions) and literals.
*
* Results:
* A standard Tcl return code and result left in interp.
*
* Side effects:
* Consumes subtree of nodes rooted at index. Advances the pointer
* *litObjvPtr.
*
*----------------------------------------------------------------------
*/
static int
ExecConstantExprTree(
Tcl_Interp *interp,
OpNode *nodes,
int index,
Tcl_Obj *const **litObjvPtr)
{
CompileEnv *envPtr;
ByteCode *byteCodePtr;
int code;
NRE_callback *rootPtr = TOP_CB(interp);
/*
* Note we are compiling an expression with literal arguments. This means
* there can be no [info frame] calls when we execute the resulting
* bytecode, so there's no need to tend to TIP 280 issues.
*/
envPtr = (CompileEnv *)TclStackAlloc(interp, sizeof(CompileEnv));
TclInitCompileEnv(interp, envPtr, NULL, 0, NULL, 0);
CompileExprTree(interp, nodes, index, litObjvPtr, NULL, NULL, envPtr,
0 /* optimize */);
TclEmitOpcode(INST_DONE, envPtr);
    byteCodePtr = TclInitByteCode(envPtr);
    TclFreeCompileEnv(envPtr);
    TclStackFree(interp, envPtr);
    TclNRExecuteByteCode(interp, byteCodePtr);
    code = TclNRRunCallbacks(interp, TCL_OK, rootPtr);
    TclReleaseByteCode(byteCodePtr);
return code;
}
/*
*----------------------------------------------------------------------
*
* CompileExprTree --
*
* Compiles and writes to envPtr instructions for the subexpression tree
* at index in the nodes array. (*litObjvPtr) must point to the proper
* location in a corresponding literals list. Likewise, when non-NULL,
* funcObjv and tokenPtr must point into matching arrays of function
* names and Tcl_Token's derived from earlier call to ParseExpr(). When
* optimize is true, any constant subexpressions will be precomputed.
*
* Results:
* None.
*
* Side effects:
* Adds instructions to envPtr to evaluate the expression at runtime.
* Consumes subtree of nodes rooted at index. Advances the pointer
* *litObjvPtr.
*
*----------------------------------------------------------------------
*/
static void
CompileExprTree(
Tcl_Interp *interp,
OpNode *nodes,
int index,
Tcl_Obj *const **litObjvPtr,
Tcl_Obj *const *funcObjv,
Tcl_Token *tokenPtr,
CompileEnv *envPtr,
int optimize)
{
OpNode *nodePtr = nodes + index;
OpNode *rootPtr = nodePtr;
int numWords = 0;
JumpList *jumpPtr = NULL;
int convert = 1;
while (1) {
int next;
JumpList *freePtr, *newJump;
if (nodePtr->mark == MARK_LEFT) {
next = nodePtr->left;
if (nodePtr->lexeme == QUESTION) {
convert = 1;
}
} else if (nodePtr->mark == MARK_RIGHT) {
next = nodePtr->right;
switch (nodePtr->lexeme) {
case FUNCTION: {
Tcl_DString cmdName;
const char *p;
size_t length;
Tcl_DStringInit(&cmdName);
TclDStringAppendLiteral(&cmdName, "tcl::mathfunc::");
p = TclGetStringFromObj(*funcObjv, &length);
funcObjv++;
Tcl_DStringAppend(&cmdName, p, length);
TclEmitPush(TclRegisterLiteral(envPtr,
Tcl_DStringValue(&cmdName),
Tcl_DStringLength(&cmdName), LITERAL_CMD_NAME), envPtr);
Tcl_DStringFree(&cmdName);
/*
* Start a count of the number of words in this function
* command invocation. In case there's already a count in
* progress (nested functions), save it in our unused "left"
* field for restoring later.
*/
nodePtr->left = numWords;
numWords = 2; /* Command plus one argument */
break;
}
case QUESTION:
newJump = (JumpList *)TclStackAlloc(interp, sizeof(JumpList));
newJump->next = jumpPtr;
jumpPtr = newJump;
TclEmitForwardJump(envPtr, TCL_FALSE_JUMP, &jumpPtr->jump);
break;
case COLON:
newJump = (JumpList *)TclStackAlloc(interp, sizeof(JumpList));
newJump->next = jumpPtr;
jumpPtr = newJump;
TclEmitForwardJump(envPtr, TCL_UNCONDITIONAL_JUMP,
&jumpPtr->jump);
TclAdjustStackDepth(-1, envPtr);
if (convert) {
jumpPtr->jump.jumpType = TCL_TRUE_JUMP;
}
convert = 1;
break;
case AND:
case OR:
newJump = (JumpList *)TclStackAlloc(interp, sizeof(JumpList));
newJump->next = jumpPtr;
jumpPtr = newJump;
TclEmitForwardJump(envPtr, (nodePtr->lexeme == AND)
? TCL_FALSE_JUMP : TCL_TRUE_JUMP, &jumpPtr->jump);
break;
}
} else {
int pc1, pc2, target;
switch (nodePtr->lexeme) {
case START:
case QUESTION:
if (convert && (nodePtr == rootPtr)) {
TclEmitOpcode(INST_TRY_CVT_TO_NUMERIC, envPtr);
}
break;
case OPEN_PAREN:
/* do nothing */
break;
case FUNCTION:
/*
* Use the numWords count we've kept to invoke the function
* command with the correct number of arguments.
*/
if (numWords < 255) {
TclEmitInvoke(envPtr, INST_INVOKE_STK1, numWords);
} else {
TclEmitInvoke(envPtr, INST_INVOKE_STK4, numWords);
}
/*
* Restore any saved numWords value.
*/
numWords = nodePtr->left;
convert = 1;
break;
case COMMA:
/*
* Each comma implies another function argument.
*/
numWords++;
break;
case COLON:
CLANG_ASSERT(jumpPtr);
if (jumpPtr->jump.jumpType == TCL_TRUE_JUMP) {
jumpPtr->jump.jumpType = TCL_UNCONDITIONAL_JUMP;
convert = 1;
}
target = jumpPtr->jump.codeOffset + 2;
if (TclFixupForwardJumpToHere(envPtr, &jumpPtr->jump, 127)) {
target += 3;
}
freePtr = jumpPtr;
jumpPtr = jumpPtr->next;
TclStackFree(interp, freePtr);
TclFixupForwardJump(envPtr, &jumpPtr->jump,
target - jumpPtr->jump.codeOffset, 127);
freePtr = jumpPtr;
jumpPtr = jumpPtr->next;
TclStackFree(interp, freePtr);
break;
case AND:
case OR:
CLANG_ASSERT(jumpPtr);
pc1 = CurrentOffset(envPtr);
TclEmitInstInt1((nodePtr->lexeme == AND) ? INST_JUMP_FALSE1
: INST_JUMP_TRUE1, 0, envPtr);
TclEmitPush(TclRegisterLiteral(envPtr,
(nodePtr->lexeme == AND) ? "1" : "0", 1, 0), envPtr);
pc2 = CurrentOffset(envPtr);
TclEmitInstInt1(INST_JUMP1, 0, envPtr);
TclAdjustStackDepth(-1, envPtr);
TclStoreInt1AtPtr(CurrentOffset(envPtr) - pc1,
envPtr->codeStart + pc1 + 1);
if (TclFixupForwardJumpToHere(envPtr, &jumpPtr->jump, 127)) {
pc2 += 3;
}
TclEmitPush(TclRegisterLiteral(envPtr,
(nodePtr->lexeme == AND) ? "0" : "1", 1, 0), envPtr);
TclStoreInt1AtPtr(CurrentOffset(envPtr) - pc2,
envPtr->codeStart + pc2 + 1);
convert = 0;
freePtr = jumpPtr;
jumpPtr = jumpPtr->next;
TclStackFree(interp, freePtr);
break;
default:
TclEmitOpcode(instruction[nodePtr->lexeme], envPtr);
convert = 0;
break;
}
if (nodePtr == rootPtr) {
/* We're done */
return;
}
nodePtr = nodes + nodePtr->p.parent;
continue;
}
nodePtr->mark++;
switch (next) {
case OT_EMPTY:
numWords = 1; /* No arguments, so just the command */
break;
case OT_LITERAL: {
Tcl_Obj *const *litObjv = *litObjvPtr;
Tcl_Obj *literal = *litObjv;
if (optimize) {
                size_t length;
const char *bytes = TclGetStringFromObj(literal, &length);
int idx = TclRegisterLiteral(envPtr, bytes, length, 0);
Tcl_Obj *objPtr = TclFetchLiteral(envPtr, idx);
if ((objPtr->typePtr == NULL) && (literal->typePtr != NULL)) {
/*
* Would like to do this:
*
* lePtr->objPtr = literal;
* Tcl_IncrRefCount(literal);
* Tcl_DecrRefCount(objPtr);
*
* However, the design of the "global" and "local"
* LiteralTable does not permit the value of lePtr->objPtr
* to change. So rather than replace lePtr->objPtr, we do
* surgery to transfer our desired intrep into it.
*/
objPtr->typePtr = literal->typePtr;
objPtr->internalRep = literal->internalRep;
literal->typePtr = NULL;
}
TclEmitPush(idx, envPtr);
} else {
/*
* When optimize==0, we know the expression is a one-off and
* there's nothing to be gained from sharing literals when
* they won't live long, and the copies we have already have
* an appropriate intrep. In this case, skip literal
* registration that would enable sharing, and use the routine
* that preserves intreps.
*/
TclEmitPush(TclAddLiteralObj(envPtr, literal, NULL), envPtr);
}
(*litObjvPtr)++;
break;
}
case OT_TOKENS:
CompileTokens(envPtr, tokenPtr, interp);
tokenPtr += tokenPtr->numComponents + 1;
break;
default:
if (optimize && nodes[next].constant) {
Tcl_InterpState save = Tcl_SaveInterpState(interp, TCL_OK);
if (ExecConstantExprTree(interp, nodes, next, litObjvPtr)
== TCL_OK) {
int idx;
Tcl_Obj *objPtr = Tcl_GetObjResult(interp);
/*
* Don't generate a string rep, but if we have one
* already, then use it to share via the literal table.
*/
if (TclHasStringRep(objPtr)) {
Tcl_Obj *tableValue;
                        size_t numBytes;
                        const char *bytes
                                = TclGetStringFromObj(objPtr, &numBytes);
idx = TclRegisterLiteral(envPtr, bytes, numBytes, 0);
tableValue = TclFetchLiteral(envPtr, idx);
if ((tableValue->typePtr == NULL) &&
(objPtr->typePtr != NULL)) {
/*
* Same intrep surgery as for OT_LITERAL.
*/
tableValue->typePtr = objPtr->typePtr;
tableValue->internalRep = objPtr->internalRep;
objPtr->typePtr = NULL;
}
} else {
idx = TclAddLiteralObj(envPtr, objPtr, NULL);
}
TclEmitPush(idx, envPtr);
} else {
TclCompileSyntaxError(interp, envPtr);
}
Tcl_RestoreInterpState(interp, save);
convert = 0;
} else {
nodePtr = nodes + next;
}
}
}
}
/*
*----------------------------------------------------------------------
*
* TclSingleOpCmd --
*
* Implements the commands: ~, !, <<, >>, %, !=, ne, in, ni
* in the ::tcl::mathop namespace. These commands have no
* extension to arbitrary arguments; they accept only exactly one
* or exactly two arguments as suitable for the operator.
*
* Results:
* A standard Tcl return code and result left in interp.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
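/*
 * Script-level behavior, for example: [tcl::mathop::! 0] yields 1,
 * [tcl::mathop::% 7 3] yields 1, and [tcl::mathop::ne a b] yields 1.
 * Anything other than the exact argument count reports a wrong-args error
 * built from occdPtr->expected.
 */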
int
TclSingleOpCmd(
ClientData clientData,
Tcl_Interp *interp,
int objc,
Tcl_Obj *const objv[])
{
TclOpCmdClientData *occdPtr = (TclOpCmdClientData *)clientData;
unsigned char lexeme;
OpNode nodes[2];
Tcl_Obj *const *litObjv = objv + 1;
if (objc != 1 + occdPtr->i.numArgs) {
Tcl_WrongNumArgs(interp, 1, objv, occdPtr->expected);
return TCL_ERROR;
}
ParseLexeme(occdPtr->op, strlen(occdPtr->op), &lexeme, NULL);
nodes[0].lexeme = START;
nodes[0].mark = MARK_RIGHT;
nodes[0].right = 1;
nodes[1].lexeme = lexeme;
if (objc == 2) {
nodes[1].mark = MARK_RIGHT;
} else {
nodes[1].mark = MARK_LEFT;
nodes[1].left = OT_LITERAL;
}
nodes[1].right = OT_LITERAL;
nodes[1].p.parent = 0;
return ExecConstantExprTree(interp, nodes, 0, &litObjv);
}
/*
*----------------------------------------------------------------------
*
* TclSortingOpCmd --
* Implements the commands:
 *	<, <=, >, >=, ==, eq, lt, le, gt, ge
* in the ::tcl::mathop namespace. These commands are defined for
* arbitrary number of arguments by computing the AND of the base
* operator applied to all neighbor argument pairs.
*
* Results:
* A standard Tcl return code and result left in interp.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
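/*
 * For example, [tcl::mathop::< 1 2 3] is evaluated as (1 < 2) && (2 < 3)
 * and yields 1, while fewer than two arguments trivially yield 1 (see the
 * objc < 3 shortcut below).
 */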
int
TclSortingOpCmd(
ClientData clientData,
Tcl_Interp *interp,
int objc,
Tcl_Obj *const objv[])
{
int code = TCL_OK;
if (objc < 3) {
Tcl_SetObjResult(interp, Tcl_NewBooleanObj(1));
} else {
TclOpCmdClientData *occdPtr = (TclOpCmdClientData *)clientData;
Tcl_Obj **litObjv = (Tcl_Obj **)TclStackAlloc(interp,
2 * (objc-2) * sizeof(Tcl_Obj *));
OpNode *nodes = (OpNode *)TclStackAlloc(interp, 2 * (objc-2) * sizeof(OpNode));
unsigned char lexeme;
int i, lastAnd = 1;
Tcl_Obj *const *litObjPtrPtr = litObjv;
ParseLexeme(occdPtr->op, strlen(occdPtr->op), &lexeme, NULL);
litObjv[0] = objv[1];
nodes[0].lexeme = START;
nodes[0].mark = MARK_RIGHT;
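        /*
         * The loop below chains the comparisons: odd node indices hold the
         * pairwise "arg[i] op arg[i+1]" comparisons and even indices hold
         * the AND nodes joining them, so each interior argument is pushed
         * into litObjv twice (once as the right operand of one comparison,
         * once as the left operand of the next).
         */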
for (i=2; i<objc-1; i++) {
litObjv[2*(i-1)-1] = objv[i];
nodes[2*(i-1)-1].lexeme = lexeme;
nodes[2*(i-1)-1].mark = MARK_LEFT;
nodes[2*(i-1)-1].left = OT_LITERAL;
nodes[2*(i-1)-1].right = OT_LITERAL;
litObjv[2*(i-1)] = objv[i];
nodes[2*(i-1)].lexeme = AND;
nodes[2*(i-1)].mark = MARK_LEFT;
nodes[2*(i-1)].left = lastAnd;
nodes[lastAnd].p.parent = 2*(i-1);
nodes[2*(i-1)].right = 2*(i-1)+1;
nodes[2*(i-1)+1].p.parent= 2*(i-1);
lastAnd = 2*(i-1);
}
litObjv[2*(objc-2)-1] = objv[objc-1];
nodes[2*(objc-2)-1].lexeme = lexeme;
nodes[2*(objc-2)-1].mark = MARK_LEFT;
nodes[2*(objc-2)-1].left = OT_LITERAL;
nodes[2*(objc-2)-1].right = OT_LITERAL;
nodes[0].right = lastAnd;
nodes[lastAnd].p.parent = 0;
code = ExecConstantExprTree(interp, nodes, 0, &litObjPtrPtr);
TclStackFree(interp, nodes);
TclStackFree(interp, litObjv);
}
return code;
}
/*
*----------------------------------------------------------------------
*
* TclVariadicOpCmd --
* Implements the commands: +, *, &, |, ^, **
* in the ::tcl::mathop namespace. These commands are defined for
* arbitrary number of arguments by repeatedly applying the base
* operator with suitable associative rules. When fewer than two
* arguments are provided, suitable identity values are returned.
*
* Results:
* A standard Tcl return code and result left in interp.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
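/*
 * For example, [tcl::mathop::+] with no arguments returns the identity 0,
 * [tcl::mathop::*] returns 1, and [tcl::mathop::** 2 3 2] is evaluated
 * right-associatively as 2**(3**2) == 512 (see the EXPON branches below).
 */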
int
TclVariadicOpCmd(
ClientData clientData,
Tcl_Interp *interp,
int objc,
Tcl_Obj *const objv[])
{
TclOpCmdClientData *occdPtr = (TclOpCmdClientData *)clientData;
unsigned char lexeme;
int code;
if (objc < 2) {
Tcl_SetObjResult(interp, Tcl_NewWideIntObj(occdPtr->i.identity));
return TCL_OK;
}
ParseLexeme(occdPtr->op, strlen(occdPtr->op), &lexeme, NULL);
lexeme |= BINARY;
if (objc == 2) {
Tcl_Obj *litObjv[2];
OpNode nodes[2];
int decrMe = 0;
Tcl_Obj *const *litObjPtrPtr = litObjv;
if (lexeme == EXPON) {
TclNewIntObj(litObjv[1], occdPtr->i.identity);
Tcl_IncrRefCount(litObjv[1]);
decrMe = 1;
litObjv[0] = objv[1];
nodes[0].lexeme = START;
nodes[0].mark = MARK_RIGHT;
nodes[0].right = 1;
nodes[1].lexeme = lexeme;
nodes[1].mark = MARK_LEFT;
nodes[1].left = OT_LITERAL;
nodes[1].right = OT_LITERAL;
nodes[1].p.parent = 0;
} else {
if (lexeme == DIVIDE) {
litObjv[0] = Tcl_NewDoubleObj(1.0);
} else {
TclNewIntObj(litObjv[0], occdPtr->i.identity);
}
Tcl_IncrRefCount(litObjv[0]);
litObjv[1] = objv[1];
nodes[0].lexeme = START;
nodes[0].mark = MARK_RIGHT;
nodes[0].right = 1;
nodes[1].lexeme = lexeme;
nodes[1].mark = MARK_LEFT;
nodes[1].left = OT_LITERAL;
nodes[1].right = OT_LITERAL;
nodes[1].p.parent = 0;
}
code = ExecConstantExprTree(interp, nodes, 0, &litObjPtrPtr);
Tcl_DecrRefCount(litObjv[decrMe]);
return code;
} else {
Tcl_Obj *const *litObjv = objv + 1;
OpNode *nodes = (OpNode *)TclStackAlloc(interp, (objc-1) * sizeof(OpNode));
int i, lastOp = OT_LITERAL;
nodes[0].lexeme = START;
nodes[0].mark = MARK_RIGHT;
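        /*
         * Exponentiation chains are linked right to left so that ** keeps
         * its right associativity; every other operator here chains left
         * to right.
         */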
if (lexeme == EXPON) {
for (i=objc-2; i>0; i--) {
nodes[i].lexeme = lexeme;
nodes[i].mark = MARK_LEFT;
nodes[i].left = OT_LITERAL;
nodes[i].right = lastOp;
if (lastOp >= 0) {
nodes[lastOp].p.parent = i;
}
lastOp = i;
}
} else {
for (i=1; i<objc-1; i++) {
nodes[i].lexeme = lexeme;
nodes[i].mark = MARK_LEFT;
nodes[i].left = lastOp;
if (lastOp >= 0) {
nodes[lastOp].p.parent = i;
}
nodes[i].right = OT_LITERAL;
lastOp = i;
}
}
nodes[0].right = lastOp;
nodes[lastOp].p.parent = 0;
code = ExecConstantExprTree(interp, nodes, 0, &litObjv);
TclStackFree(interp, nodes);
return code;
}
}
/*
*----------------------------------------------------------------------
*
* TclNoIdentOpCmd --
* Implements the commands: -, /
* in the ::tcl::mathop namespace. These commands are defined for
* arbitrary non-zero number of arguments by repeatedly applying the base
* operator with suitable associative rules. When no arguments are
* provided, an error is raised.
*
* Results:
* A standard Tcl return code and result left in interp.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
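/*
 * For example, [tcl::mathop::- 10 1 2] yields 7.  A single argument is
 * handled by the two-argument path in TclVariadicOpCmd: [tcl::mathop::- 5]
 * negates (identity minus the argument), and [tcl::mathop::/ 4] divides the
 * 1.0 literal by the argument, yielding 0.25.
 */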
int
TclNoIdentOpCmd(
ClientData clientData,
Tcl_Interp *interp,
int objc,
Tcl_Obj *const objv[])
{
TclOpCmdClientData *occdPtr = (TclOpCmdClientData *)clientData;
if (objc < 2) {
Tcl_WrongNumArgs(interp, 1, objv, occdPtr->expected);
return TCL_ERROR;
}
return TclVariadicOpCmd(clientData, interp, objc, objv);
}
/*
* Local Variables:
* mode: c
* c-basic-offset: 4
* fill-column: 78
* End:
*/
|
450277.c | /* origin: FreeBSD /usr/src/lib/msun/src/s_cosf.c */
/*
* Conversion to float by Ian Lance Taylor, Cygnus Support, [email protected].
* Optimized by Bruce D. Evans.
*/
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunPro, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "libm.h"
/* Small multiples of pi/2 rounded to double precision. */
static const double c1pio2 = 1 * M_PI_2, /* 0x3FF921FB, 0x54442D18 */
c2pio2 = 2 * M_PI_2, /* 0x400921FB, 0x54442D18 */
c3pio2 = 3 * M_PI_2, /* 0x4012D97C, 0x7F3321D2 */
c4pio2 = 4 * M_PI_2; /* 0x401921FB, 0x54442D18 */
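/*
 * The branches in cosf() below fold |x| in (pi/4, 9*pi/4] onto the float
 * kernels using cos(x -/+ pi) = -cos(x), cos(x) = sin(pi/2 -/+ x) with the
 * appropriate sign, and cos(x -/+ 2*pi) = cos(x); larger finite arguments
 * go through the general reduction __rem_pio2f().
 */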
float cosf(float x)
{
double y;
uint32_t ix;
unsigned n, sign;
GET_FLOAT_WORD(ix, x);
sign = ix >> 31;
ix &= 0x7fffffff;
if (ix <= 0x3f490fda) { /* |x| ~<= pi/4 */
if (ix < 0x39800000) { /* |x| < 2**-12 */
/* raise inexact if x != 0 */
FORCE_EVAL(x + 0x1p120f);
return 1.0f;
}
return __cosdf(x);
}
if (ix <= 0x407b53d1) { /* |x| ~<= 5*pi/4 */
if (ix > 0x4016cbe3) /* |x| ~> 3*pi/4 */
return -__cosdf(sign ? x + c2pio2 : x - c2pio2);
else {
if (sign)
return __sindf(x + c1pio2);
else
return __sindf(c1pio2 - x);
}
}
if (ix <= 0x40e231d5) { /* |x| ~<= 9*pi/4 */
if (ix > 0x40afeddf) /* |x| ~> 7*pi/4 */
return __cosdf(sign ? x + c4pio2 : x - c4pio2);
else {
if (sign)
return __sindf(-x - c3pio2);
else
return __sindf(x - c3pio2);
}
}
/* cos(Inf or NaN) is NaN */
if (ix >= 0x7f800000)
return x - x;
/* general argument reduction needed */
n = __rem_pio2f(x, &y);
switch (n & 3) {
case 0:
return __cosdf(y);
case 1:
return __sindf(-y);
case 2:
return -__cosdf(y);
default:
return __sindf(y);
}
}
|
477531.c | #include <string.h>
#include "chardef.h"
#include "select.h"
/*
Author: Stefan Kurtz, [email protected], October 2002.
*/
/*
This module implements a selection function which selects the context
of a match. The size of the entire match is specified by the following
  variable. It can be overridden by an extra second argument to the option
\texttt{-selfun}.
*/
static Uint matchlength = UintConst(17);
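/*
  Illustrative use, assuming the usual "-selfun <shared object> [length]"
  call syntax of the enclosing matching tool (the file name below is made
  up): passing "-selfun selcontext.so 30" would make selectmatchHeader()
  override the default of 17 with 30.
*/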
/*
The selection function bundle.
*/
Sint selectmatchHeader(Argctype argc,const char * const*argv,
Argctype callargc,const char * const*callargv)
{
Uint i;
BOOL selfunfound = False;
for(i=1; i<(Uint) argc; i++)
{
if(strcmp(argv[i],"-selfun") == 0)
{
selfunfound = True;
break;
}
}
if(!selfunfound)
{
fprintf(stderr,"%s: in shared object compiled from file %s\n",argv[0],
__FILE__);
fprintf(stderr,"cannot find option -selfun\n");
exit(EXIT_FAILURE);
}
if(i+2 < (Uint) (argc-1) && argv[i+2][0] != '-') // does not point to index
{ // or option
Scaninteger readint;
if(sscanf(argv[i+2],"%ld",&readint) != 1 || readint < 1)
{
fprintf(stderr,"optional second argument to option -selfun "
"must be positive number\n");
exit(EXIT_FAILURE);
}
matchlength = (Uint) readint;
}
return 0;
}
Sint selectmatch(Alphabet *alpha,
Multiseq *virtualmultiseq,
Multiseq *querymultiseq,
StoreMatch *storematch)
{
Uint addone = 0;
BOOL reject = False;
Uchar *sptr, *start, *end;
if(storematch->Storelength1 > matchlength)
{
fprintf(stderr,"match is longer than the given match length. "
"Does not make sense to select a negative context.\n");
exit(EXIT_FAILURE);
}
if(storematch->Storeflag & FLAGPALINDROMIC)
{
if(matchlength > storematch->Storeposition1 + storematch->Storelength1)
{
return 0;
}
start = virtualmultiseq->sequence + storematch->Storeposition1 +
storematch->Storelength1 - matchlength;
end = start + matchlength - 1;
for(sptr = end; sptr>=start; sptr--)
{
if(ISSPECIAL(*sptr))
{
reject = True;
break;
}
}
if(!reject)
{
printf("%lu %lu - ",(Showuint) (storematch->Storeseqnum1+addone),
(Showuint) (storematch->Storerelpos1+
storematch->Storelength1-matchlength+addone));
for(sptr = end; sptr>=start; sptr--)
{
printf("%c",alpha->characters[3-*sptr]);
}
printf("\n");
}
} else
{
if(storematch->Storeposition1 + matchlength > virtualmultiseq->totallength)
{
return 0;
}
start = virtualmultiseq->sequence + storematch->Storeposition1;
end = start + matchlength - 1;
for(sptr = start; sptr<=end; sptr++)
{
if(ISSPECIAL(*sptr))
{
reject = True;
break;
}
}
if(!reject)
{
printf("%lu %lu + ",(Showuint) (storematch->Storeseqnum1+addone),
(Showuint) (storematch->Storerelpos1+addone));
for(sptr = start; sptr<=end; sptr++)
{
printf("%c",alpha->characters[*sptr]);
}
printf("\n");
}
}
return 0; /* reject */
}
|
903787.c | //*****************************************************************************
//
// fontcmss40b.c - Font definition for the 40pt Cmss bold font.
//
// Copyright (c) 2011-2020 Texas Instruments Incorporated. All rights reserved.
// Software License Agreement
//
// Texas Instruments (TI) is supplying this software for use solely and
// exclusively on TI's microcontroller products. The software is owned by
// TI and/or its suppliers, and is protected under applicable copyright
// laws. You may not combine this software with "viral" open-source
// software in order to form a larger program.
//
// THIS SOFTWARE IS PROVIDED "AS IS" AND WITH ALL FAULTS.
// NO WARRANTIES, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT
// NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. TI SHALL NOT, UNDER ANY
// CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR CONSEQUENTIAL
// DAMAGES, FOR ANY REASON WHATSOEVER.
//
// This is part of revision 2.2.0.295 of the Tiva Graphics Library.
//
//*****************************************************************************
//*****************************************************************************
//
// This file is generated by ftrasterize; DO NOT EDIT BY HAND!
//
//*****************************************************************************
#include <stdint.h>
#include <stdbool.h>
#include "grlib/grlib.h"
//*****************************************************************************
//
// Details of this font:
// Characters: 32 to 126 inclusive
// Style: cmss
// Size: 40 point
// Bold: yes
// Italic: no
// Memory usage: 4688 bytes
//
//*****************************************************************************
//*****************************************************************************
//
// The compressed data for the 40 point Cmss bold font.
// Contains characters 32 to 126 inclusive.
//
//*****************************************************************************
static const uint8_t g_pui8Cmss40bData[4485] =
{
4, 17, 0, 85, 32, 9, 240, 197, 69, 69, 69, 69,
69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69,
69, 69, 240, 240, 21, 69, 69, 69, 69, 0, 11, 96,
25, 18, 0, 6, 101, 69, 69, 69, 69, 69, 69, 69,
69, 69, 83, 99, 99, 99, 83, 99, 99, 99, 0, 63,
96, 104, 35, 0, 14, 115, 99, 240, 131, 99, 240, 116,
99, 240, 116, 84, 240, 115, 99, 240, 116, 99, 240, 116,
84, 240, 115, 99, 240, 131, 99, 240, 116, 84, 207, 14,
95, 15, 1, 79, 15, 1, 95, 14, 243, 99, 240, 131,
99, 240, 116, 84, 240, 115, 99, 240, 131, 99, 255, 14,
95, 15, 1, 79, 15, 1, 95, 14, 196, 84, 240, 115,
100, 240, 115, 99, 240, 131, 99, 240, 116, 84, 240, 115,
99, 240, 131, 99, 240, 116, 99, 240, 116, 84, 240, 115,
99, 240, 131, 99, 240, 131, 99, 0, 11, 63, 22, 129,
240, 83, 240, 67, 240, 40, 205, 143, 111, 1, 101, 19,
52, 85, 35, 82, 85, 35, 197, 35, 197, 35, 198, 19,
202, 220, 189, 173, 173, 202, 195, 23, 179, 38, 179, 53,
179, 53, 65, 99, 53, 67, 67, 53, 69, 35, 37, 95,
2, 95, 1, 141, 185, 240, 19, 240, 67, 0, 23, 64,
101, 39, 70, 240, 50, 186, 243, 187, 212, 165, 52, 196,
165, 69, 180, 165, 84, 164, 181, 84, 148, 197, 84, 132,
213, 84, 132, 213, 84, 116, 229, 84, 100, 245, 84, 84,
240, 37, 52, 100, 240, 44, 84, 240, 74, 84, 240, 118,
101, 240, 240, 68, 102, 240, 116, 89, 240, 84, 91, 240,
53, 85, 36, 240, 52, 85, 68, 240, 20, 101, 68, 244,
117, 68, 229, 117, 68, 228, 133, 68, 212, 149, 68, 196,
165, 68, 181, 165, 68, 180, 197, 36, 180, 219, 179, 249,
195, 240, 37, 0, 40, 74, 32, 0, 13, 21, 240, 169,
240, 123, 240, 85, 52, 240, 84, 84, 240, 52, 100, 240,
52, 100, 240, 52, 100, 240, 52, 84, 240, 68, 68, 240,
85, 36, 130, 201, 148, 168, 148, 182, 180, 182, 165, 153,
148, 148, 22, 116, 148, 54, 85, 133, 70, 53, 149, 86,
21, 165, 106, 181, 120, 198, 89, 113, 79, 13, 95, 12,
109, 43, 135, 151, 0, 40, 80, 15, 9, 240, 197, 69,
69, 69, 69, 83, 99, 83, 99, 0, 32, 32, 43, 15,
131, 179, 179, 180, 164, 180, 164, 180, 164, 180, 165, 165,
165, 164, 165, 165, 165, 165, 165, 165, 165, 165, 165, 165,
165, 165, 180, 181, 165, 165, 180, 180, 196, 180, 196, 180,
196, 195, 211, 211, 64, 43, 15, 3, 211, 211, 196, 196,
180, 196, 181, 180, 180, 181, 165, 165, 180, 181, 165, 165,
165, 165, 165, 165, 165, 165, 165, 165, 165, 164, 165, 165,
165, 164, 180, 164, 180, 179, 180, 164, 179, 179, 179, 192,
42, 20, 114, 240, 36, 240, 20, 240, 20, 164, 50, 52,
69, 34, 37, 70, 18, 22, 94, 138, 212, 218, 142, 86,
18, 22, 69, 34, 37, 68, 50, 52, 164, 240, 20, 240,
20, 240, 34, 0, 53, 112, 65, 33, 0, 26, 51, 240,
243, 240, 243, 240, 243, 240, 243, 240, 243, 240, 243, 240,
243, 240, 243, 240, 243, 240, 243, 240, 243, 240, 63, 12,
95, 14, 79, 14, 95, 12, 240, 51, 240, 243, 240, 243,
240, 243, 240, 243, 240, 243, 240, 243, 240, 243, 240, 243,
240, 243, 240, 243, 240, 243, 240, 243, 0, 22, 96, 16,
9, 0, 28, 21, 69, 69, 69, 69, 83, 99, 83, 99,
0, 7, 64, 10, 15, 0, 35, 91, 75, 75, 0, 34,
32, 12, 9, 0, 28, 21, 69, 69, 69, 69, 0, 11,
96, 83, 21, 227, 240, 51, 240, 36, 240, 35, 240, 36,
240, 36, 240, 35, 240, 36, 240, 36, 240, 35, 240, 36,
240, 35, 240, 51, 240, 36, 240, 35, 240, 36, 240, 36,
240, 35, 240, 36, 240, 35, 240, 51, 240, 36, 240, 35,
240, 36, 240, 36, 240, 35, 240, 36, 240, 35, 240, 51,
240, 36, 240, 35, 240, 36, 240, 36, 240, 35, 240, 36,
240, 36, 240, 35, 240, 36, 240, 35, 240, 51, 240, 48,
55, 21, 0, 8, 71, 218, 172, 133, 53, 117, 85, 101,
85, 100, 116, 85, 117, 69, 117, 69, 117, 69, 117, 69,
117, 69, 117, 69, 117, 69, 117, 69, 117, 69, 117, 69,
117, 69, 117, 69, 117, 84, 116, 101, 85, 101, 85, 117,
53, 141, 155, 199, 0, 27, 48, 34, 20, 0, 8, 67,
245, 170, 155, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 174,
111, 94, 0, 25, 80, 54, 21, 0, 8, 70, 218, 173,
126, 117, 70, 84, 117, 84, 118, 82, 149, 97, 149, 240,
21, 240, 21, 240, 21, 245, 245, 240, 21, 245, 244, 240,
20, 240, 20, 240, 20, 240, 20, 240, 20, 240, 20, 240,
31, 1, 79, 2, 79, 2, 95, 1, 0, 26, 96, 49,
21, 0, 8, 86, 203, 157, 126, 102, 70, 99, 117, 113,
133, 240, 21, 240, 21, 245, 246, 185, 200, 218, 240, 22,
240, 21, 240, 37, 240, 21, 240, 21, 240, 21, 240, 21,
67, 149, 69, 86, 95, 1, 110, 155, 199, 0, 27, 48,
58, 24, 0, 10, 38, 240, 39, 240, 24, 240, 24, 249,
243, 21, 228, 21, 227, 37, 212, 37, 196, 53, 196, 53,
180, 69, 180, 69, 164, 85, 164, 85, 148, 101, 148, 101,
143, 4, 95, 5, 79, 5, 95, 3, 240, 21, 240, 69,
240, 69, 240, 69, 240, 69, 240, 69, 0, 31, 50, 22,
0, 8, 78, 143, 127, 126, 133, 240, 37, 240, 37, 240,
37, 240, 37, 240, 43, 189, 158, 134, 69, 116, 116, 116,
117, 240, 37, 240, 37, 240, 37, 240, 37, 240, 37, 82,
165, 83, 133, 86, 86, 111, 142, 155, 215, 0, 28, 96,
52, 21, 0, 8, 102, 217, 186, 171, 150, 229, 240, 21,
240, 20, 240, 36, 53, 133, 24, 117, 25, 103, 69, 86,
85, 85, 117, 69, 117, 69, 117, 69, 117, 69, 117, 69,
117, 69, 117, 84, 117, 85, 101, 86, 69, 126, 140, 170,
214, 0, 27, 48, 60, 21, 0, 8, 15, 1, 79, 2,
79, 2, 95, 1, 240, 20, 240, 20, 240, 21, 240, 20,
240, 20, 240, 21, 240, 20, 240, 21, 240, 20, 240, 21,
240, 21, 240, 20, 240, 21, 240, 21, 240, 21, 240, 21,
245, 240, 21, 240, 21, 240, 21, 240, 21, 240, 21, 240,
21, 0, 27, 96, 50, 21, 0, 8, 71, 203, 157, 127,
101, 85, 100, 116, 100, 116, 100, 116, 100, 116, 100, 116,
101, 85, 125, 169, 173, 117, 85, 100, 116, 85, 117, 69,
117, 69, 117, 69, 117, 69, 117, 69, 117, 85, 85, 111,
125, 155, 199, 0, 27, 48, 53, 21, 0, 8, 71, 202,
172, 142, 118, 54, 101, 85, 85, 116, 85, 117, 69, 117,
69, 117, 69, 117, 69, 117, 69, 117, 69, 117, 85, 86,
85, 71, 105, 21, 120, 21, 133, 53, 240, 20, 240, 21,
240, 21, 114, 86, 141, 125, 154, 214, 0, 27, 80, 19,
9, 0, 13, 69, 69, 69, 69, 69, 0, 9, 69, 69,
69, 69, 69, 0, 11, 96, 23, 9, 0, 13, 69, 69,
69, 69, 69, 0, 9, 69, 69, 69, 69, 69, 83, 99,
83, 99, 0, 7, 64, 33, 9, 0, 11, 37, 69, 69,
69, 69, 240, 240, 21, 69, 69, 69, 69, 69, 69, 69,
69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69,
240, 112, 25, 33, 0, 53, 111, 12, 95, 14, 79, 14,
95, 12, 0, 21, 63, 12, 95, 14, 79, 14, 95, 12,
0, 58, 48, 37, 19, 0, 24, 52, 245, 214, 214, 228,
0, 9, 4, 244, 244, 244, 244, 229, 228, 229, 228, 229,
213, 214, 213, 229, 229, 129, 85, 115, 70, 69, 94, 93,
136, 0, 5, 96, 39, 20, 0, 8, 8, 172, 111, 100,
86, 82, 133, 245, 245, 245, 229, 230, 213, 229, 244, 244,
240, 20, 240, 20, 240, 20, 245, 240, 20, 0, 9, 53,
245, 245, 245, 245, 0, 26, 32, 71, 29, 0, 12, 25,
240, 46, 223, 3, 167, 117, 150, 75, 117, 77, 102, 63,
85, 54, 55, 85, 53, 86, 69, 53, 117, 69, 53, 117,
69, 53, 117, 69, 53, 117, 69, 53, 117, 69, 53, 117,
69, 53, 117, 69, 53, 117, 85, 53, 85, 101, 54, 54,
101, 77, 133, 75, 150, 87, 198, 240, 152, 118, 159, 3,
223, 240, 41, 0, 37, 64, 64, 29, 0, 12, 7, 240,
105, 240, 89, 240, 74, 240, 75, 240, 59, 240, 38, 21,
240, 37, 38, 240, 21, 38, 245, 69, 245, 70, 229, 70,
213, 101, 213, 102, 197, 102, 181, 133, 181, 134, 175, 4,
159, 6, 143, 6, 127, 7, 117, 198, 101, 198, 86, 198,
85, 230, 69, 230, 68, 240, 21, 0, 36, 96, 61, 26,
0, 9, 111, 1, 175, 3, 143, 4, 117, 135, 101, 166,
85, 181, 85, 181, 85, 181, 85, 181, 85, 165, 101, 134,
127, 3, 143, 1, 175, 4, 117, 135, 101, 166, 85, 181,
85, 197, 69, 197, 69, 197, 69, 197, 69, 197, 69, 182,
69, 151, 95, 5, 111, 4, 127, 2, 0, 33, 80, 58,
26, 0, 10, 121, 239, 1, 159, 2, 127, 3, 135, 116,
118, 178, 102, 240, 85, 240, 101, 240, 85, 240, 101, 240,
101, 240, 101, 240, 101, 240, 101, 240, 101, 240, 101, 240,
102, 240, 101, 240, 101, 240, 102, 240, 102, 194, 104, 132,
127, 4, 143, 3, 175, 233, 0, 33, 64, 61, 28, 0,
10, 79, 1, 207, 4, 159, 5, 133, 151, 117, 182, 101,
197, 101, 213, 85, 213, 85, 228, 85, 229, 69, 229, 69,
229, 69, 229, 69, 229, 69, 229, 69, 229, 69, 229, 69,
229, 69, 229, 69, 213, 85, 213, 85, 197, 101, 182, 101,
151, 127, 5, 143, 4, 159, 1, 0, 36, 64, 61, 23,
0, 8, 95, 3, 95, 3, 95, 3, 95, 3, 85, 240,
53, 240, 53, 240, 53, 240, 53, 240, 53, 240, 53, 240,
63, 2, 111, 2, 111, 2, 111, 2, 101, 240, 53, 240,
53, 240, 53, 240, 53, 240, 53, 240, 53, 240, 53, 240,
63, 4, 79, 4, 79, 4, 79, 4, 0, 29, 32, 60,
22, 0, 8, 47, 2, 95, 3, 79, 3, 79, 2, 85,
240, 37, 240, 37, 240, 37, 240, 37, 240, 37, 240, 37,
240, 47, 1, 111, 1, 111, 1, 111, 1, 101, 240, 37,
240, 37, 240, 37, 240, 37, 240, 37, 240, 37, 240, 37,
240, 37, 240, 37, 240, 37, 240, 37, 0, 29, 80, 59,
28, 0, 11, 106, 255, 1, 175, 4, 143, 5, 120, 133,
102, 210, 102, 241, 101, 240, 133, 240, 118, 240, 117, 240,
133, 240, 133, 240, 133, 240, 133, 169, 69, 169, 69, 169,
70, 153, 85, 213, 86, 197, 87, 181, 103, 165, 120, 133,
143, 5, 159, 4, 191, 1, 249, 0, 36, 16, 61, 27,
0, 10, 21, 213, 69, 213, 69, 213, 69, 213, 69, 213,
69, 213, 69, 213, 69, 213, 69, 213, 69, 213, 69, 213,
79, 8, 79, 8, 79, 8, 79, 8, 69, 213, 69, 213,
69, 213, 69, 213, 69, 213, 69, 213, 69, 213, 69, 213,
69, 213, 69, 213, 69, 213, 69, 213, 0, 34, 32, 33,
9, 240, 197, 69, 69, 69, 69, 69, 69, 69, 69, 69,
69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69,
69, 69, 69, 69, 69, 0, 11, 96, 36, 18, 0, 7,
117, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213,
213, 213, 213, 213, 213, 213, 213, 213, 213, 66, 117, 67,
86, 77, 93, 92, 151, 0, 23, 64, 58, 28, 0, 10,
69, 214, 69, 198, 85, 182, 101, 166, 117, 150, 133, 134,
149, 118, 165, 87, 181, 71, 197, 55, 213, 39, 229, 23,
254, 239, 223, 216, 38, 199, 70, 182, 102, 165, 118, 165,
134, 149, 150, 133, 166, 117, 166, 117, 182, 101, 198, 85,
214, 69, 214, 0, 35, 64, 61, 21, 0, 7, 117, 240,
21, 240, 21, 240, 21, 240, 21, 240, 21, 240, 21, 240,
21, 240, 21, 240, 21, 240, 21, 240, 21, 240, 21, 240,
21, 240, 21, 240, 21, 240, 21, 240, 21, 240, 21, 240,
21, 240, 21, 240, 21, 240, 21, 240, 31, 1, 95, 2,
79, 2, 79, 1, 0, 26, 112, 98, 35, 0, 13, 23,
240, 39, 72, 248, 72, 248, 73, 217, 73, 217, 73, 217,
69, 20, 180, 21, 69, 20, 180, 21, 69, 21, 149, 21,
69, 36, 148, 37, 69, 36, 148, 37, 69, 37, 117, 37,
69, 52, 116, 53, 69, 53, 85, 53, 69, 68, 84, 69,
69, 68, 84, 69, 69, 69, 53, 69, 69, 84, 52, 85,
69, 85, 21, 85, 69, 100, 20, 101, 69, 100, 20, 101,
69, 105, 101, 69, 119, 117, 69, 119, 117, 69, 133, 133,
69, 133, 133, 69, 240, 101, 0, 44, 32, 76, 28, 0,
10, 72, 181, 72, 181, 73, 165, 73, 165, 74, 149, 74,
149, 69, 21, 133, 69, 21, 133, 69, 37, 117, 69, 37,
117, 69, 53, 101, 69, 53, 101, 69, 69, 85, 69, 70,
69, 69, 85, 69, 69, 101, 53, 69, 101, 53, 69, 117,
37, 69, 117, 37, 69, 133, 21, 69, 133, 21, 69, 154,
69, 154, 69, 169, 69, 169, 69, 184, 69, 184, 0, 35,
64, 69, 30, 0, 12, 57, 240, 62, 239, 3, 183, 103,
150, 166, 133, 197, 117, 229, 101, 229, 100, 240, 20, 85,
240, 21, 69, 240, 21, 69, 240, 21, 69, 240, 21, 69,
240, 21, 69, 240, 21, 69, 240, 21, 69, 240, 21, 69,
240, 21, 85, 229, 101, 229, 101, 229, 117, 197, 134, 166,
151, 103, 191, 3, 238, 240, 57, 0, 39, 16, 59, 25,
0, 9, 63, 1, 159, 3, 127, 4, 101, 150, 85, 166,
69, 181, 69, 181, 69, 181, 69, 181, 69, 181, 69, 166,
69, 150, 95, 5, 95, 4, 111, 3, 127, 165, 240, 85,
240, 85, 240, 85, 240, 85, 240, 85, 240, 85, 240, 85,
240, 85, 240, 85, 240, 85, 0, 33, 96, 81, 30, 0,
12, 57, 240, 62, 239, 3, 183, 103, 150, 166, 133, 197,
117, 229, 101, 229, 101, 229, 85, 240, 21, 69, 240, 21,
69, 240, 21, 69, 240, 21, 69, 240, 21, 69, 240, 21,
69, 240, 21, 69, 240, 21, 69, 240, 21, 84, 240, 21,
85, 85, 69, 101, 86, 53, 117, 86, 22, 118, 91, 151,
73, 191, 3, 239, 1, 240, 30, 240, 151, 240, 151, 240,
135, 240, 150, 0, 23, 32, 60, 25, 0, 9, 63, 175,
3, 127, 4, 101, 150, 85, 166, 69, 181, 69, 181, 69,
181, 69, 181, 69, 181, 69, 165, 85, 150, 95, 4, 111,
3, 127, 1, 149, 86, 149, 86, 149, 102, 133, 117, 133,
118, 117, 133, 117, 134, 101, 149, 101, 165, 85, 165, 85,
181, 69, 181, 0, 31, 96, 51, 24, 0, 9, 105, 206,
159, 2, 111, 2, 118, 101, 101, 163, 101, 193, 101, 240,
70, 240, 55, 240, 57, 253, 206, 191, 189, 250, 240, 55,
240, 54, 240, 69, 240, 69, 66, 213, 68, 166, 70, 118,
95, 4, 95, 3, 143, 201, 0, 31, 32, 59, 29, 0,
10, 127, 10, 79, 10, 79, 10, 79, 10, 229, 240, 149,
240, 149, 240, 149, 240, 149, 240, 149, 240, 149, 240, 149,
240, 149, 240, 149, 240, 149, 240, 149, 240, 149, 240, 149,
240, 149, 240, 149, 240, 149, 240, 149, 240, 149, 240, 149,
240, 149, 240, 149, 240, 149, 0, 38, 59, 26, 0, 9,
101, 197, 69, 197, 69, 197, 69, 197, 69, 197, 69, 197,
69, 197, 69, 197, 69, 197, 69, 197, 69, 197, 69, 197,
69, 197, 69, 197, 69, 197, 69, 197, 69, 197, 69, 197,
69, 197, 69, 197, 69, 197, 85, 165, 101, 165, 118, 102,
159, 1, 190, 233, 0, 33, 112, 68, 31, 0, 11, 86,
240, 21, 70, 246, 86, 229, 102, 214, 102, 213, 134, 197,
134, 182, 134, 181, 166, 165, 166, 150, 166, 149, 198, 118,
198, 117, 230, 101, 230, 86, 230, 85, 240, 22, 69, 240,
22, 54, 240, 22, 53, 240, 54, 37, 240, 54, 21, 240,
85, 21, 240, 91, 240, 90, 240, 121, 240, 121, 240, 135,
0, 40, 64, 104, 43, 0, 16, 21, 198, 196, 69, 183,
181, 69, 183, 180, 101, 153, 149, 101, 153, 149, 101, 153,
149, 102, 132, 20, 148, 133, 117, 21, 117, 133, 116, 37,
117, 133, 116, 37, 117, 134, 85, 53, 100, 165, 85, 53,
85, 165, 84, 69, 85, 165, 84, 69, 84, 197, 53, 85,
68, 197, 52, 101, 53, 197, 52, 101, 53, 198, 36, 101,
52, 229, 21, 117, 21, 229, 20, 133, 21, 229, 20, 133,
20, 245, 20, 148, 20, 240, 25, 153, 240, 24, 169, 240,
24, 168, 240, 55, 183, 240, 54, 199, 0, 55, 16, 63,
30, 0, 11, 54, 182, 119, 151, 135, 134, 166, 118, 183,
87, 199, 70, 231, 38, 240, 22, 23, 240, 29, 240, 59,
240, 89, 240, 120, 240, 134, 240, 135, 240, 121, 240, 106,
240, 76, 240, 38, 23, 240, 22, 38, 246, 70, 214, 87,
183, 103, 166, 134, 150, 151, 118, 183, 87, 199, 70, 230,
0, 38, 62, 30, 0, 11, 37, 246, 70, 214, 87, 198,
103, 166, 134, 150, 151, 134, 167, 102, 198, 86, 230, 54,
247, 38, 240, 22, 22, 240, 59, 240, 75, 240, 89, 240,
119, 240, 135, 240, 149, 240, 165, 240, 165, 240, 165, 240,
165, 240, 165, 240, 165, 240, 165, 240, 165, 240, 165, 240,
165, 0, 39, 48, 61, 26, 0, 9, 127, 6, 95, 6,
95, 6, 95, 6, 240, 55, 240, 70, 240, 70, 240, 71,
240, 55, 240, 70, 240, 70, 240, 71, 240, 55, 240, 70,
240, 70, 240, 71, 240, 70, 240, 70, 240, 70, 240, 71,
240, 70, 240, 70, 240, 70, 240, 79, 7, 79, 7, 79,
7, 79, 7, 0, 33, 43, 13, 9, 73, 73, 69, 133,
133, 133, 133, 133, 133, 133, 133, 133, 133, 133, 133, 133,
133, 133, 133, 133, 133, 133, 133, 133, 133, 133, 133, 133,
133, 133, 133, 133, 133, 133, 133, 133, 137, 73, 73, 64,
24, 17, 0, 6, 83, 83, 99, 83, 83, 83, 99, 83,
85, 53, 69, 53, 69, 53, 69, 53, 69, 53, 0, 60,
43, 13, 9, 73, 73, 133, 133, 133, 133, 133, 133, 133,
133, 133, 133, 133, 133, 133, 133, 133, 133, 133, 133, 133,
133, 133, 133, 133, 133, 133, 133, 133, 133, 133, 133, 133,
133, 133, 133, 73, 73, 73, 64, 16, 17, 0, 6, 117,
198, 168, 132, 21, 100, 68, 67, 115, 0, 66, 48, 11,
9, 240, 197, 69, 69, 69, 69, 0, 36, 64, 14, 9,
240, 227, 99, 83, 99, 85, 69, 69, 69, 69, 0, 32,
38, 21, 0, 32, 24, 173, 142, 116, 86, 98, 133, 240,
21, 240, 21, 216, 141, 102, 69, 85, 101, 69, 117, 69,
117, 69, 117, 70, 71, 90, 21, 104, 37, 117, 69, 0,
26, 96, 61, 22, 0, 8, 37, 240, 37, 240, 37, 240,
37, 240, 37, 240, 37, 240, 37, 240, 37, 240, 37, 240,
37, 54, 133, 25, 127, 1, 102, 86, 85, 117, 85, 133,
69, 133, 69, 133, 69, 133, 69, 133, 69, 133, 69, 133,
69, 133, 69, 117, 86, 86, 95, 1, 101, 25, 117, 38,
0, 28, 80, 30, 20, 0, 30, 88, 172, 126, 86, 83,
101, 129, 85, 245, 245, 245, 245, 245, 245, 245, 240, 21,
130, 86, 84, 110, 124, 168, 0, 25, 112, 59, 22, 0,
9, 117, 240, 37, 240, 37, 240, 37, 240, 37, 240, 37,
240, 37, 240, 37, 240, 37, 150, 37, 121, 21, 111, 1,
86, 86, 85, 117, 69, 133, 69, 133, 69, 133, 69, 133,
69, 133, 69, 133, 69, 133, 69, 133, 85, 117, 86, 71,
111, 1, 121, 21, 149, 53, 0, 28, 37, 21, 0, 32,
39, 202, 157, 133, 69, 101, 101, 84, 132, 69, 132, 69,
132, 79, 2, 79, 2, 69, 240, 21, 240, 21, 240, 37,
146, 86, 100, 111, 125, 183, 0, 27, 32, 35, 17, 0,
7, 23, 137, 122, 102, 65, 101, 197, 197, 197, 197, 170,
123, 106, 149, 197, 197, 197, 197, 197, 197, 197, 197, 197,
197, 197, 197, 197, 197, 0, 22, 64, 54, 24, 0, 36,
88, 67, 127, 2, 111, 3, 101, 69, 149, 101, 133, 101,
133, 101, 133, 101, 133, 101, 149, 69, 174, 188, 178, 24,
210, 240, 126, 175, 1, 159, 1, 111, 4, 84, 165, 68,
196, 68, 196, 69, 180, 85, 134, 111, 2, 143, 186, 0,
7, 16, 61, 21, 0, 7, 117, 240, 21, 240, 21, 240,
21, 240, 21, 240, 21, 240, 21, 240, 21, 240, 21, 240,
21, 70, 101, 41, 85, 27, 71, 70, 70, 101, 70, 101,
69, 117, 69, 117, 69, 117, 69, 117, 69, 117, 69, 117,
69, 117, 69, 117, 69, 117, 69, 117, 69, 117, 69, 117,
0, 26, 96, 31, 9, 240, 197, 69, 69, 69, 69, 240,
240, 165, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69,
69, 69, 69, 69, 69, 69, 69, 0, 11, 96, 41, 15,
0, 6, 53, 150, 150, 150, 165, 0, 8, 101, 165, 165,
165, 165, 165, 165, 165, 165, 165, 165, 165, 165, 165, 165,
165, 165, 165, 165, 165, 165, 165, 65, 70, 74, 89, 134,
240, 240, 112, 56, 21, 0, 7, 117, 240, 21, 240, 21,
240, 21, 240, 21, 240, 21, 240, 21, 240, 21, 240, 21,
240, 21, 102, 69, 86, 85, 70, 101, 54, 117, 38, 133,
22, 155, 170, 187, 172, 156, 150, 37, 133, 54, 117, 70,
101, 85, 101, 101, 85, 102, 69, 117, 0, 26, 96, 33,
9, 240, 197, 69, 69, 69, 69, 69, 69, 69, 69, 69,
69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69,
69, 69, 69, 69, 69, 0, 11, 96, 61, 33, 0, 49,
69, 70, 102, 101, 41, 57, 85, 27, 27, 71, 72, 70,
70, 102, 101, 70, 102, 101, 69, 117, 117, 69, 117, 117,
69, 117, 117, 69, 117, 117, 69, 117, 117, 69, 117, 117,
69, 117, 117, 69, 117, 117, 69, 117, 117, 69, 117, 117,
69, 117, 117, 69, 117, 117, 0, 41, 96, 43, 21, 0,
31, 69, 70, 101, 41, 85, 27, 71, 70, 70, 101, 70,
101, 69, 117, 69, 117, 69, 117, 69, 117, 69, 117, 69,
117, 69, 117, 69, 117, 69, 117, 69, 117, 69, 117, 69,
117, 0, 26, 96, 36, 23, 0, 35, 39, 236, 159, 133,
86, 101, 117, 100, 149, 69, 149, 69, 149, 69, 149, 69,
149, 69, 149, 69, 149, 69, 149, 85, 117, 102, 86, 127,
157, 200, 0, 30, 58, 22, 0, 33, 5, 54, 133, 26,
111, 1, 102, 86, 85, 117, 85, 133, 69, 133, 69, 133,
69, 133, 69, 133, 69, 133, 69, 133, 69, 118, 69, 117,
86, 86, 95, 1, 101, 25, 117, 38, 149, 240, 37, 240,
37, 240, 37, 240, 37, 240, 37, 240, 37, 240, 37, 0,
7, 80, 58, 22, 0, 33, 86, 37, 121, 21, 111, 1,
86, 71, 85, 117, 70, 117, 69, 133, 69, 133, 69, 133,
69, 133, 69, 133, 69, 133, 70, 117, 85, 117, 86, 71,
111, 1, 121, 21, 149, 53, 240, 37, 240, 37, 240, 37,
240, 37, 240, 37, 240, 37, 240, 37, 240, 37, 0, 6,
28, 15, 0, 22, 69, 66, 69, 36, 69, 21, 69, 21,
72, 119, 134, 149, 165, 165, 165, 165, 165, 165, 165, 165,
165, 165, 0, 20, 29, 19, 0, 29, 8, 156, 109, 85,
84, 84, 130, 84, 246, 234, 156, 155, 170, 229, 244, 67,
132, 69, 85, 78, 108, 168, 0, 24, 80, 31, 18, 0,
16, 21, 213, 213, 213, 213, 188, 93, 108, 133, 213, 213,
213, 213, 213, 213, 213, 213, 213, 213, 214, 50, 123, 137,
166, 0, 23, 48, 43, 21, 0, 31, 69, 117, 69, 117,
69, 117, 69, 117, 69, 117, 69, 117, 69, 117, 69, 117,
69, 117, 69, 117, 69, 117, 69, 117, 69, 117, 69, 117,
69, 102, 70, 71, 90, 21, 118, 53, 0, 26, 96, 37,
22, 0, 33, 4, 164, 69, 133, 69, 133, 85, 101, 101,
101, 101, 101, 117, 69, 133, 69, 133, 69, 149, 37, 165,
37, 165, 37, 186, 202, 202, 216, 232, 246, 0, 28, 96,
59, 31, 0, 46, 68, 131, 132, 69, 101, 101, 69, 101,
101, 84, 101, 100, 101, 70, 100, 101, 71, 69, 101, 71,
69, 116, 66, 20, 68, 133, 35, 21, 37, 133, 35, 36,
37, 148, 35, 36, 36, 168, 52, 36, 168, 58, 168, 73,
183, 72, 198, 88, 198, 88, 213, 102, 0, 39, 112, 37,
22, 0, 33, 5, 132, 86, 101, 102, 69, 134, 37, 165,
22, 186, 216, 246, 240, 36, 240, 38, 248, 217, 212, 36,
181, 37, 149, 69, 117, 101, 85, 118, 69, 133, 0, 28,
50, 21, 0, 31, 68, 148, 69, 117, 84, 117, 85, 100,
101, 85, 117, 69, 117, 68, 149, 37, 149, 37, 164, 36,
186, 186, 200, 216, 230, 246, 240, 21, 240, 20, 240, 36,
240, 36, 240, 35, 240, 36, 178, 52, 201, 200, 214, 0,
7, 16, 28, 20, 0, 30, 31, 95, 95, 229, 229, 230,
214, 214, 229, 229, 230, 214, 214, 229, 229, 239, 1, 79,
1, 79, 1, 0, 25, 64, 13, 26, 0, 55, 47, 7,
79, 7, 79, 7, 0, 65, 64, 15, 47, 0, 99, 127,
15, 13, 79, 15, 13, 79, 15, 13, 0, 118, 19, 17,
0, 6, 69, 37, 84, 52, 85, 37, 84, 52, 99, 67,
115, 67, 0, 66, 96, 16, 18, 0, 7, 4, 83, 87,
51, 78, 67, 55, 83, 84, 0, 72, 96,
};
//*****************************************************************************
//
// The font definition for the 40 point Cmss bold font.
//
//*****************************************************************************
const tFont g_sFontCmss40b =
{
//
// The format of the font.
//
FONT_FMT_PIXEL_RLE,
//
// The maximum width of the font.
//
42,
//
// The height of the font.
//
40,
//
// The baseline of the font.
//
30,
//
// The offset to each character in the font.
//
{
0, 4, 36, 61, 165, 228, 329, 403,
418, 461, 504, 546, 611, 627, 637, 649,
732, 787, 821, 875, 924, 982, 1032, 1084,
1144, 1194, 1247, 1266, 1289, 1322, 1347, 1384,
1423, 1494, 1558, 1619, 1677, 1738, 1799, 1859,
1918, 1979, 2012, 2048, 2106, 2167, 2265, 2341,
2410, 2469, 2550, 2610, 2661, 2720, 2779, 2847,
2951, 3014, 3076, 3137, 3180, 3204, 3247, 3263,
3274, 3288, 3326, 3387, 3417, 3476, 3513, 3548,
3602, 3663, 3694, 3735, 3791, 3824, 3885, 3928,
3964, 4022, 4080, 4108, 4137, 4168, 4211, 4248,
4307, 4344, 4394, 4422, 4435, 4450, 4469,
},
//
// A pointer to the actual font data
//
g_pui8Cmss40bData
};
|
637763.c | /*
* Atmel MultiMedia Card Interface driver
*
* Copyright (C) 2004-2008 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio.h>
#include <linux/atmel-mci.h>
#include <linux/atmel_pdc.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/unaligned.h>
/*
* Superset of MCI IP registers integrated in Atmel AVR32 and AT91 Processors
* Registers and bitfields marked with [2] are only available in MCI2
*/
/* MCI Register Definitions */
#define ATMCI_CR 0x0000 /* Control */
#define ATMCI_CR_MCIEN BIT(0) /* MCI Enable */
#define ATMCI_CR_MCIDIS BIT(1) /* MCI Disable */
#define ATMCI_CR_PWSEN BIT(2) /* Power Save Enable */
#define ATMCI_CR_PWSDIS BIT(3) /* Power Save Disable */
#define ATMCI_CR_SWRST BIT(7) /* Software Reset */
#define ATMCI_MR 0x0004 /* Mode */
#define ATMCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */
#define ATMCI_MR_PWSDIV(x) ((x) << 8) /* Power Saving Divider */
#define ATMCI_MR_RDPROOF BIT(11) /* Read Proof */
#define ATMCI_MR_WRPROOF BIT(12) /* Write Proof */
#define ATMCI_MR_PDCFBYTE BIT(13) /* Force Byte Transfer */
#define ATMCI_MR_PDCPADV BIT(14) /* Padding Value */
#define ATMCI_MR_PDCMODE BIT(15) /* PDC-oriented Mode */
#define ATMCI_MR_CLKODD(x) ((x) << 16) /* LSB of Clock Divider */
#define ATMCI_DTOR 0x0008 /* Data Timeout */
#define ATMCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */
#define ATMCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */
#define ATMCI_SDCR 0x000c /* SD Card / SDIO */
#define ATMCI_SDCSEL_SLOT_A (0 << 0) /* Select SD slot A */
#define ATMCI_SDCSEL_SLOT_B (1 << 0) /* Select SD slot B */
#define ATMCI_SDCSEL_MASK (3 << 0)
#define ATMCI_SDCBUS_1BIT (0 << 6) /* 1-bit data bus */
#define ATMCI_SDCBUS_4BIT (2 << 6) /* 4-bit data bus */
#define ATMCI_SDCBUS_8BIT (3 << 6) /* 8-bit data bus[2] */
#define ATMCI_SDCBUS_MASK (3 << 6)
#define ATMCI_ARGR 0x0010 /* Command Argument */
#define ATMCI_CMDR 0x0014 /* Command */
#define ATMCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */
#define ATMCI_CMDR_RSPTYP_NONE (0 << 6) /* No response */
#define ATMCI_CMDR_RSPTYP_48BIT (1 << 6) /* 48-bit response */
#define ATMCI_CMDR_RSPTYP_136BIT (2 << 6) /* 136-bit response */
#define ATMCI_CMDR_SPCMD_INIT (1 << 8) /* Initialization command */
#define ATMCI_CMDR_SPCMD_SYNC (2 << 8) /* Synchronized command */
#define ATMCI_CMDR_SPCMD_INT (4 << 8) /* Interrupt command */
#define ATMCI_CMDR_SPCMD_INTRESP (5 << 8) /* Interrupt response */
#define ATMCI_CMDR_OPDCMD (1 << 11) /* Open Drain */
#define ATMCI_CMDR_MAXLAT_5CYC (0 << 12) /* Max latency 5 cycles */
#define ATMCI_CMDR_MAXLAT_64CYC (1 << 12) /* Max latency 64 cycles */
#define ATMCI_CMDR_START_XFER (1 << 16) /* Start data transfer */
#define ATMCI_CMDR_STOP_XFER (2 << 16) /* Stop data transfer */
#define ATMCI_CMDR_TRDIR_WRITE (0 << 18) /* Write data */
#define ATMCI_CMDR_TRDIR_READ (1 << 18) /* Read data */
#define ATMCI_CMDR_BLOCK (0 << 19) /* Single-block transfer */
#define ATMCI_CMDR_MULTI_BLOCK (1 << 19) /* Multi-block transfer */
#define ATMCI_CMDR_STREAM (2 << 19) /* MMC Stream transfer */
#define ATMCI_CMDR_SDIO_BYTE (4 << 19) /* SDIO Byte transfer */
#define ATMCI_CMDR_SDIO_BLOCK (5 << 19) /* SDIO Block transfer */
#define ATMCI_CMDR_SDIO_SUSPEND (1 << 24) /* SDIO Suspend Command */
#define ATMCI_CMDR_SDIO_RESUME (2 << 24) /* SDIO Resume Command */
#define ATMCI_BLKR 0x0018 /* Block */
#define ATMCI_BCNT(x) ((x) << 0) /* Data Block Count */
#define ATMCI_BLKLEN(x) ((x) << 16) /* Data Block Length */
#define ATMCI_CSTOR 0x001c /* Completion Signal Timeout[2] */
#define ATMCI_CSTOCYC(x) ((x) << 0) /* CST cycles */
#define ATMCI_CSTOMUL(x) ((x) << 4) /* CST multiplier */
#define ATMCI_RSPR 0x0020 /* Response 0 */
#define ATMCI_RSPR1 0x0024 /* Response 1 */
#define ATMCI_RSPR2 0x0028 /* Response 2 */
#define ATMCI_RSPR3 0x002c /* Response 3 */
#define ATMCI_RDR 0x0030 /* Receive Data */
#define ATMCI_TDR 0x0034 /* Transmit Data */
#define ATMCI_SR 0x0040 /* Status */
#define ATMCI_IER 0x0044 /* Interrupt Enable */
#define ATMCI_IDR 0x0048 /* Interrupt Disable */
#define ATMCI_IMR 0x004c /* Interrupt Mask */
#define ATMCI_CMDRDY BIT(0) /* Command Ready */
#define ATMCI_RXRDY BIT(1) /* Receiver Ready */
#define ATMCI_TXRDY BIT(2) /* Transmitter Ready */
#define ATMCI_BLKE BIT(3) /* Data Block Ended */
#define ATMCI_DTIP BIT(4) /* Data Transfer In Progress */
#define ATMCI_NOTBUSY BIT(5) /* Data Not Busy */
#define ATMCI_ENDRX BIT(6) /* End of RX Buffer */
#define ATMCI_ENDTX BIT(7) /* End of TX Buffer */
#define ATMCI_SDIOIRQA BIT(8) /* SDIO IRQ in slot A */
#define ATMCI_SDIOIRQB BIT(9) /* SDIO IRQ in slot B */
#define ATMCI_SDIOWAIT BIT(12) /* SDIO Read Wait Operation Status */
#define ATMCI_CSRCV BIT(13) /* CE-ATA Completion Signal Received */
#define ATMCI_RXBUFF BIT(14) /* RX Buffer Full */
#define ATMCI_TXBUFE BIT(15) /* TX Buffer Empty */
#define ATMCI_RINDE BIT(16) /* Response Index Error */
#define ATMCI_RDIRE BIT(17) /* Response Direction Error */
#define ATMCI_RCRCE BIT(18) /* Response CRC Error */
#define ATMCI_RENDE BIT(19) /* Response End Bit Error */
#define ATMCI_RTOE BIT(20) /* Response Time-Out Error */
#define ATMCI_DCRCE BIT(21) /* Data CRC Error */
#define ATMCI_DTOE BIT(22) /* Data Time-Out Error */
#define ATMCI_CSTOE BIT(23) /* Completion Signal Time-out Error */
#define ATMCI_BLKOVRE BIT(24) /* DMA Block Overrun Error */
#define ATMCI_DMADONE BIT(25) /* DMA Transfer Done */
#define ATMCI_FIFOEMPTY BIT(26) /* FIFO Empty Flag */
#define ATMCI_XFRDONE BIT(27) /* Transfer Done Flag */
#define ATMCI_ACKRCV BIT(28) /* Boot Operation Acknowledge Received */
#define ATMCI_ACKRCVE BIT(29) /* Boot Operation Acknowledge Error */
#define ATMCI_OVRE BIT(30) /* RX Overrun Error */
#define ATMCI_UNRE BIT(31) /* TX Underrun Error */
#define ATMCI_DMA 0x0050 /* DMA Configuration[2] */
#define ATMCI_DMA_OFFSET(x) ((x) << 0) /* DMA Write Buffer Offset */
#define ATMCI_DMA_CHKSIZE(x) ((x) << 4) /* DMA Channel Read and Write Chunk Size */
#define ATMCI_DMAEN BIT(8) /* DMA Hardware Handshaking Enable */
#define ATMCI_CFG 0x0054 /* Configuration[2] */
#define ATMCI_CFG_FIFOMODE_1DATA BIT(0) /* MCI Internal FIFO control mode */
#define ATMCI_CFG_FERRCTRL_COR BIT(4) /* Flow Error flag reset control mode */
#define ATMCI_CFG_HSMODE BIT(8) /* High Speed Mode */
#define ATMCI_CFG_LSYNC BIT(12) /* Synchronize on the last block */
#define ATMCI_WPMR 0x00e4 /* Write Protection Mode[2] */
#define ATMCI_WP_EN BIT(0) /* WP Enable */
#define ATMCI_WP_KEY (0x4d4349 << 8) /* WP Key */
#define ATMCI_WPSR 0x00e8 /* Write Protection Status[2] */
#define ATMCI_GET_WP_VS(x) ((x) & 0x0f)
#define ATMCI_GET_WP_VSRC(x) (((x) >> 8) & 0xffff)
#define ATMCI_VERSION 0x00FC /* Version */
#define ATMCI_FIFO_APERTURE 0x0200 /* FIFO Aperture[2] */
/* This does not include the FIFO Aperture on MCI2 */
#define ATMCI_REGS_SIZE 0x100
/* Register access macros */
#define atmci_readl(port, reg) \
__raw_readl((port)->regs + reg)
#define atmci_writel(port, reg, value) \
__raw_writel((value), (port)->regs + reg)
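/*
 * Usage sketch (editor's note, not part of the original driver): with these
 * macros a read-modify-write of the mode register looks like
 *
 *	u32 mr = atmci_readl(host, ATMCI_MR);
 *	atmci_writel(host, ATMCI_MR, mr | ATMCI_MR_PDCMODE);
 *
 * i.e. the "reg" argument is a byte offset such as ATMCI_MR.
 */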
/* On AVR chips the Peripheral DMA Controller is not connected to MCI. */
#ifdef CONFIG_AVR32
# define ATMCI_PDC_CONNECTED 0
#else
# define ATMCI_PDC_CONNECTED 1
#endif
#define AUTOSUSPEND_DELAY 50
#define ATMCI_DATA_ERROR_FLAGS (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
#define ATMCI_DMA_THRESHOLD 16
enum {
EVENT_CMD_RDY = 0,
EVENT_XFER_COMPLETE,
EVENT_NOTBUSY,
EVENT_DATA_ERROR,
};
enum atmel_mci_state {
STATE_IDLE = 0,
STATE_SENDING_CMD,
STATE_DATA_XFER,
STATE_WAITING_NOTBUSY,
STATE_SENDING_STOP,
STATE_END_REQUEST,
};
enum atmci_xfer_dir {
XFER_RECEIVE = 0,
XFER_TRANSMIT,
};
enum atmci_pdc_buf {
PDC_FIRST_BUF = 0,
PDC_SECOND_BUF,
};
struct atmel_mci_caps {
bool has_dma_conf_reg;
bool has_pdc;
bool has_cfg_reg;
bool has_cstor_reg;
bool has_highspeed;
bool has_rwproof;
bool has_odd_clk_div;
bool has_bad_data_ordering;
bool need_reset_after_xfer;
bool need_blksz_mul_4;
bool need_notbusy_for_read_ops;
};
struct atmel_mci_dma {
struct dma_chan *chan;
struct dma_async_tx_descriptor *data_desc;
};
/**
* struct atmel_mci - MMC controller state shared between all slots
* @lock: Spinlock protecting the queue and associated data.
* @regs: Pointer to MMIO registers.
* @sg: Scatterlist entry currently being processed by PIO or PDC code.
* @pio_offset: Offset into the current scatterlist entry.
* @buffer: Buffer used if we don't have the r/w proof capability. We
* don't have the time to switch pdc buffers so we have to use only
* one buffer for the full transaction.
* @buf_size: size of the buffer.
* @phys_buf_addr: buffer address needed for pdc.
* @cur_slot: The slot which is currently using the controller.
* @mrq: The request currently being processed on @cur_slot,
* or NULL if the controller is idle.
* @cmd: The command currently being sent to the card, or NULL.
* @data: The data currently being transferred, or NULL if no data
* transfer is in progress.
* @data_size: just data->blocks * data->blksz.
* @dma: DMA client state.
* @data_chan: DMA channel being used for the current data transfer.
* @cmd_status: Snapshot of SR taken upon completion of the current
* command. Only valid when EVENT_CMD_COMPLETE is pending.
* @data_status: Snapshot of SR taken upon completion of the current
* data transfer. Only valid when EVENT_DATA_COMPLETE or
* EVENT_DATA_ERROR is pending.
* @stop_cmdr: Value to be loaded into CMDR when the stop command is
* to be sent.
* @tasklet: Tasklet running the request state machine.
* @pending_events: Bitmask of events flagged by the interrupt handler
* to be processed by the tasklet.
* @completed_events: Bitmask of events which the state machine has
* processed.
* @state: Tasklet state.
* @queue: List of slots waiting for access to the controller.
* @need_clock_update: Update the clock rate before the next request.
* @need_reset: Reset controller before next request.
* @timer: Software timeout timer compensating for the data timeout error flag, which may never rise.
* @mode_reg: Value of the MR register.
* @cfg_reg: Value of the CFG register.
* @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
* rate and timeout calculations.
* @mapbase: Physical address of the MMIO registers.
* @mck: The peripheral bus clock hooked up to the MMC controller.
* @pdev: Platform device associated with the MMC controller.
* @slot: Slots sharing this MMC controller.
* @caps: MCI capabilities depending on MCI version.
* @prepare_data: function to setup MCI before data transfer which
* depends on MCI capabilities.
* @submit_data: function to start data transfer which depends on MCI
* capabilities.
* @stop_transfer: function to stop data transfer which depends on MCI
* capabilities.
*
* Locking
* =======
*
* @lock is a softirq-safe spinlock protecting @queue as well as
* @cur_slot, @mrq and @state. These must always be updated
* at the same time while holding @lock.
*
* @lock also protects mode_reg and need_clock_update since these are
* used to synchronize mode register updates with the queue
* processing.
*
* The @mrq field of struct atmel_mci_slot is also protected by @lock,
* and must always be written at the same time as the slot is added to
* @queue.
*
* @pending_events and @completed_events are accessed using atomic bit
* operations, so they don't need any locking.
*
* None of the fields touched by the interrupt handler need any
* locking. However, ordering is important: Before EVENT_DATA_ERROR or
* EVENT_DATA_COMPLETE is set in @pending_events, all data-related
* interrupts must be disabled and @data_status updated with a
* snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
* CMDRDY interrupt must be disabled and @cmd_status updated with a
* snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
* bytes_xfered field of @data must be written. This is ensured by
* using barriers.
*/
struct atmel_mci {
spinlock_t lock;
void __iomem *regs;
struct scatterlist *sg;
unsigned int sg_len;
unsigned int pio_offset;
unsigned int *buffer;
unsigned int buf_size;
dma_addr_t buf_phys_addr;
struct atmel_mci_slot *cur_slot;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
unsigned int data_size;
struct atmel_mci_dma dma;
struct dma_chan *data_chan;
struct dma_slave_config dma_conf;
u32 cmd_status;
u32 data_status;
u32 stop_cmdr;
struct tasklet_struct tasklet;
unsigned long pending_events;
unsigned long completed_events;
enum atmel_mci_state state;
struct list_head queue;
bool need_clock_update;
bool need_reset;
struct timer_list timer;
u32 mode_reg;
u32 cfg_reg;
unsigned long bus_hz;
unsigned long mapbase;
struct clk *mck;
struct platform_device *pdev;
struct atmel_mci_slot *slot[ATMCI_MAX_NR_SLOTS];
struct atmel_mci_caps caps;
u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
void (*stop_transfer)(struct atmel_mci *host);
};
/**
* struct atmel_mci_slot - MMC slot state
* @mmc: The mmc_host representing this slot.
* @host: The MMC controller this slot is using.
* @sdc_reg: Value of SDCR to be written before using this slot.
* @sdio_irq: SDIO irq mask for this slot.
* @mrq: mmc_request currently being processed or waiting to be
* processed, or NULL when the slot is idle.
* @queue_node: List node for placing this node in the @queue list of
* &struct atmel_mci.
* @clock: Clock rate configured by set_ios(). Protected by host->lock.
* @flags: Random state bits associated with the slot.
* @detect_pin: GPIO pin used for card detection, or negative if not
* available.
* @wp_pin: GPIO pin used for card write protect sensing, or negative
* if not available.
* @detect_is_active_high: The state of the detect pin when it is active.
* @detect_timer: Timer used for debouncing @detect_pin interrupts.
*/
struct atmel_mci_slot {
struct mmc_host *mmc;
struct atmel_mci *host;
u32 sdc_reg;
u32 sdio_irq;
struct mmc_request *mrq;
struct list_head queue_node;
unsigned int clock;
unsigned long flags;
#define ATMCI_CARD_PRESENT 0
#define ATMCI_CARD_NEED_INIT 1
#define ATMCI_SHUTDOWN 2
int detect_pin;
int wp_pin;
bool detect_is_active_high;
struct timer_list detect_timer;
};
#define atmci_test_and_clear_pending(host, event) \
test_and_clear_bit(event, &host->pending_events)
#define atmci_set_completed(host, event) \
set_bit(event, &host->completed_events)
#define atmci_set_pending(host, event) \
set_bit(event, &host->pending_events)
/*
* The debugfs stuff below is mostly optimized away when
* CONFIG_DEBUG_FS is not set.
*/
static int atmci_req_show(struct seq_file *s, void *v)
{
struct atmel_mci_slot *slot = s->private;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_command *stop;
struct mmc_data *data;
/* Make sure we get a consistent snapshot */
spin_lock_bh(&slot->host->lock);
mrq = slot->mrq;
if (mrq) {
cmd = mrq->cmd;
data = mrq->data;
stop = mrq->stop;
if (cmd)
seq_printf(s,
"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
cmd->opcode, cmd->arg, cmd->flags,
cmd->resp[0], cmd->resp[1], cmd->resp[2],
cmd->resp[3], cmd->error);
if (data)
seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
data->bytes_xfered, data->blocks,
data->blksz, data->flags, data->error);
if (stop)
seq_printf(s,
"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
stop->opcode, stop->arg, stop->flags,
stop->resp[0], stop->resp[1], stop->resp[2],
stop->resp[3], stop->error);
}
spin_unlock_bh(&slot->host->lock);
return 0;
}
static int atmci_req_open(struct inode *inode, struct file *file)
{
return single_open(file, atmci_req_show, inode->i_private);
}
static const struct file_operations atmci_req_fops = {
.owner = THIS_MODULE,
.open = atmci_req_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void atmci_show_status_reg(struct seq_file *s,
const char *regname, u32 value)
{
static const char *sr_bit[] = {
[0] = "CMDRDY",
[1] = "RXRDY",
[2] = "TXRDY",
[3] = "BLKE",
[4] = "DTIP",
[5] = "NOTBUSY",
[6] = "ENDRX",
[7] = "ENDTX",
[8] = "SDIOIRQA",
[9] = "SDIOIRQB",
[12] = "SDIOWAIT",
[14] = "RXBUFF",
[15] = "TXBUFE",
[16] = "RINDE",
[17] = "RDIRE",
[18] = "RCRCE",
[19] = "RENDE",
[20] = "RTOE",
[21] = "DCRCE",
[22] = "DTOE",
[23] = "CSTOE",
[24] = "BLKOVRE",
[25] = "DMADONE",
[26] = "FIFOEMPTY",
[27] = "XFRDONE",
[30] = "OVRE",
[31] = "UNRE",
};
unsigned int i;
seq_printf(s, "%s:\t0x%08x", regname, value);
for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
if (value & (1 << i)) {
if (sr_bit[i])
seq_printf(s, " %s", sr_bit[i]);
else
seq_puts(s, " UNKNOWN");
}
}
seq_putc(s, '\n');
}
static int atmci_regs_show(struct seq_file *s, void *v)
{
struct atmel_mci *host = s->private;
u32 *buf;
int ret = 0;
buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
pm_runtime_get_sync(&host->pdev->dev);
/*
* Grab a more or less consistent snapshot. Note that we're
* not disabling interrupts, so IMR and SR may not be
* consistent.
*/
spin_lock_bh(&host->lock);
memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
spin_unlock_bh(&host->lock);
pm_runtime_mark_last_busy(&host->pdev->dev);
pm_runtime_put_autosuspend(&host->pdev->dev);
seq_printf(s, "MR:\t0x%08x%s%s ",
buf[ATMCI_MR / 4],
buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "");
if (host->caps.has_odd_clk_div)
seq_printf(s, "{CLKDIV,CLKODD}=%u\n",
((buf[ATMCI_MR / 4] & 0xff) << 1)
| ((buf[ATMCI_MR / 4] >> 16) & 1));
else
seq_printf(s, "CLKDIV=%u\n",
(buf[ATMCI_MR / 4] & 0xff));
seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
buf[ATMCI_BLKR / 4],
buf[ATMCI_BLKR / 4] & 0xffff,
(buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
if (host->caps.has_cstor_reg)
seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);
/* Don't read RSPR and RDR; it will consume the data there */
atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);
if (host->caps.has_dma_conf_reg) {
u32 val;
val = buf[ATMCI_DMA / 4];
seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
val, val & 3,
((val >> 4) & 3) ?
1 << (((val >> 4) & 3) + 1) : 1,
val & ATMCI_DMAEN ? " DMAEN" : "");
}
if (host->caps.has_cfg_reg) {
u32 val;
val = buf[ATMCI_CFG / 4];
seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
val,
val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
val & ATMCI_CFG_HSMODE ? " HSMODE" : "",
val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
}
kfree(buf);
return ret;
}
static int atmci_regs_open(struct inode *inode, struct file *file)
{
return single_open(file, atmci_regs_show, inode->i_private);
}
static const struct file_operations atmci_regs_fops = {
.owner = THIS_MODULE,
.open = atmci_regs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void atmci_init_debugfs(struct atmel_mci_slot *slot)
{
struct mmc_host *mmc = slot->mmc;
struct atmel_mci *host = slot->host;
struct dentry *root;
struct dentry *node;
root = mmc->debugfs_root;
if (!root)
return;
node = debugfs_create_file("regs", S_IRUSR, root, host,
&atmci_regs_fops);
if (IS_ERR(node))
return;
if (!node)
goto err;
node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
if (!node)
goto err;
node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
if (!node)
goto err;
node = debugfs_create_x32("pending_events", S_IRUSR, root,
(u32 *)&host->pending_events);
if (!node)
goto err;
node = debugfs_create_x32("completed_events", S_IRUSR, root,
(u32 *)&host->completed_events);
if (!node)
goto err;
return;
err:
dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#if defined(CONFIG_OF)
static const struct of_device_id atmci_dt_ids[] = {
{ .compatible = "atmel,hsmci" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmci_dt_ids);
static struct mci_platform_data*
atmci_of_init(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *cnp;
struct mci_platform_data *pdata;
u32 slot_id;
if (!np) {
dev_err(&pdev->dev, "device node not found\n");
return ERR_PTR(-EINVAL);
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev, "could not allocate memory for pdata\n");
return ERR_PTR(-ENOMEM);
}
for_each_child_of_node(np, cnp) {
if (of_property_read_u32(cnp, "reg", &slot_id)) {
dev_warn(&pdev->dev, "reg property is missing for %s\n",
cnp->full_name);
continue;
}
if (slot_id >= ATMCI_MAX_NR_SLOTS) {
dev_warn(&pdev->dev, "can't have more than %d slots\n",
ATMCI_MAX_NR_SLOTS);
break;
}
if (of_property_read_u32(cnp, "bus-width",
&pdata->slot[slot_id].bus_width))
pdata->slot[slot_id].bus_width = 1;
pdata->slot[slot_id].detect_pin =
of_get_named_gpio(cnp, "cd-gpios", 0);
pdata->slot[slot_id].detect_is_active_high =
of_property_read_bool(cnp, "cd-inverted");
pdata->slot[slot_id].non_removable =
of_property_read_bool(cnp, "non-removable");
pdata->slot[slot_id].wp_pin =
of_get_named_gpio(cnp, "wp-gpios", 0);
}
return pdata;
}
#else /* CONFIG_OF */
static inline struct mci_platform_data*
atmci_of_init(struct platform_device *dev)
{
return ERR_PTR(-EINVAL);
}
#endif
static inline unsigned int atmci_get_version(struct atmel_mci *host)
{
return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
}
/*
* Fix sconfig's burst size according to atmel MCI. We need to convert them as:
* 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
* With version 0x600, we need to convert them as: 1 -> 0, 2 -> 1, 4 -> 2,
* 8 -> 3, 16 -> 4.
*
* This can be done by finding the most significant bit set.
*/
static inline unsigned int atmci_convert_chksize(struct atmel_mci *host,
unsigned int maxburst)
{
unsigned int version = atmci_get_version(host);
unsigned int offset = 2;
if (version >= 0x600)
offset = 1;
if (maxburst > 1)
return fls(maxburst) - offset;
else
return 0;
}
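/*
 * Worked example (editor's sketch, not part of the original driver):
 * for an MCI version below 0x600, a maxburst of 8 gives
 * fls(8) - 2 = 4 - 2 = 2, matching the 8 -> 2 mapping above; for
 * version 0x600 and later the same maxburst gives fls(8) - 1 = 3
 * (8 -> 3). A maxburst of 1 always maps to chunk size 0.
 */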
static void atmci_timeout_timer(unsigned long data)
{
struct atmel_mci *host;
host = (struct atmel_mci *)data;
dev_dbg(&host->pdev->dev, "software timeout\n");
if (host->mrq->cmd->data) {
host->mrq->cmd->data->error = -ETIMEDOUT;
host->data = NULL;
/*
* With some SDIO modules, the DMA transfer sometimes hangs. If
* stop_transfer() is not called, the DMA request is not removed and
* the following ones are queued but never processed.
*/
if (host->state == STATE_DATA_XFER)
host->stop_transfer(host);
} else {
host->mrq->cmd->error = -ETIMEDOUT;
host->cmd = NULL;
}
host->need_reset = 1;
host->state = STATE_END_REQUEST;
smp_wmb();
tasklet_schedule(&host->tasklet);
}
static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
unsigned int ns)
{
/*
* It is easier here to use us instead of ns for the timeout;
* it prevents overflows during the calculation.
*/
unsigned int us = DIV_ROUND_UP(ns, 1000);
/* Maximum clock frequency is host->bus_hz/2 */
return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
}
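/*
 * Worked example (editor's sketch): with bus_hz = 50 MHz the card clock
 * is at most 25 MHz, i.e. 25 clocks per microsecond, so a 100000 ns
 * timeout becomes DIV_ROUND_UP(100000, 1000) *
 * DIV_ROUND_UP(50000000, 2000000) = 100 * 25 = 2500 clock cycles.
 */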
static void atmci_set_timeout(struct atmel_mci *host,
struct atmel_mci_slot *slot, struct mmc_data *data)
{
static unsigned dtomul_to_shift[] = {
0, 4, 7, 8, 10, 12, 16, 20
};
unsigned timeout;
unsigned dtocyc;
unsigned dtomul;
timeout = atmci_ns_to_clocks(host, data->timeout_ns)
+ data->timeout_clks;
for (dtomul = 0; dtomul < 8; dtomul++) {
unsigned shift = dtomul_to_shift[dtomul];
dtocyc = (timeout + (1 << shift) - 1) >> shift;
if (dtocyc < 15)
break;
}
if (dtomul >= 8) {
dtomul = 7;
dtocyc = 15;
}
dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
dtocyc << dtomul_to_shift[dtomul]);
atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));
}
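/*
 * Worked example (editor's sketch): a 2500-cycle timeout does not fit in
 * DTOCYC alone (15 max), so the loop above selects dtomul = 3 (shift 8),
 * giving dtocyc = (2500 + 255) >> 8 = 10, i.e. an effective timeout of
 * 10 << 8 = 2560 cycles written to DTOR.
 */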
/*
* Return mask with command flags to be enabled for this command.
*/
static u32 atmci_prepare_command(struct mmc_host *mmc,
struct mmc_command *cmd)
{
struct mmc_data *data;
u32 cmdr;
cmd->error = -EINPROGRESS;
cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136)
cmdr |= ATMCI_CMDR_RSPTYP_136BIT;
else
cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
}
/*
* This should really be MAXLAT_5 for CMD2 and ACMD41, but
* it's too difficult to determine whether this is an ACMD or
* not. Better make it 64.
*/
cmdr |= ATMCI_CMDR_MAXLAT_64CYC;
if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
cmdr |= ATMCI_CMDR_OPDCMD;
data = cmd->data;
if (data) {
cmdr |= ATMCI_CMDR_START_XFER;
if (cmd->opcode == SD_IO_RW_EXTENDED) {
cmdr |= ATMCI_CMDR_SDIO_BLOCK;
} else {
if (data->flags & MMC_DATA_STREAM)
cmdr |= ATMCI_CMDR_STREAM;
else if (data->blocks > 1)
cmdr |= ATMCI_CMDR_MULTI_BLOCK;
else
cmdr |= ATMCI_CMDR_BLOCK;
}
if (data->flags & MMC_DATA_READ)
cmdr |= ATMCI_CMDR_TRDIR_READ;
}
return cmdr;
}
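/*
 * Illustrative note (editor's sketch): for a multi-block read such as
 * CMD18 with a 48-bit response on a push-pull bus, the mask built above
 * is ATMCI_CMDR_CMDNB(18) | ATMCI_CMDR_RSPTYP_48BIT |
 * ATMCI_CMDR_MAXLAT_64CYC | ATMCI_CMDR_START_XFER |
 * ATMCI_CMDR_MULTI_BLOCK | ATMCI_CMDR_TRDIR_READ.
 */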
static void atmci_send_command(struct atmel_mci *host,
struct mmc_command *cmd, u32 cmd_flags)
{
WARN_ON(host->cmd);
host->cmd = cmd;
dev_vdbg(&host->pdev->dev,
"start command: ARGR=0x%08x CMDR=0x%08x\n",
cmd->arg, cmd_flags);
atmci_writel(host, ATMCI_ARGR, cmd->arg);
atmci_writel(host, ATMCI_CMDR, cmd_flags);
}
static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
{
dev_dbg(&host->pdev->dev, "send stop command\n");
atmci_send_command(host, data->stop, host->stop_cmdr);
atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
}
/*
* Configure the given PDC buffer, taking care of alignment issues.
* Update host->data_size and host->sg.
*/
static void atmci_pdc_set_single_buf(struct atmel_mci *host,
enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
{
u32 pointer_reg, counter_reg;
unsigned int buf_size;
if (dir == XFER_RECEIVE) {
pointer_reg = ATMEL_PDC_RPR;
counter_reg = ATMEL_PDC_RCR;
} else {
pointer_reg = ATMEL_PDC_TPR;
counter_reg = ATMEL_PDC_TCR;
}
if (buf_nb == PDC_SECOND_BUF) {
pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
counter_reg += ATMEL_PDC_SCND_BUF_OFF;
}
if (!host->caps.has_rwproof) {
buf_size = host->buf_size;
atmci_writel(host, pointer_reg, host->buf_phys_addr);
} else {
buf_size = sg_dma_len(host->sg);
atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
}
if (host->data_size <= buf_size) {
if (host->data_size & 0x3) {
/* If the size is not a multiple of 4, transfer bytes */
atmci_writel(host, counter_reg, host->data_size);
atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
} else {
/* Else transfer 32-bit words */
atmci_writel(host, counter_reg, host->data_size / 4);
}
host->data_size = 0;
} else {
/* We assume the size of a page is 32-bit aligned */
atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
host->data_size -= sg_dma_len(host->sg);
if (host->data_size)
host->sg = sg_next(host->sg);
}
}
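/*
 * Illustrative note (editor's sketch): a 6-byte SDIO transfer would
 * program the PDC counter with 6 and force byte transfers via
 * ATMCI_MR_PDCFBYTE, whereas a 512-byte block programs 512 / 4 = 128
 * 32-bit words and leaves the mode register untouched.
 */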
/*
* Configure PDC buffer according to the data size ie configuring one or two
* buffers. Don't use this function if you want to configure only the second
* buffer. In this case, use atmci_pdc_set_single_buf.
*/
static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
{
atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
if (host->data_size)
atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
}
/*
* Unmap sg lists, called when transfer is finished.
*/
static void atmci_pdc_cleanup(struct atmel_mci *host)
{
struct mmc_data *data = host->data;
if (data)
dma_unmap_sg(&host->pdev->dev,
data->sg, data->sg_len,
((data->flags & MMC_DATA_WRITE)
? DMA_TO_DEVICE : DMA_FROM_DEVICE));
}
/*
* Disable PDC transfers. Update pending flags to EVENT_XFER_COMPLETE after
* having received ATMCI_TXBUFE or ATMCI_RXBUFF interrupt. Enable ATMCI_NOTBUSY
* interrupt needed for both transfer directions.
*/
static void atmci_pdc_complete(struct atmel_mci *host)
{
int transfer_size = host->data->blocks * host->data->blksz;
int i;
atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
if ((!host->caps.has_rwproof)
&& (host->data->flags & MMC_DATA_READ)) {
if (host->caps.has_bad_data_ordering)
for (i = 0; i < transfer_size; i++)
host->buffer[i] = swab32(host->buffer[i]);
sg_copy_from_buffer(host->data->sg, host->data->sg_len,
host->buffer, transfer_size);
}
atmci_pdc_cleanup(host);
dev_dbg(&host->pdev->dev, "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
tasklet_schedule(&host->tasklet);
}
static void atmci_dma_cleanup(struct atmel_mci *host)
{
struct mmc_data *data = host->data;
if (data)
dma_unmap_sg(host->dma.chan->device->dev,
data->sg, data->sg_len,
((data->flags & MMC_DATA_WRITE)
? DMA_TO_DEVICE : DMA_FROM_DEVICE));
}
/*
* This function is called by the DMA driver from tasklet context.
*/
static void atmci_dma_complete(void *arg)
{
struct atmel_mci *host = arg;
struct mmc_data *data = host->data;
dev_vdbg(&host->pdev->dev, "DMA complete\n");
if (host->caps.has_dma_conf_reg)
/* Disable DMA hardware handshaking on MCI */
atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);
atmci_dma_cleanup(host);
/*
* If the card was removed, data will be NULL. No point trying
* to send the stop command or waiting for NBUSY in this case.
*/
if (data) {
dev_dbg(&host->pdev->dev,
"(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
tasklet_schedule(&host->tasklet);
/*
* Regardless of what the documentation says, we have
* to wait for NOTBUSY even after block read
* operations.
*
* When the DMA transfer is complete, the controller
* may still be reading the CRC from the card, i.e.
* the data transfer is still in progress and we
* haven't seen all the potential error bits yet.
*
* The interrupt handler will schedule a different
* tasklet to finish things up when the data transfer
* is completely done.
*
* We may not complete the mmc request here anyway
* because the mmc layer may call back and cause us to
* violate the "don't submit new operations from the
* completion callback" rule of the dma engine
* framework.
*/
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
}
/*
* Returns a mask of interrupt flags to be enabled after the whole
* request has been prepared.
*/
static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
{
u32 iflags;
data->error = -EINPROGRESS;
host->sg = data->sg;
host->sg_len = data->sg_len;
host->data = data;
host->data_chan = NULL;
iflags = ATMCI_DATA_ERROR_FLAGS;
/*
* Errata: MMC data write operation with less than 12
* bytes is impossible.
*
* Errata: MCI Transmit Data Register (TDR) FIFO
* corruption when length is not multiple of 4.
*/
if (data->blocks * data->blksz < 12
|| (data->blocks * data->blksz) & 3)
host->need_reset = true;
host->pio_offset = 0;
if (data->flags & MMC_DATA_READ)
iflags |= ATMCI_RXRDY;
else
iflags |= ATMCI_TXRDY;
return iflags;
}
/*
* Set interrupt flags and set block length into the MCI mode register even
* if this value is also accessible in the MCI block register. It seems to be
* necessary before the High Speed MCI version. It also maps the sg list
* and configures the PDC registers.
*/
static u32
atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
{
u32 iflags, tmp;
unsigned int sg_len;
enum dma_data_direction dir;
int i;
data->error = -EINPROGRESS;
host->data = data;
host->sg = data->sg;
iflags = ATMCI_DATA_ERROR_FLAGS;
/* Enable pdc mode */
atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
if (data->flags & MMC_DATA_READ) {
dir = DMA_FROM_DEVICE;
iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
} else {
dir = DMA_TO_DEVICE;
iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
}
/* Set BLKLEN */
tmp = atmci_readl(host, ATMCI_MR);
tmp &= 0x0000ffff;
tmp |= ATMCI_BLKLEN(data->blksz);
atmci_writel(host, ATMCI_MR, tmp);
/* Configure PDC */
host->data_size = data->blocks * data->blksz;
sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
if ((!host->caps.has_rwproof)
&& (host->data->flags & MMC_DATA_WRITE)) {
sg_copy_to_buffer(host->data->sg, host->data->sg_len,
host->buffer, host->data_size);
if (host->caps.has_bad_data_ordering)
for (i = 0; i < host->data_size; i++)
host->buffer[i] = swab32(host->buffer[i]);
}
if (host->data_size)
atmci_pdc_set_both_buf(host,
((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
return iflags;
}
static u32
atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
struct dma_chan *chan;
struct dma_async_tx_descriptor *desc;
struct scatterlist *sg;
unsigned int i;
enum dma_data_direction direction;
enum dma_transfer_direction slave_dirn;
unsigned int sglen;
u32 maxburst;
u32 iflags;
data->error = -EINPROGRESS;
WARN_ON(host->data);
host->sg = NULL;
host->data = data;
iflags = ATMCI_DATA_ERROR_FLAGS;
/*
* We don't do DMA on "complex" transfers, i.e. with
* non-word-aligned buffers or lengths. Also, we don't bother
* with all the DMA setup overhead for short transfers.
*/
if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
return atmci_prepare_data(host, data);
if (data->blksz & 3)
return atmci_prepare_data(host, data);
for_each_sg(data->sg, sg, data->sg_len, i) {
if (sg->offset & 3 || sg->length & 3)
return atmci_prepare_data(host, data);
}
/* If we don't have a channel, we can't do DMA */
chan = host->dma.chan;
if (chan)
host->data_chan = chan;
if (!chan)
return -ENODEV;
if (data->flags & MMC_DATA_READ) {
direction = DMA_FROM_DEVICE;
host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
maxburst = atmci_convert_chksize(host,
host->dma_conf.src_maxburst);
} else {
direction = DMA_TO_DEVICE;
host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
maxburst = atmci_convert_chksize(host,
host->dma_conf.dst_maxburst);
}
if (host->caps.has_dma_conf_reg)
atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) |
ATMCI_DMAEN);
sglen = dma_map_sg(chan->device->dev, data->sg,
data->sg_len, direction);
dmaengine_slave_config(chan, &host->dma_conf);
desc = dmaengine_prep_slave_sg(chan,
data->sg, sglen, slave_dirn,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
goto unmap_exit;
host->dma.data_desc = desc;
desc->callback = atmci_dma_complete;
desc->callback_param = host;
return iflags;
unmap_exit:
dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
return -ENOMEM;
}
static void
atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
{
return;
}
/*
* Start PDC according to transfer direction.
*/
static void
atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
{
if (data->flags & MMC_DATA_READ)
atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
else
atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
}
static void
atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
struct dma_chan *chan = host->data_chan;
struct dma_async_tx_descriptor *desc = host->dma.data_desc;
if (chan) {
dmaengine_submit(desc);
dma_async_issue_pending(chan);
}
}
static void atmci_stop_transfer(struct atmel_mci *host)
{
dev_dbg(&host->pdev->dev,
"(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
/*
* Stop data transfer because error(s) occurred.
*/
static void atmci_stop_transfer_pdc(struct atmel_mci *host)
{
atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
}
static void atmci_stop_transfer_dma(struct atmel_mci *host)
{
struct dma_chan *chan = host->data_chan;
if (chan) {
dmaengine_terminate_all(chan);
atmci_dma_cleanup(host);
} else {
/* Data transfer was stopped by the interrupt handler */
dev_dbg(&host->pdev->dev,
"(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
}
/*
* Start a request: prepare data if needed, prepare the command and activate
* interrupts.
*/
static void atmci_start_request(struct atmel_mci *host,
struct atmel_mci_slot *slot)
{
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
u32 iflags;
u32 cmdflags;
mrq = slot->mrq;
host->cur_slot = slot;
host->mrq = mrq;
host->pending_events = 0;
host->completed_events = 0;
host->cmd_status = 0;
host->data_status = 0;
dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
if (host->need_reset || host->caps.need_reset_after_xfer) {
iflags = atmci_readl(host, ATMCI_IMR);
iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
atmci_writel(host, ATMCI_MR, host->mode_reg);
if (host->caps.has_cfg_reg)
atmci_writel(host, ATMCI_CFG, host->cfg_reg);
atmci_writel(host, ATMCI_IER, iflags);
host->need_reset = false;
}
atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);
iflags = atmci_readl(host, ATMCI_IMR);
if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
iflags);
if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
/* Send init sequence (74 clock cycles) */
atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT);
while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))
cpu_relax();
}
iflags = 0;
data = mrq->data;
if (data) {
atmci_set_timeout(host, slot, data);
/* Must set block count/size before sending command */
atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks)
| ATMCI_BLKLEN(data->blksz));
dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));
iflags |= host->prepare_data(host, data);
}
iflags |= ATMCI_CMDRDY;
cmd = mrq->cmd;
cmdflags = atmci_prepare_command(slot->mmc, cmd);
/*
* DMA transfer should be started before sending the command to avoid
* unexpected errors especially for read operations in SDIO mode.
* Unfortunately, in PDC mode, command has to be sent before starting
* the transfer.
*/
if (host->submit_data != &atmci_submit_data_dma)
atmci_send_command(host, cmd, cmdflags);
if (data)
host->submit_data(host, data);
if (host->submit_data == &atmci_submit_data_dma)
atmci_send_command(host, cmd, cmdflags);
if (mrq->stop) {
host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
if (!(data->flags & MMC_DATA_WRITE))
host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
if (data->flags & MMC_DATA_STREAM)
host->stop_cmdr |= ATMCI_CMDR_STREAM;
else
host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
}
/*
* We could have enabled interrupts earlier, but I suspect
* that would open up a nice can of interesting race
* conditions (e.g. command and data complete, but stop not
* prepared yet.)
*/
atmci_writel(host, ATMCI_IER, iflags);
mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
}
static void atmci_queue_request(struct atmel_mci *host,
struct atmel_mci_slot *slot, struct mmc_request *mrq)
{
dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
host->state);
spin_lock_bh(&host->lock);
slot->mrq = mrq;
if (host->state == STATE_IDLE) {
host->state = STATE_SENDING_CMD;
atmci_start_request(host, slot);
} else {
dev_dbg(&host->pdev->dev, "queue request\n");
list_add_tail(&slot->queue_node, &host->queue);
}
spin_unlock_bh(&host->lock);
}
static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct atmel_mci_slot *slot = mmc_priv(mmc);
struct atmel_mci *host = slot->host;
struct mmc_data *data;
WARN_ON(slot->mrq);
dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
pm_runtime_get_sync(&host->pdev->dev);
/*
* We may "know" the card is gone even though there's still an
* electrical connection. If so, we really need to communicate
* this to the MMC core since there won't be any more
* interrupts as the card is completely removed. Otherwise,
* the MMC core might believe the card is still there even
* though the card was just removed very slowly.
*/
if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
mrq->cmd->error = -ENOMEDIUM;
mmc_request_done(mmc, mrq);
return;
}
/* We don't support multiple blocks of weird lengths. */
data = mrq->data;
if (data && data->blocks > 1 && data->blksz & 3) {
mrq->cmd->error = -EINVAL;
mmc_request_done(mmc, mrq);
}
atmci_queue_request(host, slot, mrq);
}
static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct atmel_mci_slot *slot = mmc_priv(mmc);
struct atmel_mci *host = slot->host;
unsigned int i;
pm_runtime_get_sync(&host->pdev->dev);
slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
break;
case MMC_BUS_WIDTH_4:
slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
break;
}
if (ios->clock) {
unsigned int clock_min = ~0U;
int clkdiv;
spin_lock_bh(&host->lock);
if (!host->mode_reg) {
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
if (host->caps.has_cfg_reg)
atmci_writel(host, ATMCI_CFG, host->cfg_reg);
}
/*
* Use mirror of ios->clock to prevent race with mmc
* core ios update when finding the minimum.
*/
slot->clock = ios->clock;
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
if (host->slot[i] && host->slot[i]->clock
&& host->slot[i]->clock < clock_min)
clock_min = host->slot[i]->clock;
}
/* Calculate clock divider */
if (host->caps.has_odd_clk_div) {
clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
if (clkdiv < 0) {
dev_warn(&mmc->class_dev,
"clock %u too fast; using %lu\n",
clock_min, host->bus_hz / 2);
clkdiv = 0;
} else if (clkdiv > 511) {
dev_warn(&mmc->class_dev,
"clock %u too slow; using %lu\n",
clock_min, host->bus_hz / (511 + 2));
clkdiv = 511;
}
host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
| ATMCI_MR_CLKODD(clkdiv & 1);
} else {
clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
if (clkdiv > 255) {
dev_warn(&mmc->class_dev,
"clock %u too slow; using %lu\n",
clock_min, host->bus_hz / (2 * 256));
clkdiv = 255;
}
host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
}
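/*
 * Worked example (editor's sketch): with bus_hz = 132 MHz and
 * clock_min = 25 MHz, the odd-divider formula gives
 * clkdiv = DIV_ROUND_UP(132000000, 25000000) - 2 = 4, i.e.
 * 132 MHz / (4 + 2) = 22 MHz; the legacy formula gives
 * clkdiv = DIV_ROUND_UP(132000000, 2 * 25000000) - 1 = 2, i.e.
 * 132 MHz / (2 * (2 + 1)) = 22 MHz as well.
 */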
/*
* WRPROOF and RDPROOF prevent overruns/underruns by
* stopping the clock when the FIFO is full/empty.
* This state is not expected to last for long.
*/
if (host->caps.has_rwproof)
host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);
if (host->caps.has_cfg_reg) {
/* set up High Speed mode according to the card's capabilities */
if (ios->timing == MMC_TIMING_SD_HS)
host->cfg_reg |= ATMCI_CFG_HSMODE;
else
host->cfg_reg &= ~ATMCI_CFG_HSMODE;
}
if (list_empty(&host->queue)) {
atmci_writel(host, ATMCI_MR, host->mode_reg);
if (host->caps.has_cfg_reg)
atmci_writel(host, ATMCI_CFG, host->cfg_reg);
} else {
host->need_clock_update = true;
}
spin_unlock_bh(&host->lock);
} else {
bool any_slot_active = false;
spin_lock_bh(&host->lock);
slot->clock = 0;
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
if (host->slot[i] && host->slot[i]->clock) {
any_slot_active = true;
break;
}
}
if (!any_slot_active) {
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
if (host->mode_reg) {
atmci_readl(host, ATMCI_MR);
}
host->mode_reg = 0;
}
spin_unlock_bh(&host->lock);
}
switch (ios->power_mode) {
case MMC_POWER_OFF:
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
break;
case MMC_POWER_UP:
set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
break;
default:
/*
* TODO: None of the currently available AVR32-based
* boards allow MMC power to be turned off. Implement
* power control when this can be tested properly.
*
* We also need to hook this into the clock management
* somehow so that newly inserted cards aren't
* subjected to a fast clock before we have a chance
* to figure out what the maximum rate is. Currently,
* there's no way to avoid this, and there never will
* be for boards that don't support power control.
*/
break;
}
pm_runtime_mark_last_busy(&host->pdev->dev);
pm_runtime_put_autosuspend(&host->pdev->dev);
}
static int atmci_get_ro(struct mmc_host *mmc)
{
int read_only = -ENOSYS;
struct atmel_mci_slot *slot = mmc_priv(mmc);
if (gpio_is_valid(slot->wp_pin)) {
read_only = gpio_get_value(slot->wp_pin);
dev_dbg(&mmc->class_dev, "card is %s\n",
read_only ? "read-only" : "read-write");
}
return read_only;
}
static int atmci_get_cd(struct mmc_host *mmc)
{
int present = -ENOSYS;
struct atmel_mci_slot *slot = mmc_priv(mmc);
if (gpio_is_valid(slot->detect_pin)) {
present = !(gpio_get_value(slot->detect_pin) ^
slot->detect_is_active_high);
dev_dbg(&mmc->class_dev, "card is %spresent\n",
present ? "" : "not ");
}
return present;
}
static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct atmel_mci_slot *slot = mmc_priv(mmc);
struct atmel_mci *host = slot->host;
if (enable)
atmci_writel(host, ATMCI_IER, slot->sdio_irq);
else
atmci_writel(host, ATMCI_IDR, slot->sdio_irq);
}
static const struct mmc_host_ops atmci_ops = {
.request = atmci_request,
.set_ios = atmci_set_ios,
.get_ro = atmci_get_ro,
.get_cd = atmci_get_cd,
.enable_sdio_irq = atmci_enable_sdio_irq,
};
/* Called with host->lock held */
static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
__releases(&host->lock)
__acquires(&host->lock)
{
struct atmel_mci_slot *slot = NULL;
struct mmc_host *prev_mmc = host->cur_slot->mmc;
WARN_ON(host->cmd || host->data);
/*
* Update the MMC clock rate if necessary. This may be
* necessary if set_ios() is called when a different slot is
* busy transferring data.
*/
if (host->need_clock_update) {
atmci_writel(host, ATMCI_MR, host->mode_reg);
if (host->caps.has_cfg_reg)
atmci_writel(host, ATMCI_CFG, host->cfg_reg);
}
host->cur_slot->mrq = NULL;
host->mrq = NULL;
if (!list_empty(&host->queue)) {
slot = list_entry(host->queue.next,
struct atmel_mci_slot, queue_node);
list_del(&slot->queue_node);
dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
mmc_hostname(slot->mmc));
host->state = STATE_SENDING_CMD;
atmci_start_request(host, slot);
} else {
dev_vdbg(&host->pdev->dev, "list empty\n");
host->state = STATE_IDLE;
}
del_timer(&host->timer);
spin_unlock(&host->lock);
mmc_request_done(prev_mmc, mrq);
spin_lock(&host->lock);
pm_runtime_mark_last_busy(&host->pdev->dev);
pm_runtime_put_autosuspend(&host->pdev->dev);
}
static void atmci_command_complete(struct atmel_mci *host,
struct mmc_command *cmd)
{
u32 status = host->cmd_status;
/* Read the response from the card (up to 16 bytes) */
cmd->resp[0] = atmci_readl(host, ATMCI_RSPR);
cmd->resp[1] = atmci_readl(host, ATMCI_RSPR);
cmd->resp[2] = atmci_readl(host, ATMCI_RSPR);
cmd->resp[3] = atmci_readl(host, ATMCI_RSPR);
if (status & ATMCI_RTOE)
cmd->error = -ETIMEDOUT;
else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))
cmd->error = -EILSEQ;
else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
cmd->error = -EIO;
else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
if (host->caps.need_blksz_mul_4) {
cmd->error = -EINVAL;
host->need_reset = 1;
}
} else
cmd->error = 0;
}
static void atmci_detect_change(unsigned long data)
{
struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data;
bool present;
bool present_old;
/*
* atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
* freeing the interrupt. We must not re-enable the interrupt
* if it has been freed, and if we're shutting down, it
* doesn't really matter whether the card is present or not.
*/
smp_rmb();
if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
return;
enable_irq(gpio_to_irq(slot->detect_pin));
present = !(gpio_get_value(slot->detect_pin) ^
slot->detect_is_active_high);
present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
present, present_old);
if (present != present_old) {
struct atmel_mci *host = slot->host;
struct mmc_request *mrq;
dev_dbg(&slot->mmc->class_dev, "card %s\n",
present ? "inserted" : "removed");
spin_lock(&host->lock);
if (!present)
clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
else
set_bit(ATMCI_CARD_PRESENT, &slot->flags);
/* Clean up queue if present */
mrq = slot->mrq;
if (mrq) {
if (mrq == host->mrq) {
/*
* Reset controller to terminate any ongoing
* commands or data transfers.
*/
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
atmci_writel(host, ATMCI_MR, host->mode_reg);
if (host->caps.has_cfg_reg)
atmci_writel(host, ATMCI_CFG, host->cfg_reg);
host->data = NULL;
host->cmd = NULL;
switch (host->state) {
case STATE_IDLE:
break;
case STATE_SENDING_CMD:
mrq->cmd->error = -ENOMEDIUM;
if (mrq->data)
host->stop_transfer(host);
break;
case STATE_DATA_XFER:
mrq->data->error = -ENOMEDIUM;
host->stop_transfer(host);
break;
case STATE_WAITING_NOTBUSY:
mrq->data->error = -ENOMEDIUM;
break;
case STATE_SENDING_STOP:
mrq->stop->error = -ENOMEDIUM;
break;
case STATE_END_REQUEST:
break;
}
atmci_request_end(host, mrq);
} else {
list_del(&slot->queue_node);
mrq->cmd->error = -ENOMEDIUM;
if (mrq->data)
mrq->data->error = -ENOMEDIUM;
if (mrq->stop)
mrq->stop->error = -ENOMEDIUM;
spin_unlock(&host->lock);
mmc_request_done(slot->mmc, mrq);
spin_lock(&host->lock);
}
}
spin_unlock(&host->lock);
mmc_detect_change(slot->mmc, 0);
}
}
static void atmci_tasklet_func(unsigned long priv)
{
struct atmel_mci *host = (struct atmel_mci *)priv;
struct mmc_request *mrq = host->mrq;
struct mmc_data *data = host->data;
enum atmel_mci_state state = host->state;
enum atmel_mci_state prev_state;
u32 status;
spin_lock(&host->lock);
state = host->state;
dev_vdbg(&host->pdev->dev,
"tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
state, host->pending_events, host->completed_events,
atmci_readl(host, ATMCI_IMR));
do {
prev_state = state;
dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
switch (state) {
case STATE_IDLE:
break;
case STATE_SENDING_CMD:
/*
* The command has been sent and we are waiting for
* command ready. Three next states are then possible:
* END_REQUEST by default, WAITING_NOTBUSY if the
* command requires it, or DATA_XFER if there is data.
*/
dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_CMD_RDY))
break;
dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
host->cmd = NULL;
atmci_set_completed(host, EVENT_CMD_RDY);
atmci_command_complete(host, mrq->cmd);
if (mrq->data) {
dev_dbg(&host->pdev->dev,
"command with data transfer");
/*
* If there is a command error don't start
* data transfer.
*/
if (mrq->cmd->error) {
host->stop_transfer(host);
host->data = NULL;
atmci_writel(host, ATMCI_IDR,
ATMCI_TXRDY | ATMCI_RXRDY
| ATMCI_DATA_ERROR_FLAGS);
state = STATE_END_REQUEST;
} else
state = STATE_DATA_XFER;
} else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
dev_dbg(&host->pdev->dev,
"command response need waiting notbusy");
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
state = STATE_WAITING_NOTBUSY;
} else
state = STATE_END_REQUEST;
break;
case STATE_DATA_XFER:
if (atmci_test_and_clear_pending(host,
EVENT_DATA_ERROR)) {
dev_dbg(&host->pdev->dev, "set completed data error\n");
atmci_set_completed(host, EVENT_DATA_ERROR);
state = STATE_END_REQUEST;
break;
}
/*
* A data transfer is in progress. The event expected
* to move to the next state depends on the data transfer
* type (PDC or DMA). Once the transfer is done we can
* move to the next step, which is WAITING_NOTBUSY in the
* write case and directly SENDING_STOP in the read case.
*/
dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_XFER_COMPLETE))
break;
dev_dbg(&host->pdev->dev,
"(%s) set completed xfer complete\n",
__func__);
atmci_set_completed(host, EVENT_XFER_COMPLETE);
if (host->caps.need_notbusy_for_read_ops ||
(host->data->flags & MMC_DATA_WRITE)) {
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
state = STATE_WAITING_NOTBUSY;
} else if (host->mrq->stop) {
atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
atmci_send_stop_cmd(host, data);
state = STATE_SENDING_STOP;
} else {
host->data = NULL;
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
state = STATE_END_REQUEST;
}
break;
case STATE_WAITING_NOTBUSY:
/*
* We can be in this state for two reasons: a command
* that requires waiting for the not-busy signal (stop
* command included) or a write operation. In the latter
* case, we need to send a stop command.
*/
dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_NOTBUSY))
break;
dev_dbg(&host->pdev->dev, "set completed not busy\n");
atmci_set_completed(host, EVENT_NOTBUSY);
if (host->data) {
/*
* For some commands such as CMD53, even if
* there is data transfer, there is no stop
* command to send.
*/
if (host->mrq->stop) {
atmci_writel(host, ATMCI_IER,
ATMCI_CMDRDY);
atmci_send_stop_cmd(host, data);
state = STATE_SENDING_STOP;
} else {
host->data = NULL;
data->bytes_xfered = data->blocks
* data->blksz;
data->error = 0;
state = STATE_END_REQUEST;
}
} else
state = STATE_END_REQUEST;
break;
case STATE_SENDING_STOP:
/*
* In this state, it is important to set host->data to
* NULL (which is tested in the waiting notbusy state)
* in order to go to the end request state instead of
* sending stop again.
*/
dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_CMD_RDY))
break;
dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
host->cmd = NULL;
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
atmci_command_complete(host, mrq->stop);
if (mrq->stop->error) {
host->stop_transfer(host);
atmci_writel(host, ATMCI_IDR,
ATMCI_TXRDY | ATMCI_RXRDY
| ATMCI_DATA_ERROR_FLAGS);
state = STATE_END_REQUEST;
} else {
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
state = STATE_WAITING_NOTBUSY;
}
host->data = NULL;
break;
case STATE_END_REQUEST:
atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
| ATMCI_DATA_ERROR_FLAGS);
status = host->data_status;
if (unlikely(status)) {
host->stop_transfer(host);
host->data = NULL;
if (data) {
if (status & ATMCI_DTOE) {
data->error = -ETIMEDOUT;
} else if (status & ATMCI_DCRCE) {
data->error = -EILSEQ;
} else {
data->error = -EIO;
}
}
}
atmci_request_end(host, host->mrq);
state = STATE_IDLE;
break;
}
} while (state != prev_state);
host->state = state;
spin_unlock(&host->lock);
}
static void atmci_read_data_pio(struct atmel_mci *host)
{
struct scatterlist *sg = host->sg;
void *buf = sg_virt(sg);
unsigned int offset = host->pio_offset;
struct mmc_data *data = host->data;
u32 value;
u32 status;
unsigned int nbytes = 0;
do {
value = atmci_readl(host, ATMCI_RDR);
if (likely(offset + 4 <= sg->length)) {
put_unaligned(value, (u32 *)(buf + offset));
offset += 4;
nbytes += 4;
if (offset == sg->length) {
flush_dcache_page(sg_page(sg));
host->sg = sg = sg_next(sg);
host->sg_len--;
if (!sg || !host->sg_len)
goto done;
offset = 0;
buf = sg_virt(sg);
}
} else {
unsigned int remaining = sg->length - offset;
memcpy(buf + offset, &value, remaining);
nbytes += remaining;
flush_dcache_page(sg_page(sg));
host->sg = sg = sg_next(sg);
host->sg_len--;
if (!sg || !host->sg_len)
goto done;
offset = 4 - remaining;
buf = sg_virt(sg);
memcpy(buf, (u8 *)&value + remaining, offset);
nbytes += offset;
}
status = atmci_readl(host, ATMCI_SR);
if (status & ATMCI_DATA_ERROR_FLAGS) {
atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY
| ATMCI_DATA_ERROR_FLAGS));
host->data_status = status;
data->bytes_xfered += nbytes;
return;
}
} while (status & ATMCI_RXRDY);
host->pio_offset = offset;
data->bytes_xfered += nbytes;
return;
done:
atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
data->bytes_xfered += nbytes;
smp_wmb();
atmci_set_pending(host, EVENT_XFER_COMPLETE);
}
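/*
 * Worked example of the split-word handling above (added for illustration,
 * not from the original source): if the current scatterlist entry has only
 * 3 bytes left, the first 3 bytes of the 32-bit word read from RDR finish
 * that entry, the remaining byte is copied to the start of the next entry,
 * and the offset becomes 4 - 3 = 1 in the new buffer.
 */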
static void atmci_write_data_pio(struct atmel_mci *host)
{
struct scatterlist *sg = host->sg;
void *buf = sg_virt(sg);
unsigned int offset = host->pio_offset;
struct mmc_data *data = host->data;
u32 value;
u32 status;
unsigned int nbytes = 0;
do {
if (likely(offset + 4 <= sg->length)) {
value = get_unaligned((u32 *)(buf + offset));
atmci_writel(host, ATMCI_TDR, value);
offset += 4;
nbytes += 4;
if (offset == sg->length) {
host->sg = sg = sg_next(sg);
host->sg_len--;
if (!sg || !host->sg_len)
goto done;
offset = 0;
buf = sg_virt(sg);
}
} else {
unsigned int remaining = sg->length - offset;
value = 0;
memcpy(&value, buf + offset, remaining);
nbytes += remaining;
host->sg = sg = sg_next(sg);
host->sg_len--;
if (!sg || !host->sg_len) {
atmci_writel(host, ATMCI_TDR, value);
goto done;
}
offset = 4 - remaining;
buf = sg_virt(sg);
memcpy((u8 *)&value + remaining, buf, offset);
atmci_writel(host, ATMCI_TDR, value);
nbytes += offset;
}
status = atmci_readl(host, ATMCI_SR);
if (status & ATMCI_DATA_ERROR_FLAGS) {
atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
| ATMCI_DATA_ERROR_FLAGS));
host->data_status = status;
data->bytes_xfered += nbytes;
return;
}
} while (status & ATMCI_TXRDY);
host->pio_offset = offset;
data->bytes_xfered += nbytes;
return;
done:
atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
data->bytes_xfered += nbytes;
smp_wmb();
atmci_set_pending(host, EVENT_XFER_COMPLETE);
}
static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
{
int i;
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
struct atmel_mci_slot *slot = host->slot[i];
if (slot && (status & slot->sdio_irq)) {
mmc_signal_sdio_irq(slot->mmc);
}
}
}
static irqreturn_t atmci_interrupt(int irq, void *dev_id)
{
struct atmel_mci *host = dev_id;
u32 status, mask, pending;
unsigned int pass_count = 0;
do {
status = atmci_readl(host, ATMCI_SR);
mask = atmci_readl(host, ATMCI_IMR);
pending = status & mask;
if (!pending)
break;
if (pending & ATMCI_DATA_ERROR_FLAGS) {
dev_dbg(&host->pdev->dev, "IRQ: data error\n");
atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
| ATMCI_RXRDY | ATMCI_TXRDY
| ATMCI_ENDRX | ATMCI_ENDTX
| ATMCI_RXBUFF | ATMCI_TXBUFE);
host->data_status = status;
dev_dbg(&host->pdev->dev, "set pending data error\n");
smp_wmb();
atmci_set_pending(host, EVENT_DATA_ERROR);
tasklet_schedule(&host->tasklet);
}
if (pending & ATMCI_TXBUFE) {
dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
/*
* We can receive this interrupt before having configured
* the second PDC buffer, so we need to reconfigure the
* first and second buffers again.
*/
if (host->data_size) {
atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
} else {
atmci_pdc_complete(host);
}
} else if (pending & ATMCI_ENDTX) {
dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
if (host->data_size) {
atmci_pdc_set_single_buf(host,
XFER_TRANSMIT, PDC_SECOND_BUF);
atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
}
}
if (pending & ATMCI_RXBUFF) {
dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
/*
* We can receive this interrupt before having configured
* the second PDC buffer, so we need to reconfigure the
* first and second buffers again.
*/
if (host->data_size) {
atmci_pdc_set_both_buf(host, XFER_RECEIVE);
atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
} else {
atmci_pdc_complete(host);
}
} else if (pending & ATMCI_ENDRX) {
dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
if (host->data_size) {
atmci_pdc_set_single_buf(host,
XFER_RECEIVE, PDC_SECOND_BUF);
atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
}
}
/*
* The first MCI IPs, mainly the ones using the PDC, have
* issues with the notbusy signal: it is not raised after a
* data transmission unless a stop command has been sent.
* The appropriate workaround is to use the BLKE signal.
*/
if (pending & ATMCI_BLKE) {
dev_dbg(&host->pdev->dev, "IRQ: blke\n");
atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
smp_wmb();
dev_dbg(&host->pdev->dev, "set pending notbusy\n");
atmci_set_pending(host, EVENT_NOTBUSY);
tasklet_schedule(&host->tasklet);
}
if (pending & ATMCI_NOTBUSY) {
dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
smp_wmb();
dev_dbg(&host->pdev->dev, "set pending notbusy\n");
atmci_set_pending(host, EVENT_NOTBUSY);
tasklet_schedule(&host->tasklet);
}
if (pending & ATMCI_RXRDY)
atmci_read_data_pio(host);
if (pending & ATMCI_TXRDY)
atmci_write_data_pio(host);
if (pending & ATMCI_CMDRDY) {
dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
host->cmd_status = status;
smp_wmb();
dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
atmci_set_pending(host, EVENT_CMD_RDY);
tasklet_schedule(&host->tasklet);
}
if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
atmci_sdio_interrupt(host, status);
} while (pass_count++ < 5);
return pass_count ? IRQ_HANDLED : IRQ_NONE;
}
static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
{
struct atmel_mci_slot *slot = dev_id;
/*
* Disable interrupts until the pin has stabilized and check
* the state then. Use mod_timer() since we may be in the
* middle of the timer routine when this interrupt triggers.
*/
disable_irq_nosync(irq);
mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
return IRQ_HANDLED;
}
static int atmci_init_slot(struct atmel_mci *host,
struct mci_slot_pdata *slot_data, unsigned int id,
u32 sdc_reg, u32 sdio_irq)
{
struct mmc_host *mmc;
struct atmel_mci_slot *slot;
mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
if (!mmc)
return -ENOMEM;
slot = mmc_priv(mmc);
slot->mmc = mmc;
slot->host = host;
slot->detect_pin = slot_data->detect_pin;
slot->wp_pin = slot_data->wp_pin;
slot->detect_is_active_high = slot_data->detect_is_active_high;
slot->sdc_reg = sdc_reg;
slot->sdio_irq = sdio_irq;
dev_dbg(&mmc->class_dev,
"slot[%u]: bus_width=%u, detect_pin=%d, "
"detect_is_active_high=%s, wp_pin=%d\n",
id, slot_data->bus_width, slot_data->detect_pin,
slot_data->detect_is_active_high ? "true" : "false",
slot_data->wp_pin);
mmc->ops = &atmci_ops;
mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
mmc->f_max = host->bus_hz / 2;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
if (sdio_irq)
mmc->caps |= MMC_CAP_SDIO_IRQ;
if (host->caps.has_highspeed)
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
/*
* Without the read/write proof capability, it is strongly suggested to
* use a 1-bit data bus to prevent FIFO underruns and overruns,
* which would corrupt data.
*/
if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
mmc->caps |= MMC_CAP_4_BIT_DATA;
if (atmci_get_version(host) < 0x200) {
mmc->max_segs = 256;
mmc->max_blk_size = 4095;
mmc->max_blk_count = 256;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
} else {
mmc->max_segs = 64;
mmc->max_req_size = 32768 * 512;
mmc->max_blk_size = 32768;
mmc->max_blk_count = 512;
}
/* Assume card is present initially */
set_bit(ATMCI_CARD_PRESENT, &slot->flags);
if (gpio_is_valid(slot->detect_pin)) {
if (devm_gpio_request(&host->pdev->dev, slot->detect_pin,
"mmc_detect")) {
dev_dbg(&mmc->class_dev, "no detect pin available\n");
slot->detect_pin = -EBUSY;
} else if (gpio_get_value(slot->detect_pin) ^
slot->detect_is_active_high) {
clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
}
}
if (!gpio_is_valid(slot->detect_pin)) {
if (slot_data->non_removable)
mmc->caps |= MMC_CAP_NONREMOVABLE;
else
mmc->caps |= MMC_CAP_NEEDS_POLL;
}
if (gpio_is_valid(slot->wp_pin)) {
if (devm_gpio_request(&host->pdev->dev, slot->wp_pin,
"mmc_wp")) {
dev_dbg(&mmc->class_dev, "no WP pin available\n");
slot->wp_pin = -EBUSY;
}
}
host->slot[id] = slot;
mmc_regulator_get_supply(mmc);
mmc_add_host(mmc);
if (gpio_is_valid(slot->detect_pin)) {
int ret;
setup_timer(&slot->detect_timer, atmci_detect_change,
(unsigned long)slot);
ret = request_irq(gpio_to_irq(slot->detect_pin),
atmci_detect_interrupt,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"mmc-detect", slot);
if (ret) {
dev_dbg(&mmc->class_dev,
"could not request IRQ %d for detect pin\n",
gpio_to_irq(slot->detect_pin));
slot->detect_pin = -EBUSY;
}
}
atmci_init_debugfs(slot);
return 0;
}
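/*
 * Example of the clock limits computed above (an illustration, assuming a
 * 100 MHz bus clock): f_min = DIV_ROUND_UP(100000000, 512) = 195313 Hz and
 * f_max = 100000000 / 2 = 50 MHz, i.e. the extremes of the divider range
 * the driver will program.
 */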
static void atmci_cleanup_slot(struct atmel_mci_slot *slot,
unsigned int id)
{
/* Debugfs stuff is cleaned up by mmc core */
set_bit(ATMCI_SHUTDOWN, &slot->flags);
smp_wmb();
mmc_remove_host(slot->mmc);
if (gpio_is_valid(slot->detect_pin)) {
int pin = slot->detect_pin;
free_irq(gpio_to_irq(pin), slot);
del_timer_sync(&slot->detect_timer);
}
slot->host->slot[id] = NULL;
mmc_free_host(slot->mmc);
}
static int atmci_configure_dma(struct atmel_mci *host)
{
host->dma.chan = dma_request_slave_channel_reason(&host->pdev->dev,
"rxtx");
if (PTR_ERR(host->dma.chan) == -ENODEV) {
struct mci_platform_data *pdata = host->pdev->dev.platform_data;
dma_cap_mask_t mask;
if (!pdata->dma_filter)
return -ENODEV;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
host->dma.chan = dma_request_channel(mask, pdata->dma_filter,
pdata->dma_slave);
if (!host->dma.chan)
host->dma.chan = ERR_PTR(-ENODEV);
}
if (IS_ERR(host->dma.chan))
return PTR_ERR(host->dma.chan);
dev_info(&host->pdev->dev, "using %s for DMA transfers\n",
dma_chan_name(host->dma.chan));
host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
host->dma_conf.src_maxburst = 1;
host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
host->dma_conf.dst_maxburst = 1;
host->dma_conf.device_fc = false;
return 0;
}
/*
* The HSMCI (High Speed MCI) module is not fully compatible with the MCI
* module. HSMCI provides DMA support and a new config register, but no
* longer supports the PDC.
*/
static void atmci_get_cap(struct atmel_mci *host)
{
unsigned int version;
version = atmci_get_version(host);
dev_info(&host->pdev->dev,
"version: 0x%x\n", version);
host->caps.has_dma_conf_reg = 0;
host->caps.has_pdc = ATMCI_PDC_CONNECTED;
host->caps.has_cfg_reg = 0;
host->caps.has_cstor_reg = 0;
host->caps.has_highspeed = 0;
host->caps.has_rwproof = 0;
host->caps.has_odd_clk_div = 0;
host->caps.has_bad_data_ordering = 1;
host->caps.need_reset_after_xfer = 1;
host->caps.need_blksz_mul_4 = 1;
host->caps.need_notbusy_for_read_ops = 0;
/* keep only major version number */
switch (version & 0xf00) {
case 0x600:
case 0x500:
host->caps.has_odd_clk_div = 1;
case 0x400:
case 0x300:
host->caps.has_dma_conf_reg = 1;
host->caps.has_pdc = 0;
host->caps.has_cfg_reg = 1;
host->caps.has_cstor_reg = 1;
host->caps.has_highspeed = 1;
case 0x200:
host->caps.has_rwproof = 1;
host->caps.need_blksz_mul_4 = 0;
host->caps.need_notbusy_for_read_ops = 1;
case 0x100:
host->caps.has_bad_data_ordering = 0;
host->caps.need_reset_after_xfer = 0;
case 0x0:
break;
default:
host->caps.has_pdc = 0;
dev_warn(&host->pdev->dev,
"Unmanaged mci version, set minimum capabilities\n");
break;
}
}
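/*
 * Example of the capability fall-through above (added for illustration): a
 * controller reporting version 0x500 enters at case 0x500 and falls through
 * the 0x400/0x300, 0x200 and 0x100 cases, ending up with the odd clock
 * divider, DMA configuration register, CFG/CSTOR registers, high speed and
 * rwproof capabilities and no PDC, while a version 0x100 controller only
 * clears the bad-data-ordering and reset-after-transfer quirks.
 */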
static int atmci_probe(struct platform_device *pdev)
{
struct mci_platform_data *pdata;
struct atmel_mci *host;
struct resource *regs;
unsigned int nr_slots;
int irq;
int ret, i;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
return -ENXIO;
pdata = pdev->dev.platform_data;
if (!pdata) {
pdata = atmci_of_init(pdev);
if (IS_ERR(pdata)) {
dev_err(&pdev->dev, "platform data not available\n");
return PTR_ERR(pdata);
}
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
host->pdev = pdev;
spin_lock_init(&host->lock);
INIT_LIST_HEAD(&host->queue);
host->mck = devm_clk_get(&pdev->dev, "mci_clk");
if (IS_ERR(host->mck))
return PTR_ERR(host->mck);
host->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
if (!host->regs)
return -ENOMEM;
ret = clk_prepare_enable(host->mck);
if (ret)
return ret;
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
host->bus_hz = clk_get_rate(host->mck);
host->mapbase = regs->start;
tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);
ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
if (ret) {
clk_disable_unprepare(host->mck);
return ret;
}
/* Get MCI capabilities and set operations according to it */
atmci_get_cap(host);
ret = atmci_configure_dma(host);
if (ret == -EPROBE_DEFER)
goto err_dma_probe_defer;
if (ret == 0) {
host->prepare_data = &atmci_prepare_data_dma;
host->submit_data = &atmci_submit_data_dma;
host->stop_transfer = &atmci_stop_transfer_dma;
} else if (host->caps.has_pdc) {
dev_info(&pdev->dev, "using PDC\n");
host->prepare_data = &atmci_prepare_data_pdc;
host->submit_data = &atmci_submit_data_pdc;
host->stop_transfer = &atmci_stop_transfer_pdc;
} else {
dev_info(&pdev->dev, "using PIO\n");
host->prepare_data = &atmci_prepare_data;
host->submit_data = &atmci_submit_data;
host->stop_transfer = &atmci_stop_transfer;
}
platform_set_drvdata(pdev, host);
setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
/* We need at least one slot to succeed */
nr_slots = 0;
ret = -ENODEV;
if (pdata->slot[0].bus_width) {
ret = atmci_init_slot(host, &pdata->slot[0],
0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
if (!ret) {
nr_slots++;
host->buf_size = host->slot[0]->mmc->max_req_size;
}
}
if (pdata->slot[1].bus_width) {
ret = atmci_init_slot(host, &pdata->slot[1],
1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
if (!ret) {
nr_slots++;
if (host->slot[1]->mmc->max_req_size > host->buf_size)
host->buf_size =
host->slot[1]->mmc->max_req_size;
}
}
if (!nr_slots) {
dev_err(&pdev->dev, "init failed: no slot defined\n");
goto err_init_slot;
}
if (!host->caps.has_rwproof) {
host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
&host->buf_phys_addr,
GFP_KERNEL);
if (!host->buffer) {
ret = -ENOMEM;
dev_err(&pdev->dev, "buffer allocation failed\n");
goto err_dma_alloc;
}
}
dev_info(&pdev->dev,
"Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
host->mapbase, irq, nr_slots);
pm_runtime_mark_last_busy(&host->pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
err_dma_alloc:
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
if (host->slot[i])
atmci_cleanup_slot(host->slot[i], i);
}
err_init_slot:
clk_disable_unprepare(host->mck);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
del_timer_sync(&host->timer);
if (!IS_ERR(host->dma.chan))
dma_release_channel(host->dma.chan);
err_dma_probe_defer:
free_irq(irq, host);
return ret;
}
static int atmci_remove(struct platform_device *pdev)
{
struct atmel_mci *host = platform_get_drvdata(pdev);
unsigned int i;
pm_runtime_get_sync(&pdev->dev);
if (host->buffer)
dma_free_coherent(&pdev->dev, host->buf_size,
host->buffer, host->buf_phys_addr);
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
if (host->slot[i])
atmci_cleanup_slot(host->slot[i], i);
}
atmci_writel(host, ATMCI_IDR, ~0UL);
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
atmci_readl(host, ATMCI_SR);
del_timer_sync(&host->timer);
if (!IS_ERR(host->dma.chan))
dma_release_channel(host->dma.chan);
free_irq(platform_get_irq(pdev, 0), host);
clk_disable_unprepare(host->mck);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
return 0;
}
#ifdef CONFIG_PM
static int atmci_runtime_suspend(struct device *dev)
{
struct atmel_mci *host = dev_get_drvdata(dev);
clk_disable_unprepare(host->mck);
pinctrl_pm_select_sleep_state(dev);
return 0;
}
static int atmci_runtime_resume(struct device *dev)
{
struct atmel_mci *host = dev_get_drvdata(dev);
pinctrl_pm_select_default_state(dev);
return clk_prepare_enable(host->mck);
}
#endif
static const struct dev_pm_ops atmci_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(atmci_runtime_suspend, atmci_runtime_resume, NULL)
};
static struct platform_driver atmci_driver = {
.probe = atmci_probe,
.remove = atmci_remove,
.driver = {
.name = "atmel_mci",
.of_match_table = of_match_ptr(atmci_dt_ids),
.pm = &atmci_dev_pm_ops,
},
};
module_platform_driver(atmci_driver);
MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");
|
138068.c | #include <stdio.h>
#include <assert.h>
#define SW_TEST_ENVIRONMENT 0
#define SW_PROD_ENVIRONMENT 1
#define SW_ENVIRONMENT SW_TEST_ENVIRONMENT
int alertFailureCount = 0;
int networkAlertStub(float celcius) {
printf("ALERT: Temperature is %.1f celcius.\n", celcius);
// Return 200 for ok
// Return 500 for not-ok
// stub always succeeds and returns 200
if(celcius < 500)
return 200;
else
return 500;
}
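/*
 * Worked example of the conversion and threshold used below (illustration
 * only, not part of the original exercise): main() raises an alert for
 * 1050 degrees Fahrenheit, which converts to
 *   (1050 - 32) * 5 / 9 = 565.6 degrees Celsius,
 * so the stub returns the not-ok code 500 and one failure is counted.
 */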
void testalertInCelcius(float farenheit) {
float celcius = (farenheit - 32) * 5 / 9;
int returnCode = networkAlertStub(celcius);
static int countCheck = 0;
if(celcius < 500)
assert (returnCode == 200);
else
{
assert (returnCode == 500);
countCheck += 1;
}
if (returnCode != 200) {
// A non-ok response is not an error! Issues happen in life!
// Let us keep a count of failures to report.
// The increment below keeps that count; the stub above was altered
// so that out-of-range temperatures exercise this failure path.
alertFailureCount += 1;
}
}
void alertInCelcius(float farenheit) {
float celcius = (farenheit - 32) * 5 / 9;
int returnCode = networkAlertStub(celcius);
if (returnCode != 200) {
// A non-ok response is not an error! Issues happen in life!
// Let us keep a count of failures to report;
// the increment below keeps that count.
alertFailureCount += 1;
}
}
int main() {
#if (SW_ENVIRONMENT == SW_TEST_ENVIRONMENT)
testalertInCelcius(1050);
#else
alertInCelcius(1050);
#endif
printf("%d alerts failed.\n", alertFailureCount);
printf("All is well (maybe!)\n");
return 0;
}
|
610025.c | /*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
│vi: set net ft=c ts=8 sts=2 sw=2 fenc=utf-8 :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2020 Justine Alexandra Roberts Tunney │
│ │
│ Permission to use, copy, modify, and/or distribute this software for │
│ any purpose with or without fee is hereby granted, provided that the │
│ above copyright notice and this permission notice appear in all copies. │
│ │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/stdio/stdio.h"
static int PutsImpl(const char *s, FILE *f) {
size_t n, r;
if ((n = strlen(s))) {
r = fwrite_unlocked(s, 1, n, f);
if (!r) return -1;
if (r < n) return r;
}
if (fputc_unlocked('\n', f) == -1) {
if (feof_unlocked(f)) return n;
return -1;
}
return n + 1;
}
/**
* Writes string w/ trailing newline to stdout.
*
* @return non-negative number on success, or `EOF` on error with
* `errno` set and the `ferror(stdout)` state updated
*/
int puts(const char *s) {
FILE *f;
int bytes;
f = stdout;
flockfile(f);
bytes = PutsImpl(s, f);
funlockfile(f);
return bytes;
}
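/*
 * Usage sketch (added for illustration, not part of this file): puts()
 * reports errors through its return value, so a caller can do
 *
 *   if (puts("hello") == EOF) {
 *     perror("puts");
 *   }
 *
 * and on success receives the number of bytes written, including the
 * trailing newline (6 for "hello").
 */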
|
257681.c | //JP Bulman
/* Game of Life
*/
#include <stdio.h>
#include <stdlib.h>
#include "twoD.h"
/**
* Translates the x's in the given grid by a given x and y amount and puts the new board onto
* the new grid parameter
* @param rows The number of rows the grids have
* @param columns The number of columns the grids have
* @param xNum The number of x spaces to move the 'x's over
* @param yNum The number of y spaces to move the 'x's over
* @param originalGrid The original board that needs translating
* @param newGrid The new board that will have the translated version of the original board
* @return void, since the centered board is written onto 'newGrid', nothing is returned
*/
void translateGrid(int rows, int columns,int xNum,int yNum,
char** originalGrid,char** newGrid){
//I is a counter for the y coordinate
//Loop Invariant: Each iteration either moves an x to the needed space, or places a space
//if the cell is unoccupied
for(int i=0;i<rows;i++){
//J is a counter for the x coordinate
//Loop Invariant: Each iteration reads the original grid and writes to the new one based
//on what is in the cell
for(int j=0;j<columns;j++){
//If the current cell being looked at is an x
if(originalGrid[i][j]=='x'){
//Make the new and translated space an 'x'
newGrid[i+yNum][j+xNum]='x';
}
//This fills in the extra spaces and prevents overlap between x's and spaces
else if(newGrid[i][j]!='x'){newGrid[i][j]=' ';}
}
}
}
/**
* Centers the shape of x's on a board
* @param rows The number of rows in the grids
* @param columns The number of columns in the grids
* @param originalGrid the original not centered board
* @param newGrid The new board with a centered shape of x's
* @return void The function returns nothing because it writes the centered board onto newGrid
*/
void center(int rows, int columns,char** originalGrid,char** newGrid){
//X position as the method traverses through the board
int currXPos = 0;
//Y position as the method traverses through the board
int currYPos = 0;
//The farthest x point where an x has been found on the board
int largestXPos=0;
//The closest x point where an x has been found on the board
int minXPos=columns;
//The farthest y point where an x has been found on the board
int largestYPos=0;
//The closest y point where an x has been found on the board
int minYPos=rows;
//I is a counter for the y coordinate
//Loop Invariant: Checks if the newest position is an x and if it is further or closer
//than the current maxes and mins
for(int i=0;i<rows;i++){
//J is a counter for the x coordinate
//Loop Invariant: Checks if the newest position is an x and if it is further or closer
//than the current maxes and mins
for(int j=0;j<columns;j++){
//If an x is found on the original board
if(originalGrid[i][j]=='x'){
//Is the current x position farther out than the current max?
if(currXPos>largestXPos){
//Updates the max to the current position
largestXPos=currXPos;
}
//Sees if the x position is closer in than the current min
if(currXPos<minXPos){
//Updates the min
minXPos=currXPos;
}
//Checks to see if the current y is greater than the max y
if(currYPos>largestYPos){
//If so, it updates the max y to the current y
largestYPos=currYPos;
}
//If the current y is less than the current minimum
if(currYPos<minYPos){
//Updates the minimum to the current position
minYPos=currYPos;
}
}
//Go to the right one space for the next iteration
currXPos++;
}
//Go down one space after reaching the end of a row
currYPos++;
//Reset the x position to 0 once the end of the row has been reached
currXPos=0;
}
//Length of the shape of x's is the farthest x point minus the closest x point
int xLength = largestXPos-minXPos;
//Height of the shape of x's is the farthest y point minus the closest y point
int yLength = largestYPos-minYPos;
//The x distance that the x's need to be translated
int xToTranslate = 0;
//If the length of the empty space between the shape and the borders is divisible by 2,
//then make the translate distance the difference divided by 2
if((columns-xLength)%2==0){xToTranslate=((columns-xLength)/2);}
//Same as above, but subtracts one first so that the number is evenly divisible by 2
else{xToTranslate=(((columns-1)-xLength)/2);}
//The y distance that the x's need to be translated
int yToTranslate = 0;
//If the length of the empty space between the shape and the borders is divisible by 2,
//then make the translate distance the difference divided by 2
if((rows-yLength)%2==0){yToTranslate=((rows-yLength)/2);}
//Same as above, but subtracts one first so that the number is evenly divisible by 2
else{yToTranslate=(((rows-1)-yLength)/2);}
//Once the distances have been calculated, then translate the shape onto 'newGrid'
translateGrid(rows,columns,xToTranslate,yToTranslate,originalGrid,newGrid);
}
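/*
 * Worked example of the centering arithmetic above (illustration only):
 * on a 10-column board whose leftmost and rightmost x's sit in columns 0
 * and 2, xLength = 2 - 0 = 2. Since (10 - 2) % 2 == 0, the shape is moved
 * xToTranslate = (10 - 2) / 2 = 4 columns to the right; yToTranslate is
 * computed the same way from the number of rows.
 */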
/**
* Reads the grid from the file and initializes the array with the file's board
* @param input The file that contains the desired game of life to play
* @param rows The number of rows the game is to be played on
* @param columns The number of columns game is to be played on
* @param grid The array that needs to be filled with the game from the .txt
* @return 0 if the parameters are insufficient and 1 if the function works correctly
*/
int readGrid(FILE *input, int rows, int columns, char** grid) {
//The current character from the file
int s;
//The current number of maximum columns (changes when a row with more characters is found)
int currentNumberOfColumns = 0;
//Number of items that have been accounted for in the row
int currentNumberOfItemsInRow = 0;
//Total number of rows
int numberOfRows = 0;
//Analyzes the next character (as an it) in the file until it reaches the end
//Loop invariant: S is the next character (as an int) in the file and is checked to see what
//it is while keeping track of and updating column and row data
while ((s = fgetc(input)) != EOF) {
//If the users gives arguments that do not work with the given file
if (columns < currentNumberOfColumns||(numberOfRows > rows)) {
printf("Specified # of params is insufficient\n");
return 0;
}
//If the int is 'x' (ASCII #120)
if (s == 120) {
grid[numberOfRows][currentNumberOfItemsInRow] = 'x';
}
//If the int is 'o' (ASCII #111)
if (s == 111) {
grid[numberOfRows][currentNumberOfItemsInRow] = ' ';
}
//If the character is at the end of the line (\n is 10)
if (s == 10) {
//The end of a line increases the number of rows
numberOfRows++;
//If the row size is bigger than the current max row size
if (currentNumberOfItemsInRow > currentNumberOfColumns) {
currentNumberOfColumns = currentNumberOfItemsInRow;
}
//Resets the number of items in the row, as it is going on to the next row
currentNumberOfItemsInRow = 0;
} else {
//Go to the next char(int) and increase the count by 1
currentNumberOfItemsInRow++;
}
}
//Returns 1 as 'worked correctly'
return 1;
}
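/*
 * Reference for the character codes tested above (added for clarity):
 * 'x' is ASCII 120, 'o' is ASCII 111 and '\n' is ASCII 10, so an input
 * line such as "oxo" produces one occupied cell flanked by two blanks,
 * and the newline that follows it ends the row.
 */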
/**
* Gets the contents of a cell
* @param x The x coordinate of the desired cell
* @param y The y coordinate of the desired cell
* @param rows Number of rows in entire grid
* @param columns Number of columns in the entire grid
* @param grid The board to be analyzed
* @return Either a blank, if the space is outside the board, or the contents, if it is inside
*/
char getItem(int x, int y, int rows, int columns, char** grid) {
//If the coordinates are outside the given box then a blank is returned
//It is assumed that anything outside the board is a blank
if (x > columns - 1 || y > rows - 1 || y < 0 || x < 0) {
return ' ';
} else {
//The contents if it is inside
return grid[y][x];
}
}
/**
* Determines if all spaces in the grid are unoccupied
* @param rows The number of rows in the board
* @param columns The number of columns in the board
* @param grid The board to be checked
* @return 1 if none of the spaces are occupied, 0 if any are taken
*/
int isEveryoneDead(int rows, int columns, char** grid) {
//i is the counter for the y coordinate
//Loop Invariant: I is used as the y coordinate each time to check for the given char
for (int i = 0; i < rows; i++) {
//j is the counter for x
//Loop Invariant: J is used as the x coordinate each time to check for the given char
for (int j = 0; j < columns; j++) {
//Checks to see if the square is occupied
if (getItem(j, i, rows, columns, grid) == 'x') {
//If it is occupied, then not everyone is dead, return false
return 0;
}
}
}
//If it goes through all parts of the array and finds nothing, return true
return 1;
}
/**
* Determines if the two boards are equal in contents
* @param rows The number of rows in the grids
* @param columns The number of columns in the grids
* @param gridA The first board to be compared
* @param gridB The second board to be compared
* @return 1 if they are equal and 0 if they are not equal
*/
int boardsAreEqual(int rows, int columns, char** gridA, char** gridB) {
//I is the y coordinate
//Loop invariant: i is the y coordinate and the loop compares the contents each time
for (int i = 0; i < rows; i++) {
//Loop invariant: j is the x coordinate and the loop compares the contents each time
for (int j = 0; j < columns; j++) {
//If there is a mismatch between the items in the boards, then return 0 (false)
if (gridA[i][j] != gridB[i][j]) {
return 0;
}
}
}
//If all positions are gone through and none are mismatched, return 1 (true)
return 1;
}
/**
* Returns the number of occupied cells a space has next to it
* @param x The x coordinate
* @param y The y coordinate
* @param rows The number of rows in the board
* @param columns The number of columns in the board
* @param grid The board to be analyzed
* @return The number of occupied cells adjacent to the given coordinates
*/
int getNumberOfAliveNeighbors(int x, int y, int rows, int columns, char** grid) {
//Starts off with 0 neighbors
int currNumNeighbors = 0;
//Because of arrays and the current setup, x+1 will go one right, and vice versa
//BUT y+1 will go down because the array starts at the top left
//This checks cells starting from the top left neighbor and goes clockwise until it
//gets to the position of one before home
//If the top left is filled, add 1 to the number of neighbors
if (getItem(x - 1, y - 1, rows, columns, grid) == 'x') {
currNumNeighbors += 1;
}
//Top middle
if (getItem(x, y - 1, rows, columns, grid) == 'x') {
currNumNeighbors += 1;
}
//Top right
if (getItem(x + 1, y - 1, rows, columns, grid) == 'x') {
currNumNeighbors += 1;
}
//Right middle
if (getItem(x + 1, y, rows, columns, grid) == 'x') {
currNumNeighbors += 1;
}
//Bottom right
if (getItem(x + 1, y + 1, rows, columns, grid) == 'x') {
currNumNeighbors += 1;
}
//Bottom middle
if (getItem(x, y + 1, rows, columns, grid) == 'x') {
currNumNeighbors += 1;
}
//Bottom left
if (getItem(x - 1, y + 1, rows, columns, grid) == 'x') {
currNumNeighbors += 1;
}
//Left middle
if (getItem(x - 1, y, rows, columns, grid) == 'x') {
currNumNeighbors += 1;
}
//Returns number of adjacent occupied cells
return currNumNeighbors;
}
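/*
 * Neighborhood sketch (added for illustration): for a cell C at (x, y),
 * the eight positions checked above are laid out as
 *
 *   (x-1,y-1) (x,y-1) (x+1,y-1)
 *   (x-1,y)      C    (x+1,y)
 *   (x-1,y+1) (x,y+1) (x+1,y+1)
 *
 * where y grows downward because row 0 is the top of the array.
 */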
/**
* Prints the passed grid
* @param grid The grid to be printed
* @param rows The number of rows it should be printed with
* @param columns The number of columns it should be printed with
* @return void, it is a print method, so nothing is returned
*/
void printGrid(char** grid, int rows, int columns) {
//I is a counter for the y coordinate
//Loop invariant: I is plugged in as the y and the resulting contents are printed
//Line breaks occur each time the second loop ends
for (int i = 0; i < rows; i++) {
//J is a counter for the x coordinate
//Loop invariant: J is plugged in as the x and the contents are printed
for (int j = 0; j < columns; j++) {
printf("%c", grid[i][j]);
}
printf("\n");
}
}
/**
* Writes the next generation onto a grid
* @param originalGrid The grid to find the next generation of
* @param newGrid The board the next generation will be written on
* @param rows The number of rows the board has
* @param columns The number of columns the board has
* @return void, returns nothing since it is modifying variables
*/
void nextGeneration(char** originalGrid, char** newGrid, int rows, int columns) {
//I is a counter for the y coordinate
//Loop Invariant: The number of neighbors is checked for each cell to determine if it
//should live, die, or become an organism
for (int i = 0; i < rows; i++) {
//J is a counter for the x coordinate
//Loop Invariant: The number of neighbors is checked for each cell to determine if it
//should live, die, or become an organism
for (int j = 0; j < columns; j++) {
//If the cell is occupied
if (getItem(j, i, rows, columns, originalGrid) == 'x') {
//The number of neighbors is switched
int switcher = getNumberOfAliveNeighbors(j, i, rows, columns,originalGrid);
switch (switcher) {
//If the number of neighbors is 0,1,4,5,6,7,or 8, the cell dies
case 0:
case 1:
case 4:
case 5:
case 6:
case 7:
case 8:
//Organism dies and is replaced with a space
newGrid[i][j] = ' ';
break;
//If the organism has 2 or 3 neighbors, it lives
case 2:
case 3:
//Stays alive as an 'x'
newGrid[i][j] = 'x';
break;
//If for some reason there are more than 8 neighbors or less than 0
default:
newGrid[i][j] = 'k';
printf("Something went very, very, wrong");
}
}
//If the cell is unoccupied
else {
//Number of neighbors to the empty cell
int numberOfNs = getNumberOfAliveNeighbors(j, i, rows, columns,
originalGrid);
//If the cell has 3 neighbors, it will now become an organism
if (numberOfNs == 3) {
newGrid[i][j] = 'x';
}
//Otherwise, it stays empty
else {
newGrid[i][j] = ' ';
}
}
}
}
}
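/*
 * Rules recap with a tiny example (added for illustration): a live cell
 * survives with 2 or 3 live neighbors, and an empty cell with exactly 3
 * live neighbors becomes live, so a vertical blinker of three x's turns
 * into a horizontal row of three x's and flips back on the following
 * generation.
 */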
/** Main function.
* @param argc Number of words on the command line.
* @param argv Array of pointers to character strings containing the
* words on the command line.
* @return 0 if success, 1 if invalid command line or unable to open file.
*
*/
int main(int argc, char **argv) {
printf("Game of Life\n");
char *inputFileName; // Name of file containing initial grid
FILE *input; // Stream descriptor for file containing initial grid
int rows; // Number of rows in the grid
int columns; // Number of columns in the grid
int gens; // Number of generations to produce
int doPrint; // 1 if user wants to print each generation, 0 if not
int doPause; // 1 if user wants to pause after each generation, 0 if not
char **gridA; // A 2D array to hold the pattern
// See if there are the right number of arguments on the command line
if ((argc < 5) || (argc > 7)) {
// If not, tell the user what to enter.
printf("Usage:\n");
printf(" ./life rows columns generations inputFile [print] [pause]\n");
return EXIT_FAILURE;
}
/* Save the command-line arguments.
Also need to check if print and/or pause arguments were entered,
and if so, what they were.
A switch statement might be handy here.
*/
rows = atoi(argv[1]); // Convert from character string to integer.
columns = atoi(argv[2]);
gens = atoi(argv[3]);
inputFileName = argv[4];
//If at least one optional argument is presented
if (argc > 5) {
switch (*argv[5]) {
//If it is 'n' (ASCII # 110)
case 110:
//Set doPrint to 0 (false)
doPrint = 0;
break;
//If it is 'y' (ASCII # 121)
case 121:
//Set doPrint to 1 (true)
doPrint = 1;
break;
//If the user enters an arg besides 'y' or 'n'
default:
printf("Error, wrong args\n");
return 0;
}
//If both arguments are entered
if (argc > 6) {
switch (*argv[6]) {
//If it is 'n' (ASCII # 110)
case 110:
doPause = 0;
break;
//If it is 'y' (ASCII # 121)
case 121:
doPause = 1;
break;
//If the user enters arguments besides 'y' or 'n'
default:
printf("Error, wrong args\n");
return 0;
}
}
}
/* Here is how you would allocate an array to hold the grid.
*/
gridA = make2Dchar(rows, columns);
//Check that the allocation succeeded (make2Dchar is assumed to return NULL on failure)
if (!gridA) { printf("Unable to allocate the grid\n"); return EXIT_FAILURE; }
/* Eventually, need to try to open the input file.
*/
input = fopen(inputFileName, "r");
if (!input) {
printf("Unable to open input file: %s\n", inputFileName);
return EXIT_FAILURE;
}
//If the readGrid is unsuccessful, then exit
if (readGrid(input, rows, columns, gridA) == 0) {
return EXIT_FAILURE;
}
//Grid X is the board for the centered gridA to be written on
char** gridX;
gridX = make2Dchar(rows, columns);
//Writes a centered version of A onto X
center(rows,columns,gridA,gridX);
/*Once opened, you can read from the file one character at a time with fgetc().
* You can read one line at a time using fgets().
* You can read from standard input (the keyboard) with getchar().
*/
//Since the function uses gridA, gridA needs to be centered
gridA=gridX;
//Print out the initial state
printGrid(gridA, rows, columns);
//Make a second array for swapping to the next generation
char **gridB;
gridB = make2Dchar(rows, columns);
//Make a third array to keep track of previous generation for patterns
char **gridC;
gridC = make2Dchar(rows, columns);
//I is a counter for each generation of the game
//Loop Invariant: A new generation is made each time based on previous generation data.
//The loop checks for certain terminating conditions as it goes on (death, patterns, etc),
//and otherwise continues with the printing/pausing
for (int i = 0; i < gens; i++) {
//If all the spaces are empty, return and exit
if(isEveryoneDead(rows,columns,gridA)!=0){
printf("Everyone has died :( \n");return 0;
}
//Writes over gridB with the newest generation
nextGeneration(gridA, gridB, rows, columns);
//Checks for patterns in the previous generations, if one is found, then the program
//exits
if(boardsAreEqual(rows,columns,gridB,gridA)!=0||
boardsAreEqual(rows,columns,gridB,gridC)){
printf("Repeated Pattern Reached\n");
printGrid(gridB,rows,columns);
return 0;
}
//If the user wanted to pause between generations
if (doPause != 0) {
//For the next character
char enter;
printf("Please hit enter:");
//Gets the next character
scanf("%c", &enter);
}
//If the user wants the generations printed
if (doPrint != 0) {
printGrid(gridB, rows, columns);
}
//Even if the parameters are no, the final state should be printed
if(doPrint==0&&(i+1==gens)){printGrid(gridB,rows,columns);}
//Grid C moves back a generation
gridC=gridA;
//A gets promoted to what is the current generation
gridA=gridB;
//Grid B is reset for the next iteration
gridB = make2Dchar(rows,columns);
}
return EXIT_SUCCESS;
}
|
46830.c | /* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
* Copyright (c) 2007-2017, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
* \file main.c
* \brief Toplevel module. Handles signals, multiplexes between
* connections, implements main loop, and drives scheduled events.
*
* For the main loop itself, see run_main_loop_once(). It invokes the rest of
* Tor mostly through Libevent callbacks. Libevent callbacks can happen when
* a timer elapses, a signal is received, a socket is ready to read or write,
* or an event is manually activated.
*
* Most events in Tor are driven from these callbacks:
* <ul>
* <li>conn_read_callback() and conn_write_callback() here, which are
* invoked when a socket is ready to read or write respectively.
* <li>signal_callback(), which handles incoming signals.
* </ul>
* Other events are used for specific purposes, or for building more complex
* control structures. If you search for usage of tor_libevent_new(), you
* will find all the events that we construct in Tor.
*
* Tor has numerous housekeeping operations that need to happen
* regularly. They are handled in different ways:
* <ul>
* <li>The most frequent operations are handled after every read or write
* event, at the end of connection_handle_read() and
* connection_handle_write().
*
* <li>The next most frequent operations happen after each invocation of the
* main loop, in run_main_loop_once().
*
* <li>Once per second, we run all of the operations listed in
* second_elapsed_callback(), and in its child, run_scheduled_events().
*
* <li>Once-a-second operations are handled in second_elapsed_callback().
*
* <li>More infrequent operations take place based on the periodic event
* driver in periodic.c . These are stored in the periodic_events[]
* table.
* </ul>
*
**/
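/*
 * Minimal sketch of the callback pattern described above, using plain
 * libevent 2 calls (an illustration; "base", "fd" and "conn" stand for
 * whatever the caller already has, this is not Tor's wrapper API):
 *
 *   struct event *ev = event_new(base, fd, EV_READ|EV_PERSIST,
 *                                conn_read_callback, conn);
 *   event_add(ev, NULL);
 *
 * conn_read_callback() then runs every time the socket becomes readable,
 * which is how connection_handle_read() ends up being driven.
 */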
#define MAIN_PRIVATE
#include "or.h"
#include "addressmap.h"
#include "backtrace.h"
#include "bridges.h"
#include "buffers.h"
#include "buffers_tls.h"
#include "channel.h"
#include "channeltls.h"
#include "channelpadding.h"
#include "circuitbuild.h"
#include "circuitlist.h"
#include "circuituse.h"
#include "command.h"
#include "compress.h"
#include "config.h"
#include "confparse.h"
#include "connection.h"
#include "connection_edge.h"
#include "connection_or.h"
#include "consdiffmgr.h"
#include "control.h"
#include "cpuworker.h"
#include "crypto_s2k.h"
#include "directory.h"
#include "dirserv.h"
#include "dirvote.h"
#include "dns.h"
#include "dnsserv.h"
#include "dos.h"
#include "entrynodes.h"
#include "geoip.h"
#include "hibernate.h"
#include "hs_cache.h"
#include "hs_circuitmap.h"
#include "hs_client.h"
#include "keypin.h"
#include "main.h"
#include "microdesc.h"
#include "networkstatus.h"
#include "nodelist.h"
#include "ntmain.h"
#include "onion.h"
#include "periodic.h"
#include "policies.h"
#include "protover.h"
#include "transports.h"
#include "relay.h"
#include "rendclient.h"
#include "rendcommon.h"
#include "rendservice.h"
#include "rephist.h"
#include "router.h"
#include "routerkeys.h"
#include "routerlist.h"
#include "routerparse.h"
#include "scheduler.h"
#include "shared_random.h"
#include "statefile.h"
#include "status.h"
#include "tor_api.h"
#include "tor_api_internal.h"
#include "util_process.h"
#include "ext_orport.h"
#ifdef USE_DMALLOC
#include <dmalloc.h>
#endif
#include "memarea.h"
#include "sandbox.h"
#include <event2/event.h>
#ifdef HAVE_SYSTEMD
# if defined(__COVERITY__) && !defined(__INCLUDE_LEVEL__)
/* Systemd's use of gcc's __INCLUDE_LEVEL__ extension macro appears to confuse
* Coverity. Here's a kludge to unconfuse it.
*/
# define __INCLUDE_LEVEL__ 2
#endif /* defined(__COVERITY__) && !defined(__INCLUDE_LEVEL__) */
#include <systemd/sd-daemon.h>
#endif /* defined(HAVE_SYSTEMD) */
void evdns_shutdown(int);
#ifdef HAVE_RUST
// helper function defined in Rust to output a log message indicating if tor is
// running with Rust enabled. See src/rust/tor_util
void rust_log_welcome_string(void);
#endif
/********* PROTOTYPES **********/
static void dumpmemusage(int severity);
static void dumpstats(int severity); /* log stats */
static void conn_read_callback(evutil_socket_t fd, short event, void *_conn);
static void conn_write_callback(evutil_socket_t fd, short event, void *_conn);
static void second_elapsed_callback(periodic_timer_t *timer, void *args);
static int conn_close_if_marked(int i);
static void connection_start_reading_from_linked_conn(connection_t *conn);
static int connection_should_read_from_linked_conn(connection_t *conn);
static int run_main_loop_until_done(void);
static void process_signal(int sig);
static void shutdown_did_not_work_callback(evutil_socket_t fd, short event,
void *arg) ATTR_NORETURN;
/********* START VARIABLES **********/
int global_read_bucket; /**< Max number of bytes I can read this second. */
int global_write_bucket; /**< Max number of bytes I can write this second. */
/** Max number of relayed (bandwidth class 1) bytes I can read this second. */
int global_relayed_read_bucket;
/** Max number of relayed (bandwidth class 1) bytes I can write this second. */
int global_relayed_write_bucket;
/** What was the read bucket before the last second_elapsed_callback() call?
* (used to determine how many bytes we've read). */
static int stats_prev_global_read_bucket;
/** What was the write bucket before the last second_elapsed_callback() call?
* (used to determine how many bytes we've written). */
static int stats_prev_global_write_bucket;
/* DOCDOC stats_prev_n_read */
static uint64_t stats_prev_n_read = 0;
/* DOCDOC stats_prev_n_written */
static uint64_t stats_prev_n_written = 0;
/* XXX we might want to keep stats about global_relayed_*_bucket too. Or not.*/
/** How many bytes have we read since we started the process? */
static uint64_t stats_n_bytes_read = 0;
/** How many bytes have we written since we started the process? */
static uint64_t stats_n_bytes_written = 0;
/** What time did this process start up? */
time_t time_of_process_start = 0;
/** How many seconds have we been running? */
static long stats_n_seconds_working = 0;
/** How many times have we returned from the main loop successfully? */
static uint64_t stats_n_main_loop_successes = 0;
/** How many times have we received an error from the main loop? */
static uint64_t stats_n_main_loop_errors = 0;
/** How many times have we returned from the main loop with no events? */
static uint64_t stats_n_main_loop_idle = 0;
/** How often will we honor SIGNEWNYM requests? */
#define MAX_SIGNEWNYM_RATE 10
/** When did we last process a SIGNEWNYM request? */
static time_t time_of_last_signewnym = 0;
/** Is there a signewnym request we're currently waiting to handle? */
static int signewnym_is_pending = 0;
/** How many times have we called newnym? */
static unsigned newnym_epoch = 0;
/** Smartlist of all open connections. */
STATIC smartlist_t *connection_array = NULL;
/** List of connections that have been marked for close and need to be freed
* and removed from connection_array. */
static smartlist_t *closeable_connection_lst = NULL;
/** List of linked connections that are currently reading data into their
* inbuf from their partner's outbuf. */
static smartlist_t *active_linked_connection_lst = NULL;
/** Flag: Set to true iff we entered the current libevent main loop via
* <b>loop_once</b>. If so, there's no need to trigger a loopexit in order
* to handle linked connections. */
static int called_loop_once = 0;
/** Flag: if true, it's time to shut down, so the main loop should exit as
* soon as possible.
*/
static int main_loop_should_exit = 0;
/** The return value that the main loop should yield when it exits, if
* main_loop_should_exit is true.
*/
static int main_loop_exit_value = 0;
/** We set this to 1 when we've opened a circuit, so we can print a log
* entry to inform the user that Tor is working. We set it to 0 when
* we think the fact that we once opened a circuit doesn't mean we can do so
* any longer (a big time jump happened, when we notice our directory is
* heinously out-of-date, etc.).
*/
static int can_complete_circuits = 0;
/** How often do we check for router descriptors that we should download
* when we have too little directory info? */
#define GREEDY_DESCRIPTOR_RETRY_INTERVAL (10)
/** How often do we check for router descriptors that we should download
* when we have enough directory info? */
#define LAZY_DESCRIPTOR_RETRY_INTERVAL (60)
/** Decides our behavior when no logs are configured/before any
* logs have been configured. For 0, we log notice to stdout as normal.
* For 1, we log warnings only. For 2, we log nothing.
*/
int quiet_level = 0;
/********* END VARIABLES ************/
/****************************************************************************
*
* This section contains accessors and other methods on the connection_array
* variables (which are global within this file and unavailable outside it).
*
****************************************************************************/
/** Return 1 if we have successfully built a circuit, and nothing has changed
* to make us think that maybe we can't.
*/
int
have_completed_a_circuit(void)
{
return can_complete_circuits;
}
/** Note that we have successfully built a circuit, so that reachability
* testing and introduction points and so on may be attempted. */
void
note_that_we_completed_a_circuit(void)
{
can_complete_circuits = 1;
}
/** Note that something has happened (like a clock jump, or DisableNetwork) to
* make us think that maybe we can't complete circuits. */
void
note_that_we_maybe_cant_complete_circuits(void)
{
can_complete_circuits = 0;
}
/** Add <b>conn</b> to the array of connections that we can poll on. The
* connection's socket must be set; the connection starts out
* non-reading and non-writing.
*/
int
connection_add_impl(connection_t *conn, int is_connecting)
{
tor_assert(conn);
tor_assert(SOCKET_OK(conn->s) ||
conn->linked ||
(conn->type == CONN_TYPE_AP &&
TO_EDGE_CONN(conn)->is_dns_request));
tor_assert(conn->conn_array_index == -1); /* can only connection_add once */
conn->conn_array_index = smartlist_len(connection_array);
smartlist_add(connection_array, conn);
(void) is_connecting;
if (SOCKET_OK(conn->s) || conn->linked) {
conn->read_event = tor_event_new(tor_libevent_get_base(),
conn->s, EV_READ|EV_PERSIST, conn_read_callback, conn);
conn->write_event = tor_event_new(tor_libevent_get_base(),
conn->s, EV_WRITE|EV_PERSIST, conn_write_callback, conn);
/* XXXX CHECK FOR NULL RETURN! */
}
log_debug(LD_NET,"new conn type %s, socket %d, address %s, n_conns %d.",
conn_type_to_string(conn->type), (int)conn->s, conn->address,
smartlist_len(connection_array));
return 0;
}
/** Tell libevent that we don't care about <b>conn</b> any more. */
void
connection_unregister_events(connection_t *conn)
{
if (conn->read_event) {
if (event_del(conn->read_event))
log_warn(LD_BUG, "Error removing read event for %d", (int)conn->s);
tor_free(conn->read_event);
}
if (conn->write_event) {
if (event_del(conn->write_event))
log_warn(LD_BUG, "Error removing write event for %d", (int)conn->s);
tor_free(conn->write_event);
}
if (conn->type == CONN_TYPE_AP_DNS_LISTENER) {
dnsserv_close_listener(conn);
}
}
/** Remove the connection from the global list, and remove the
* corresponding poll entry. Calling this function will shift the last
* connection (if any) into the position occupied by conn.
*/
int
connection_remove(connection_t *conn)
{
int current_index;
connection_t *tmp;
tor_assert(conn);
log_debug(LD_NET,"removing socket %d (type %s), n_conns now %d",
(int)conn->s, conn_type_to_string(conn->type),
smartlist_len(connection_array));
if (conn->type == CONN_TYPE_AP && conn->socket_family == AF_UNIX) {
log_info(LD_NET, "Closing SOCKS Unix socket connection");
}
control_event_conn_bandwidth(conn);
tor_assert(conn->conn_array_index >= 0);
current_index = conn->conn_array_index;
connection_unregister_events(conn); /* This is redundant, but cheap. */
if (current_index == smartlist_len(connection_array)-1) { /* at the end */
smartlist_del(connection_array, current_index);
return 0;
}
/* replace this one with the one at the end */
smartlist_del(connection_array, current_index);
tmp = smartlist_get(connection_array, current_index);
tmp->conn_array_index = current_index;
return 0;
}
/** If <b>conn</b> is an edge conn, remove it from the list
* of conn's on this circuit. If it's not on an edge,
* flush and send destroys for all circuits on this conn.
*
* Remove it from connection_array (if applicable) and
* from closeable_connection_list.
*
* Then free it.
*/
static void
connection_unlink(connection_t *conn)
{
connection_about_to_close_connection(conn);
if (conn->conn_array_index >= 0) {
connection_remove(conn);
}
if (conn->linked_conn) {
conn->linked_conn->linked_conn = NULL;
if (! conn->linked_conn->marked_for_close &&
conn->linked_conn->reading_from_linked_conn)
connection_start_reading(conn->linked_conn);
conn->linked_conn = NULL;
}
smartlist_remove(closeable_connection_lst, conn);
smartlist_remove(active_linked_connection_lst, conn);
if (conn->type == CONN_TYPE_EXIT) {
assert_connection_edge_not_dns_pending(TO_EDGE_CONN(conn));
}
if (conn->type == CONN_TYPE_OR) {
if (!tor_digest_is_zero(TO_OR_CONN(conn)->identity_digest))
connection_or_clear_identity(TO_OR_CONN(conn));
/* connection_unlink() can only get called if the connection
* was already on the closeable list, and it got there by
* connection_mark_for_close(), which was called from
* connection_or_close_normally() or
* connection_or_close_for_error(), so the channel should
* already be in CHANNEL_STATE_CLOSING, and then the
* connection_about_to_close_connection() goes to
* connection_or_about_to_close(), which calls channel_closed()
* to notify the channel_t layer and close the channel, so there is
* nothing more to do here to deal with the channel associated
* with an orconn.
*/
}
connection_free(conn);
}
/** Initialize the global connection list, closeable connection list,
* and active connection list. */
STATIC void
init_connection_lists(void)
{
if (!connection_array)
connection_array = smartlist_new();
if (!closeable_connection_lst)
closeable_connection_lst = smartlist_new();
if (!active_linked_connection_lst)
active_linked_connection_lst = smartlist_new();
}
/** Schedule <b>conn</b> to be closed. **/
void
add_connection_to_closeable_list(connection_t *conn)
{
tor_assert(!smartlist_contains(closeable_connection_lst, conn));
tor_assert(conn->marked_for_close);
assert_connection_ok(conn, time(NULL));
smartlist_add(closeable_connection_lst, conn);
}
/** Return 1 if conn is on the closeable list, else return 0. */
int
connection_is_on_closeable_list(connection_t *conn)
{
return smartlist_contains(closeable_connection_lst, conn);
}
/** Return true iff conn is in the current poll array. */
int
connection_in_array(connection_t *conn)
{
return smartlist_contains(connection_array, conn);
}
/** Set <b>*array</b> to an array of all connections. <b>*array</b> must not
* be modified.
*/
MOCK_IMPL(smartlist_t *,
get_connection_array, (void))
{
if (!connection_array)
connection_array = smartlist_new();
return connection_array;
}
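/* Illustrative sketch (an assumption, not part of the original code):
 * callers typically walk the returned list with the smartlist iteration
 * macros used elsewhere in this file, without modifying the list itself:
 *
 *   SMARTLIST_FOREACH_BEGIN(get_connection_array(), connection_t *, conn) {
 *     ... inspect conn->type, conn->state, conn->marked_for_close ...
 *   } SMARTLIST_FOREACH_END(conn);
 */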
/** Return the total number of bytes read over the life of the process. */
MOCK_IMPL(uint64_t,
get_bytes_read,(void))
{
return stats_n_bytes_read;
}
/** Return the total number of bytes written over the life of the process. */
MOCK_IMPL(uint64_t,
get_bytes_written,(void))
{
return stats_n_bytes_written;
}
/** Set the event mask on <b>conn</b> to <b>events</b>. (The event
* mask is a bitmask whose bits are READ_EVENT and WRITE_EVENT)
*/
void
connection_watch_events(connection_t *conn, watchable_events_t events)
{
if (events & READ_EVENT)
connection_start_reading(conn);
else
connection_stop_reading(conn);
if (events & WRITE_EVENT)
connection_start_writing(conn);
else
connection_stop_writing(conn);
}
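/* Illustrative usage (a sketch, not from the original code): the event mask
 * lets a caller set both directions at once, or silence one of them:
 *
 *   connection_watch_events(conn, READ_EVENT | WRITE_EVENT);
 *   connection_watch_events(conn, WRITE_EVENT);   write only, stops reading
 *   connection_watch_events(conn, 0);             stops both directions
 *
 * This follows directly from the if/else pairs above; the calls themselves
 * are hypothetical examples. */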
/** Return true iff <b>conn</b> is listening for read events. */
int
connection_is_reading(connection_t *conn)
{
tor_assert(conn);
return conn->reading_from_linked_conn ||
(conn->read_event && event_pending(conn->read_event, EV_READ, NULL));
}
/** Reset our main loop counters. */
void
reset_main_loop_counters(void)
{
stats_n_main_loop_successes = 0;
stats_n_main_loop_errors = 0;
stats_n_main_loop_idle = 0;
}
/** Increment the main loop success counter. */
static void
increment_main_loop_success_count(void)
{
++stats_n_main_loop_successes;
}
/** Get the main loop success counter. */
uint64_t
get_main_loop_success_count(void)
{
return stats_n_main_loop_successes;
}
/** Increment the main loop error counter. */
static void
increment_main_loop_error_count(void)
{
++stats_n_main_loop_errors;
}
/** Get the main loop error counter. */
uint64_t
get_main_loop_error_count(void)
{
return stats_n_main_loop_errors;
}
/** Increment the main loop idle counter. */
static void
increment_main_loop_idle_count(void)
{
++stats_n_main_loop_idle;
}
/** Get the main loop idle counter. */
uint64_t
get_main_loop_idle_count(void)
{
return stats_n_main_loop_idle;
}
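/* Illustrative sketch (not from the original code): the three counters above
 * can be combined into a single debugging line, using the same U64 logging
 * helpers seen elsewhere in this file. The log call itself is hypothetical;
 * only the three getters are real:
 *
 *   log_debug(LD_GENERAL, "main loop: "U64_FORMAT" ok, "U64_FORMAT" errors, "
 *             U64_FORMAT" idle",
 *             U64_PRINTF_ARG(get_main_loop_success_count()),
 *             U64_PRINTF_ARG(get_main_loop_error_count()),
 *             U64_PRINTF_ARG(get_main_loop_idle_count()));
 */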
/** Check whether <b>conn</b> is correct in having (or not having) a
* read/write event (passed in <b>ev</b>). On success, return 0. On failure,
* log a warning and return -1. */
static int
connection_check_event(connection_t *conn, struct event *ev)
{
int bad;
if (conn->type == CONN_TYPE_AP && TO_EDGE_CONN(conn)->is_dns_request) {
/* DNS requests which we launch through the dnsserv.c module do not have
* any underlying socket or any underlying linked connection, so they
* shouldn't have any attached events either.
*/
bad = ev != NULL;
} else {
/* Everything else should have an underlying socket, or a linked
* connection (which is also tracked with a read_event/write_event pair).
*/
bad = ev == NULL;
}
if (bad) {
log_warn(LD_BUG, "Event missing on connection %p [%s;%s]. "
"socket=%d. linked=%d. "
"is_dns_request=%d. Marked_for_close=%s:%d",
conn,
conn_type_to_string(conn->type),
conn_state_to_string(conn->type, conn->state),
(int)conn->s, (int)conn->linked,
(conn->type == CONN_TYPE_AP &&
TO_EDGE_CONN(conn)->is_dns_request),
conn->marked_for_close_file ? conn->marked_for_close_file : "-",
conn->marked_for_close
);
log_backtrace(LOG_WARN, LD_BUG, "Backtrace attached.");
return -1;
}
return 0;
}
/** Tell the main loop to stop notifying <b>conn</b> of any read events. */
MOCK_IMPL(void,
connection_stop_reading,(connection_t *conn))
{
tor_assert(conn);
if (connection_check_event(conn, conn->read_event) < 0) {
return;
}
if (conn->linked) {
conn->reading_from_linked_conn = 0;
connection_stop_reading_from_linked_conn(conn);
} else {
if (event_del(conn->read_event))
log_warn(LD_NET, "Error from libevent setting read event state for %d "
"to unwatched: %s",
(int)conn->s,
tor_socket_strerror(tor_socket_errno(conn->s)));
}
}
/** Tell the main loop to start notifying <b>conn</b> of any read events. */
MOCK_IMPL(void,
connection_start_reading,(connection_t *conn))
{
tor_assert(conn);
if (connection_check_event(conn, conn->read_event) < 0) {
return;
}
if (conn->linked) {
conn->reading_from_linked_conn = 1;
if (connection_should_read_from_linked_conn(conn))
connection_start_reading_from_linked_conn(conn);
} else {
if (event_add(conn->read_event, NULL))
log_warn(LD_NET, "Error from libevent setting read event state for %d "
"to watched: %s",
(int)conn->s,
tor_socket_strerror(tor_socket_errno(conn->s)));
}
}
/** Return true iff <b>conn</b> is listening for write events. */
int
connection_is_writing(connection_t *conn)
{
tor_assert(conn);
return conn->writing_to_linked_conn ||
(conn->write_event && event_pending(conn->write_event, EV_WRITE, NULL));
}
/** Tell the main loop to stop notifying <b>conn</b> of any write events. */
MOCK_IMPL(void,
connection_stop_writing,(connection_t *conn))
{
tor_assert(conn);
if (connection_check_event(conn, conn->write_event) < 0) {
return;
}
if (conn->linked) {
conn->writing_to_linked_conn = 0;
if (conn->linked_conn)
connection_stop_reading_from_linked_conn(conn->linked_conn);
} else {
if (event_del(conn->write_event))
log_warn(LD_NET, "Error from libevent setting write event state for %d "
"to unwatched: %s",
(int)conn->s,
tor_socket_strerror(tor_socket_errno(conn->s)));
}
}
/** Tell the main loop to start notifying <b>conn</b> of any write events. */
MOCK_IMPL(void,
connection_start_writing,(connection_t *conn))
{
tor_assert(conn);
if (connection_check_event(conn, conn->write_event) < 0) {
return;
}
if (conn->linked) {
conn->writing_to_linked_conn = 1;
if (conn->linked_conn &&
connection_should_read_from_linked_conn(conn->linked_conn))
connection_start_reading_from_linked_conn(conn->linked_conn);
} else {
if (event_add(conn->write_event, NULL))
log_warn(LD_NET, "Error from libevent setting write event state for %d "
"to watched: %s",
(int)conn->s,
tor_socket_strerror(tor_socket_errno(conn->s)));
}
}
/** Return true iff <b>conn</b> is a linked conn, and reading from the conn
* linked to it would be good and feasible. (Reading is "feasible" if the
* other conn exists and has data in its outbuf, and is "good" if we have our
* reading_from_linked_conn flag set and the other conn has its
* writing_to_linked_conn flag set.)*/
static int
connection_should_read_from_linked_conn(connection_t *conn)
{
if (conn->linked && conn->reading_from_linked_conn) {
if (! conn->linked_conn ||
(conn->linked_conn->writing_to_linked_conn &&
buf_datalen(conn->linked_conn->outbuf)))
return 1;
}
return 0;
}
/** If we called event_base_loop() and told it to never stop until it
* runs out of events, now we've changed our mind: tell it we want it to
* exit once the current round of callbacks is done, so that we can
* run external code, and then return to the main loop. */
void
tell_event_loop_to_run_external_code(void)
{
if (!called_loop_once) {
struct timeval tv = { 0, 0 };
tor_event_base_loopexit(tor_libevent_get_base(), &tv);
called_loop_once = 1; /* hack to avoid adding more exit events */
}
}
/** Event to run 'shutdown did not work callback'. */
static struct event *shutdown_did_not_work_event = NULL;
/** Failsafe measure that should never actually be necessary: If
* tor_shutdown_event_loop_and_exit() somehow doesn't successfully exit the
* event loop, then this callback will kill Tor with an assertion failure
* ten seconds later.
*/
static void
shutdown_did_not_work_callback(evutil_socket_t fd, short event, void *arg)
{
// LCOV_EXCL_START
(void) fd;
(void) event;
(void) arg;
tor_assert_unreached();
// LCOV_EXCL_STOP
}
#ifdef ENABLE_RESTART_DEBUGGING
static struct event *tor_shutdown_event_loop_for_restart_event = NULL;
static void
tor_shutdown_event_loop_for_restart_cb(
evutil_socket_t fd, short event, void *arg)
{
(void)fd;
(void)event;
(void)arg;
tor_event_free(tor_shutdown_event_loop_for_restart_event);
tor_shutdown_event_loop_and_exit(0);
}
#endif /* defined(ENABLE_RESTART_DEBUGGING) */
/**
* After finishing the current callback (if any), shut down the main loop,
* clean up the process, and exit with <b>exitcode</b>.
*/
void
tor_shutdown_event_loop_and_exit(int exitcode)
{
if (main_loop_should_exit)
return; /* Ignore multiple calls to this function. */
main_loop_should_exit = 1;
main_loop_exit_value = exitcode;
/* Die with an assertion failure in ten seconds, if for some reason we don't
* exit normally. */
/* XXXX We should consider removing this code if it's never used. */
struct timeval ten_seconds = { 10, 0 };
shutdown_did_not_work_event = tor_evtimer_new(
tor_libevent_get_base(),
shutdown_did_not_work_callback, NULL);
event_add(shutdown_did_not_work_event, &ten_seconds);
/* Unlike loopexit, loopbreak prevents other callbacks from running. */
tor_event_base_loopbreak(tor_libevent_get_base());
}
/** Return true iff tor_shutdown_event_loop_and_exit() has been called. */
int
tor_event_loop_shutdown_is_pending(void)
{
return main_loop_should_exit;
}
/** Helper: Tell the main loop to begin reading bytes into <b>conn</b> from
* its linked connection, if it is not doing so already. Called by
* connection_start_reading and connection_start_writing as appropriate. */
static void
connection_start_reading_from_linked_conn(connection_t *conn)
{
tor_assert(conn);
tor_assert(conn->linked == 1);
if (!conn->active_on_link) {
conn->active_on_link = 1;
smartlist_add(active_linked_connection_lst, conn);
/* make sure that the event_base_loop() function exits at
* the end of its run through the current connections, so we can
* activate read events for linked connections. */
tell_event_loop_to_run_external_code();
} else {
tor_assert(smartlist_contains(active_linked_connection_lst, conn));
}
}
/** Tell the main loop to stop reading bytes into <b>conn</b> from its linked
* connection, if it is currently doing so. Called by connection_stop_reading,
* connection_stop_writing, and connection_read. */
void
connection_stop_reading_from_linked_conn(connection_t *conn)
{
tor_assert(conn);
tor_assert(conn->linked == 1);
if (conn->active_on_link) {
conn->active_on_link = 0;
/* FFFF We could keep an index here so we can smartlist_del
* cleanly. On the other hand, this doesn't show up on profiles,
* so let's leave it alone for now. */
smartlist_remove(active_linked_connection_lst, conn);
} else {
tor_assert(!smartlist_contains(active_linked_connection_lst, conn));
}
}
/** Close all connections that have been scheduled to get closed. */
STATIC void
close_closeable_connections(void)
{
int i;
for (i = 0; i < smartlist_len(closeable_connection_lst); ) {
connection_t *conn = smartlist_get(closeable_connection_lst, i);
if (conn->conn_array_index < 0) {
connection_unlink(conn); /* blow it away right now */
} else {
if (!conn_close_if_marked(conn->conn_array_index))
++i;
}
}
}
/** Count moribund connections for the out-of-sockets (OOS) handler. */
MOCK_IMPL(int,
connection_count_moribund, (void))
{
int moribund = 0;
/*
* Count things we'll try to kill when close_closeable_connections()
* runs next.
*/
SMARTLIST_FOREACH_BEGIN(closeable_connection_lst, connection_t *, conn) {
if (SOCKET_OK(conn->s) && connection_is_moribund(conn)) ++moribund;
} SMARTLIST_FOREACH_END(conn);
return moribund;
}
/** Libevent callback: this gets invoked when (connection_t*)<b>conn</b> has
* some data to read. */
static void
conn_read_callback(evutil_socket_t fd, short event, void *_conn)
{
connection_t *conn = _conn;
(void)fd;
(void)event;
log_debug(LD_NET,"socket %d wants to read.",(int)conn->s);
/* assert_connection_ok(conn, time(NULL)); */
if (connection_handle_read(conn) < 0) {
if (!conn->marked_for_close) {
#ifndef _WIN32
log_warn(LD_BUG,"Unhandled error on read for %s connection "
"(fd %d); removing",
conn_type_to_string(conn->type), (int)conn->s);
tor_fragile_assert();
#endif /* !defined(_WIN32) */
if (CONN_IS_EDGE(conn))
connection_edge_end_errno(TO_EDGE_CONN(conn));
connection_mark_for_close(conn);
}
}
assert_connection_ok(conn, time(NULL));
if (smartlist_len(closeable_connection_lst))
close_closeable_connections();
}
/** Libevent callback: this gets invoked when (connection_t*)<b>conn</b> has
* some data to write. */
static void
conn_write_callback(evutil_socket_t fd, short events, void *_conn)
{
connection_t *conn = _conn;
(void)fd;
(void)events;
LOG_FN_CONN(conn, (LOG_DEBUG, LD_NET, "socket %d wants to write.",
(int)conn->s));
/* assert_connection_ok(conn, time(NULL)); */
if (connection_handle_write(conn, 0) < 0) {
if (!conn->marked_for_close) {
/* this connection is broken. remove it. */
log_fn(LOG_WARN,LD_BUG,
"unhandled error on write for %s connection (fd %d); removing",
conn_type_to_string(conn->type), (int)conn->s);
tor_fragile_assert();
if (CONN_IS_EDGE(conn)) {
/* otherwise we cry wolf about duplicate close */
edge_connection_t *edge_conn = TO_EDGE_CONN(conn);
if (!edge_conn->end_reason)
edge_conn->end_reason = END_STREAM_REASON_INTERNAL;
edge_conn->edge_has_sent_end = 1;
}
connection_close_immediate(conn); /* So we don't try to flush. */
connection_mark_for_close(conn);
}
}
assert_connection_ok(conn, time(NULL));
if (smartlist_len(closeable_connection_lst))
close_closeable_connections();
}
/** If the connection at connection_array[i] is marked for close, then:
* - If it has data that it wants to flush, try to flush it.
* - If it _still_ has data to flush, and conn->hold_open_until_flushed is
* true, then leave the connection open and return.
* - Otherwise, remove the connection from connection_array and from
* all other lists, close it, and free it.
* Returns 1 if the connection was closed, 0 otherwise.
*/
static int
conn_close_if_marked(int i)
{
connection_t *conn;
int retval;
time_t now;
conn = smartlist_get(connection_array, i);
if (!conn->marked_for_close)
return 0; /* nothing to see here, move along */
now = time(NULL);
assert_connection_ok(conn, now);
/* assert_all_pending_dns_resolves_ok(); */
log_debug(LD_NET,"Cleaning up connection (fd "TOR_SOCKET_T_FORMAT").",
conn->s);
/* If the connection we are about to close was trying to connect to
a proxy server and failed, the client won't be able to use that
proxy. We should warn the user about this. */
if (conn->proxy_state == PROXY_INFANT)
log_failed_proxy_connection(conn);
if ((SOCKET_OK(conn->s) || conn->linked_conn) &&
connection_wants_to_flush(conn)) {
/* s == -1 means it's an incomplete edge connection, or that the socket
* has already been closed as unflushable. */
ssize_t sz = connection_bucket_write_limit(conn, now);
if (!conn->hold_open_until_flushed)
log_info(LD_NET,
"Conn (addr %s, fd %d, type %s, state %d) marked, but wants "
"to flush %d bytes. (Marked at %s:%d)",
escaped_safe_str_client(conn->address),
(int)conn->s, conn_type_to_string(conn->type), conn->state,
(int)conn->outbuf_flushlen,
conn->marked_for_close_file, conn->marked_for_close);
if (conn->linked_conn) {
retval = buf_move_to_buf(conn->linked_conn->inbuf, conn->outbuf,
&conn->outbuf_flushlen);
if (retval >= 0) {
/* The linked conn will notice that it has data when it notices that
* we're gone. */
connection_start_reading_from_linked_conn(conn->linked_conn);
}
log_debug(LD_GENERAL, "Flushed last %d bytes from a linked conn; "
"%d left; flushlen %d; wants-to-flush==%d", retval,
(int)connection_get_outbuf_len(conn),
(int)conn->outbuf_flushlen,
connection_wants_to_flush(conn));
} else if (connection_speaks_cells(conn)) {
if (conn->state == OR_CONN_STATE_OPEN) {
retval = buf_flush_to_tls(conn->outbuf, TO_OR_CONN(conn)->tls, sz,
&conn->outbuf_flushlen);
} else
retval = -1; /* never flush non-open broken tls connections */
} else {
retval = buf_flush_to_socket(conn->outbuf, conn->s, sz,
&conn->outbuf_flushlen);
}
if (retval >= 0 && /* Technically, we could survive things like
TLS_WANT_WRITE here. But don't bother for now. */
conn->hold_open_until_flushed && connection_wants_to_flush(conn)) {
if (retval > 0) {
LOG_FN_CONN(conn, (LOG_INFO,LD_NET,
"Holding conn (fd %d) open for more flushing.",
(int)conn->s));
conn->timestamp_last_write_allowed = now; /* reset so we can flush
* more */
} else if (sz == 0) {
/* Also, retval==0. If we get here, we didn't want to write anything
* (because of rate-limiting) and we didn't. */
/* Connection must flush before closing, but it's being rate-limited.
* Let's remove from Libevent, and mark it as blocked on bandwidth
* so it will be re-added on next token bucket refill. Prevents
* busy Libevent loops where we keep ending up here and returning
* 0 until we are no longer blocked on bandwidth.
*/
if (connection_is_writing(conn)) {
conn->write_blocked_on_bw = 1;
connection_stop_writing(conn);
}
if (connection_is_reading(conn)) {
/* XXXX+ We should make this code unreachable; if a connection is
* marked for close and flushing, there is no point in reading to it
* at all. Further, checking at this point is a bit of a hack: it
* would make much more sense to react in
* connection_handle_read_impl, or to just stop reading in
* mark_and_flush */
conn->read_blocked_on_bw = 1;
connection_stop_reading(conn);
}
}
return 0;
}
if (connection_wants_to_flush(conn)) {
log_fn(LOG_INFO, LD_NET, "We stalled too much while trying to write %d "
"bytes to address %s. If this happens a lot, either "
"something is wrong with your network connection, or "
"something is wrong with theirs. "
"(fd %d, type %s, state %d, marked at %s:%d).",
(int)connection_get_outbuf_len(conn),
escaped_safe_str_client(conn->address),
(int)conn->s, conn_type_to_string(conn->type), conn->state,
conn->marked_for_close_file,
conn->marked_for_close);
}
}
connection_unlink(conn); /* unlink, remove, free */
return 1;
}
/** Implementation for directory_all_unreachable. This is done in a callback,
* since otherwise it would complicate Tor's control-flow graph beyond all
* reason.
*/
static void
directory_all_unreachable_cb(evutil_socket_t fd, short event, void *arg)
{
(void)fd;
(void)event;
(void)arg;
connection_t *conn;
while ((conn = connection_get_by_type_state(CONN_TYPE_AP,
AP_CONN_STATE_CIRCUIT_WAIT))) {
entry_connection_t *entry_conn = TO_ENTRY_CONN(conn);
log_notice(LD_NET,
"Is your network connection down? "
"Failing connection to '%s:%d'.",
safe_str_client(entry_conn->socks_request->address),
entry_conn->socks_request->port);
connection_mark_unattached_ap(entry_conn,
END_STREAM_REASON_NET_UNREACHABLE);
}
control_event_general_error("DIR_ALL_UNREACHABLE");
}
static struct event *directory_all_unreachable_cb_event = NULL;
/** We've just tried every dirserver we know about, and none of
* them were reachable. Assume the network is down. Change state
* so next time an application connection arrives we'll delay it
* and try another directory fetch. Kill off all the circuit_wait
* streams that are waiting now, since they will all timeout anyway.
*/
void
directory_all_unreachable(time_t now)
{
(void)now;
reset_uptime(); /* reset it */
if (!directory_all_unreachable_cb_event) {
directory_all_unreachable_cb_event =
tor_event_new(tor_libevent_get_base(),
-1, EV_READ, directory_all_unreachable_cb, NULL);
tor_assert(directory_all_unreachable_cb_event);
}
event_active(directory_all_unreachable_cb_event, EV_READ, 1);
}
/** This function is called whenever we successfully pull down some new
* network statuses or server descriptors. */
void
directory_info_has_arrived(time_t now, int from_cache, int suppress_logs)
{
const or_options_t *options = get_options();
/* if we have enough dir info, then update our guard status with
* whatever we just learned. */
int invalidate_circs = guards_update_all();
if (invalidate_circs) {
circuit_mark_all_unused_circs();
circuit_mark_all_dirty_circs_as_unusable();
}
if (!router_have_minimum_dir_info()) {
int quiet = suppress_logs || from_cache ||
directory_too_idle_to_fetch_descriptors(options, now);
tor_log(quiet ? LOG_INFO : LOG_NOTICE, LD_DIR,
"I learned some more directory information, but not enough to "
"build a circuit: %s", get_dir_info_status_string());
update_all_descriptor_downloads(now);
return;
} else {
if (directory_fetches_from_authorities(options)) {
update_all_descriptor_downloads(now);
}
/* Don't even bother trying to get extrainfo until the rest of our
* directory info is up-to-date */
if (options->DownloadExtraInfo)
update_extrainfo_downloads(now);
}
if (server_mode(options) && !net_is_disabled() && !from_cache &&
(have_completed_a_circuit() || !any_predicted_circuits(now)))
router_do_reachability_checks(1, 1);
}
/** Perform regular maintenance tasks for a single connection. This
* function gets run once per second per connection by run_scheduled_events.
*/
static void
run_connection_housekeeping(int i, time_t now)
{
cell_t cell;
connection_t *conn = smartlist_get(connection_array, i);
const or_options_t *options = get_options();
or_connection_t *or_conn;
channel_t *chan = NULL;
int have_any_circuits;
int past_keepalive =
now >= conn->timestamp_last_write_allowed + options->KeepalivePeriod;
if (conn->outbuf && !connection_get_outbuf_len(conn) &&
conn->type == CONN_TYPE_OR)
TO_OR_CONN(conn)->timestamp_lastempty = now;
if (conn->marked_for_close) {
/* nothing to do here */
return;
}
/* Expire any directory connections that haven't been active (written to
 * if we're the server, or read from if we're the client) for more than
 * TestingDirConnectionMaxStall seconds (5 minutes by default). */
if (conn->type == CONN_TYPE_DIR &&
((DIR_CONN_IS_SERVER(conn) &&
conn->timestamp_last_write_allowed
+ options->TestingDirConnectionMaxStall < now) ||
(!DIR_CONN_IS_SERVER(conn) &&
conn->timestamp_last_read_allowed
+ options->TestingDirConnectionMaxStall < now))) {
log_info(LD_DIR,"Expiring wedged directory conn (fd %d, purpose %d)",
(int)conn->s, conn->purpose);
/* This check is temporary; it's to let us know whether we should consider
* parsing partial serverdesc responses. */
if (conn->purpose == DIR_PURPOSE_FETCH_SERVERDESC &&
connection_get_inbuf_len(conn) >= 1024) {
log_info(LD_DIR,"Trying to extract information from wedged server desc "
"download.");
connection_dir_reached_eof(TO_DIR_CONN(conn));
} else {
connection_mark_for_close(conn);
}
return;
}
if (!connection_speaks_cells(conn))
return; /* we're all done here, the rest is just for OR conns */
/* If we haven't flushed to an OR connection for a while, then either nuke
the connection or send a keepalive, depending. */
or_conn = TO_OR_CONN(conn);
tor_assert(conn->outbuf);
chan = TLS_CHAN_TO_BASE(or_conn->chan);
tor_assert(chan);
if (channel_num_circuits(chan) != 0) {
have_any_circuits = 1;
chan->timestamp_last_had_circuits = now;
} else {
have_any_circuits = 0;
}
if (channel_is_bad_for_new_circs(TLS_CHAN_TO_BASE(or_conn->chan)) &&
! have_any_circuits) {
/* It's bad for new circuits, and has no unmarked circuits on it:
* mark it now. */
log_info(LD_OR,
"Expiring non-used OR connection to fd %d (%s:%d) [Too old].",
(int)conn->s, conn->address, conn->port);
if (conn->state == OR_CONN_STATE_CONNECTING)
connection_or_connect_failed(TO_OR_CONN(conn),
END_OR_CONN_REASON_TIMEOUT,
"Tor gave up on the connection");
connection_or_close_normally(TO_OR_CONN(conn), 1);
} else if (!connection_state_is_open(conn)) {
if (past_keepalive) {
/* We never managed to actually get this connection open and happy. */
log_info(LD_OR,"Expiring non-open OR connection to fd %d (%s:%d).",
(int)conn->s,conn->address, conn->port);
connection_or_close_normally(TO_OR_CONN(conn), 0);
}
} else if (we_are_hibernating() &&
! have_any_circuits &&
!connection_get_outbuf_len(conn)) {
/* We're hibernating, there are no circuits, and nothing to flush. */
log_info(LD_OR,"Expiring non-used OR connection to fd %d (%s:%d) "
"[Hibernating or exiting].",
(int)conn->s,conn->address, conn->port);
connection_or_close_normally(TO_OR_CONN(conn), 1);
} else if (!have_any_circuits &&
now - or_conn->idle_timeout >=
chan->timestamp_last_had_circuits) {
log_info(LD_OR,"Expiring non-used OR connection "U64_FORMAT" to fd %d "
"(%s:%d) [no circuits for %d; timeout %d; %scanonical].",
U64_PRINTF_ARG(chan->global_identifier),
(int)conn->s, conn->address, conn->port,
(int)(now - chan->timestamp_last_had_circuits),
or_conn->idle_timeout,
or_conn->is_canonical ? "" : "non");
connection_or_close_normally(TO_OR_CONN(conn), 0);
} else if (
now >= or_conn->timestamp_lastempty + options->KeepalivePeriod*10 &&
now >=
conn->timestamp_last_write_allowed + options->KeepalivePeriod*10) {
log_fn(LOG_PROTOCOL_WARN,LD_PROTOCOL,
"Expiring stuck OR connection to fd %d (%s:%d). (%d bytes to "
"flush; %d seconds since last write)",
(int)conn->s, conn->address, conn->port,
(int)connection_get_outbuf_len(conn),
(int)(now-conn->timestamp_last_write_allowed));
connection_or_close_normally(TO_OR_CONN(conn), 0);
} else if (past_keepalive && !connection_get_outbuf_len(conn)) {
/* send a padding cell */
log_fn(LOG_DEBUG,LD_OR,"Sending keepalive to (%s:%d)",
conn->address, conn->port);
memset(&cell,0,sizeof(cell_t));
cell.command = CELL_PADDING;
connection_or_write_cell_to_buf(&cell, or_conn);
} else {
channelpadding_decide_to_pad_channel(chan);
}
}
/** Honor a NEWNYM request: make future requests unlinkable to past
* requests. */
static void
signewnym_impl(time_t now)
{
const or_options_t *options = get_options();
if (!proxy_mode(options)) {
log_info(LD_CONTROL, "Ignoring SIGNAL NEWNYM because client functionality "
"is disabled.");
return;
}
circuit_mark_all_dirty_circs_as_unusable();
addressmap_clear_transient();
hs_client_purge_state();
time_of_last_signewnym = now;
signewnym_is_pending = 0;
++newnym_epoch;
control_event_signal(SIGNEWNYM);
}
/** Return the number of times that signewnym has been called. */
unsigned
get_signewnym_epoch(void)
{
return newnym_epoch;
}
/** True iff we have initialized all the members of <b>periodic_events</b>.
* Used to prevent double-initialization. */
static int periodic_events_initialized = 0;
/* Declare all the timer callback functions... */
#undef CALLBACK
#define CALLBACK(name) \
static int name ## _callback(time_t, const or_options_t *)
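/* For reference (expansion shown for illustration): each CALLBACK(name) line
 * below expands to a forward declaration, e.g. CALLBACK(heartbeat) becomes
 *
 *   static int heartbeat_callback(time_t, const or_options_t *);
 *
 * so every name listed here must have a matching *_callback function defined
 * later in this file. */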
CALLBACK(rotate_onion_key);
CALLBACK(check_onion_keys_expiry_time);
CALLBACK(check_ed_keys);
CALLBACK(launch_descriptor_fetches);
CALLBACK(rotate_x509_certificate);
CALLBACK(add_entropy);
CALLBACK(launch_reachability_tests);
CALLBACK(downrate_stability);
CALLBACK(save_stability);
CALLBACK(check_authority_cert);
CALLBACK(check_expired_networkstatus);
CALLBACK(write_stats_file);
CALLBACK(record_bridge_stats);
CALLBACK(clean_caches);
CALLBACK(rend_cache_failure_clean);
CALLBACK(retry_dns);
CALLBACK(check_descriptor);
CALLBACK(check_for_reachability_bw);
CALLBACK(fetch_networkstatus);
CALLBACK(retry_listeners);
CALLBACK(expire_old_ciruits_serverside);
CALLBACK(check_dns_honesty);
CALLBACK(write_bridge_ns);
CALLBACK(check_fw_helper_app);
CALLBACK(heartbeat);
CALLBACK(clean_consdiffmgr);
CALLBACK(reset_padding_counts);
CALLBACK(check_canonical_channels);
CALLBACK(hs_service);
#undef CALLBACK
/* Now we declare an array of periodic_event_item_t for each periodic event */
#define CALLBACK(name) PERIODIC_EVENT(name)
static periodic_event_item_t periodic_events[] = {
CALLBACK(rotate_onion_key),
CALLBACK(check_onion_keys_expiry_time),
CALLBACK(check_ed_keys),
CALLBACK(launch_descriptor_fetches),
CALLBACK(rotate_x509_certificate),
CALLBACK(add_entropy),
CALLBACK(launch_reachability_tests),
CALLBACK(downrate_stability),
CALLBACK(save_stability),
CALLBACK(check_authority_cert),
CALLBACK(check_expired_networkstatus),
CALLBACK(write_stats_file),
CALLBACK(record_bridge_stats),
CALLBACK(clean_caches),
CALLBACK(rend_cache_failure_clean),
CALLBACK(retry_dns),
CALLBACK(check_descriptor),
CALLBACK(check_for_reachability_bw),
CALLBACK(fetch_networkstatus),
CALLBACK(retry_listeners),
CALLBACK(expire_old_ciruits_serverside),
CALLBACK(check_dns_honesty),
CALLBACK(write_bridge_ns),
CALLBACK(check_fw_helper_app),
CALLBACK(heartbeat),
CALLBACK(clean_consdiffmgr),
CALLBACK(reset_padding_counts),
CALLBACK(check_canonical_channels),
CALLBACK(hs_service),
END_OF_PERIODIC_EVENTS
};
#undef CALLBACK
/* These are pointers to members of periodic_events[] that are used to
* implement particular callbacks. We keep them separate here so that we
* can access them by name. We also keep them inside periodic_events[]
* so that we can implement "reset all timers" in a reasonable way. */
static periodic_event_item_t *check_descriptor_event=NULL;
static periodic_event_item_t *fetch_networkstatus_event=NULL;
static periodic_event_item_t *launch_descriptor_fetches_event=NULL;
static periodic_event_item_t *check_dns_honesty_event=NULL;
/** Reset all the periodic events so we'll do all our actions again as if we
* just started up.
* Useful if our clock just moved back a long time from the future,
* so we don't wait until that future arrives again before acting.
*/
void
reset_all_main_loop_timers(void)
{
int i;
for (i = 0; periodic_events[i].name; ++i) {
periodic_event_reschedule(&periodic_events[i]);
}
}
/** Return the member of periodic_events[] whose name is <b>name</b>.
* Return NULL if no such event is found.
*/
static periodic_event_item_t *
find_periodic_event(const char *name)
{
int i;
for (i = 0; periodic_events[i].name; ++i) {
if (strcmp(name, periodic_events[i].name) == 0)
return &periodic_events[i];
}
return NULL;
}
/** Event to run initialize_periodic_events_cb */
static struct event *initialize_periodic_events_event = NULL;
/** Helper, run one second after setup:
* Initializes all members of periodic_events and starts them running.
*
* (We do this one second after setup for backward-compatibility reasons;
* it might not actually be necessary.) */
static void
initialize_periodic_events_cb(evutil_socket_t fd, short events, void *data)
{
(void) fd;
(void) events;
(void) data;
tor_event_free(initialize_periodic_events_event);
int i;
for (i = 0; periodic_events[i].name; ++i) {
periodic_event_launch(&periodic_events[i]);
}
}
/** Set up all the members of periodic_events[], and configure them all to be
* launched from a callback. */
STATIC void
initialize_periodic_events(void)
{
tor_assert(periodic_events_initialized == 0);
periodic_events_initialized = 1;
int i;
for (i = 0; periodic_events[i].name; ++i) {
periodic_event_setup(&periodic_events[i]);
}
#define NAMED_CALLBACK(name) \
STMT_BEGIN name ## _event = find_periodic_event( #name ); STMT_END
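/* For reference (expansion shown for illustration): NAMED_CALLBACK(name)
 * stringizes its argument, so NAMED_CALLBACK(check_descriptor) below becomes
 *
 *   check_descriptor_event = find_periodic_event("check_descriptor");
 *
 * wrapped in STMT_BEGIN/STMT_END, which makes the named pointers above point
 * into periodic_events[]. */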
NAMED_CALLBACK(check_descriptor);
NAMED_CALLBACK(fetch_networkstatus);
NAMED_CALLBACK(launch_descriptor_fetches);
NAMED_CALLBACK(check_dns_honesty);
struct timeval one_second = { 1, 0 };
initialize_periodic_events_event = tor_evtimer_new(
tor_libevent_get_base(),
initialize_periodic_events_cb, NULL);
event_add(initialize_periodic_events_event, &one_second);
}
STATIC void
teardown_periodic_events(void)
{
int i;
for (i = 0; periodic_events[i].name; ++i) {
periodic_event_destroy(&periodic_events[i]);
}
periodic_events_initialized = 0;
}
/**
* Update our schedule so that we'll check whether we need to update our
* descriptor immediately, rather than after up to CHECK_DESCRIPTOR_INTERVAL
* seconds.
*/
void
reschedule_descriptor_update_check(void)
{
tor_assert(check_descriptor_event);
periodic_event_reschedule(check_descriptor_event);
}
/**
* Update our schedule so that we'll check whether we need to fetch directory
* info immediately.
*/
void
reschedule_directory_downloads(void)
{
tor_assert(fetch_networkstatus_event);
tor_assert(launch_descriptor_fetches_event);
periodic_event_reschedule(fetch_networkstatus_event);
periodic_event_reschedule(launch_descriptor_fetches_event);
}
#define LONGEST_TIMER_PERIOD (30 * 86400)
/** Helper: Return the number of seconds between <b>now</b> and <b>next</b>,
* clipped to the range [1 second, LONGEST_TIMER_PERIOD]. */
static inline int
safe_timer_diff(time_t now, time_t next)
{
if (next > now) {
/* There were no computers at signed TIME_MIN (1902 on 32-bit systems),
* and nothing that could run Tor. It's a bug if 'next' is around then.
* On 64-bit systems with signed TIME_MIN, TIME_MIN is before the Big
* Bang. We cannot extrapolate past a singularity, but there was probably
* nothing that could run Tor then, either.
**/
tor_assert(next > TIME_MIN + LONGEST_TIMER_PERIOD);
if (next - LONGEST_TIMER_PERIOD > now)
return LONGEST_TIMER_PERIOD;
return (int)(next - now);
} else {
return 1;
}
}
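/* Illustrative values (sketch, derived from the logic above): with
 * now == 1000,
 *   safe_timer_diff(1000, 900)              returns 1 (next is in the past),
 *   safe_timer_diff(1000, 1060)             returns 60,
 *   safe_timer_diff(1000, 1000 + 40*86400)  returns LONGEST_TIMER_PERIOD,
 * since the last difference exceeds the 30-day cap. */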
/** Perform regular maintenance tasks. This function gets run once per
* second by second_elapsed_callback().
*/
static void
run_scheduled_events(time_t now)
{
const or_options_t *options = get_options();
/* 0. See if we've been asked to shut down and our timeout has
* expired; or if our bandwidth limits are exhausted and we
* should hibernate; or if it's time to wake up from hibernation.
*/
consider_hibernation(now);
/* 0b. If we've deferred a signewnym, make sure it gets handled
* eventually. */
if (signewnym_is_pending &&
time_of_last_signewnym + MAX_SIGNEWNYM_RATE <= now) {
log_info(LD_CONTROL, "Honoring delayed NEWNYM request");
signewnym_impl(now);
}
/* 0c. If we've deferred log messages for the controller, handle them now */
flush_pending_log_callbacks();
/* Maybe enough time elapsed for us to reconsider a circuit. */
circuit_upgrade_circuits_from_guard_wait();
if (options->UseBridges && !net_is_disabled()) {
/* Note: this check uses net_is_disabled(), not should_delay_dir_fetches()
* -- the latter is only for fetching consensus-derived directory info. */
fetch_bridge_descriptors(options, now);
}
if (accounting_is_enabled(options)) {
accounting_run_housekeeping(now);
}
if (authdir_mode_v3(options)) {
dirvote_act(options, now);
}
/* 3a. Every second, we examine pending circuits and prune the
* ones which have been pending for more than a few seconds.
* We do this before step 4, so it can try building more if
* it's not comfortable with the number of available circuits.
*/
/* (If our circuit build timeout can ever become lower than a second (which
* it can't, currently), we should do this more often.) */
circuit_expire_building();
circuit_expire_waiting_for_better_guard();
/* 3b. Also look at pending streams and prune the ones that 'began'
* a long time ago but haven't gotten a 'connected' yet.
* Do this before step 4, so we can put them back into pending
* state to be picked up by the new circuit.
*/
connection_ap_expire_beginning();
/* 3c. And expire connections that we've held open for too long.
*/
connection_expire_held_open();
/* 4. Every second, we try a new circuit if there are no valid
* circuits. Every NewCircuitPeriod seconds, we expire circuits
* that became dirty more than MaxCircuitDirtiness seconds ago,
* and we make a new circ if there are no clean circuits.
*/
const int have_dir_info = router_have_minimum_dir_info();
if (have_dir_info && !net_is_disabled()) {
circuit_build_needed_circs(now);
} else {
circuit_expire_old_circs_as_needed(now);
}
if (!net_is_disabled()) {
/* This is usually redundant with circuit_build_needed_circs() above,
* but it is very fast when there is no work to do. */
connection_ap_attach_pending(0);
}
/* 5. We do housekeeping for each connection... */
channel_update_bad_for_new_circs(NULL, 0);
int i;
for (i=0;i<smartlist_len(connection_array);i++) {
run_connection_housekeeping(i, now);
}
/* 6. And remove any marked circuits... */
circuit_close_all_marked();
/* 8. and blow away any connections that need to die. have to do this now,
* because if we marked a conn for close and left its socket -1, then
* we'll pass it to poll/select and bad things will happen.
*/
close_closeable_connections();
/* 8b. And if anything in our state is ready to get flushed to disk, we
* flush it. */
or_state_save(now);
/* 8c. Do channel cleanup just like for connections */
channel_run_cleanup();
channel_listener_run_cleanup();
/* 11b. check pending unconfigured managed proxies */
if (!net_is_disabled() && pt_proxies_configuration_pending())
pt_configure_remaining_proxies();
/* 12. launch diff computations. (This is free if there are none to
* launch.) */
if (dir_server_mode(options)) {
consdiffmgr_rescan();
}
}
/* Periodic callback: rotate the onion keys after the period defined by the
* "onion-key-rotation-days" consensus parameter, shut down and restart all
* cpuworkers, and update our descriptor if necessary.
*/
static int
rotate_onion_key_callback(time_t now, const or_options_t *options)
{
if (server_mode(options)) {
int onion_key_lifetime = get_onion_key_lifetime();
time_t rotation_time = get_onion_key_set_at()+onion_key_lifetime;
if (rotation_time > now) {
return ONION_KEY_CONSENSUS_CHECK_INTERVAL;
}
log_info(LD_GENERAL,"Rotating onion key.");
rotate_onion_key();
cpuworkers_rotate_keyinfo();
if (router_rebuild_descriptor(1)<0) {
log_info(LD_CONFIG, "Couldn't rebuild router descriptor");
}
if (advertised_server_mode() && !net_is_disabled())
router_upload_dir_desc_to_dirservers(0);
return ONION_KEY_CONSENSUS_CHECK_INTERVAL;
}
return PERIODIC_EVENT_NO_UPDATE;
}
/* Periodic callback: Check if our old onion keys are still valid after the
* period of time defined by the consensus parameter
* "onion-key-grace-period-days", otherwise expire them by setting them to
* NULL.
*/
static int
check_onion_keys_expiry_time_callback(time_t now, const or_options_t *options)
{
if (server_mode(options)) {
int onion_key_grace_period = get_onion_key_grace_period();
time_t expiry_time = get_onion_key_set_at()+onion_key_grace_period;
if (expiry_time > now) {
return ONION_KEY_CONSENSUS_CHECK_INTERVAL;
}
log_info(LD_GENERAL, "Expiring old onion keys.");
expire_old_onion_keys();
cpuworkers_rotate_keyinfo();
return ONION_KEY_CONSENSUS_CHECK_INTERVAL;
}
return PERIODIC_EVENT_NO_UPDATE;
}
/* Periodic callback: Every 30 seconds, check whether it's time to make new
* Ed25519 subkeys.
*/
static int
check_ed_keys_callback(time_t now, const or_options_t *options)
{
if (server_mode(options)) {
if (should_make_new_ed_keys(options, now)) {
int new_signing_key = load_ed_keys(options, now);
if (new_signing_key < 0 ||
generate_ed_link_cert(options, now, new_signing_key > 0)) {
log_err(LD_OR, "Unable to update Ed25519 keys! Exiting.");
tor_shutdown_event_loop_and_exit(1);
}
}
return 30;
}
return PERIODIC_EVENT_NO_UPDATE;
}
/**
* Periodic callback: Every {LAZY,GREEDY}_DESCRIPTOR_RETRY_INTERVAL,
* see about fetching descriptors, microdescriptors, and extrainfo
* documents.
*/
static int
launch_descriptor_fetches_callback(time_t now, const or_options_t *options)
{
if (should_delay_dir_fetches(options, NULL))
return PERIODIC_EVENT_NO_UPDATE;
update_all_descriptor_downloads(now);
update_extrainfo_downloads(now);
if (router_have_minimum_dir_info())
return LAZY_DESCRIPTOR_RETRY_INTERVAL;
else
return GREEDY_DESCRIPTOR_RETRY_INTERVAL;
}
/**
* Periodic event: Rotate our X.509 certificates and TLS keys once every
* MAX_SSL_KEY_LIFETIME_INTERNAL.
*/
static int
rotate_x509_certificate_callback(time_t now, const or_options_t *options)
{
static int first = 1;
(void)now;
(void)options;
if (first) {
first = 0;
return MAX_SSL_KEY_LIFETIME_INTERNAL;
}
/* 1b. Every MAX_SSL_KEY_LIFETIME_INTERNAL seconds, we change our
* TLS context. */
log_info(LD_GENERAL,"Rotating tls context.");
if (router_initialize_tls_context() < 0) {
log_err(LD_BUG, "Error reinitializing TLS context");
tor_assert_unreached();
}
if (generate_ed_link_cert(options, now, 1)) {
log_err(LD_OR, "Unable to update Ed25519->TLS link certificate for "
"new TLS context.");
tor_assert_unreached();
}
/* We also make sure to rotate the TLS connections themselves if they've
* been up for too long -- but that's done via is_bad_for_new_circs in
* run_connection_housekeeping() above. */
return MAX_SSL_KEY_LIFETIME_INTERNAL;
}
/**
* Periodic callback: once an hour, grab some more entropy from the
* kernel and feed it to our CSPRNG.
**/
static int
add_entropy_callback(time_t now, const or_options_t *options)
{
(void)now;
(void)options;
/* We already seeded once, so don't die on failure. */
if (crypto_seed_rng() < 0) {
log_warn(LD_GENERAL, "Tried to re-seed RNG, but failed. We already "
"seeded once, though, so we won't exit here.");
}
/** How often do we add more entropy to OpenSSL's RNG pool? */
#define ENTROPY_INTERVAL (60*60)
return ENTROPY_INTERVAL;
}
/**
* Periodic callback: if we're an authority, make sure we test
* the routers on the network for reachability.
*/
static int
launch_reachability_tests_callback(time_t now, const or_options_t *options)
{
if (authdir_mode_tests_reachability(options) &&
!net_is_disabled()) {
/* try to determine reachability of the other Tor relays */
dirserv_test_reachability(now);
}
return REACHABILITY_TEST_INTERVAL;
}
/**
* Periodic callback: if we're an authority, discount the stability
* information (and other rephist information) that's older.
*/
static int
downrate_stability_callback(time_t now, const or_options_t *options)
{
(void)options;
/* 1d. Periodically, we discount older stability information so that new
* stability info counts more, and save the stability information to disk as
* appropriate. */
time_t next = rep_hist_downrate_old_runs(now);
return safe_timer_diff(now, next);
}
/**
* Periodic callback: if we're an authority, record our measured stability
* information from rephist in an mtbf file.
*/
static int
save_stability_callback(time_t now, const or_options_t *options)
{
if (authdir_mode_tests_reachability(options)) {
if (rep_hist_record_mtbf_data(now, 1)<0) {
log_warn(LD_GENERAL, "Couldn't store mtbf data.");
}
}
#define SAVE_STABILITY_INTERVAL (30*60)
return SAVE_STABILITY_INTERVAL;
}
/**
* Periodic callback: if we're an authority, check on our authority
* certificate (the one that authenticates our authority signing key).
*/
static int
check_authority_cert_callback(time_t now, const or_options_t *options)
{
(void)now;
(void)options;
/* 1e. Periodically, if we're a v3 authority, we check whether our cert is
* close to expiring and warn the admin if it is. */
v3_authority_check_key_expiry();
#define CHECK_V3_CERTIFICATE_INTERVAL (5*60)
return CHECK_V3_CERTIFICATE_INTERVAL;
}
/**
* Periodic callback: If our consensus is too old, recalculate whether
* we can actually use it.
*/
static int
check_expired_networkstatus_callback(time_t now, const or_options_t *options)
{
(void)options;
/* Check whether our networkstatus has expired. */
networkstatus_t *ns = networkstatus_get_latest_consensus();
/*XXXX RD: This value needs to be the same as REASONABLY_LIVE_TIME in
* networkstatus_get_reasonably_live_consensus(), but that value is way
* way too high. Arma: is the bridge issue there resolved yet? -NM */
#define NS_EXPIRY_SLOP (24*60*60)
if (ns && ns->valid_until < (now - NS_EXPIRY_SLOP) &&
router_have_minimum_dir_info()) {
router_dir_info_changed();
}
#define CHECK_EXPIRED_NS_INTERVAL (2*60)
return CHECK_EXPIRED_NS_INTERVAL;
}
/**
* Periodic callback: Write statistics to disk if appropriate.
*/
static int
write_stats_file_callback(time_t now, const or_options_t *options)
{
/* 1g. Check whether we should write statistics to disk.
*/
#define CHECK_WRITE_STATS_INTERVAL (60*60)
time_t next_time_to_write_stats_files = now + CHECK_WRITE_STATS_INTERVAL;
if (options->CellStatistics) {
time_t next_write =
rep_hist_buffer_stats_write(now);
if (next_write && next_write < next_time_to_write_stats_files)
next_time_to_write_stats_files = next_write;
}
if (options->DirReqStatistics) {
time_t next_write = geoip_dirreq_stats_write(now);
if (next_write && next_write < next_time_to_write_stats_files)
next_time_to_write_stats_files = next_write;
}
if (options->EntryStatistics) {
time_t next_write = geoip_entry_stats_write(now);
if (next_write && next_write < next_time_to_write_stats_files)
next_time_to_write_stats_files = next_write;
}
if (options->HiddenServiceStatistics) {
time_t next_write = rep_hist_hs_stats_write(now);
if (next_write && next_write < next_time_to_write_stats_files)
next_time_to_write_stats_files = next_write;
}
if (options->ExitPortStatistics) {
time_t next_write = rep_hist_exit_stats_write(now);
if (next_write && next_write < next_time_to_write_stats_files)
next_time_to_write_stats_files = next_write;
}
if (options->ConnDirectionStatistics) {
time_t next_write = rep_hist_conn_stats_write(now);
if (next_write && next_write < next_time_to_write_stats_files)
next_time_to_write_stats_files = next_write;
}
if (options->BridgeAuthoritativeDir) {
time_t next_write = rep_hist_desc_stats_write(now);
if (next_write && next_write < next_time_to_write_stats_files)
next_time_to_write_stats_files = next_write;
}
return safe_timer_diff(now, next_time_to_write_stats_files);
}
#define CHANNEL_CHECK_INTERVAL (60*60)
static int
check_canonical_channels_callback(time_t now, const or_options_t *options)
{
(void)now;
if (public_server_mode(options))
channel_check_for_duplicates();
return CHANNEL_CHECK_INTERVAL;
}
static int
reset_padding_counts_callback(time_t now, const or_options_t *options)
{
if (options->PaddingStatistics) {
rep_hist_prep_published_padding_counts(now);
}
rep_hist_reset_padding_counts();
return REPHIST_CELL_PADDING_COUNTS_INTERVAL;
}
static int should_init_bridge_stats = 1;
/**
* Periodic callback: Write bridge statistics to disk if appropriate.
*/
static int
record_bridge_stats_callback(time_t now, const or_options_t *options)
{
/* 1h. Check whether we should write bridge statistics to disk.
*/
if (should_record_bridge_info(options)) {
if (should_init_bridge_stats) {
/* (Re-)initialize bridge statistics. */
geoip_bridge_stats_init(now);
should_init_bridge_stats = 0;
return WRITE_STATS_INTERVAL;
} else {
/* Possibly write bridge statistics to disk and ask when to write
* them next time. */
time_t next = geoip_bridge_stats_write(now);
return safe_timer_diff(now, next);
}
} else if (!should_init_bridge_stats) {
/* Bridge mode was turned off. Ensure that stats are re-initialized
* next time bridge mode is turned on. */
should_init_bridge_stats = 1;
}
return PERIODIC_EVENT_NO_UPDATE;
}
/**
* Periodic callback: Clean in-memory caches every once in a while
*/
static int
clean_caches_callback(time_t now, const or_options_t *options)
{
/* Remove old information from rephist and the rend cache. */
rep_history_clean(now - options->RephistTrackTime);
rend_cache_clean(now, REND_CACHE_TYPE_SERVICE);
hs_cache_clean_as_client(now);
hs_cache_clean_as_dir(now);
microdesc_cache_rebuild(NULL, 0);
#define CLEAN_CACHES_INTERVAL (30*60)
return CLEAN_CACHES_INTERVAL;
}
/**
* Periodic callback: Clean the cache of failed hidden service lookups
* frequently.
*/
static int
rend_cache_failure_clean_callback(time_t now, const or_options_t *options)
{
(void)options;
/* We don't keep entries that are more than five minutes old, so we try to
 * clean the cache as often as we can, since we want to make sure the client
 * waits as little as possible for reachability reasons. */
rend_cache_failure_clean(now);
hs_cache_client_intro_state_clean(now);
return 30;
}
/**
* Periodic callback: If we're a server and initializing dns failed, retry.
*/
static int
retry_dns_callback(time_t now, const or_options_t *options)
{
(void)now;
#define RETRY_DNS_INTERVAL (10*60)
if (server_mode(options) && has_dns_init_failed())
dns_init();
return RETRY_DNS_INTERVAL;
}
/** Periodic callback: consider rebuilding and re-uploading our descriptor
* (if we've passed our internal checks). */
static int
check_descriptor_callback(time_t now, const or_options_t *options)
{
/** How often do we check whether part of our router info has changed in a
* way that would require an upload? That includes checking whether our IP
* address has changed. */
#define CHECK_DESCRIPTOR_INTERVAL (60)
(void)options;
/* 2b. Once per minute, regenerate and upload the descriptor if the old
* one is inaccurate. */
if (!net_is_disabled()) {
check_descriptor_bandwidth_changed(now);
check_descriptor_ipaddress_changed(now);
mark_my_descriptor_dirty_if_too_old(now);
consider_publishable_server(0);
/* If any networkstatus documents are no longer recent, we need to
* update all the descriptors' running status. */
/* Remove dead routers. */
/* XXXX This doesn't belong here, but it was here in the pre-
* XXXX refactoring code. */
routerlist_remove_old_routers();
}
return CHECK_DESCRIPTOR_INTERVAL;
}
/**
* Periodic callback: check whether we're reachable (as a relay), and
* whether our bandwidth has changed enough that we need to
* publish a new descriptor.
*/
static int
check_for_reachability_bw_callback(time_t now, const or_options_t *options)
{
/* XXXX This whole thing was stuck in the middle of what is now
* XXXX check_descriptor_callback. I'm not sure it's right. */
static int dirport_reachability_count = 0;
/* also, check religiously for reachability if we're within the first
* 20 minutes of our uptime. */
if (server_mode(options) &&
(have_completed_a_circuit() || !any_predicted_circuits(now)) &&
!net_is_disabled()) {
if (get_uptime() < TIMEOUT_UNTIL_UNREACHABILITY_COMPLAINT) {
router_do_reachability_checks(1, dirport_reachability_count==0);
if (++dirport_reachability_count > 5)
dirport_reachability_count = 0;
return 1;
} else {
/* If we haven't checked for 12 hours and our bandwidth estimate is
* low, do another bandwidth test. This is especially important for
* bridges, since they might go long periods without much use. */
const routerinfo_t *me = router_get_my_routerinfo();
static int first_time = 1;
if (!first_time && me &&
me->bandwidthcapacity < me->bandwidthrate &&
me->bandwidthcapacity < 51200) {
reset_bandwidth_test();
}
first_time = 0;
#define BANDWIDTH_RECHECK_INTERVAL (12*60*60)
return BANDWIDTH_RECHECK_INTERVAL;
}
}
return CHECK_DESCRIPTOR_INTERVAL;
}
/**
* Periodic event: once a minute, (or every second if TestingTorNetwork, or
* during client bootstrap), check whether we want to download any
* networkstatus documents. */
static int
fetch_networkstatus_callback(time_t now, const or_options_t *options)
{
/* How often do we check whether we should download network status
* documents? */
const int we_are_bootstrapping = networkstatus_consensus_is_bootstrapping(
now);
const int prefer_mirrors = !directory_fetches_from_authorities(
get_options());
int networkstatus_dl_check_interval = 60;
/* check more often when testing, or when bootstrapping from mirrors
* (connection limits prevent too many connections being made) */
if (options->TestingTorNetwork
|| (we_are_bootstrapping && prefer_mirrors)) {
networkstatus_dl_check_interval = 1;
}
if (should_delay_dir_fetches(options, NULL))
return PERIODIC_EVENT_NO_UPDATE;
update_networkstatus_downloads(now);
return networkstatus_dl_check_interval;
}
/**
* Periodic callback: Every 60 seconds, we relaunch listeners if any died. */
static int
retry_listeners_callback(time_t now, const or_options_t *options)
{
(void)now;
(void)options;
if (!net_is_disabled()) {
retry_all_listeners(NULL, NULL, 0);
return 60;
}
return PERIODIC_EVENT_NO_UPDATE;
}
/**
* Periodic callback: as a server, see if we have any old unused circuits
* that should be expired */
static int
expire_old_ciruits_serverside_callback(time_t now, const or_options_t *options)
{
(void)options;
/* every 11 seconds, so not usually the same second as other such events */
circuit_expire_old_circuits_serverside(now);
return 11;
}
static int dns_honesty_first_time = 1;
/**
* Periodic event: if we're an exit, see if our DNS server is telling us
* obvious lies.
*/
static int
check_dns_honesty_callback(time_t now, const or_options_t *options)
{
(void)now;
/* 9. and if we're an exit node, check whether our DNS is telling stories
* to us. */
if (net_is_disabled() ||
! public_server_mode(options) ||
router_my_exit_policy_is_reject_star())
return PERIODIC_EVENT_NO_UPDATE;
if (dns_honesty_first_time) {
/* Don't launch right when we start */
dns_honesty_first_time = 0;
return crypto_rand_int_range(60, 180);
}
dns_launch_correctness_checks();
return 12*3600 + crypto_rand_int(12*3600);
}
/**
* Periodic callback: if we're the bridge authority, write a networkstatus
* file to disk.
*/
static int
write_bridge_ns_callback(time_t now, const or_options_t *options)
{
/* 10. write bridge networkstatus file to disk */
if (options->BridgeAuthoritativeDir) {
networkstatus_dump_bridge_status_to_file(now);
#define BRIDGE_STATUSFILE_INTERVAL (30*60)
return BRIDGE_STATUSFILE_INTERVAL;
}
return PERIODIC_EVENT_NO_UPDATE;
}
/**
* Periodic callback: poke the tor-fw-helper app if we're using one.
*/
static int
check_fw_helper_app_callback(time_t now, const or_options_t *options)
{
if (net_is_disabled() ||
! server_mode(options) ||
! options->PortForwarding ||
options->NoExec) {
return PERIODIC_EVENT_NO_UPDATE;
}
/* 11. check the port forwarding app */
#define PORT_FORWARDING_CHECK_INTERVAL 5
smartlist_t *ports_to_forward = get_list_of_ports_to_forward();
if (ports_to_forward) {
tor_check_port_forwarding(options->PortForwardingHelper,
ports_to_forward,
now);
SMARTLIST_FOREACH(ports_to_forward, char *, cp, tor_free(cp));
smartlist_free(ports_to_forward);
}
return PORT_FORWARDING_CHECK_INTERVAL;
}
static int heartbeat_callback_first_time = 1;
/**
* Periodic callback: write the heartbeat message in the logs.
*
* If writing the heartbeat message to the logs fails for some reason, retry
* again after <b>MIN_HEARTBEAT_PERIOD</b> seconds.
*/
static int
heartbeat_callback(time_t now, const or_options_t *options)
{
/* Check if heartbeat is disabled */
if (!options->HeartbeatPeriod) {
return PERIODIC_EVENT_NO_UPDATE;
}
/* Skip the first one. */
if (heartbeat_callback_first_time) {
heartbeat_callback_first_time = 0;
return options->HeartbeatPeriod;
}
/* Write the heartbeat message */
if (log_heartbeat(now) == 0) {
return options->HeartbeatPeriod;
} else {
/* If we couldn't write the heartbeat log message, try again in the minimum
* interval of time. */
return MIN_HEARTBEAT_PERIOD;
}
}
#define CDM_CLEAN_CALLBACK_INTERVAL 600
static int
clean_consdiffmgr_callback(time_t now, const or_options_t *options)
{
(void)now;
if (server_mode(options)) {
consdiffmgr_cleanup();
}
return CDM_CLEAN_CALLBACK_INTERVAL;
}
/**
* Periodic callback: Run scheduled events for HS service. This is called
* every second.
*/
static int
hs_service_callback(time_t now, const or_options_t *options)
{
(void) options;
/* We need to be able to build circuits, to have a working network, and to
 * have a live consensus before running the service's scheduled events. */
if (!have_completed_a_circuit() || net_is_disabled() ||
networkstatus_get_live_consensus(now) == NULL) {
goto end;
}
hs_service_run_scheduled_events(now);
end:
/* Every 1 second. */
return 1;
}
/** Timer: used to invoke second_elapsed_callback() once per second. */
static periodic_timer_t *second_timer = NULL;
/** Number of libevent errors in the last second: we die if we get too many. */
static int n_libevent_errors = 0;
/** Last time that second_elapsed_callback was called. */
static time_t current_second = 0;
/** Libevent callback: invoked once every second. */
static void
second_elapsed_callback(periodic_timer_t *timer, void *arg)
{
/* XXXX This could be sensibly refactored into multiple callbacks, and we
* could use Libevent's timers for this rather than checking the current
* time against a bunch of timeouts every second. */
time_t now;
size_t bytes_written;
size_t bytes_read;
int seconds_elapsed;
const or_options_t *options = get_options();
(void)timer;
(void)arg;
n_libevent_errors = 0;
/* log_notice(LD_GENERAL, "Tick."); */
now = time(NULL);
update_approx_time(now);
/* the second has rolled over. check more stuff. */
seconds_elapsed = current_second ? (int)(now - current_second) : 0;
bytes_read = (size_t)(stats_n_bytes_read - stats_prev_n_read);
bytes_written = (size_t)(stats_n_bytes_written - stats_prev_n_written);
stats_prev_n_read = stats_n_bytes_read;
stats_prev_n_written = stats_n_bytes_written;
control_event_bandwidth_used((uint32_t)bytes_read,(uint32_t)bytes_written);
control_event_stream_bandwidth_used();
control_event_conn_bandwidth_used();
control_event_circ_bandwidth_used();
control_event_circuit_cell_stats();
if (server_mode(options) &&
!net_is_disabled() &&
seconds_elapsed > 0 &&
have_completed_a_circuit() &&
get_uptime() / TIMEOUT_UNTIL_UNREACHABILITY_COMPLAINT !=
(get_uptime()+seconds_elapsed) /
TIMEOUT_UNTIL_UNREACHABILITY_COMPLAINT) {
/* every 20 minutes, check and complain if necessary */
const routerinfo_t *me = router_get_my_routerinfo();
if (me && !check_whether_orport_reachable(options)) {
char *address = tor_dup_ip(me->addr);
log_warn(LD_CONFIG,"Your server (%s:%d) has not managed to confirm that "
"its ORPort is reachable. Relays do not publish descriptors "
"until their ORPort and DirPort are reachable. Please check "
"your firewalls, ports, address, /etc/hosts file, etc.",
address, me->or_port);
control_event_server_status(LOG_WARN,
"REACHABILITY_FAILED ORADDRESS=%s:%d",
address, me->or_port);
tor_free(address);
}
if (me && !check_whether_dirport_reachable(options)) {
char *address = tor_dup_ip(me->addr);
log_warn(LD_CONFIG,
"Your server (%s:%d) has not managed to confirm that its "
"DirPort is reachable. Relays do not publish descriptors "
"until their ORPort and DirPort are reachable. Please check "
"your firewalls, ports, address, /etc/hosts file, etc.",
address, me->dir_port);
control_event_server_status(LOG_WARN,
"REACHABILITY_FAILED DIRADDRESS=%s:%d",
address, me->dir_port);
tor_free(address);
}
}
/** If more than this many seconds have elapsed, probably the clock
* jumped: doesn't count. */
#define NUM_JUMPED_SECONDS_BEFORE_WARN 100
if (seconds_elapsed < -NUM_JUMPED_SECONDS_BEFORE_WARN ||
seconds_elapsed >= NUM_JUMPED_SECONDS_BEFORE_WARN) {
circuit_note_clock_jumped(seconds_elapsed);
} else if (seconds_elapsed > 0)
stats_n_seconds_working += seconds_elapsed;
run_scheduled_events(now);
current_second = now; /* remember which second it is, for next time */
}
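/* Worked example of the reachability-complaint trigger above, assuming
 * TIMEOUT_UNTIL_UNREACHABILITY_COMPLAINT is 20*60 = 1200 seconds (as the
 * "every 20 minutes" comment suggests): integer division buckets the uptime
 * into 1200-second windows, so
 *
 *   uptime / 1200 != (uptime + seconds_elapsed) / 1200
 *
 * is true only on the tick where a window boundary is crossed.  With
 * uptime == 1199 and seconds_elapsed == 1 we get 0 != 1 and the check runs;
 * with uptime == 1200 we get 1 != 1 and it stays quiet until the
 * 2399 -> 2400 crossing enters the next bucket. */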
#ifdef HAVE_SYSTEMD_209
static periodic_timer_t *systemd_watchdog_timer = NULL;
/** Libevent callback: invoked to reset systemd watchdog. */
static void
systemd_watchdog_callback(periodic_timer_t *timer, void *arg)
{
(void)timer;
(void)arg;
sd_notify(0, "WATCHDOG=1");
}
#endif /* defined(HAVE_SYSTEMD_209) */
/** Timer: used to invoke refill_callback(). */
static periodic_timer_t *refill_timer = NULL;
/** Millisecond when refill_callback was last invoked. */
static struct timeval refill_timer_current_millisecond;
/** Libevent callback: invoked periodically to refill token buckets
* and count r/w bytes. */
static void
refill_callback(periodic_timer_t *timer, void *arg)
{
struct timeval now;
size_t bytes_written;
size_t bytes_read;
int milliseconds_elapsed = 0;
int seconds_rolled_over = 0;
const or_options_t *options = get_options();
(void)timer;
(void)arg;
tor_gettimeofday(&now);
/* If this is our first time, no time has passed. */
if (refill_timer_current_millisecond.tv_sec) {
long mdiff = tv_mdiff(&refill_timer_current_millisecond, &now);
if (mdiff > INT_MAX)
mdiff = INT_MAX;
milliseconds_elapsed = (int)mdiff;
seconds_rolled_over = (int)(now.tv_sec -
refill_timer_current_millisecond.tv_sec);
}
bytes_written = stats_prev_global_write_bucket - global_write_bucket;
bytes_read = stats_prev_global_read_bucket - global_read_bucket;
stats_n_bytes_read += bytes_read;
stats_n_bytes_written += bytes_written;
if (accounting_is_enabled(options) && milliseconds_elapsed >= 0)
accounting_add_bytes(bytes_read, bytes_written, seconds_rolled_over);
if (milliseconds_elapsed > 0)
connection_bucket_refill(milliseconds_elapsed, (time_t)now.tv_sec);
stats_prev_global_read_bucket = global_read_bucket;
stats_prev_global_write_bucket = global_write_bucket;
/* remember what time it is, for next time */
refill_timer_current_millisecond = now;
}
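/* Worked example for the bookkeeping above: if the previous invocation ran
 * at 10.900 s and this one runs at 11.050 s (e.g. with a 100 ms
 * TokenBucketRefillInterval), tv_mdiff() gives milliseconds_elapsed == 150,
 * while seconds_rolled_over == 11 - 10 == 1 even though less than a full
 * second passed.  The accounting call only cares how many wall-clock second
 * boundaries were crossed; the bucket refill scales with the exact number of
 * milliseconds. */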
#ifndef _WIN32
/** Called when a possibly ignorable libevent error occurs; ensures that we
* don't get into an infinite loop by ignoring too many errors from
* libevent. */
static int
got_libevent_error(void)
{
if (++n_libevent_errors > 8) {
log_err(LD_NET, "Too many libevent errors in one second; dying");
return -1;
}
return 0;
}
#endif /* !defined(_WIN32) */
#define UPTIME_CUTOFF_FOR_NEW_BANDWIDTH_TEST (6*60*60)
/** Called when our IP address seems to have changed. <b>at_interface</b>
* should be true if we detected a change in our interface, and false if we
* detected a change in our published address. */
void
ip_address_changed(int at_interface)
{
const or_options_t *options = get_options();
int server = server_mode(options);
int exit_reject_interfaces = (server && options->ExitRelay
&& options->ExitPolicyRejectLocalInterfaces);
if (at_interface) {
if (! server) {
/* Okay, change our keys. */
if (init_keys_client() < 0)
log_warn(LD_GENERAL, "Unable to rotate keys after IP change!");
}
} else {
if (server) {
if (get_uptime() > UPTIME_CUTOFF_FOR_NEW_BANDWIDTH_TEST)
reset_bandwidth_test();
reset_uptime();
router_reset_reachability();
}
}
/* Exit relays incorporate interface addresses in their exit policies when
* ExitPolicyRejectLocalInterfaces is set */
if (exit_reject_interfaces || (server && !at_interface)) {
mark_my_descriptor_dirty("IP address changed");
}
dns_servers_relaunch_checks();
}
/** Forget what we've learned about the correctness of our DNS servers, and
* start learning again. */
void
dns_servers_relaunch_checks(void)
{
if (server_mode(get_options())) {
dns_reset_correctness_checks();
if (periodic_events_initialized) {
tor_assert(check_dns_honesty_event);
periodic_event_reschedule(check_dns_honesty_event);
}
}
}
/** Called when we get a SIGHUP: reload configuration files and keys,
* retry all connections, and so on. */
static int
do_hup(void)
{
const or_options_t *options = get_options();
#ifdef USE_DMALLOC
dmalloc_log_stats();
dmalloc_log_changed(0, 1, 0, 0);
#endif
log_notice(LD_GENERAL,"Received reload signal (hup). Reloading config and "
"resetting internal state.");
if (accounting_is_enabled(options))
accounting_record_bandwidth_usage(time(NULL), get_or_state());
router_reset_warnings();
routerlist_reset_warnings();
/* first, reload config variables, in case they've changed */
if (options->ReloadTorrcOnSIGHUP) {
/* no need to provide argc/v, they've been cached in init_from_config */
int init_rv = options_init_from_torrc(0, NULL);
if (init_rv < 0) {
log_err(LD_CONFIG,"Reading config failed--see warnings above. "
"For usage, try -h.");
return -1;
} else if (BUG(init_rv > 0)) {
// LCOV_EXCL_START
/* This should be impossible: the only "return 1" cases in
* options_init_from_torrc are ones caused by command-line arguments;
* but they can't change while Tor is running. */
return -1;
// LCOV_EXCL_STOP
}
options = get_options(); /* they have changed now */
/* Logs are only truncated the first time they are opened, but were
probably intended to be cleaned up on signal. */
if (options->TruncateLogFile)
truncate_logs();
} else {
char *msg = NULL;
log_notice(LD_GENERAL, "Not reloading config file: the controller told "
"us not to.");
/* Make stuff get rescanned, reloaded, etc. */
if (set_options((or_options_t*)options, &msg) < 0) {
if (!msg)
msg = tor_strdup("Unknown error");
log_warn(LD_GENERAL, "Unable to re-set previous options: %s", msg);
tor_free(msg);
}
}
if (authdir_mode(options)) {
/* reload the approved-routers file */
if (dirserv_load_fingerprint_file() < 0) {
/* warnings are logged from dirserv_load_fingerprint_file() directly */
log_info(LD_GENERAL, "Error reloading fingerprints. "
"Continuing with old list.");
}
}
/* Rotate away from the old dirty circuits. This has to be done
* after we've read the new options, but before we start using
* circuits for directory fetches. */
circuit_mark_all_dirty_circs_as_unusable();
/* retry appropriate downloads */
router_reset_status_download_failures();
router_reset_descriptor_download_failures();
if (!net_is_disabled())
update_networkstatus_downloads(time(NULL));
/* We'll retry routerstatus downloads in about 10 seconds; no need to
* force a retry there. */
if (server_mode(options)) {
/* Maybe we've been given a new ed25519 key or certificate?
*/
time_t now = approx_time();
int new_signing_key = load_ed_keys(options, now);
if (new_signing_key < 0 ||
generate_ed_link_cert(options, now, new_signing_key > 0)) {
log_warn(LD_OR, "Problem reloading Ed25519 keys; still using old keys.");
}
/* Update cpuworker and dnsworker processes, so they get up-to-date
* configuration options. */
cpuworkers_rotate_keyinfo();
dns_reset();
}
return 0;
}
/** Tor main loop. */
int
do_main_loop(void)
{
time_t now;
/* initialize the periodic events first, so that code that depends on the
* events being present does not assert.
*/
if (! periodic_events_initialized) {
initialize_periodic_events();
}
/* initialize dns resolve map, spawn workers if needed */
if (dns_init() < 0) {
if (get_options()->ServerDNSAllowBrokenConfig)
log_warn(LD_GENERAL, "Couldn't set up any working nameservers. "
"Network not up yet? Will try again soon.");
else {
log_err(LD_GENERAL,"Error initializing dns subsystem; exiting. To "
"retry instead, set the ServerDNSAllowBrokenResolvConf option.");
}
}
handle_signals();
monotime_init();
timers_initialize();
/* load the private keys, if we're supposed to have them, and set up the
* TLS context. */
if (! client_identity_key_is_set()) {
if (init_keys() < 0) {
log_err(LD_OR, "Error initializing keys; exiting");
return -1;
}
}
/* Set up our buckets */
connection_bucket_init();
stats_prev_global_read_bucket = global_read_bucket;
stats_prev_global_write_bucket = global_write_bucket;
/* initialize the bootstrap status events to know we're starting up */
control_event_bootstrap(BOOTSTRAP_STATUS_STARTING, 0);
/* Initialize the keypinning log. */
if (authdir_mode_v3(get_options())) {
char *fname = get_datadir_fname("key-pinning-journal");
int r = 0;
if (keypin_load_journal(fname)<0) {
log_err(LD_DIR, "Error loading key-pinning journal: %s",strerror(errno));
r = -1;
}
if (keypin_open_journal(fname)<0) {
log_err(LD_DIR, "Error opening key-pinning journal: %s",strerror(errno));
r = -1;
}
tor_free(fname);
if (r)
return r;
}
{
/* This is the old name for key-pinning-journal. These got corrupted
* in a couple of cases by #16530, so we started over. See #16580 for
* the rationale and for other options we didn't take. We can remove
* this code once all the authorities that ran 0.2.7.1-alpha-dev are
* upgraded.
*/
char *fname = get_datadir_fname("key-pinning-entries");
unlink(fname);
tor_free(fname);
}
if (trusted_dirs_reload_certs()) {
log_warn(LD_DIR,
"Couldn't load all cached v3 certificates. Starting anyway.");
}
if (router_reload_consensus_networkstatus()) {
return -1;
}
/* load the routers file, or assign the defaults. */
if (router_reload_router_list()) {
return -1;
}
/* load the networkstatuses. (This launches a download for new routers as
* appropriate.)
*/
now = time(NULL);
directory_info_has_arrived(now, 1, 0);
if (server_mode(get_options())) {
/* launch cpuworkers. Need to do this *after* we've read the onion key. */
cpu_init();
}
consdiffmgr_enable_background_compression();
/* Setup shared random protocol subsystem. */
if (authdir_mode_v3(get_options())) {
if (sr_init(1) < 0) {
return -1;
}
}
/* set up once-a-second callback. */
if (! second_timer) {
struct timeval one_second;
one_second.tv_sec = 1;
one_second.tv_usec = 0;
second_timer = periodic_timer_new(tor_libevent_get_base(),
&one_second,
second_elapsed_callback,
NULL);
tor_assert(second_timer);
}
#ifdef HAVE_SYSTEMD_209
uint64_t watchdog_delay;
/* set up systemd watchdog notification. */
if (sd_watchdog_enabled(1, &watchdog_delay) > 0) {
if (! systemd_watchdog_timer) {
struct timeval watchdog;
/* The manager will "act on" us if we don't send them a notification
* every 'watchdog_delay' microseconds. So, send notifications twice
* that often. */
watchdog_delay /= 2;
watchdog.tv_sec = watchdog_delay / 1000000;
watchdog.tv_usec = watchdog_delay % 1000000;
systemd_watchdog_timer = periodic_timer_new(tor_libevent_get_base(),
&watchdog,
systemd_watchdog_callback,
NULL);
tor_assert(systemd_watchdog_timer);
}
}
#endif /* defined(HAVE_SYSTEMD_209) */
if (!refill_timer) {
struct timeval refill_interval;
int msecs = get_options()->TokenBucketRefillInterval;
refill_interval.tv_sec = msecs/1000;
refill_interval.tv_usec = (msecs%1000)*1000;
refill_timer = periodic_timer_new(tor_libevent_get_base(),
&refill_interval,
refill_callback,
NULL);
tor_assert(refill_timer);
}
#ifdef HAVE_SYSTEMD
{
const int r = sd_notify(0, "READY=1");
if (r < 0) {
log_warn(LD_GENERAL, "Unable to send readiness to systemd: %s",
strerror(r));
} else if (r > 0) {
log_notice(LD_GENERAL, "Signaled readiness to systemd");
} else {
log_info(LD_GENERAL, "Systemd NOTIFY_SOCKET not present.");
}
}
#endif /* defined(HAVE_SYSTEMD) */
main_loop_should_exit = 0;
main_loop_exit_value = 0;
#ifdef ENABLE_RESTART_DEBUGGING
{
static int first_time = 1;
if (first_time && getenv("TOR_DEBUG_RESTART")) {
first_time = 0;
const char *sec_str = getenv("TOR_DEBUG_RESTART_AFTER_SECONDS");
long sec;
int sec_ok=0;
if (sec_str &&
(sec = tor_parse_long(sec_str, 10, 0, INT_MAX, &sec_ok, NULL)) &&
sec_ok) {
/* Okay, we parsed the seconds. */
} else {
sec = 5;
}
struct timeval restart_after = { (time_t) sec, 0 };
tor_shutdown_event_loop_for_restart_event =
tor_evtimer_new(tor_libevent_get_base(),
tor_shutdown_event_loop_for_restart_cb, NULL);
event_add(tor_shutdown_event_loop_for_restart_event, &restart_after);
}
}
#endif
return run_main_loop_until_done();
}
/**
* Run the main loop a single time. Return 0 for "exit"; -1 for "exit with
* error", and 1 for "run this again."
*/
static int
run_main_loop_once(void)
{
int loop_result;
if (nt_service_is_stopping())
return 0;
if (main_loop_should_exit)
return 0;
#ifndef _WIN32
/* Make it easier to tell whether libevent failure is our fault or not. */
errno = 0;
#endif
/* All active linked conns should get their read events activated,
* so that libevent knows to run their callbacks. */
SMARTLIST_FOREACH(active_linked_connection_lst, connection_t *, conn,
event_active(conn->read_event, EV_READ, 1));
if (get_options()->MainloopStats) {
/* We always enforce that EVLOOP_ONCE is passed to event_base_loop() if we
* are collecting main loop statistics. */
called_loop_once = 1;
} else {
called_loop_once = smartlist_len(active_linked_connection_lst) ? 1 : 0;
}
/* Make sure we know (about) what time it is. */
update_approx_time(time(NULL));
/* Here it is: the main loop. Here we tell Libevent to poll until we have
* an event, or the second ends, or until we have some active linked
* connections to trigger events for. Libevent will wait till one
* of these happens, then run all the appropriate callbacks. */
loop_result = event_base_loop(tor_libevent_get_base(),
called_loop_once ? EVLOOP_ONCE : 0);
if (get_options()->MainloopStats) {
/* Update our main loop counters. */
if (loop_result == 0) {
// The call was successful.
increment_main_loop_success_count();
} else if (loop_result == -1) {
// The call was erroneous.
increment_main_loop_error_count();
} else if (loop_result == 1) {
// The call didn't have any active or pending events
// to handle.
increment_main_loop_idle_count();
}
}
/* Oh, the loop failed. That might be an error that we need to
* catch, but more likely, it's just an interrupted poll() call or something,
* and we should try again. */
if (loop_result < 0) {
int e = tor_socket_errno(-1);
/* let the program survive things like ^z */
if (e != EINTR && !ERRNO_IS_EINPROGRESS(e)) {
log_err(LD_NET,"libevent call with %s failed: %s [%d]",
tor_libevent_get_method(), tor_socket_strerror(e), e);
return -1;
#ifndef _WIN32
} else if (e == EINVAL) {
log_warn(LD_NET, "EINVAL from libevent: should you upgrade libevent?");
if (got_libevent_error())
return -1;
#endif /* !defined(_WIN32) */
} else {
tor_assert_nonfatal_once(! ERRNO_IS_EINPROGRESS(e));
log_debug(LD_NET,"libevent call interrupted.");
/* You can't trust the results of this poll(). Go back to the
* top of the big for loop. */
return 1;
}
}
if (main_loop_should_exit)
return 0;
/* And here is where we put callbacks that happen "every time the event loop
* runs." They must be very fast, or else the whole Tor process will get
* slowed down.
*
* Note that this gets called once per libevent loop, which will make it
* happen once per group of events that fire, or once per second. */
/* If there are any pending client connections, try attaching them to
* circuits (if we can.) This will be pretty fast if nothing new is
* pending.
*/
connection_ap_attach_pending(0);
return 1;
}
/** Run the run_main_loop_once() function until it declares itself done,
* and return its final return value.
*
* Shadow won't invoke this function, so don't fill it up with things.
*/
static int
run_main_loop_until_done(void)
{
int loop_result = 1;
do {
loop_result = run_main_loop_once();
} while (loop_result == 1);
if (main_loop_should_exit)
return main_loop_exit_value;
else
return loop_result;
}
/** Libevent callback: invoked when we get a signal.
*/
static void
signal_callback(evutil_socket_t fd, short events, void *arg)
{
const int *sigptr = arg;
const int sig = *sigptr;
(void)fd;
(void)events;
process_signal(sig);
}
/** Do the work of acting on a signal received in <b>sig</b> */
static void
process_signal(int sig)
{
switch (sig)
{
case SIGTERM:
log_notice(LD_GENERAL,"Catching signal TERM, exiting cleanly.");
tor_shutdown_event_loop_and_exit(0);
break;
case SIGINT:
if (!server_mode(get_options())) { /* do it now */
log_notice(LD_GENERAL,"Interrupt: exiting cleanly.");
tor_shutdown_event_loop_and_exit(0);
return;
}
#ifdef HAVE_SYSTEMD
sd_notify(0, "STOPPING=1");
#endif
hibernate_begin_shutdown();
break;
#ifdef SIGPIPE
case SIGPIPE:
log_debug(LD_GENERAL,"Caught SIGPIPE. Ignoring.");
break;
#endif
case SIGUSR1:
/* prefer to log it at INFO, but make sure we always see it */
dumpstats(get_min_log_level()<LOG_INFO ? get_min_log_level() : LOG_INFO);
control_event_signal(sig);
break;
case SIGUSR2:
switch_logs_debug();
log_debug(LD_GENERAL,"Caught USR2, going to loglevel debug. "
"Send HUP to change back.");
control_event_signal(sig);
break;
case SIGHUP:
#ifdef HAVE_SYSTEMD
sd_notify(0, "RELOADING=1");
#endif
if (do_hup() < 0) {
log_warn(LD_CONFIG,"Restart failed (config error?). Exiting.");
tor_shutdown_event_loop_and_exit(1);
return;
}
#ifdef HAVE_SYSTEMD
sd_notify(0, "READY=1");
#endif
control_event_signal(sig);
break;
#ifdef SIGCHLD
case SIGCHLD:
notify_pending_waitpid_callbacks();
break;
#endif
case SIGNEWNYM: {
time_t now = time(NULL);
if (time_of_last_signewnym + MAX_SIGNEWNYM_RATE > now) {
signewnym_is_pending = 1;
log_notice(LD_CONTROL,
"Rate limiting NEWNYM request: delaying by %d second(s)",
(int)(MAX_SIGNEWNYM_RATE+time_of_last_signewnym-now));
} else {
signewnym_impl(now);
}
break;
}
case SIGCLEARDNSCACHE:
addressmap_clear_transient();
control_event_signal(sig);
break;
case SIGHEARTBEAT:
log_heartbeat(time(NULL));
control_event_signal(sig);
break;
}
}
/** Returns Tor's uptime. */
MOCK_IMPL(long,
get_uptime,(void))
{
return stats_n_seconds_working;
}
/** Reset Tor's uptime. */
MOCK_IMPL(void,
reset_uptime,(void))
{
stats_n_seconds_working = 0;
}
/**
* Write current memory usage information to the log.
*/
static void
dumpmemusage(int severity)
{
connection_dump_buffer_mem_stats(severity);
tor_log(severity, LD_GENERAL, "In rephist: "U64_FORMAT" used by %d Tors.",
U64_PRINTF_ARG(rephist_total_alloc), rephist_total_num);
dump_routerlist_mem_usage(severity);
dump_cell_pool_usage(severity);
dump_dns_mem_usage(severity);
tor_log_mallinfo(severity);
}
/** Write all statistics to the log, with log level <b>severity</b>. Called
* in response to a SIGUSR1. */
static void
dumpstats(int severity)
{
time_t now = time(NULL);
time_t elapsed;
size_t rbuf_cap, wbuf_cap, rbuf_len, wbuf_len;
tor_log(severity, LD_GENERAL, "Dumping stats:");
SMARTLIST_FOREACH_BEGIN(connection_array, connection_t *, conn) {
int i = conn_sl_idx;
tor_log(severity, LD_GENERAL,
"Conn %d (socket %d) type %d (%s), state %d (%s), created %d secs ago",
i, (int)conn->s, conn->type, conn_type_to_string(conn->type),
conn->state, conn_state_to_string(conn->type, conn->state),
(int)(now - conn->timestamp_created));
if (!connection_is_listener(conn)) {
tor_log(severity,LD_GENERAL,
"Conn %d is to %s:%d.", i,
safe_str_client(conn->address),
conn->port);
tor_log(severity,LD_GENERAL,
"Conn %d: %d bytes waiting on inbuf (len %d, last read %d secs ago)",
i,
(int)connection_get_inbuf_len(conn),
(int)buf_allocation(conn->inbuf),
(int)(now - conn->timestamp_last_read_allowed));
tor_log(severity,LD_GENERAL,
"Conn %d: %d bytes waiting on outbuf "
"(len %d, last written %d secs ago)",i,
(int)connection_get_outbuf_len(conn),
(int)buf_allocation(conn->outbuf),
(int)(now - conn->timestamp_last_write_allowed));
if (conn->type == CONN_TYPE_OR) {
or_connection_t *or_conn = TO_OR_CONN(conn);
if (or_conn->tls) {
if (tor_tls_get_buffer_sizes(or_conn->tls, &rbuf_cap, &rbuf_len,
&wbuf_cap, &wbuf_len) == 0) {
tor_log(severity, LD_GENERAL,
"Conn %d: %d/%d bytes used on OpenSSL read buffer; "
"%d/%d bytes used on write buffer.",
i, (int)rbuf_len, (int)rbuf_cap, (int)wbuf_len, (int)wbuf_cap);
}
}
}
}
circuit_dump_by_conn(conn, severity); /* dump info about all the circuits
* using this conn */
} SMARTLIST_FOREACH_END(conn);
channel_dumpstats(severity);
channel_listener_dumpstats(severity);
tor_log(severity, LD_NET,
"Cells processed: "U64_FORMAT" padding\n"
" "U64_FORMAT" create\n"
" "U64_FORMAT" created\n"
" "U64_FORMAT" relay\n"
" ("U64_FORMAT" relayed)\n"
" ("U64_FORMAT" delivered)\n"
" "U64_FORMAT" destroy",
U64_PRINTF_ARG(stats_n_padding_cells_processed),
U64_PRINTF_ARG(stats_n_create_cells_processed),
U64_PRINTF_ARG(stats_n_created_cells_processed),
U64_PRINTF_ARG(stats_n_relay_cells_processed),
U64_PRINTF_ARG(stats_n_relay_cells_relayed),
U64_PRINTF_ARG(stats_n_relay_cells_delivered),
U64_PRINTF_ARG(stats_n_destroy_cells_processed));
if (stats_n_data_cells_packaged)
tor_log(severity,LD_NET,"Average packaged cell fullness: %2.3f%%",
100*(U64_TO_DBL(stats_n_data_bytes_packaged) /
U64_TO_DBL(stats_n_data_cells_packaged*RELAY_PAYLOAD_SIZE)) );
if (stats_n_data_cells_received)
tor_log(severity,LD_NET,"Average delivered cell fullness: %2.3f%%",
100*(U64_TO_DBL(stats_n_data_bytes_received) /
U64_TO_DBL(stats_n_data_cells_received*RELAY_PAYLOAD_SIZE)) );
cpuworker_log_onionskin_overhead(severity, ONION_HANDSHAKE_TYPE_TAP, "TAP");
cpuworker_log_onionskin_overhead(severity, ONION_HANDSHAKE_TYPE_NTOR,"ntor");
if (now - time_of_process_start >= 0)
elapsed = now - time_of_process_start;
else
elapsed = 0;
if (elapsed) {
tor_log(severity, LD_NET,
"Average bandwidth: "U64_FORMAT"/%d = %d bytes/sec reading",
U64_PRINTF_ARG(stats_n_bytes_read),
(int)elapsed,
(int) (stats_n_bytes_read/elapsed));
tor_log(severity, LD_NET,
"Average bandwidth: "U64_FORMAT"/%d = %d bytes/sec writing",
U64_PRINTF_ARG(stats_n_bytes_written),
(int)elapsed,
(int) (stats_n_bytes_written/elapsed));
}
tor_log(severity, LD_NET, "--------------- Dumping memory information:");
dumpmemusage(severity);
rep_hist_dump_stats(now,severity);
rend_service_dump_stats(severity);
dump_distinct_digest_count(severity);
}
/** Called by exit() as we shut down the process.
*/
static void
exit_function(void)
{
/* NOTE: If we ever daemonize, this gets called immediately. That's
* okay for now, because we only use this on Windows. */
#ifdef _WIN32
WSACleanup();
#endif
}
#ifdef _WIN32
#define UNIX_ONLY 0
#else
#define UNIX_ONLY 1
#endif
static struct {
/** A numeric code for this signal. Must match the signal value if
* try_to_register is true. */
int signal_value;
/** True if we should try to register this signal with libevent and catch
* corresponding posix signals. False otherwise. */
int try_to_register;
/** Pointer to hold the event object constructed for this signal. */
struct event *signal_event;
} signal_handlers[] = {
#ifdef SIGINT
{ SIGINT, UNIX_ONLY, NULL }, /* do a controlled slow shutdown */
#endif
#ifdef SIGTERM
{ SIGTERM, UNIX_ONLY, NULL }, /* to terminate now */
#endif
#ifdef SIGPIPE
{ SIGPIPE, UNIX_ONLY, NULL }, /* otherwise SIGPIPE kills us */
#endif
#ifdef SIGUSR1
{ SIGUSR1, UNIX_ONLY, NULL }, /* dump stats */
#endif
#ifdef SIGUSR2
{ SIGUSR2, UNIX_ONLY, NULL }, /* go to loglevel debug */
#endif
#ifdef SIGHUP
{ SIGHUP, UNIX_ONLY, NULL }, /* to reload config, retry conns, etc */
#endif
#ifdef SIGXFSZ
{ SIGXFSZ, UNIX_ONLY, NULL }, /* handle file-too-big resource exhaustion */
#endif
#ifdef SIGCHLD
{ SIGCHLD, UNIX_ONLY, NULL }, /* handle dns/cpu workers that exit */
#endif
/* These are controller-only */
{ SIGNEWNYM, 0, NULL },
{ SIGCLEARDNSCACHE, 0, NULL },
{ SIGHEARTBEAT, 0, NULL },
{ -1, -1, NULL }
};
/** Set up the signal handler events for this process, and register them
* with libevent if appropriate. */
void
handle_signals(void)
{
int i;
const int enabled = !get_options()->DisableSignalHandlers;
for (i = 0; signal_handlers[i].signal_value >= 0; ++i) {
/* Signal handlers are only registered with libevent if they need to catch
* real POSIX signals. We construct these signal handler events in either
* case, though, so that controllers can activate them with the SIGNAL
* command.
*/
if (enabled && signal_handlers[i].try_to_register) {
signal_handlers[i].signal_event =
tor_evsignal_new(tor_libevent_get_base(),
signal_handlers[i].signal_value,
signal_callback,
&signal_handlers[i].signal_value);
if (event_add(signal_handlers[i].signal_event, NULL))
log_warn(LD_BUG, "Error from libevent when adding "
"event for signal %d",
signal_handlers[i].signal_value);
} else {
signal_handlers[i].signal_event =
tor_event_new(tor_libevent_get_base(), -1,
EV_SIGNAL, signal_callback,
&signal_handlers[i].signal_value);
}
}
}
/* Cause the signal handler for signal_num to be called in the event loop. */
void
activate_signal(int signal_num)
{
int i;
for (i = 0; signal_handlers[i].signal_value >= 0; ++i) {
if (signal_handlers[i].signal_value == signal_num) {
event_active(signal_handlers[i].signal_event, EV_SIGNAL, 1);
return;
}
}
}
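/* Usage sketch for the two functions above (illustrative; the real call
 * sites live in the control-port code): a "signal" with no POSIX equivalent,
 * such as SIGNEWNYM, is never delivered by the kernel.  Instead a call like
 *
 *   activate_signal(SIGNEWNYM);
 *
 * marks its libevent event active, libevent then invokes signal_callback(),
 * and signal_callback() hands the value to process_signal() exactly as if a
 * real signal had arrived. */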
/** Main entry point for the Tor command-line client. Return 0 on "success",
* negative on "failure", and positive on "success and exit".
*/
int
tor_init(int argc, char *argv[])
{
char progname[256];
int quiet = 0;
time_of_process_start = time(NULL);
init_connection_lists();
/* Have the log set up with our application name. */
tor_snprintf(progname, sizeof(progname), "Tor %s", get_version());
log_set_application_name(progname);
/* Set up the crypto nice and early */
if (crypto_early_init() < 0) {
log_err(LD_GENERAL, "Unable to initialize the crypto subsystem!");
return -1;
}
/* Initialize the history structures. */
rep_hist_init();
/* Initialize the service cache. */
rend_cache_init();
addressmap_init(); /* Init the client dns cache. Do it always, since it's
* cheap. */
/* Initialize the HS subsystem. */
hs_init();
{
/* We search for the "quiet" option first, since it decides whether we
* will log anything at all to the command line. */
config_line_t *opts = NULL, *cmdline_opts = NULL;
const config_line_t *cl;
(void) config_parse_commandline(argc, argv, 1, &opts, &cmdline_opts);
for (cl = cmdline_opts; cl; cl = cl->next) {
if (!strcmp(cl->key, "--hush"))
quiet = 1;
if (!strcmp(cl->key, "--quiet") ||
!strcmp(cl->key, "--dump-config"))
quiet = 2;
/* The following options imply --hush */
if (!strcmp(cl->key, "--version") || !strcmp(cl->key, "--digests") ||
!strcmp(cl->key, "--list-torrc-options") ||
!strcmp(cl->key, "--library-versions") ||
!strcmp(cl->key, "--hash-password") ||
!strcmp(cl->key, "-h") || !strcmp(cl->key, "--help")) {
if (quiet < 1)
quiet = 1;
}
}
config_free_lines(opts);
config_free_lines(cmdline_opts);
}
/* give it somewhere to log to initially */
switch (quiet) {
case 2:
/* no initial logging */
break;
case 1:
add_temp_log(LOG_WARN);
break;
default:
add_temp_log(LOG_NOTICE);
}
quiet_level = quiet;
{
const char *version = get_version();
log_notice(LD_GENERAL, "Tor %s running on %s with Libevent %s, "
"OpenSSL %s, Zlib %s, Liblzma %s, and Libzstd %s.", version,
get_uname(),
tor_libevent_get_version_str(),
crypto_openssl_get_version_str(),
tor_compress_supports_method(ZLIB_METHOD) ?
tor_compress_version_str(ZLIB_METHOD) : "N/A",
tor_compress_supports_method(LZMA_METHOD) ?
tor_compress_version_str(LZMA_METHOD) : "N/A",
tor_compress_supports_method(ZSTD_METHOD) ?
tor_compress_version_str(ZSTD_METHOD) : "N/A");
log_notice(LD_GENERAL, "Tor can't help you if you use it wrong! "
"Learn how to be safe at "
"https://www.torproject.org/download/download#warning");
if (strstr(version, "alpha") || strstr(version, "beta"))
log_notice(LD_GENERAL, "This version is not a stable Tor release. "
"Expect more bugs than usual.");
tor_compress_log_init_warnings();
}
#ifdef HAVE_RUST
rust_log_welcome_string();
#endif /* defined(HAVE_RUST) */
if (network_init()<0) {
log_err(LD_BUG,"Error initializing network; exiting.");
return -1;
}
atexit(exit_function);
int init_rv = options_init_from_torrc(argc,argv);
if (init_rv < 0) {
log_err(LD_CONFIG,"Reading config failed--see warnings above.");
return -1;
} else if (init_rv > 0) {
// We succeeded, and should exit anyway -- probably the user just said
// "--version" or something like that.
return 1;
}
/* The options are now initialised */
const or_options_t *options = get_options();
/* Initialize channelpadding parameters to defaults until we get
* a consensus */
channelpadding_new_consensus_params(NULL);
/* Initialize predicted ports list after loading options */
predicted_ports_init();
#ifndef _WIN32
if (geteuid()==0)
log_warn(LD_GENERAL,"You are running Tor as root. You don't need to, "
"and you probably shouldn't.");
#endif
if (crypto_global_init(options->HardwareAccel,
options->AccelName,
options->AccelDir)) {
log_err(LD_BUG, "Unable to initialize OpenSSL. Exiting.");
return -1;
}
stream_choice_seed_weak_rng();
if (tor_init_libevent_rng() < 0) {
log_warn(LD_NET, "Problem initializing libevent RNG.");
}
/* Scan/clean unparseable descriptors; after reading config */
routerparse_init();
return 0;
}
/** A lockfile structure, used to prevent two Tors from messing with the
* data directory at once. If this variable is non-NULL, we're holding
* the lockfile. */
static tor_lockfile_t *lockfile = NULL;
/** Try to grab the lock file described in <b>options</b>, if we do not
* already have it. If <b>err_if_locked</b> is true, warn if somebody else is
* holding the lock, and exit if we can't get it after waiting. Otherwise,
* return -1 if we can't get the lockfile. Return 0 on success.
*/
int
try_locking(const or_options_t *options, int err_if_locked)
{
if (lockfile)
return 0;
else {
char *fname = options_get_datadir_fname(options, "lock");
int already_locked = 0;
tor_lockfile_t *lf = tor_lockfile_lock(fname, 0, &already_locked);
tor_free(fname);
if (!lf) {
if (err_if_locked && already_locked) {
int r;
log_warn(LD_GENERAL, "It looks like another Tor process is running "
"with the same data directory. Waiting 5 seconds to see "
"if it goes away.");
#ifndef _WIN32
sleep(5);
#else
Sleep(5000);
#endif
r = try_locking(options, 0);
if (r<0) {
log_err(LD_GENERAL, "No, it's still there. Exiting.");
return -1;
}
return r;
}
return -1;
}
lockfile = lf;
return 0;
}
}
/** Return true iff we've successfully acquired the lock file. */
int
have_lockfile(void)
{
return lockfile != NULL;
}
/** If we have successfully acquired the lock file, release it. */
void
release_lockfile(void)
{
if (lockfile) {
tor_lockfile_unlock(lockfile);
lockfile = NULL;
}
}
/** Free all memory that we might have allocated somewhere.
* If <b>postfork</b>, we are a worker process and we want to free
* only the parts of memory that we won't touch. If !<b>postfork</b>,
* Tor is shutting down and we should free everything.
*
* Helps us find the real leaks with dmalloc and the like. Also valgrind
* should then report 0 reachable in its leak report (in an ideal world --
* in practice libevent, SSL, libc etc never quite free everything). */
void
tor_free_all(int postfork)
{
if (!postfork) {
evdns_shutdown(1);
}
geoip_free_all();
dirvote_free_all();
routerlist_free_all();
networkstatus_free_all();
addressmap_free_all();
dirserv_free_all();
rend_cache_free_all();
rend_service_authorization_free_all();
rep_hist_free_all();
dns_free_all();
clear_pending_onions();
circuit_free_all();
entry_guards_free_all();
pt_free_all();
channel_tls_free_all();
channel_free_all();
connection_free_all();
connection_edge_free_all();
scheduler_free_all();
nodelist_free_all();
microdesc_free_all();
routerparse_free_all();
ext_orport_free_all();
control_free_all();
sandbox_free_getaddrinfo_cache();
protover_free_all();
bridges_free_all();
consdiffmgr_free_all();
hs_free_all();
dos_free_all();
if (!postfork) {
config_free_all();
or_state_free_all();
router_free_all();
routerkeys_free_all();
policies_free_all();
}
if (!postfork) {
tor_tls_free_all();
#ifndef _WIN32
tor_getpwnam(NULL);
#endif
}
/* stuff in main.c */
smartlist_free(connection_array);
smartlist_free(closeable_connection_lst);
smartlist_free(active_linked_connection_lst);
periodic_timer_free(second_timer);
teardown_periodic_events();
periodic_timer_free(refill_timer);
tor_event_free(shutdown_did_not_work_event);
tor_event_free(initialize_periodic_events_event);
#ifdef HAVE_SYSTEMD_209
periodic_timer_free(systemd_watchdog_timer);
#endif
global_read_bucket = global_write_bucket = 0;
global_relayed_read_bucket = global_relayed_write_bucket = 0;
stats_prev_global_read_bucket = stats_prev_global_write_bucket = 0;
stats_prev_n_read = stats_prev_n_written = 0;
stats_n_bytes_read = stats_n_bytes_written = 0;
time_of_process_start = 0;
time_of_last_signewnym = 0;
signewnym_is_pending = 0;
newnym_epoch = 0;
called_loop_once = 0;
main_loop_should_exit = 0;
main_loop_exit_value = 0;
can_complete_circuits = 0;
quiet_level = 0;
should_init_bridge_stats = 1;
dns_honesty_first_time = 1;
heartbeat_callback_first_time = 1;
n_libevent_errors = 0;
current_second = 0;
memset(&refill_timer_current_millisecond, 0, sizeof(struct timeval));
if (!postfork) {
release_lockfile();
}
tor_libevent_free_all();
/* Stuff in util.c and address.c*/
if (!postfork) {
escaped(NULL);
esc_router_info(NULL);
clean_up_backtrace_handler();
logs_free_all(); /* free log strings. do this last so logs keep working. */
}
}
/**
* Remove the specified file, and log a warning if the operation fails for
* any reason other than the file not existing. Ignores NULL filenames.
*/
void
tor_remove_file(const char *filename)
{
if (filename && tor_unlink(filename) != 0 && errno != ENOENT) {
log_warn(LD_FS, "Couldn't unlink %s: %s",
filename, strerror(errno));
}
}
/** Do whatever cleanup is necessary before shutting Tor down. */
void
tor_cleanup(void)
{
const or_options_t *options = get_options();
if (options->command == CMD_RUN_TOR) {
time_t now = time(NULL);
/* Remove our pid file. We don't care if there was an error when we
* unlink, nothing we could do about it anyways. */
tor_remove_file(options->PidFile);
/* Remove control port file */
tor_remove_file(options->ControlPortWriteToFile);
/* Remove cookie authentication file */
{
char *cookie_fname = get_controller_cookie_file_name();
tor_remove_file(cookie_fname);
tor_free(cookie_fname);
}
/* Remove Extended ORPort cookie authentication file */
{
char *cookie_fname = get_ext_or_auth_cookie_file_name();
tor_remove_file(cookie_fname);
tor_free(cookie_fname);
}
if (accounting_is_enabled(options))
accounting_record_bandwidth_usage(now, get_or_state());
or_state_mark_dirty(get_or_state(), 0); /* force an immediate save. */
or_state_save(now);
if (authdir_mode(options)) {
sr_save_and_cleanup();
}
if (authdir_mode_tests_reachability(options))
rep_hist_record_mtbf_data(now, 0);
keypin_close_journal();
}
timers_shutdown();
#ifdef USE_DMALLOC
dmalloc_log_stats();
#endif
tor_free_all(0); /* We could move tor_free_all back into the ifdef below
later, if it makes shutdown unacceptably slow. But for
now, leave it here: it's helped us catch bugs in the
past. */
crypto_global_cleanup();
#ifdef USE_DMALLOC
dmalloc_log_unfreed();
dmalloc_shutdown();
#endif
}
/** Read/create keys as needed, and echo our fingerprint to stdout. */
static int
do_list_fingerprint(void)
{
char buf[FINGERPRINT_LEN+1];
crypto_pk_t *k;
const char *nickname = get_options()->Nickname;
sandbox_disable_getaddrinfo_cache();
if (!server_mode(get_options())) {
log_err(LD_GENERAL,
"Clients don't have long-term identity keys. Exiting.");
return -1;
}
tor_assert(nickname);
if (init_keys() < 0) {
log_err(LD_GENERAL,"Error initializing keys; exiting.");
return -1;
}
if (!(k = get_server_identity_key())) {
log_err(LD_GENERAL,"Error: missing identity key.");
return -1;
}
if (crypto_pk_get_fingerprint(k, buf, 1)<0) {
log_err(LD_BUG, "Error computing fingerprint");
return -1;
}
printf("%s %s\n", nickname, buf);
return 0;
}
/** Entry point for password hashing: take the desired password from
* the command line, and print its salted hash to stdout. **/
static void
do_hash_password(void)
{
char output[256];
char key[S2K_RFC2440_SPECIFIER_LEN+DIGEST_LEN];
crypto_rand(key, S2K_RFC2440_SPECIFIER_LEN-1);
key[S2K_RFC2440_SPECIFIER_LEN-1] = (uint8_t)96; /* Hash 64 K of data. */
secret_to_key_rfc2440(key+S2K_RFC2440_SPECIFIER_LEN, DIGEST_LEN,
get_options()->command_arg, strlen(get_options()->command_arg),
key);
base16_encode(output, sizeof(output), key, sizeof(key));
printf("16:%s\n",output);
}
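/* Sketch of the buffer layout used above, assuming (as in current Tor
 * headers) S2K_RFC2440_SPECIFIER_LEN == 9 and DIGEST_LEN == 20:
 *
 *   key[0..7]   random salt
 *   key[8]      coded count 96: (16 + (96 & 15)) << ((96 >> 4) + 6)
 *               = 16 << 12 = 65536 bytes hashed ("64 K of data")
 *   key[9..28]  20-byte S2K digest of the password
 *
 * base16_encode() then emits those 29 bytes as 58 hex digits, so the printed
 * line has the form "16:<18 hex salt+count digits><40 hex digest digits>". */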
/** Entry point for configuration dumping: write the configuration to
* stdout. */
static int
do_dump_config(void)
{
const or_options_t *options = get_options();
const char *arg = options->command_arg;
int how;
char *opts;
if (!strcmp(arg, "short")) {
how = OPTIONS_DUMP_MINIMAL;
} else if (!strcmp(arg, "non-builtin")) {
how = OPTIONS_DUMP_DEFAULTS;
} else if (!strcmp(arg, "full")) {
how = OPTIONS_DUMP_ALL;
} else {
fprintf(stderr, "No valid argument to --dump-config found!\n");
fprintf(stderr, "Please select 'short', 'non-builtin', or 'full'.\n");
return -1;
}
opts = options_dump(options, how);
printf("%s", opts);
tor_free(opts);
return 0;
}
static void
init_addrinfo(void)
{
if (! server_mode(get_options()) ||
(get_options()->Address && strlen(get_options()->Address) > 0)) {
/* We don't need to seed our own hostname, because we won't be calling
* resolve_my_address on it.
*/
return;
}
char hname[256];
// Add our own hostname to the sandbox's getaddrinfo cache, so it can
// still be resolved once the filter is active.
gethostname(hname, sizeof(hname));
sandbox_add_addrinfo(hname);
}
static sandbox_cfg_t*
sandbox_init_filter(void)
{
const or_options_t *options = get_options();
sandbox_cfg_t *cfg = sandbox_cfg_new();
int i;
sandbox_cfg_allow_openat_filename(&cfg,
get_cachedir_fname("cached-status"));
#define OPEN(name) \
sandbox_cfg_allow_open_filename(&cfg, tor_strdup(name))
#define OPEN_DATADIR(name) \
sandbox_cfg_allow_open_filename(&cfg, get_datadir_fname(name))
#define OPEN_DATADIR2(name, name2) \
sandbox_cfg_allow_open_filename(&cfg, get_datadir_fname2((name), (name2)))
#define OPEN_DATADIR_SUFFIX(name, suffix) do { \
OPEN_DATADIR(name); \
OPEN_DATADIR(name suffix); \
} while (0)
#define OPEN_DATADIR2_SUFFIX(name, name2, suffix) do { \
OPEN_DATADIR2(name, name2); \
OPEN_DATADIR2(name, name2 suffix); \
} while (0)
#define OPEN_KEY_DIRECTORY() \
sandbox_cfg_allow_open_filename(&cfg, tor_strdup(options->KeyDirectory))
#define OPEN_CACHEDIR(name) \
sandbox_cfg_allow_open_filename(&cfg, get_cachedir_fname(name))
#define OPEN_CACHEDIR_SUFFIX(name, suffix) do { \
OPEN_CACHEDIR(name); \
OPEN_CACHEDIR(name suffix); \
} while (0)
#define OPEN_KEYDIR(name) \
sandbox_cfg_allow_open_filename(&cfg, get_keydir_fname(name))
#define OPEN_KEYDIR_SUFFIX(name, suffix) do { \
OPEN_KEYDIR(name); \
OPEN_KEYDIR(name suffix); \
} while (0)
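/* Example of what these helpers expand to (with a hypothetical DataDirectory
 * of /var/lib/tor):
 *
 *   OPEN_DATADIR_SUFFIX("state", ".tmp");
 *
 * whitelists open() on both /var/lib/tor/state and /var/lib/tor/state.tmp,
 * which is what Tor's write-to-temporary-then-rename file updates need; the
 * RENAME_*_SUFFIX macros further down grant the matching rename() calls. */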
OPEN(options->DataDirectory);
OPEN_KEY_DIRECTORY();
OPEN_CACHEDIR_SUFFIX("cached-certs", ".tmp");
OPEN_CACHEDIR_SUFFIX("cached-consensus", ".tmp");
OPEN_CACHEDIR_SUFFIX("unverified-consensus", ".tmp");
OPEN_CACHEDIR_SUFFIX("unverified-microdesc-consensus", ".tmp");
OPEN_CACHEDIR_SUFFIX("cached-microdesc-consensus", ".tmp");
OPEN_CACHEDIR_SUFFIX("cached-microdescs", ".tmp");
OPEN_CACHEDIR_SUFFIX("cached-microdescs.new", ".tmp");
OPEN_CACHEDIR_SUFFIX("cached-descriptors", ".tmp");
OPEN_CACHEDIR_SUFFIX("cached-descriptors.new", ".tmp");
OPEN_CACHEDIR("cached-descriptors.tmp.tmp");
OPEN_CACHEDIR_SUFFIX("cached-extrainfo", ".tmp");
OPEN_CACHEDIR_SUFFIX("cached-extrainfo.new", ".tmp");
OPEN_CACHEDIR("cached-extrainfo.tmp.tmp");
OPEN_DATADIR_SUFFIX("state", ".tmp");
OPEN_DATADIR_SUFFIX("sr-state", ".tmp");
OPEN_DATADIR_SUFFIX("unparseable-desc", ".tmp");
OPEN_DATADIR_SUFFIX("v3-status-votes", ".tmp");
OPEN_DATADIR("key-pinning-journal");
OPEN("/dev/srandom");
OPEN("/dev/urandom");
OPEN("/dev/random");
OPEN("/etc/hosts");
OPEN("/proc/meminfo");
if (options->BridgeAuthoritativeDir)
OPEN_DATADIR_SUFFIX("networkstatus-bridges", ".tmp");
if (authdir_mode(options))
OPEN_DATADIR("approved-routers");
if (options->ServerDNSResolvConfFile)
sandbox_cfg_allow_open_filename(&cfg,
tor_strdup(options->ServerDNSResolvConfFile));
else
sandbox_cfg_allow_open_filename(&cfg, tor_strdup("/etc/resolv.conf"));
for (i = 0; i < 2; ++i) {
if (get_torrc_fname(i)) {
sandbox_cfg_allow_open_filename(&cfg, tor_strdup(get_torrc_fname(i)));
}
}
SMARTLIST_FOREACH(options->FilesOpenedByIncludes, char *, f, {
OPEN(f);
});
#define RENAME_SUFFIX(name, suffix) \
sandbox_cfg_allow_rename(&cfg, \
get_datadir_fname(name suffix), \
get_datadir_fname(name))
#define RENAME_SUFFIX2(prefix, name, suffix) \
sandbox_cfg_allow_rename(&cfg, \
get_datadir_fname2(prefix, name suffix), \
get_datadir_fname2(prefix, name))
#define RENAME_CACHEDIR_SUFFIX(name, suffix) \
sandbox_cfg_allow_rename(&cfg, \
get_cachedir_fname(name suffix), \
get_cachedir_fname(name))
#define RENAME_KEYDIR_SUFFIX(name, suffix) \
sandbox_cfg_allow_rename(&cfg, \
get_keydir_fname(name suffix), \
get_keydir_fname(name))
RENAME_CACHEDIR_SUFFIX("cached-certs", ".tmp");
RENAME_CACHEDIR_SUFFIX("cached-consensus", ".tmp");
RENAME_CACHEDIR_SUFFIX("unverified-consensus", ".tmp");
RENAME_CACHEDIR_SUFFIX("unverified-microdesc-consensus", ".tmp");
RENAME_CACHEDIR_SUFFIX("cached-microdesc-consensus", ".tmp");
RENAME_CACHEDIR_SUFFIX("cached-microdescs", ".tmp");
RENAME_CACHEDIR_SUFFIX("cached-microdescs", ".new");
RENAME_CACHEDIR_SUFFIX("cached-microdescs.new", ".tmp");
RENAME_CACHEDIR_SUFFIX("cached-descriptors", ".tmp");
RENAME_CACHEDIR_SUFFIX("cached-descriptors", ".new");
RENAME_CACHEDIR_SUFFIX("cached-descriptors.new", ".tmp");
RENAME_CACHEDIR_SUFFIX("cached-extrainfo", ".tmp");
RENAME_CACHEDIR_SUFFIX("cached-extrainfo", ".new");
RENAME_CACHEDIR_SUFFIX("cached-extrainfo.new", ".tmp");
RENAME_SUFFIX("state", ".tmp");
RENAME_SUFFIX("sr-state", ".tmp");
RENAME_SUFFIX("unparseable-desc", ".tmp");
RENAME_SUFFIX("v3-status-votes", ".tmp");
if (options->BridgeAuthoritativeDir)
RENAME_SUFFIX("networkstatus-bridges", ".tmp");
#define STAT_DATADIR(name) \
sandbox_cfg_allow_stat_filename(&cfg, get_datadir_fname(name))
#define STAT_CACHEDIR(name) \
sandbox_cfg_allow_stat_filename(&cfg, get_cachedir_fname(name))
#define STAT_DATADIR2(name, name2) \
sandbox_cfg_allow_stat_filename(&cfg, get_datadir_fname2((name), (name2)))
#define STAT_KEY_DIRECTORY() \
sandbox_cfg_allow_stat_filename(&cfg, tor_strdup(options->KeyDirectory))
STAT_DATADIR(NULL);
STAT_DATADIR("lock");
STAT_DATADIR("state");
STAT_DATADIR("router-stability");
STAT_CACHEDIR("cached-extrainfo.new");
{
smartlist_t *files = smartlist_new();
tor_log_get_logfile_names(files);
SMARTLIST_FOREACH(files, char *, file_name, {
/* steals reference */
sandbox_cfg_allow_open_filename(&cfg, file_name);
});
smartlist_free(files);
}
{
smartlist_t *files = smartlist_new();
smartlist_t *dirs = smartlist_new();
hs_service_lists_fnames_for_sandbox(files, dirs);
SMARTLIST_FOREACH(files, char *, file_name, {
char *tmp_name = NULL;
tor_asprintf(&tmp_name, "%s.tmp", file_name);
sandbox_cfg_allow_rename(&cfg,
tor_strdup(tmp_name), tor_strdup(file_name));
/* steals references */
sandbox_cfg_allow_open_filename(&cfg, file_name);
sandbox_cfg_allow_open_filename(&cfg, tmp_name);
});
SMARTLIST_FOREACH(dirs, char *, dir, {
/* steals reference */
sandbox_cfg_allow_stat_filename(&cfg, dir);
});
smartlist_free(files);
smartlist_free(dirs);
}
{
char *fname;
if ((fname = get_controller_cookie_file_name())) {
sandbox_cfg_allow_open_filename(&cfg, fname);
}
if ((fname = get_ext_or_auth_cookie_file_name())) {
sandbox_cfg_allow_open_filename(&cfg, fname);
}
}
SMARTLIST_FOREACH_BEGIN(get_configured_ports(), port_cfg_t *, port) {
if (!port->is_unix_addr)
continue;
/* When we open an AF_UNIX address, we want permission to open the
* directory that holds it. */
char *dirname = tor_strdup(port->unix_addr);
if (get_parent_directory(dirname) == 0) {
OPEN(dirname);
}
tor_free(dirname);
sandbox_cfg_allow_chmod_filename(&cfg, tor_strdup(port->unix_addr));
sandbox_cfg_allow_chown_filename(&cfg, tor_strdup(port->unix_addr));
} SMARTLIST_FOREACH_END(port);
if (options->DirPortFrontPage) {
sandbox_cfg_allow_open_filename(&cfg,
tor_strdup(options->DirPortFrontPage));
}
// Extra files needed when we are a relay (ORPort set).
if (server_mode(get_options())) {
OPEN_KEYDIR_SUFFIX("secret_id_key", ".tmp");
OPEN_KEYDIR_SUFFIX("secret_onion_key", ".tmp");
OPEN_KEYDIR_SUFFIX("secret_onion_key_ntor", ".tmp");
OPEN_KEYDIR("secret_id_key.old");
OPEN_KEYDIR("secret_onion_key.old");
OPEN_KEYDIR("secret_onion_key_ntor.old");
OPEN_KEYDIR_SUFFIX("ed25519_master_id_secret_key", ".tmp");
OPEN_KEYDIR_SUFFIX("ed25519_master_id_secret_key_encrypted", ".tmp");
OPEN_KEYDIR_SUFFIX("ed25519_master_id_public_key", ".tmp");
OPEN_KEYDIR_SUFFIX("ed25519_signing_secret_key", ".tmp");
OPEN_KEYDIR_SUFFIX("ed25519_signing_secret_key_encrypted", ".tmp");
OPEN_KEYDIR_SUFFIX("ed25519_signing_public_key", ".tmp");
OPEN_KEYDIR_SUFFIX("ed25519_signing_cert", ".tmp");
OPEN_DATADIR2_SUFFIX("stats", "bridge-stats", ".tmp");
OPEN_DATADIR2_SUFFIX("stats", "dirreq-stats", ".tmp");
OPEN_DATADIR2_SUFFIX("stats", "entry-stats", ".tmp");
OPEN_DATADIR2_SUFFIX("stats", "exit-stats", ".tmp");
OPEN_DATADIR2_SUFFIX("stats", "buffer-stats", ".tmp");
OPEN_DATADIR2_SUFFIX("stats", "conn-stats", ".tmp");
OPEN_DATADIR2_SUFFIX("stats", "hidserv-stats", ".tmp");
OPEN_DATADIR("approved-routers");
OPEN_DATADIR_SUFFIX("fingerprint", ".tmp");
OPEN_DATADIR_SUFFIX("hashed-fingerprint", ".tmp");
OPEN_DATADIR_SUFFIX("router-stability", ".tmp");
OPEN("/etc/resolv.conf");
RENAME_SUFFIX("fingerprint", ".tmp");
RENAME_KEYDIR_SUFFIX("secret_onion_key_ntor", ".tmp");
RENAME_KEYDIR_SUFFIX("secret_id_key", ".tmp");
RENAME_KEYDIR_SUFFIX("secret_id_key.old", ".tmp");
RENAME_KEYDIR_SUFFIX("secret_onion_key", ".tmp");
RENAME_KEYDIR_SUFFIX("secret_onion_key.old", ".tmp");
RENAME_SUFFIX2("stats", "bridge-stats", ".tmp");
RENAME_SUFFIX2("stats", "dirreq-stats", ".tmp");
RENAME_SUFFIX2("stats", "entry-stats", ".tmp");
RENAME_SUFFIX2("stats", "exit-stats", ".tmp");
RENAME_SUFFIX2("stats", "buffer-stats", ".tmp");
RENAME_SUFFIX2("stats", "conn-stats", ".tmp");
RENAME_SUFFIX2("stats", "hidserv-stats", ".tmp");
RENAME_SUFFIX("hashed-fingerprint", ".tmp");
RENAME_SUFFIX("router-stability", ".tmp");
RENAME_KEYDIR_SUFFIX("ed25519_master_id_secret_key", ".tmp");
RENAME_KEYDIR_SUFFIX("ed25519_master_id_secret_key_encrypted", ".tmp");
RENAME_KEYDIR_SUFFIX("ed25519_master_id_public_key", ".tmp");
RENAME_KEYDIR_SUFFIX("ed25519_signing_secret_key", ".tmp");
RENAME_KEYDIR_SUFFIX("ed25519_signing_cert", ".tmp");
sandbox_cfg_allow_rename(&cfg,
get_keydir_fname("secret_onion_key"),
get_keydir_fname("secret_onion_key.old"));
sandbox_cfg_allow_rename(&cfg,
get_keydir_fname("secret_onion_key_ntor"),
get_keydir_fname("secret_onion_key_ntor.old"));
STAT_KEY_DIRECTORY();
OPEN_DATADIR("stats");
STAT_DATADIR("stats");
STAT_DATADIR2("stats", "dirreq-stats");
consdiffmgr_register_with_sandbox(&cfg);
}
init_addrinfo();
return cfg;
}
/* Main entry point for the Tor process. Called from tor_main(), and by
* anybody embedding Tor. */
int
tor_run_main(const tor_main_configuration_t *tor_cfg)
{
int result = 0;
int argc = tor_cfg->argc;
char **argv = tor_cfg->argv;
#ifdef _WIN32
#ifndef HeapEnableTerminationOnCorruption
#define HeapEnableTerminationOnCorruption 1
#endif
/* On heap corruption, just give up; don't try to play along. */
HeapSetInformation(NULL, HeapEnableTerminationOnCorruption, NULL, 0);
/* Call SetProcessDEPPolicy to permanently enable DEP.
The function will not resolve on earlier versions of Windows,
and failure is not dangerous. */
HMODULE hMod = GetModuleHandleA("Kernel32.dll");
if (hMod) {
typedef BOOL (WINAPI *PSETDEP)(DWORD);
PSETDEP setdeppolicy = (PSETDEP)GetProcAddress(hMod,
"SetProcessDEPPolicy");
if (setdeppolicy) {
/* PROCESS_DEP_ENABLE | PROCESS_DEP_DISABLE_ATL_THUNK_EMULATION */
setdeppolicy(3);
}
}
#endif /* defined(_WIN32) */
configure_backtrace_handler(get_version());
init_protocol_warning_severity_level();
update_approx_time(time(NULL));
tor_threads_init();
tor_compress_init();
init_logging(0);
monotime_init();
#ifdef USE_DMALLOC
{
/* Instruct OpenSSL to use our internal wrappers for malloc,
realloc and free. */
int r = crypto_use_tor_alloc_functions();
tor_assert(r == 0);
}
#endif /* defined(USE_DMALLOC) */
#ifdef NT_SERVICE
{
int done = 0;
result = nt_service_parse_options(argc, argv, &done);
if (done) return result;
}
#endif /* defined(NT_SERVICE) */
{
int init_rv = tor_init(argc, argv);
if (init_rv < 0)
return -1;
else if (init_rv > 0)
return 0;
}
if (get_options()->Sandbox && get_options()->command == CMD_RUN_TOR) {
sandbox_cfg_t* cfg = sandbox_init_filter();
if (sandbox_init(cfg)) {
log_err(LD_BUG,"Failed to create syscall sandbox filter");
return -1;
}
// Point libevent's secure RNG at the interned /dev/urandom path so it
// keeps working under the sandbox.
#ifdef HAVE_EVUTIL_SECURE_RNG_SET_URANDOM_DEVICE_FILE
evutil_secure_rng_set_urandom_device_file(
(char*) sandbox_intern_string("/dev/urandom"));
#endif
}
switch (get_options()->command) {
case CMD_RUN_TOR:
#ifdef NT_SERVICE
nt_service_set_state(SERVICE_RUNNING);
#endif
result = do_main_loop();
break;
case CMD_KEYGEN:
result = load_ed_keys(get_options(), time(NULL)) < 0;
break;
case CMD_KEY_EXPIRATION:
init_keys();
result = log_cert_expiration();
break;
case CMD_LIST_FINGERPRINT:
result = do_list_fingerprint();
break;
case CMD_HASH_PASSWORD:
do_hash_password();
result = 0;
break;
case CMD_VERIFY_CONFIG:
if (quiet_level == 0)
printf("Configuration was valid\n");
result = 0;
break;
case CMD_DUMP_CONFIG:
result = do_dump_config();
break;
case CMD_RUN_UNITTESTS: /* only set by test.c */
default:
log_warn(LD_BUG,"Illegal command number %d: internal error.",
get_options()->command);
result = -1;
}
tor_cleanup();
return result;
}
|
291709.c | /*
Copyright 1991, 1998 The Open Group
Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of The Open Group shall not be
used in advertising or otherwise to promote the sale, use or other dealings
in this Software without prior written authorization from The Open Group.
*/
/*
* Author: Keith Packard, MIT X Consortium
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <X11/fonts/fntfilst.h>
#include <X11/keysym.h>
#ifdef WIN32
#include <ctype.h>
#endif
static unsigned char
ISOLatin1ToLower(unsigned char source)
{
if (source >= XK_A && source <= XK_Z)
return source + (XK_a - XK_A);
if (source >= XK_Agrave && source <= XK_Odiaeresis)
return source + (XK_agrave - XK_Agrave);
if (source >= XK_Ooblique && source <= XK_Thorn)
return source + (XK_oslash - XK_Ooblique);
return source;
}
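/* Worked example for the mapping above (keysym values from
 * <X11/keysym.h>): 'A' (0x41) -> 'a' (0x61), XK_Agrave (0xc0) ->
 * XK_agrave (0xe0), XK_Thorn (0xde) -> XK_thorn (0xfe).  0xd7
 * (XK_multiply) lies in the gap between the two accented ranges and is
 * returned unchanged, which is why the range check is split around it. */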
_X_HIDDEN void
CopyISOLatin1Lowered(char *dest, char *source, int length)
{
int i;
for (i = 0; i < length; i++, source++, dest++)
*dest = ISOLatin1ToLower(*source);
*dest = '\0';
}
/*
* Map FPE functions to renderer functions
*/
static int FontFileOpenBitmapNCF (FontPathElementPtr fpe, FontPtr *pFont,
int flags, FontEntryPtr entry,
fsBitmapFormat format,
fsBitmapFormatMask fmask,
FontPtr non_cachable_font);
int
FontFileNameCheck (char *name)
{
#ifndef NCD
#if defined(WIN32)
/* Accept a path that starts with a drive letter and a colon (e.g. D:/...)
* as a valid font path on WIN32; this rule originally also covered OS/2.
*/
if (isalpha(*name) && name[1]==':')
return TRUE;
#endif
return *name == '/';
#else
return ((strcmp(name, "built-ins") == 0) || (*name == '/'));
#endif
}
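/* Examples of names the check above accepts (illustrative): an absolute
 * path such as "/usr/share/fonts/X11/misc" always passes; on WIN32 a
 * drive-letter path such as "c:/fonts" also passes; the special name
 * "built-ins" is only accepted in NCD builds. */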
int
FontFileInitFPE (FontPathElementPtr fpe)
{
int status;
FontDirectoryPtr dir;
status = FontFileReadDirectory (fpe->name, &dir);
if (status == Successful)
{
if (dir->nonScalable.used > 0)
if (!FontFileRegisterBitmapSource (fpe))
{
FontFileFreeFPE (fpe);
return AllocError;
}
fpe->private = (pointer) dir;
}
return status;
}
/* ARGSUSED */
int
FontFileResetFPE (FontPathElementPtr fpe)
{
FontDirectoryPtr dir;
dir = (FontDirectoryPtr) fpe->private;
/*
* The reset must fail for bitmap fonts because they get cleared when
* the path is set.
*/
if (FontFileDirectoryChanged (dir))
{
/* can't do it, so tell the caller to close and re-open */
return FPEResetFailed;
}
else
{
if (dir->nonScalable.used > 0)
if (!FontFileRegisterBitmapSource (fpe))
{
return FPEResetFailed;
}
return Successful;
}
}
int
FontFileFreeFPE (FontPathElementPtr fpe)
{
FontFileUnregisterBitmapSource (fpe);
FontFileFreeDir ((FontDirectoryPtr) fpe->private);
return Successful;
}
static int
transfer_values_to_alias(char *entryname, int entrynamelength,
char *resolvedname,
char **aliasName, FontScalablePtr vals)
{
static char aliasname[MAXFONTNAMELEN];
int nameok = 1, len;
char lowerName[MAXFONTNAMELEN];
*aliasName = resolvedname;
if ((len = strlen(*aliasName)) <= MAXFONTNAMELEN &&
(entrynamelength < MAXFONTNAMELEN) &&
FontFileCountDashes (*aliasName, len) == 14)
{
FontScalableRec tmpVals;
FontScalableRec tmpVals2;
tmpVals2 = *vals;
/* If we're aliasing a scalable name, transfer values
from the name into the destination alias, multiplying
by matrices that appear in the alias. */
CopyISOLatin1Lowered (lowerName, entryname,
entrynamelength);
lowerName[entrynamelength] = '\0';
if (FontParseXLFDName(lowerName, &tmpVals,
FONT_XLFD_REPLACE_NONE) &&
!tmpVals.values_supplied &&
FontParseXLFDName(*aliasName, &tmpVals,
FONT_XLFD_REPLACE_NONE))
{
double *matrix = 0, tempmatrix[4];
/* Use a matrix iff exactly one is defined */
if ((tmpVals.values_supplied & PIXELSIZE_MASK) ==
PIXELSIZE_ARRAY &&
!(tmpVals.values_supplied & POINTSIZE_MASK))
matrix = tmpVals.pixel_matrix;
else if ((tmpVals.values_supplied & POINTSIZE_MASK) ==
POINTSIZE_ARRAY &&
!(tmpVals.values_supplied & PIXELSIZE_MASK))
matrix = tmpVals.point_matrix;
/* If matrix given in the alias, compute new point
and/or pixel matrices */
if (matrix)
{
/* Complete the XLFD name to avoid potential
gotchas */
if (FontFileCompleteXLFD(&tmpVals2, &tmpVals2))
{
tempmatrix[0] =
matrix[0] * tmpVals2.point_matrix[0] +
matrix[1] * tmpVals2.point_matrix[2];
tempmatrix[1] =
matrix[0] * tmpVals2.point_matrix[1] +
matrix[1] * tmpVals2.point_matrix[3];
tempmatrix[2] =
matrix[2] * tmpVals2.point_matrix[0] +
matrix[3] * tmpVals2.point_matrix[2];
tempmatrix[3] =
matrix[2] * tmpVals2.point_matrix[1] +
matrix[3] * tmpVals2.point_matrix[3];
tmpVals2.point_matrix[0] = tempmatrix[0];
tmpVals2.point_matrix[1] = tempmatrix[1];
tmpVals2.point_matrix[2] = tempmatrix[2];
tmpVals2.point_matrix[3] = tempmatrix[3];
tempmatrix[0] =
matrix[0] * tmpVals2.pixel_matrix[0] +
matrix[1] * tmpVals2.pixel_matrix[2];
tempmatrix[1] =
matrix[0] * tmpVals2.pixel_matrix[1] +
matrix[1] * tmpVals2.pixel_matrix[3];
tempmatrix[2] =
matrix[2] * tmpVals2.pixel_matrix[0] +
matrix[3] * tmpVals2.pixel_matrix[2];
tempmatrix[3] =
matrix[2] * tmpVals2.pixel_matrix[1] +
matrix[3] * tmpVals2.pixel_matrix[3];
tmpVals2.pixel_matrix[0] = tempmatrix[0];
tmpVals2.pixel_matrix[1] = tempmatrix[1];
tmpVals2.pixel_matrix[2] = tempmatrix[2];
tmpVals2.pixel_matrix[3] = tempmatrix[3];
tmpVals2.values_supplied =
(tmpVals2.values_supplied &
~(PIXELSIZE_MASK | POINTSIZE_MASK)) |
PIXELSIZE_ARRAY | POINTSIZE_ARRAY;
}
else
nameok = 0;
}
CopyISOLatin1Lowered (aliasname, *aliasName, len + 1);
if (nameok && FontParseXLFDName(aliasname, &tmpVals2,
FONT_XLFD_REPLACE_VALUE))
/* Return a version of the aliasname that has
had the vals stuffed into it. To avoid a
memory leak, this alias name lives in a
static buffer; the caller must be done
with that buffer before this procedure is
called again, or reentrancy problems result. */
*aliasName = aliasname;
}
}
return nameok;
}
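/*
 * Open a font by name.  The name is first matched against the non-scalable
 * directory (bitmap fonts and aliases); if that yields nothing, it is treated
 * as an XLFD pattern against the scalable directory, reusing a cached scaled
 * instance when one exists and otherwise asking a bitmap source or the
 * renderer to produce one.  (Descriptive summary of the code below.)
 */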
/* ARGSUSED */
int
FontFileOpenFont (pointer client, FontPathElementPtr fpe, Mask flags,
char *name, int namelen,
fsBitmapFormat format, fsBitmapFormatMask fmask,
XID id, FontPtr *pFont, char **aliasName,
FontPtr non_cachable_font)
{
FontDirectoryPtr dir;
char lowerName[MAXFONTNAMELEN];
char fileName[MAXFONTFILENAMELEN*2 + 1];
FontNameRec tmpName;
FontEntryPtr entry;
FontScalableRec vals;
FontScalableEntryPtr scalable;
FontScaledPtr scaled;
FontBitmapEntryPtr bitmap;
int ret;
Bool noSpecificSize;
int nranges;
fsRange *ranges;
if (namelen >= MAXFONTNAMELEN)
return AllocError;
dir = (FontDirectoryPtr) fpe->private;
/* Match non-scalable pattern */
CopyISOLatin1Lowered (lowerName, name, namelen);
lowerName[namelen] = '\0';
ranges = FontParseRanges(lowerName, &nranges);
tmpName.name = lowerName;
tmpName.length = namelen;
tmpName.ndashes = FontFileCountDashes (lowerName, namelen);
if (!FontParseXLFDName(lowerName, &vals, FONT_XLFD_REPLACE_NONE))
bzero(&vals, sizeof(vals));
if (!(entry = FontFileFindNameInDir (&dir->nonScalable, &tmpName)) &&
tmpName.ndashes == 14 &&
FontParseXLFDName (lowerName, &vals, FONT_XLFD_REPLACE_ZERO))
{
tmpName.length = strlen(lowerName);
entry = FontFileFindNameInDir (&dir->nonScalable, &tmpName);
}
if (entry)
{
switch (entry->type) {
case FONT_ENTRY_BITMAP:
bitmap = &entry->u.bitmap;
if (bitmap->pFont)
{
*pFont = bitmap->pFont;
(*pFont)->fpe = fpe;
ret = Successful;
}
else
{
ret = FontFileOpenBitmapNCF (fpe, pFont, flags, entry, format,
fmask, non_cachable_font);
if (ret == Successful && *pFont)
(*pFont)->fpe = fpe;
}
break;
case FONT_ENTRY_ALIAS:
vals.nranges = nranges;
vals.ranges = ranges;
transfer_values_to_alias(entry->name.name, entry->name.length,
entry->u.alias.resolved, aliasName, &vals);
ret = FontNameAlias;
break;
default:
ret = BadFontName;
}
}
else
{
ret = BadFontName;
}
if (ret != BadFontName)
{
if (ranges) free(ranges);
return ret;
}
/* Match XLFD patterns */
CopyISOLatin1Lowered (lowerName, name, namelen);
lowerName[namelen] = '\0';
tmpName.name = lowerName;
tmpName.length = namelen;
tmpName.ndashes = FontFileCountDashes (lowerName, namelen);
if (!FontParseXLFDName (lowerName, &vals, FONT_XLFD_REPLACE_ZERO) ||
!(tmpName.length = strlen (lowerName),
entry = FontFileFindNameInScalableDir (&dir->scalable, &tmpName,
&vals))) {
CopyISOLatin1Lowered (lowerName, name, namelen);
lowerName[namelen] = '\0';
tmpName.name = lowerName;
tmpName.length = namelen;
tmpName.ndashes = FontFileCountDashes (lowerName, namelen);
entry = FontFileFindNameInScalableDir (&dir->scalable, &tmpName, &vals);
if (entry)
{
strcpy(lowerName, entry->name.name);
tmpName.name = lowerName;
tmpName.length = entry->name.length;
tmpName.ndashes = entry->name.ndashes;
}
}
if (entry)
{
noSpecificSize = FALSE; /* TRUE breaks XLFD enhancements */
if (entry->type == FONT_ENTRY_SCALABLE &&
FontFileCompleteXLFD (&vals, &entry->u.scalable.extra->defaults))
{
scalable = &entry->u.scalable;
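/* Requests that carry an explicit transformation matrix, or any field
   outside the plain size/charsubset ones, are rasterized fresh rather
   than satisfied from the scaled-instance cache. */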
if ((vals.values_supplied & PIXELSIZE_MASK) == PIXELSIZE_ARRAY ||
(vals.values_supplied & POINTSIZE_MASK) == POINTSIZE_ARRAY ||
(vals.values_supplied &
~SIZE_SPECIFY_MASK & ~CHARSUBSET_SPECIFIED))
scaled = 0;
else
scaled = FontFileFindScaledInstance (entry, &vals,
noSpecificSize);
/*
* A scaled instance can occur in one of two ways:
*
* Either the font has been scaled to this
* size already, in which case scaled->pFont
* will point at that font.
*
* Or a bitmap instance in this size exists,
* which is handled as if we got a pattern
* matching the bitmap font name.
*/
if (scaled)
{
if (scaled->pFont)
{
*pFont = scaled->pFont;
(*pFont)->fpe = fpe;
ret = Successful;
}
else if (scaled->bitmap)
{
entry = scaled->bitmap;
bitmap = &entry->u.bitmap;
if (bitmap->pFont)
{
*pFont = bitmap->pFont;
(*pFont)->fpe = fpe;
ret = Successful;
}
else
{
ret = FontFileOpenBitmapNCF (fpe, pFont, flags, entry,
format, fmask,
non_cachable_font);
if (ret == Successful && *pFont)
(*pFont)->fpe = fpe;
}
}
else /* "cannot" happen */
{
ret = BadFontName;
}
}
else
{
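/* No scaled instance yet: first offer the pattern to any registered
   bitmap sources, and only rasterize from the scalable file if they
   cannot satisfy it. */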
ret = FontFileMatchBitmapSource (fpe, pFont, flags, entry, &tmpName, &vals, format, fmask, noSpecificSize);
if (ret != Successful)
{
char origName[MAXFONTNAMELEN];
CopyISOLatin1Lowered (origName, name, namelen);
origName[namelen] = '\0';
/* Pass the original XLFD name in the vals
structure; the rasterizer is free to examine it
for hidden meanings. This information will not
be saved in the scaled-instances table. */
vals.xlfdName = origName;
vals.ranges = ranges;
vals.nranges = nranges;
if (strlen(dir->directory) + strlen(scalable->fileName) >=
sizeof(fileName)) {
ret = BadFontName;
} else {
strcpy (fileName, dir->directory);
strcat (fileName, scalable->fileName);
if (scalable->renderer->OpenScalable) {
ret = (*scalable->renderer->OpenScalable) (fpe, pFont,
flags, entry, fileName, &vals, format, fmask,
non_cachable_font);
}
else if (scalable->renderer->OpenBitmap) {
ret = (*scalable->renderer->OpenBitmap) (fpe, pFont,
flags, entry, fileName, format, fmask,
non_cachable_font);
}
}
/* In case the rasterizer does something bad because of
charset subsetting... */
if (ret == Successful &&
((*pFont)->info.firstCol > (*pFont)->info.lastCol ||
(*pFont)->info.firstRow > (*pFont)->info.lastRow))
{
(*(*pFont)->unload_font)(*pFont);
ret = BadFontName;
}
/* Save the instance */
if (ret == Successful)
{
if (FontFileAddScaledInstance (entry, &vals,
*pFont, (char *) 0))
ranges = 0;
else
(*pFont)->fpePrivate = (pointer) 0;
(*pFont)->fpe = fpe;
}
}
}
}
}
else
ret = BadFontName;
if (ranges)
free(ranges);
return ret;
}
/* ARGSUSED */
void
FontFileCloseFont (FontPathElementPtr fpe, FontPtr pFont)
{
FontEntryPtr entry;
if ((entry = (FontEntryPtr) pFont->fpePrivate)) {
switch (entry->type) {
case FONT_ENTRY_SCALABLE:
FontFileRemoveScaledInstance (entry, pFont);
break;
case FONT_ENTRY_BITMAP:
entry->u.bitmap.pFont = 0;
break;
default:
/* "cannot" happen */
break;
}
pFont->fpePrivate = 0;
}
(*pFont->unload_font) (pFont);
}
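/*
 * Open a bitmap font entry: build the full path from the directory and the
 * entry's file name, then dispatch to the renderer's OpenBitmap.  The "NCF"
 * variant (presumably "non-cachable font") threads the caller's
 * non_cachable_font pointer through to the renderer.
 */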
static int
FontFileOpenBitmapNCF (FontPathElementPtr fpe, FontPtr *pFont,
int flags, FontEntryPtr entry,
fsBitmapFormat format, fsBitmapFormatMask fmask,
FontPtr non_cachable_font)
{
FontBitmapEntryPtr bitmap;
char fileName[MAXFONTFILENAMELEN*2+1];
int ret;
FontDirectoryPtr dir;
dir = (FontDirectoryPtr) fpe->private;
bitmap = &entry->u.bitmap;
if (!bitmap || !bitmap->renderer->OpenBitmap)
return BadFontName;
if (strlen(dir->directory) + strlen(bitmap->fileName) >= sizeof(fileName))
return BadFontName;
strcpy (fileName, dir->directory);
strcat (fileName, bitmap->fileName);
ret = (*bitmap->renderer->OpenBitmap)
(fpe, pFont, flags, entry, fileName, format, fmask,
non_cachable_font);
if (ret == Successful)
{
bitmap->pFont = *pFont;
(*pFont)->fpePrivate = (pointer) entry;
}
return ret;
}
int
FontFileOpenBitmap (FontPathElementPtr fpe, FontPtr *pFont,
int flags, FontEntryPtr entry,
fsBitmapFormat format, fsBitmapFormatMask fmask)
{
return FontFileOpenBitmapNCF (fpe, pFont, flags, entry, format, fmask,
(FontPtr)0);
}
static int
FontFileGetInfoBitmap (FontPathElementPtr fpe, FontInfoPtr pFontInfo,
FontEntryPtr entry)
{
FontBitmapEntryPtr bitmap;
char fileName[MAXFONTFILENAMELEN*2+1];
int ret;
FontDirectoryPtr dir;
dir = (FontDirectoryPtr) fpe->private;
bitmap = &entry->u.bitmap;
if (!bitmap || !bitmap->renderer->GetInfoBitmap)
return BadFontName;
if (strlen(dir->directory) + strlen(bitmap->fileName) >= sizeof(fileName))
return BadFontName;
strcpy (fileName, dir->directory);
strcat (fileName, bitmap->fileName);
ret = (*bitmap->renderer->GetInfoBitmap) (fpe, pFontInfo, entry, fileName);
return ret;
}
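/*
 * For each scalable name that matched the pattern, substitute the concrete
 * values from the pattern into the name and append it to the output list.
 * Entries with negative lengths mark aliases; for those the resolved target
 * name is appended immediately after the (negated-length) alias name.
 * (Descriptive summary of the code below.)
 */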
static void
_FontFileAddScalableNames(FontNamesPtr names, FontNamesPtr scaleNames,
FontNamePtr nameptr, char *zeroChars,
FontScalablePtr vals, fsRange *ranges,
int nranges, int *max)
{
int i;
FontScalableRec zeroVals, tmpVals;
for (i = 0; i < scaleNames->nnames; i++)
{
char nameChars[MAXFONTNAMELEN];
if (!*max)
return;
FontParseXLFDName (scaleNames->names[i], &zeroVals,
FONT_XLFD_REPLACE_NONE);
tmpVals = *vals;
if (FontFileCompleteXLFD (&tmpVals, &zeroVals))
{
--*max;
strcpy (nameChars, scaleNames->names[i]);
if ((vals->values_supplied & PIXELSIZE_MASK) ||
!(vals->values_supplied & PIXELSIZE_WILDCARD) ||
vals->y == 0)
{
tmpVals.values_supplied =
(tmpVals.values_supplied & ~PIXELSIZE_MASK) |
(vals->values_supplied & PIXELSIZE_MASK);
tmpVals.pixel_matrix[0] = vals->pixel_matrix[0];
tmpVals.pixel_matrix[1] = vals->pixel_matrix[1];
tmpVals.pixel_matrix[2] = vals->pixel_matrix[2];
tmpVals.pixel_matrix[3] = vals->pixel_matrix[3];
}
if ((vals->values_supplied & POINTSIZE_MASK) ||
!(vals->values_supplied & POINTSIZE_WILDCARD) ||
vals->y == 0)
{
tmpVals.values_supplied =
(tmpVals.values_supplied & ~POINTSIZE_MASK) |
(vals->values_supplied & POINTSIZE_MASK);
tmpVals.point_matrix[0] = vals->point_matrix[0];
tmpVals.point_matrix[1] = vals->point_matrix[1];
tmpVals.point_matrix[2] = vals->point_matrix[2];
tmpVals.point_matrix[3] = vals->point_matrix[3];
}
if (vals->width <= 0)
tmpVals.width = 0;
if (vals->x == 0)
tmpVals.x = 0;
if (vals->y == 0)
tmpVals.y = 0;
tmpVals.ranges = ranges;
tmpVals.nranges = nranges;
FontParseXLFDName (nameChars, &tmpVals,
FONT_XLFD_REPLACE_VALUE);
/* If we're marking aliases with negative lengths, we
need to concoct a valid target name to follow it.
Otherwise we're done. */
if (scaleNames->length[i] >= 0)
{
(void) AddFontNamesName (names, nameChars,
strlen (nameChars));
/* If our original pattern matches the name from
the table and that name doesn't duplicate what
we just added, add the name from the table */
if (strcmp(nameChars, scaleNames->names[i]) &&
FontFileMatchName(scaleNames->names[i],
scaleNames->length[i],
nameptr) &&
*max)
{
--*max;
(void) AddFontNamesName (names, scaleNames->names[i],
scaleNames->length[i]);
}
}
else
{
char *aliasName;
vals->ranges = ranges;
vals->nranges = nranges;
if (transfer_values_to_alias(zeroChars,
strlen(zeroChars),
scaleNames->names[++i],
&aliasName, vals))
{
(void) AddFontNamesName (names, nameChars,
strlen (nameChars));
names->length[names->nnames - 1] =
-names->length[names->nnames - 1];
(void) AddFontNamesName (names, aliasName,
strlen (aliasName));
/* If our original pattern matches the name from
the table and that name doesn't duplicate what
we just added, add the name from the table */
if (strcmp(nameChars, scaleNames->names[i - 1]) &&
FontFileMatchName(scaleNames->names[i - 1],
-scaleNames->length[i - 1],
nameptr) &&
*max)
{
--*max;
(void) AddFontNamesName (names,
scaleNames->names[i - 1],
-scaleNames->length[i - 1]);
names->length[names->nnames - 1] =
-names->length[names->nnames - 1];
(void) AddFontNamesName (names, aliasName,
strlen (aliasName));
}
}
}
}
}
}
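/*
 * List fonts matching a pattern.  Fully dashed (XLFD) patterns are matched
 * against both the non-scalable and the scalable directories, synthesizing
 * names for scalable entries and their aliases; any other pattern is a plain
 * directory listing.  When mark_aliases is set, alias entries are flagged
 * with negative lengths and followed by their resolved target names.
 */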
/* ARGSUSED */
static int
_FontFileListFonts (pointer client, FontPathElementPtr fpe,
char *pat, int len, int max, FontNamesPtr names,
int mark_aliases)
{
FontDirectoryPtr dir;
char lowerChars[MAXFONTNAMELEN], zeroChars[MAXFONTNAMELEN];
FontNameRec lowerName;
FontNameRec zeroName;
FontNamesPtr scaleNames;
FontScalableRec vals;
fsRange *ranges;
int nranges;
int result = BadFontName;
if (len >= MAXFONTNAMELEN)
return AllocError;
dir = (FontDirectoryPtr) fpe->private;
CopyISOLatin1Lowered (lowerChars, pat, len);
lowerChars[len] = '\0';
lowerName.name = lowerChars;
lowerName.length = len;
lowerName.ndashes = FontFileCountDashes (lowerChars, len);
/* Match XLFD patterns */
strcpy (zeroChars, lowerChars);
if (lowerName.ndashes == 14 &&
FontParseXLFDName (zeroChars, &vals, FONT_XLFD_REPLACE_ZERO))
{
ranges = FontParseRanges(lowerChars, &nranges);
result = FontFileFindNamesInScalableDir (&dir->nonScalable,
&lowerName, max, names,
(FontScalablePtr)0,
(mark_aliases ?
LIST_ALIASES_AND_TARGET_NAMES :
NORMAL_ALIAS_BEHAVIOR) |
IGNORE_SCALABLE_ALIASES,
&max);
zeroName.name = zeroChars;
zeroName.length = strlen (zeroChars);
zeroName.ndashes = lowerName.ndashes;
/* Look for scalable names and aliases, adding scaled instances of
them to the output */
/* Scalable names... */
scaleNames = MakeFontNamesRecord (0);
if (!scaleNames)
{
if (ranges) free(ranges);
return AllocError;
}
FontFileFindNamesInScalableDir (&dir->scalable, &zeroName, max,
scaleNames, &vals,
mark_aliases ?
LIST_ALIASES_AND_TARGET_NAMES :
NORMAL_ALIAS_BEHAVIOR, (int *)0);
_FontFileAddScalableNames(names, scaleNames, &lowerName,
zeroChars, &vals, ranges, nranges,
&max);
FreeFontNames (scaleNames);
/* Scalable aliases... */
scaleNames = MakeFontNamesRecord (0);
if (!scaleNames)
{
if (ranges) free(ranges);
return AllocError;
}
FontFileFindNamesInScalableDir (&dir->nonScalable, &zeroName,
max, scaleNames, &vals,
mark_aliases ?
LIST_ALIASES_AND_TARGET_NAMES :
NORMAL_ALIAS_BEHAVIOR, (int *)0);
_FontFileAddScalableNames(names, scaleNames, &lowerName,
zeroChars, &vals, ranges, nranges,
&max);
FreeFontNames (scaleNames);
if (ranges) free(ranges);
}
else
{
result = FontFileFindNamesInScalableDir (&dir->nonScalable,
&lowerName, max, names,
(FontScalablePtr)0,
mark_aliases ?
LIST_ALIASES_AND_TARGET_NAMES :
NORMAL_ALIAS_BEHAVIOR,
&max);
if (result == Successful)
result = FontFileFindNamesInScalableDir (&dir->scalable,
&lowerName, max, names,
(FontScalablePtr)0,
mark_aliases ?
LIST_ALIASES_AND_TARGET_NAMES :
NORMAL_ALIAS_BEHAVIOR, (int *)0);
}
return result;
}
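/* LFWIData holds the name list produced by _FontFileListFonts together with
   an iteration cursor; it is handed back to the ListNext* functions through
   the opaque "private" pointer. */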
typedef struct _LFWIData {
FontNamesPtr names;
int current;
} LFWIDataRec, *LFWIDataPtr;
int
FontFileListFonts (pointer client, FontPathElementPtr fpe, char *pat,
int len, int max, FontNamesPtr names)
{
return _FontFileListFonts (client, fpe, pat, len, max, names, 0);
}
int
FontFileStartListFonts(pointer client, FontPathElementPtr fpe,
char *pat, int len, int max,
pointer *privatep, int mark_aliases)
{
LFWIDataPtr data;
int ret;
data = malloc (sizeof *data);
if (!data)
return AllocError;
data->names = MakeFontNamesRecord (0);
if (!data->names)
{
free (data);
return AllocError;
}
ret = _FontFileListFonts (client, fpe, pat, len,
max, data->names, mark_aliases);
if (ret != Successful)
{
FreeFontNames (data->names);
free (data);
return ret;
}
data->current = 0;
*privatep = (pointer) data;
return Successful;
}
int
FontFileStartListFontsWithInfo(pointer client, FontPathElementPtr fpe,
char *pat, int len, int max,
pointer *privatep)
{
return FontFileStartListFonts(client, fpe, pat, len, max, privatep, 0);
}
/* ARGSUSED */
static int
FontFileListOneFontWithInfo (pointer client, FontPathElementPtr fpe,
char **namep, int *namelenp,
FontInfoPtr *pFontInfo)
{
FontDirectoryPtr dir;
char lowerName[MAXFONTNAMELEN];
char fileName[MAXFONTFILENAMELEN*2 + 1];
FontNameRec tmpName;
FontEntryPtr entry;
FontScalableRec vals;
FontScalableEntryPtr scalable;
FontScaledPtr scaled;
FontBitmapEntryPtr bitmap;
int ret;
Bool noSpecificSize;
int nranges;
fsRange *ranges;
char *name = *namep;
int namelen = *namelenp;
if (namelen >= MAXFONTNAMELEN)
return AllocError;
dir = (FontDirectoryPtr) fpe->private;
/* Match non-scalable pattern */
CopyISOLatin1Lowered (lowerName, name, namelen);
lowerName[namelen] = '\0';
ranges = FontParseRanges(lowerName, &nranges);
tmpName.name = lowerName;
tmpName.length = namelen;
tmpName.ndashes = FontFileCountDashes (lowerName, namelen);
if (!FontParseXLFDName(lowerName, &vals, FONT_XLFD_REPLACE_NONE))
bzero(&vals, sizeof(vals));
if (!(entry = FontFileFindNameInDir (&dir->nonScalable, &tmpName)) &&
tmpName.ndashes == 14 &&
FontParseXLFDName (lowerName, &vals, FONT_XLFD_REPLACE_ZERO))
{
tmpName.length = strlen(lowerName);
entry = FontFileFindNameInDir (&dir->nonScalable, &tmpName);
}
if (entry)
{
switch (entry->type) {
case FONT_ENTRY_BITMAP:
bitmap = &entry->u.bitmap;
if (bitmap->pFont)
{
*pFontInfo = &bitmap->pFont->info;
ret = Successful;
}
else
{
ret = FontFileGetInfoBitmap (fpe, *pFontInfo, entry);
}
break;
case FONT_ENTRY_ALIAS:
vals.nranges = nranges;
vals.ranges = ranges;
transfer_values_to_alias(entry->name.name, entry->name.length,
entry->u.alias.resolved, namep, &vals);
*namelenp = strlen (*namep);
ret = FontNameAlias;
break;
default:
ret = BadFontName;
}
}
else
{
ret = BadFontName;
}
if (ret != BadFontName)
{
if (ranges) free(ranges);
return ret;
}
/* Match XLFD patterns */
CopyISOLatin1Lowered (lowerName, name, namelen);
lowerName[namelen] = '\0';
tmpName.name = lowerName;
tmpName.length = namelen;
tmpName.ndashes = FontFileCountDashes (lowerName, namelen);
if (!FontParseXLFDName (lowerName, &vals, FONT_XLFD_REPLACE_ZERO) ||
!(tmpName.length = strlen (lowerName),
entry = FontFileFindNameInScalableDir (&dir->scalable, &tmpName,
&vals))) {
CopyISOLatin1Lowered (lowerName, name, namelen);
lowerName[namelen] = '\0';
tmpName.name = lowerName;
tmpName.length = namelen;
tmpName.ndashes = FontFileCountDashes (lowerName, namelen);
entry = FontFileFindNameInScalableDir (&dir->scalable, &tmpName, &vals);
if (entry)
{
strcpy(lowerName, entry->name.name);
tmpName.name = lowerName;
tmpName.length = entry->name.length;
tmpName.ndashes = entry->name.ndashes;
}
}
if (entry)
{
noSpecificSize = FALSE; /* TRUE breaks XLFD enhancements */
if (entry && entry->type == FONT_ENTRY_SCALABLE &&
FontFileCompleteXLFD (&vals, &entry->u.scalable.extra->defaults))
{
scalable = &entry->u.scalable;
scaled = FontFileFindScaledInstance (entry, &vals, noSpecificSize);
/*
* A scaled instance can occur in one of two ways:
*
* Either the font has been scaled to this
* size already, in which case scaled->pFont
* will point at that font.
*
* Or a bitmap instance in this size exists,
* which is handled as if we got a pattern
* matching the bitmap font name.
*/
if (scaled)
{
if (scaled->pFont)
{
*pFontInfo = &scaled->pFont->info;
ret = Successful;
}
else if (scaled->bitmap)
{
entry = scaled->bitmap;
bitmap = &entry->u.bitmap;
if (bitmap->pFont)
{
*pFontInfo = &bitmap->pFont->info;
ret = Successful;
}
else
{
ret = FontFileGetInfoBitmap (fpe, *pFontInfo, entry);
}
}
else /* "cannot" happen */
{
ret = BadFontName;
}
}
else
{
{
char origName[MAXFONTNAMELEN];
CopyISOLatin1Lowered (origName, name, namelen);
origName[namelen] = '\0';
vals.xlfdName = origName;
vals.ranges = ranges;
vals.nranges = nranges;
/* Make a new scaled instance */
if (strlen(dir->directory) + strlen(scalable->fileName) >=
sizeof(fileName)) {
ret = BadFontName;
} else {
strcpy (fileName, dir->directory);
strcat (fileName, scalable->fileName);
if (scalable->renderer->GetInfoScalable)
ret = (*scalable->renderer->GetInfoScalable)
(fpe, *pFontInfo, entry, &tmpName, fileName,
&vals);
else if (scalable->renderer->GetInfoBitmap)
ret = (*scalable->renderer->GetInfoBitmap)
(fpe, *pFontInfo, entry, fileName);
}
if (ranges) {
free(ranges);
ranges = NULL;
}
}
}
if (ret == Successful) return ret;
}
CopyISOLatin1Lowered (lowerName, name, namelen);
tmpName.length = namelen;
}
else
ret = BadFontName;
if (ranges)
free(ranges);
return ret;
}
int
FontFileListNextFontWithInfo(pointer client, FontPathElementPtr fpe,
char **namep, int *namelenp,
FontInfoPtr *pFontInfo,
int *numFonts, pointer private)
{
LFWIDataPtr data = (LFWIDataPtr) private;
int ret;
char *name;
int namelen;
if (data->current == data->names->nnames)
{
FreeFontNames (data->names);
free (data);
return BadFontName;
}
name = data->names->names[data->current];
namelen = data->names->length[data->current];
ret = FontFileListOneFontWithInfo (client, fpe, &name, &namelen, pFontInfo);
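/* BadFontName from this routine would look to the caller like an ordinary
   end-of-list/skip condition, so (presumably for that reason) a listed name
   that unexpectedly fails to resolve is reported as AllocError instead. */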
if (ret == BadFontName)
ret = AllocError;
*namep = name;
*namelenp = namelen;
++data->current;
*numFonts = data->names->nnames - data->current;
return ret;
}
int
FontFileStartListFontsAndAliases(pointer client, FontPathElementPtr fpe,
char *pat, int len, int max,
pointer *privatep)
{
return FontFileStartListFonts(client, fpe, pat, len, max, privatep, 1);
}
int
FontFileListNextFontOrAlias(pointer client, FontPathElementPtr fpe,
char **namep, int *namelenp, char **resolvedp,
int *resolvedlenp, pointer private)
{
LFWIDataPtr data = (LFWIDataPtr) private;
int ret;
char *name;
int namelen;
if (data->current == data->names->nnames)
{
FreeFontNames (data->names);
free (data);
return BadFontName;
}
name = data->names->names[data->current];
namelen = data->names->length[data->current];
/* If this is a real font name... */
if (namelen >= 0)
{
*namep = name;
*namelenp = namelen;
ret = Successful;
}
/* Else if an alias */
else
{
/* Tell the caller that this is an alias... let the caller resolve it
to see whether it's valid */
*namep = name;
*namelenp = -namelen;
*resolvedp = data->names->names[++data->current];
*resolvedlenp = data->names->length[data->current];
ret = FontNameAlias;
}
++data->current;
return ret;
}
void
FontFileRegisterLocalFpeFunctions (void)
{
RegisterFPEFunctions(FontFileNameCheck,
FontFileInitFPE,
FontFileFreeFPE,
FontFileResetFPE,
FontFileOpenFont,
FontFileCloseFont,
FontFileListFonts,
FontFileStartListFontsWithInfo,
FontFileListNextFontWithInfo,
NULL,
NULL,
NULL,
FontFileStartListFontsAndAliases,
FontFileListNextFontOrAlias,
FontFileEmptyBitmapSource);
}
|
531840.c | /* mbed Microcontroller Library
* SPDX-License-Identifier: BSD-3-Clause
******************************************************************************
*
* Copyright (c) 2016-2021 STMicroelectronics.
* All rights reserved.
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*
* Automatically generated from STM32CubeMX/db/mcu/STM32F412Z(E-G)Jx.xml
*/
#include "PeripheralPins.h"
#include "mbed_toolchain.h"
//==============================================================================
// Notes
//
// - The pins named Px_y_ALTz are alternative possibilities that use other
// HW peripheral instances. You can use them the same way as any other "normal"
// pin (e.g. PwmOut pwm(PA_7_ALT0);). These pins are not displayed on the board
// pinout image on mbed.org.
//
// - The pins that are connected to other components on the board carry the
// comment "Connected to xxx". The pin function may not work properly in that
// case. These pins may not be displayed on the board pinout image on mbed.org.
// Please read the board reference manual and schematic for more information.
//
// - Warning: pins connected to the default STDIO_UART_TX and STDIO_UART_RX are
// commented out. See https://os.mbed.com/teams/ST/wiki/STDIO for more information.
//
//==============================================================================
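//==============================================================================
// Usage sketch (assuming mbed's hal/pinmap.h API): the tables below are looked
// up by the HAL to resolve a pin to its peripheral and alternate function, e.g.
//
//   uint32_t per = pinmap_peripheral(PA_0, PinMap_PWM);  // -> PWM_2 (TIM2)
//   uint32_t fn  = pinmap_function(PA_0, PinMap_PWM);    // packed mode/pull/AF data
//   pinmap_pinout(PA_0, PinMap_PWM);                     // configures the GPIO for TIM2_CH1
//
//==============================================================================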
//*** ADC ***
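// Note: STM_PIN_DATA_EXT appears to pack (mode, pull, alternate function,
// channel, inverted) for pins that also need a channel number -- e.g. PA_0
// below is ADC1 channel 0.  Field interpretation inferred from the trailing
// comments; check the STM32 target headers if in doubt.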
MBED_WEAK const PinMap PinMap_ADC[] = {
{PA_0, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 0, 0)}, // ADC1_IN0
{PA_1, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 1, 0)}, // ADC1_IN1
{PA_2, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 2, 0)}, // ADC1_IN2
{PA_3, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 3, 0)}, // ADC1_IN3
{PA_4, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 4, 0)}, // ADC1_IN4
{PA_5, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 5, 0)}, // ADC1_IN5
{PA_6, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 6, 0)}, // ADC1_IN6
{PA_7, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 7, 0)}, // ADC1_IN7
{PB_0, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 8, 0)}, // ADC1_IN8
{PB_1, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 9, 0)}, // ADC1_IN9
{PC_0, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 10, 0)}, // ADC1_IN10
{PC_1, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 11, 0)}, // ADC1_IN11
{PC_2, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 12, 0)}, // ADC1_IN12
{PC_3, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 13, 0)}, // ADC1_IN13
{PC_4, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 14, 0)}, // ADC1_IN14
{PC_5, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 15, 0)}, // ADC1_IN15
{NC, NC, 0}
};
// !!! SECTION TO BE CHECKED WITH DEVICE REFERENCE MANUAL
MBED_WEAK const PinMap PinMap_ADC_Internal[] = {
// {ADC_TEMP, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 16, 0)},
// {ADC_VREF, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 17, 0)},
// {ADC_VBAT, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 18, 0)},
{NC, NC, 0}
};
//*** I2C ***
MBED_WEAK const PinMap PinMap_I2C_SDA[] = {
{PB_3, I2C_2, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF9_I2C2)},
{PB_3_ALT0, FMPI2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_FMPI2C1)},
{PB_4, I2C_3, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF9_I2C3)},
{PB_7, I2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_I2C1)},
{PB_8, I2C_3, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF9_I2C3)},
{PB_9, I2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_I2C1)},
{PB_9_ALT0, I2C_2, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF9_I2C2)},
{PB_11, I2C_2, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_I2C2)},
{PB_14, FMPI2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_FMPI2C1)},
{PC_7, FMPI2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_FMPI2C1)},
{PC_9, I2C_3, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_I2C3)},
{PD_13, FMPI2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_FMPI2C1)},
{PD_15, FMPI2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_FMPI2C1)},
{PF_0, I2C_2, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_I2C2)},
{PF_15, FMPI2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_FMPI2C1)},
{NC, NC, 0}
};
MBED_WEAK const PinMap PinMap_I2C_SCL[] = {
{PA_8, I2C_3, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_I2C3)},
{PB_6, I2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_I2C1)},
{PB_8, I2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_I2C1)},
{PB_10, I2C_2, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_I2C2)},
{PB_10_ALT0, FMPI2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF9_FMPI2C1)},
{PB_15, FMPI2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_FMPI2C1)},
{PC_6, FMPI2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_FMPI2C1)},
{PD_12, FMPI2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_FMPI2C1)},
{PD_14, FMPI2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_FMPI2C1)},
{PF_1, I2C_2, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_I2C2)},
{PF_14, FMPI2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_FMPI2C1)},
{NC, NC, 0}
};
//*** PWM ***
// TIM5 cannot be used because it is already used by the us_ticker
// (update the us_ticker_data.h file if another timer is chosen)
MBED_WEAK const PinMap PinMap_PWM[] = {
{PA_0, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM2, 1, 0)}, // TIM2_CH1
// {PA_0, PWM_5, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM5, 1, 0)}, // TIM5_CH1
{PA_1, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM2, 2, 0)}, // TIM2_CH2
// {PA_1, PWM_5, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM5, 2, 0)}, // TIM5_CH2
{PA_2, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM2, 3, 0)}, // TIM2_CH3
// {PA_2, PWM_5, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM5, 3, 0)}, // TIM5_CH3
{PA_2_ALT0, PWM_9, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM9, 1, 0)}, // TIM9_CH1
{PA_3, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM2, 4, 0)}, // TIM2_CH4
// {PA_3, PWM_5, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM5, 4, 0)}, // TIM5_CH4
{PA_3_ALT0, PWM_9, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM9, 2, 0)}, // TIM9_CH2
{PA_5, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM2, 1, 0)}, // TIM2_CH1
{PA_5_ALT0, PWM_8, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM8, 1, 1)}, // TIM8_CH1N
{PA_6, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM3, 1, 0)}, // TIM3_CH1
{PA_6_ALT0, PWM_13, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_TIM13, 1, 0)}, // TIM13_CH1
{PA_7, PWM_1, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM1, 1, 1)}, // TIM1_CH1N
{PA_7_ALT0, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM3, 2, 0)}, // TIM3_CH2
{PA_7_ALT1, PWM_8, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM8, 1, 1)}, // TIM8_CH1N
{PA_7_ALT2, PWM_14, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_TIM14, 1, 0)}, // TIM14_CH1
{PA_8, PWM_1, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM1, 1, 0)}, // TIM1_CH1
{PA_9, PWM_1, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM1, 2, 0)}, // TIM1_CH2
{PA_10, PWM_1, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM1, 3, 0)}, // TIM1_CH3
{PA_11, PWM_1, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM1, 4, 0)}, // TIM1_CH4
{PA_15, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM2, 1, 0)}, // TIM2_CH1
{PB_0, PWM_1, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM1, 2, 1)}, // TIM1_CH2N
{PB_0_ALT0, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM3, 3, 0)}, // TIM3_CH3
{PB_0_ALT1, PWM_8, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM8, 2, 1)}, // TIM8_CH2N
{PB_1, PWM_1, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM1, 3, 1)}, // TIM1_CH3N
{PB_1_ALT0, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM3, 4, 0)}, // TIM3_CH4
{PB_1_ALT1, PWM_8, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM8, 3, 1)}, // TIM8_CH3N
{PB_3, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM2, 2, 0)}, // TIM2_CH2
{PB_4, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM3, 1, 0)}, // TIM3_CH1
{PB_5, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM3, 2, 0)}, // TIM3_CH2
{PB_6, PWM_4, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM4, 1, 0)}, // TIM4_CH1
{PB_7, PWM_4, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM4, 2, 0)}, // TIM4_CH2
{PB_8, PWM_4, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM4, 3, 0)}, // TIM4_CH3
{PB_8_ALT0, PWM_10, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM10, 1, 0)}, // TIM10_CH1
{PB_9, PWM_4, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM4, 4, 0)}, // TIM4_CH4
{PB_9_ALT0, PWM_11, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM11, 1, 0)}, // TIM11_CH1
{PB_10, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM2, 3, 0)}, // TIM2_CH3
{PB_11, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM2, 4, 0)}, // TIM2_CH4
{PB_13, PWM_1, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM1, 1, 1)}, // TIM1_CH1N
{PB_14, PWM_1, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM1, 2, 1)}, // TIM1_CH2N
{PB_14_ALT0, PWM_8, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM8, 2, 1)}, // TIM8_CH2N
{PB_14_ALT1, PWM_12, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_TIM12, 1, 0)}, // TIM12_CH1
{PB_15, PWM_1, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM1, 3, 1)}, // TIM1_CH3N
{PB_15_ALT0, PWM_8, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM8, 3, 1)}, // TIM8_CH3N
{PB_15_ALT1, PWM_12, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_TIM12, 2, 0)}, // TIM12_CH2
{PC_6, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM3, 1, 0)}, // TIM3_CH1
{PC_6_ALT0, PWM_8, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM8, 1, 0)}, // TIM8_CH1
{PC_7, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM3, 2, 0)}, // TIM3_CH2
{PC_7_ALT0, PWM_8, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM8, 2, 0)}, // TIM8_CH2
{PC_8, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM3, 3, 0)}, // TIM3_CH3
{PC_8_ALT0, PWM_8, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM8, 3, 0)}, // TIM8_CH3
{PC_9, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM3, 4, 0)}, // TIM3_CH4
{PC_9_ALT0, PWM_8, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM8, 4, 0)}, // TIM8_CH4
{PD_12, PWM_4, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM4, 1, 0)}, // TIM4_CH1
{PD_13, PWM_4, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM4, 2, 0)}, // TIM4_CH2
{PD_14, PWM_4, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM4, 3, 0)}, // TIM4_CH3
{PD_15, PWM_4, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM4, 4, 0)}, // TIM4_CH4
{PE_5, PWM_9, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM9, 1, 0)}, // TIM9_CH1
{PE_6, PWM_9, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM9, 2, 0)}, // TIM9_CH2
{PE_8, PWM_1, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM1, 1, 1)}, // TIM1_CH1N
{PE_9, PWM_1, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM1, 1, 0)}, // TIM1_CH1
{PE_10, PWM_1, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM1, 2, 1)}, // TIM1_CH2N
{PE_11, PWM_1, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM1, 2, 0)}, // TIM1_CH2
{PE_12, PWM_1, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM1, 3, 1)}, // TIM1_CH3N
{PE_13, PWM_1, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM1, 3, 0)}, // TIM1_CH3
{PE_14, PWM_1, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF1_TIM1, 4, 0)}, // TIM1_CH4
// {PF_3, PWM_5, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM5, 1, 0)}, // TIM5_CH1
// {PF_4, PWM_5, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM5, 2, 0)}, // TIM5_CH2
// {PF_5, PWM_5, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM5, 3, 0)}, // TIM5_CH3
{PF_6, PWM_10, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM10, 1, 0)}, // TIM10_CH1
{PF_7, PWM_11, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF3_TIM11, 1, 0)}, // TIM11_CH1
{PF_8, PWM_13, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_TIM13, 1, 0)}, // TIM13_CH1
{PF_9, PWM_14, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_TIM14, 1, 0)}, // TIM14_CH1
// {PF_10, PWM_5, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF2_TIM5, 4, 0)}, // TIM5_CH4
{NC, NC, 0}
};
//*** SERIAL ***
MBED_WEAK const PinMap PinMap_UART_TX[] = {
{PA_2, UART_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART2)},
{PA_9, UART_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART1)},
{PA_11, UART_6, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF8_USART6)},
{PA_15, UART_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART1)},
{PB_6, UART_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART1)},
{PB_10, UART_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART3)},
{PC_6, UART_6, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF8_USART6)},
{PC_10, UART_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART3)},
{PD_5, UART_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART2)},
{PD_8, UART_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART3)},
{PG_14, UART_6, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF8_USART6)},
{NC, NC, 0}
};
MBED_WEAK const PinMap PinMap_UART_RX[] = {
{PA_3, UART_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART2)},
{PA_10, UART_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART1)},
{PA_12, UART_6, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF8_USART6)},
{PB_3, UART_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART1)},
{PB_7, UART_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART1)},
{PB_11, UART_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART3)},
{PC_5, UART_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART3)},
{PC_7, UART_6, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF8_USART6)},
{PC_11, UART_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART3)},
{PD_6, UART_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART2)},
{PD_9, UART_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART3)},
{PG_9, UART_6, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF8_USART6)},
{NC, NC, 0}
};
MBED_WEAK const PinMap PinMap_UART_RTS[] = {
{PA_1, UART_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART2)},
{PA_12, UART_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART1)},
{PB_14, UART_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART3)},
{PD_4, UART_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART2)},
{PD_12, UART_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART3)},
{PG_8, UART_6, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF8_USART6)},
{PG_12, UART_6, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF8_USART6)},
{NC, NC, 0}
};
MBED_WEAK const PinMap PinMap_UART_CTS[] = {
{PA_0, UART_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART2)},
{PA_11, UART_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART1)},
{PB_13, UART_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF8_USART3)},
{PD_3, UART_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART2)},
{PD_11, UART_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART3)},
{PG_13, UART_6, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF8_USART6)},
{PG_15, UART_6, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF8_USART6)},
{NC, NC, 0}
};
//*** SPI ***
MBED_WEAK const PinMap PinMap_SPI_MOSI[] = {
{PA_1, SPI_4, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI4)},
{PA_7, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)},
{PA_10, SPI_5, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI5)},
{PB_5, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)},
{PB_5_ALT0, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI3)},
{PB_8, SPI_5, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI5)},
{PB_15, SPI_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI2)},
{PC_3, SPI_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI2)},
{PC_12, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI3)},
{PD_6, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI3)},
{PE_6, SPI_4, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI4)},
{PE_6_ALT0, SPI_5, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI5)},
{PE_14, SPI_4, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI4)},
{PE_14_ALT0, SPI_5, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI5)},
{NC, NC, 0}
};
MBED_WEAK const PinMap PinMap_SPI_MISO[] = {
{PA_6, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)},
{PA_11, SPI_4, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI4)},
{PA_12, SPI_5, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI5)},
{PB_4, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)},
{PB_4_ALT0, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI3)},
{PB_14, SPI_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI2)},
{PC_2, SPI_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI2)},
{PC_11, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI3)},
{PE_5, SPI_4, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI4)},
{PE_5_ALT0, SPI_5, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI5)},
{PE_13, SPI_4, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI4)},
{PE_13_ALT0, SPI_5, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI5)},
{NC, NC, 0}
};
MBED_WEAK const PinMap PinMap_SPI_SCLK[] = {
{PA_5, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)},
{PB_0, SPI_5, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI5)},
{PB_3, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)},
{PB_3_ALT0, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI3)},
{PB_10, SPI_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI2)},
{PB_12, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF7_SPI3)},
{PB_13, SPI_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI2)},
{PB_13_ALT0, SPI_4, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI4)},
{PC_7, SPI_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI2)},
{PC_10, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI3)},
{PD_3, SPI_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI2)},
{PE_2, SPI_4, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI4)},
{PE_2_ALT0, SPI_5, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI5)},
{PE_12, SPI_4, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI4)},
{PE_12_ALT0, SPI_5, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI5)},
{NC, NC, 0}
};
MBED_WEAK const PinMap PinMap_SPI_SSEL[] = {
{PA_4, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)},
{PA_4_ALT0, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI3)},
{PA_15, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)},
{PA_15_ALT0, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI3)},
{PB_1, SPI_5, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI5)},
{PB_9, SPI_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI2)},
{PB_12, SPI_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI2)},
{PB_12_ALT0, SPI_4, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI4)},
{PE_4, SPI_4, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI4)},
{PE_4_ALT0, SPI_5, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI5)},
{PE_11, SPI_4, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI4)},
{PE_11_ALT0, SPI_5, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI5)},
{NC, NC, 0}
};
//*** CAN ***
MBED_WEAK const PinMap PinMap_CAN_RD[] = {
{PA_11, CAN_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_CAN1)},
{PB_5, CAN_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_CAN2)},
{PB_8, CAN_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF8_CAN1)},
{PB_12, CAN_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_CAN2)},
{PD_0, CAN_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_CAN1)},
{PG_0, CAN_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_CAN1)},
{PG_11, CAN_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_CAN2)},
{NC, NC, 0}
};
MBED_WEAK const PinMap PinMap_CAN_TD[] = {
{PA_12, CAN_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_CAN1)},
{PB_6, CAN_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_CAN2)},
{PB_9, CAN_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF8_CAN1)},
{PB_13, CAN_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_CAN2)},
{PD_1, CAN_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_CAN1)},
{PG_1, CAN_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_CAN1)},
{PG_12, CAN_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF9_CAN2)},
{NC, NC, 0}
};
//*** QUADSPI ***
MBED_WEAK const PinMap PinMap_QSPI_DATA0[] = {
{PC_9, QSPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF9_QSPI)}, // QUADSPI_BK1_IO0
{PD_11, QSPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF9_QSPI)}, // QUADSPI_BK1_IO0
{PF_8, QSPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF10_QSPI)}, // QUADSPI_BK1_IO0
{NC, NC, 0}
};
MBED_WEAK const PinMap PinMap_QSPI_DATA1[] = {
{PC_10, QSPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF9_QSPI)}, // QUADSPI_BK1_IO1
{PD_12, QSPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF9_QSPI)}, // QUADSPI_BK1_IO1
{PF_9, QSPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF10_QSPI)}, // QUADSPI_BK1_IO1
{NC, NC, 0}
};
MBED_WEAK const PinMap PinMap_QSPI_DATA2[] = {
{PC_8, QSPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF9_QSPI)}, // QUADSPI_BK1_IO2
{PE_2, QSPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF9_QSPI)}, // QUADSPI_BK1_IO2
{PF_7, QSPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF9_QSPI)}, // QUADSPI_BK1_IO2
{NC, NC, 0}
};
MBED_WEAK const PinMap PinMap_QSPI_DATA3[] = {
{PA_1, QSPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF9_QSPI)}, // QUADSPI_BK1_IO3
{PD_13, QSPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF9_QSPI)}, // QUADSPI_BK1_IO3
{PF_6, QSPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF9_QSPI)}, // QUADSPI_BK1_IO3
{NC, NC, 0}
};
MBED_WEAK const PinMap PinMap_QSPI_SCLK[] = {
{PB_1, QSPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF9_QSPI)}, // QUADSPI_CLK
{PB_2, QSPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF9_QSPI)}, // QUADSPI_CLK
{PD_3, QSPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF9_QSPI)}, // QUADSPI_CLK
{NC, NC, 0}
};
MBED_WEAK const PinMap PinMap_QSPI_SSEL[] = {
{PB_6, QSPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF10_QSPI)}, // QUADSPI_BK1_NCS
{PG_6, QSPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF10_QSPI)}, // QUADSPI_BK1_NCS
{NC, NC, 0}
};
//*** USBDEVICE ***
MBED_WEAK const PinMap PinMap_USB_FS[] = {
// {PA_8, USB_FS, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF10_OTG_FS)}, // USB_OTG_FS_SOF
{PA_9, USB_FS, STM_PIN_DATA(STM_MODE_INPUT, GPIO_NOPULL, GPIO_AF10_OTG_FS)}, // USB_OTG_FS_VBUS
{PA_10, USB_FS, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_PULLUP, GPIO_AF10_OTG_FS)}, // USB_OTG_FS_ID
{PA_11, USB_FS, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF10_OTG_FS)}, // USB_OTG_FS_DM
{PA_12, USB_FS, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF10_OTG_FS)}, // USB_OTG_FS_DP
{NC, NC, 0}
};
|