filename
stringlengths 3
9
| code
stringlengths 4
1.87M
|
---|---|
990876.c | /*
Copyright 2015 Bloomberg Finance L.P.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <comdb2.h>
#include <bdb_sqlstat1.h>
#include <strings.h>
#include <assert.h>
#include <limits.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <inttypes.h>
#include <pthread.h>
#include <epochlib.h>
#include "analyze.h"
#include "sql.h"
#include <sqliteInt.h>
#include "block_internal.h"
#include <thread_malloc.h>
#include <autoanalyze.h>
#include <bdb_schemachange.h>
#include <sqlstat1.h>
#include <sqloffload.h>
#include <comdb2_atomic.h>
#include <ctrace.h>
#include <logmsg.h>
/* amount of thread-memory initialized for each analyze thread (bytes) */
static int analyze_thread_memory = 1048576;
/* global is-running flag; set atomically in set_analyze_running(), cleared
 * when analyze_table()/analyze_database() finish */
volatile int analyze_running_flag = 0;
/* set to request that an in-flight analyze abort; only meaningful while
 * analyze_running_flag is set */
static int analyze_abort_requested = 0;
/* global enable / disable switch for index sampling */
static int sampled_tables_enabled = 1;
/* table-size threshold above which indices are sampled; defaults to 100 Mb */
long long sampling_threshold = 104857600;
/* hard-maximum number of analyze-table threads */
static int analyze_hard_max_table_threads = 15;
/* maximum number of analyze-table threads */
int analyze_max_table_threads = 5;
/* current number of analyze-table threads */
static int analyze_cur_table_threads = 0;
/* hard-maximum number of analyze-sampling threads */
static int analyze_hard_max_comp_threads = 40;
/* maximum number of analyze-sampling threads */
int analyze_max_comp_threads = 10;
/* current number of analyze-sampling threads */
static int analyze_cur_comp_threads = 0;
/* protects analyze_cur_table_threads and table-state waits */
static pthread_mutex_t table_thd_mutex = PTHREAD_MUTEX_INITIALIZER;
/* broadcast when a table thread finishes or releases its slot */
static pthread_cond_t table_thd_cond = PTHREAD_COND_INITIALIZER;
/* protects analyze_cur_comp_threads and sampling-state waits */
static pthread_mutex_t comp_thd_mutex = PTHREAD_MUTEX_INITIALIZER;
/* broadcast when a sampling thread finishes or releases its slot */
static pthread_cond_t comp_thd_cond = PTHREAD_COND_INITIALIZER;
/* progress states for one index-sampling job (index_descriptor_t.comp_state) */
enum {
    SAMPLING_NOTINITED = 0, /* descriptor not yet set up */
    SAMPLING_STARTUP = 1,   /* dispatched, thread not yet running */
    SAMPLING_RUNNING = 2,   /* sampling in progress */
    SAMPLING_COMPLETE = 3,  /* finished successfully */
    SAMPLING_ERROR = 4      /* finished with an error */
};
/* progress states for one analyze-table job (table_descriptor_t.table_state) */
enum {
    TABLE_NOTINITED = 0, /* descriptor not yet set up */
    TABLE_STARTUP = 1,   /* dispatched, thread not yet running */
    TABLE_RUNNING = 2,   /* analyze in progress */
    TABLE_COMPLETE = 3,  /* finished successfully */
    TABLE_FAILED = 4,    /* finished with an error */
    TABLE_SKIPPED = 5    /* skipped (coverage configured to 0) */
};
/* per-index work item handed to a sampling thread */
typedef struct index_descriptor {
    pthread_t thread_id;  /* sampling thread handle */
    int comp_state;       /* SAMPLING_* progress state */
    sampled_idx_t *s_ix;  /* slot in clnt->sampled_idx_tbl to fill in */
    struct dbtable *tbl;  /* table whose index is being sampled */
    int ix;               /* index number within tbl */
    int sampling_pct;     /* percentage of records to sample */
} index_descriptor_t;
/* per-table work item handed to an analyze-table thread */
typedef struct table_descriptor {
    pthread_t thread_id;               /* analyze thread handle */
    int table_state;                   /* TABLE_* progress state */
    char table[MAXTABLELEN];           /* name of the table to analyze */
    SBUF2 *sb;                         /* client stream for status output */
    int scale;                         /* sampling coverage percentage */
    int override_llmeta;               /* nonzero: ignore llmeta-saved scale */
    index_descriptor_t index[MAXINDEX]; /* one sampling job per index */
} table_descriptor_t;
/* Restore saved analyze stats for one sqlite_stat<N> table: drop the live
 * rows for 'table', then rename the 'cdb2.<table>.sav' rows back into place.
 * (loadStat4 in analyze.c ignores stat entries whose tbl matches 'cdb2.%'.)
 * Returns the first nonzero rc from the internal SQL, else 0. */
int backout_stats_frm_tbl(struct sqlclntstate *clnt, const char *table,
                          int stattbl)
{
    char query[256];
    snprintf(query, sizeof(query), "delete from sqlite_stat%d where tbl='%s'",
             stattbl, table);
    int rc = run_internal_sql_clnt(clnt, query);
    if (rc != 0)
        return rc;
    snprintf(query, sizeof(query),
             "update sqlite_stat%d set tbl='%s' where tbl='cdb2.%s.sav'",
             stattbl, table, table);
    return run_internal_sql_clnt(clnt, query);
}
/* Run one statement of an analyze transaction through sqlite.
 * On success, optionally reports sqlite3_changes() through *changes.
 * Returns the sqlite rc (0 on success).
 * Fix: the error message handed back by sqlite3_exec() is allocated with
 * sqlite3_malloc() and must be released with sqlite3_free(); the original
 * leaked it on every failure. */
static int run_sql_part_trans(sqlite3 *sqldb, struct sqlclntstate *client,
                              char *sql, int *changes)
{
    int rc;
    char *msg = NULL;
    /* set sql and analyze flavor */
    client->sql = sql;
    /* set thread info */
    struct sql_thread *thd = pthread_getspecific(query_info_key);
    sql_get_query_id(thd);
    /* run sql */
    rc = sqlite3_exec(sqldb, sql, NULL, NULL, &msg);
    if (rc) {
        logmsg(LOGMSG_ERROR, "Analyze failure rc %d: %s\n", rc,
               msg ? msg : "<unknown error>");
        sqlite3_free(msg); /* sqlite3_exec allocates msg; free it */
        return rc;
    }
    if (changes != NULL)
        *changes = sqlite3_changes(sqldb);
    return rc;
}
/* Preflight for a stats backout: sqlite_stat1 must exist and no analyze
 * may currently be running.  On failure the reason and "FAILED" are sent
 * to the client and -1 is returned; 0 on success. */
static int check_stat1_and_flag(SBUF2 *sb)
{
    const char *problem = NULL;
    if (get_dbtable_by_name("sqlite_stat1") == NULL)
        problem = "analyze requires sqlite_stat1 to run";
    else if (analyze_running_flag)
        problem = "analyze is already running";
    if (problem == NULL)
        return 0;
    sbuf2printf(sb, "?%s: %s\n", __func__, problem);
    sbuf2printf(sb, "FAILED\n");
    logmsg(LOGMSG_ERROR, "%s: %s\n", __func__, problem);
    return -1;
}
/* Purge stale rows from the sqlite_stat tables: entries with no index,
 * and entries whose index no longer exists in sqlite_master (stat4 also
 * keeps the 'cdb2.<name>.sav' backup rows alive). */
void cleanup_stats(SBUF2 *sb)
{
    struct sqlclntstate clnt;
    start_internal_sql_clnt(&clnt);
    clnt.sb = sb;
    if (get_dbtable_by_name("sqlite_stat1") != NULL) {
        run_internal_sql_clnt(&clnt,
                              "delete from sqlite_stat1 where idx is null");
        run_internal_sql_clnt(
            &clnt, "delete from sqlite_stat1 where idx not in "
                   "(select name from sqlite_master where type='index')");
    }
    if (get_dbtable_by_name("sqlite_stat2") != NULL) {
        run_internal_sql_clnt(
            &clnt, "delete from sqlite_stat2 where idx not in "
                   "(select name from sqlite_master where type='index')");
    }
    if (get_dbtable_by_name("sqlite_stat4") != NULL) {
        run_internal_sql_clnt(
            &clnt,
            "delete from sqlite_stat4 where idx not in "
            "(select name from sqlite_master where type='index' UNION select "
            "'cdb2.'||name||'.sav' from sqlite_master where type='index')");
    }
    end_internal_sql_clnt(&clnt);
}
/* Look up the sampled-index record matching (table, ix) in the client's
 * sampled_idx_tbl; returns NULL when sampling was not done or no entry
 * matches. */
static sampled_idx_t *find_sampled_index(struct sqlclntstate *client,
                                         char *table, int ix)
{
    /* nothing was sampled for this client */
    if (client->sampled_idx_tbl == NULL || client->n_cmp_idx <= 0)
        return NULL;
    for (int k = 0; k < client->n_cmp_idx; k++) {
        sampled_idx_t *cand = &client->sampled_idx_tbl[k];
        if (cand->ixnum == ix && strcmp(cand->name, table) == 0)
            return cand;
    }
    return NULL;
}
/* Sample (previously misnamed compress) one index: ask bdb to summarize it
 * into a temp table and record the results in the sampled_idx_t slot.
 * Returns 0 on success, -1 on failure.
 * Fix: strncpy() does not NUL-terminate when the source fills the buffer;
 * force termination so later strcmp() on s_ix->name is safe. */
static int sample_index_int(index_descriptor_t *ix_des)
{
    sampled_idx_t *s_ix = ix_des->s_ix;
    struct dbtable *tbl = ix_des->tbl;
    int sampling_pct = ix_des->sampling_pct;
    int ix = ix_des->ix;
    int rc;
    int bdberr;
    unsigned long long n_recs;
    unsigned long long n_sampled_recs;
    struct temp_table *tmptbl = NULL;
    /* cache the tablename for sqlglue; guarantee NUL-termination */
    strncpy(s_ix->name, tbl->tablename, sizeof(s_ix->name) - 1);
    s_ix->name[sizeof(s_ix->name) - 1] = '\0';
    /* ask bdb to put a summary of this into a temp-table */
    rc = bdb_summarize_table(tbl->handle, ix, sampling_pct, &tmptbl,
                             &n_sampled_recs, &n_recs, &bdberr);
    if (rc) {
        logmsg(LOGMSG_ERROR, "%s: failed to sample table '%s' idx %d\n",
               __func__, tbl->tablename, ix);
        return -1;
    }
    /* fill in structure */
    s_ix->ixnum = ix;
    s_ix->sampled_table = tmptbl;
    s_ix->sampling_pct = sampling_pct;
    s_ix->n_recs = n_recs;
    s_ix->n_sampled_recs = n_sampled_recs;
    return 0;
}
/* Entry point for one index-sampling thread: runs sample_index_int() for the
 * descriptor, publishes SAMPLING_COMPLETE / SAMPLING_ERROR, then releases its
 * slot in analyze_cur_comp_threads and wakes any waiters. */
static void *sampling_thread(void *arg)
{
    int rc;
    index_descriptor_t *ix_des = (index_descriptor_t *)arg;
    /* register thread */
    thrman_register(THRTYPE_ANALYZE);
    backend_thread_event(thedb, COMDB2_THR_EVENT_START_RDWR);
    /* update state */
    ix_des->comp_state = SAMPLING_RUNNING;
    /* sample the index */
    rc = sample_index_int(ix_des);
    /* mark the return; wait_for_index() polls comp_state under comp_thd_mutex */
    if (0 == rc) {
        ix_des->comp_state = SAMPLING_COMPLETE;
    } else {
        ix_des->comp_state = SAMPLING_ERROR;
    }
    /* release the thread slot and wake both slot-waiters and state-waiters */
    pthread_mutex_lock(&comp_thd_mutex);
    analyze_cur_comp_threads--;
    pthread_cond_broadcast(&comp_thd_cond);
    pthread_mutex_unlock(&comp_thd_mutex);
    /* cleanup */
    backend_thread_event(thedb, COMDB2_THR_EVENT_DONE_RDWR);
    return NULL;
}
/* Reserve a sampling-thread slot (blocking until one is free) and start a
 * detached thread running sampling_thread() for this index.
 * Returns the pthread_create rc (0 on success).
 * Fix: if pthread_create() fails, the reserved slot must be given back;
 * the original leaked the counter, which could eventually wedge every
 * future dispatcher at analyze_max_comp_threads. */
static int dispatch_sample_index_thread(index_descriptor_t *ix_des)
{
    /* wait for and reserve a sampling-thread slot */
    pthread_mutex_lock(&comp_thd_mutex);
    while (analyze_cur_comp_threads >= analyze_max_comp_threads) {
        pthread_cond_wait(&comp_thd_cond, &comp_thd_mutex);
    }
    analyze_cur_comp_threads++;
    pthread_mutex_unlock(&comp_thd_mutex);
    /* dispatch */
    int rc = pthread_create(&ix_des->thread_id, &gbl_pthread_attr_detached,
                            sampling_thread, ix_des);
    if (rc != 0) {
        /* thread never started: release the slot and wake waiters */
        pthread_mutex_lock(&comp_thd_mutex);
        analyze_cur_comp_threads--;
        pthread_cond_broadcast(&comp_thd_cond);
        pthread_mutex_unlock(&comp_thd_mutex);
    }
    return rc;
}
/* Block until the sampling job for this index leaves the STARTUP/RUNNING
 * states (the sampling thread broadcasts comp_thd_cond when done).
 * Always returns 0; check ix_des->comp_state for the outcome. */
static int wait_for_index(index_descriptor_t *ix_des)
{
    pthread_mutex_lock(&comp_thd_mutex);
    for (;;) {
        int st = ix_des->comp_state;
        if (st != SAMPLING_STARTUP && st != SAMPLING_RUNNING)
            break;
        pthread_cond_wait(&comp_thd_cond, &comp_thd_mutex);
    }
    pthread_mutex_unlock(&comp_thd_mutex);
    return 0;
}
/* Sample every index of tbl in parallel: allocate the client's
 * sampled_idx_tbl, dispatch one sampling thread per index, then wait for
 * all of them.  Returns 0 when every index sampled successfully, 1 if any
 * dispatch or sampling failed (already-dispatched threads are still waited
 * on before returning). */
static int sample_indicies(table_descriptor_t *td, struct sqlclntstate *client,
                           struct dbtable *tbl, int sampling_pct, SBUF2 *sb)
{
    int i;
    int err = 0;
    char *table;
    index_descriptor_t *ix_des;
    /* table name, used only for error reporting below */
    table = tbl->tablename;
    /* allocate one sampled_idx_t slot per index */
    client->sampled_idx_tbl = calloc(tbl->nix, sizeof(sampled_idx_t));
    if (!client->sampled_idx_tbl) {
        logmsg(LOGMSG_ERROR, "%s: out of memory\n", __func__);
        return -1;
    }
    /* set # sampled ixs */
    client->n_cmp_idx = tbl->nix;
    /* dispatch one sampling thread per index */
    for (i = 0; i < client->n_cmp_idx; i++) {
        /* prepare index descriptor */
        ix_des = &td->index[i];
        ix_des->comp_state = SAMPLING_STARTUP;
        ix_des->s_ix = &client->sampled_idx_tbl[i];
        ix_des->tbl = tbl;
        ix_des->ix = i;
        ix_des->sampling_pct = sampling_pct;
        /* start an index sampling thread */
        int rc = dispatch_sample_index_thread(ix_des);
        if (0 != rc) {
            logmsg(LOGMSG_ERROR,
                   "Couldn't start sampling-thread for table '%s' ix %d "
                   "rc=%d\n",
                   table, i, rc);
            err = 1;
            break;
        }
    }
    /* wait for every dispatched thread; indexes past a dispatch failure are
     * still in SAMPLING_NOTINITED and fall straight through wait_for_index */
    for (i = 0; i < client->n_cmp_idx; i++) {
        wait_for_index(&td->index[i]);
        if (SAMPLING_COMPLETE != td->index[i].comp_state)
            err = 1;
    }
    return err;
}
/* Close every sampled temp table and free the client's sampled_idx_tbl.
 * Always returns 0.
 * Fix: the old `if (!s_ix)` test on `&array[i]` could never be true (the
 * address of an array element is never NULL) — removed as dead code. */
static int cleanup_sampled_indicies(struct sqlclntstate *client,
                                    struct dbtable *tbl)
{
    int i;
    int rc;
    int bdberr;
    /* delete sampled temptables */
    for (i = 0; i < client->n_cmp_idx; i++) {
        sampled_idx_t *s_ix = &client->sampled_idx_tbl[i];
        if (!s_ix->sampled_table)
            continue;
        rc = bdb_temp_table_close(tbl->handle, s_ix->sampled_table, &bdberr);
        if (rc) {
            logmsg(LOGMSG_ERROR, "%s: error closing tmptable: rc=%d "
                                 "bdberr=%d\n",
                   __func__, rc, bdberr);
        }
    }
    /* free & zero struct */
    free(client->sampled_idx_tbl);
    client->sampled_idx_tbl = NULL;
    return 0;
}
/* Return the sampled temp table for (table, idx), or NULL when that index
 * was not sampled for this client. */
struct temp_table *analyze_get_sampled_temptable(struct sqlclntstate *client,
                                                 char *table, int idx)
{
    sampled_idx_t *found = find_sampled_index(client, table, idx);
    return found ? found->sampled_table : NULL;
}
/* Called from sqlite. Return the (estimated) number of records for a sampled
 * table, clamped to INT_MAX, or -1 when the index was not sampled so sqlite
 * falls back to its own estimate. */
int analyze_get_nrecs(int iTable)
{
    struct sql_thread *thd;
    struct sqlclntstate *client;
    struct dbtable *db;
    sampled_idx_t *s_ix;
    int ixnum;
    /* get client structures */
    thd = pthread_getspecific(query_info_key);
    client = thd->clnt;
    /* map sqlite's root-page/table number back to a comdb2 table + index */
    db = get_sqlite_db(thd, iTable, &ixnum);
    assert(db);
    /* grab sampled table descriptor */
    s_ix = find_sampled_index(client, db->tablename, ixnum);
    /* return -1 if not sampled. Sqlite will use the value it calculated. */
    if (!s_ix) {
        return -1;
    }
    /* clamp: n_recs is unsigned long long but the return type is int */
    if (s_ix->n_recs > INT_MAX) {
        return INT_MAX;
    }
    /* return actual number of records */
    else {
        return (int)s_ix->n_recs;
    }
}
/* Return the number of records actually sampled for index ixnum of the
 * current client's sampled table set, or -1 when nothing was sampled. */
int64_t analyze_get_sampled_nrecs(const char *dbname, int ixnum)
{
    struct sql_thread *thd = pthread_getspecific(query_info_key);
    struct sqlclntstate *client = thd->clnt;
    /* punt if this client has no sampling state */
    if (client->sampled_idx_tbl == NULL || client->n_cmp_idx <= 0)
        return -1;
    assert(0 <= ixnum && ixnum < client->n_cmp_idx);
    return client->sampled_idx_tbl[ixnum].n_sampled_recs;
}
/* Return 1 if (table, idx) has a sampled temp table for this client,
 * 0 otherwise. */
int analyze_is_sampled(struct sqlclntstate *client, char *table, int idx)
{
    return find_sampled_index(client, table, idx) != NULL;
}
/* Record an analyze event in the comdb2_oplog table for local replication.
 * No-op unless gbl_replicate_local is set and comdb2_oplog exists.  The
 * whole sequence (get seqno, write ANALYZE + COMMIT oplog entries, commit)
 * retries from scratch on RC_INTERNAL_RETRY, up to gbl_maxretries times. */
static int local_replicate_write_analyze(char *table)
{
    int rc;
    tran_type *trans = NULL;
    long long seqno;
    int nretries = 0;
    struct ireq iq;
    int arc;
    struct block_state blkstate = {0};
    /* skip if not needed */
    if (gbl_replicate_local == 0 || get_dbtable_by_name("comdb2_oplog") == NULL)
        return 0;
    init_fake_ireq(thedb, &iq);
    iq.use_handle = thedb->bdb_env;
    iq.blkstate = &blkstate;
again:
    nretries++;
    /* abort the previous attempt's transaction before retrying */
    if (trans) {
        arc = trans_abort(&iq, trans);
        if (arc) {
            logmsg(LOGMSG_ERROR, "Analyze: trans_abort rc %d\n", arc);
            trans = NULL;
            goto done;
        }
        trans = NULL;
    }
    if (nretries > gbl_maxretries)
        return RC_INTERNAL_RETRY;
    rc = trans_start(&iq, NULL, &trans);
    if (rc) {
        logmsg(LOGMSG_ERROR, "analyze: trans_start rc %d\n", rc);
        goto done;
    }
    /* concurrent mode uses a timestamp as the sequence number; otherwise a
     * real sequence number is allocated inside the transaction */
    if (gbl_replicate_local_concurrent) {
        unsigned long long useqno;
        useqno = bdb_get_timestamp(thedb->bdb_env);
        memcpy(&seqno, &useqno, sizeof(seqno));
    } else
        rc = get_next_seqno(trans, &seqno);
    if (rc) {
        if (rc != RC_INTERNAL_RETRY) {
            logmsg(LOGMSG_ERROR, "get_next_seqno unexpected rc %d\n", rc);
            goto done;
        } else
            goto again;
    }
    /* oplog entry 0: the analyze itself */
    iq.blkstate->seqno = seqno;
    iq.blkstate->pos = 0;
    rc = add_oplog_entry(&iq, trans, LCL_OP_ANALYZE, table, strlen(table));
    if (rc == RC_INTERNAL_RETRY)
        goto again;
    if (rc) {
        logmsg(LOGMSG_ERROR, "analyze: add_oplog_entry(analyze) rc %d\n", rc);
        goto done;
    }
    /* oplog entry 1: the commit marker */
    iq.blkstate->seqno = seqno;
    iq.blkstate->pos = 1;
    rc = add_oplog_entry(&iq, trans, LCL_OP_COMMIT, NULL, 0);
    if (rc == RC_INTERNAL_RETRY)
        goto again;
    if (rc) {
        logmsg(LOGMSG_ERROR, "analyze: add_oplog_entry(commit) rc %d\n", rc);
        goto done;
    }
    rc = trans_commit(&iq, trans, gbl_mynode);
    if (rc) {
        logmsg(LOGMSG_ERROR, "analyze: commit rc %d\n", rc);
        goto done;
    }
    trans = NULL;
done:
    /* abort any transaction still open on the error paths */
    if (trans) {
        arc = trans_abort(&iq, trans);
        if (arc)
            logmsg(LOGMSG_ERROR, "analyze: trans_abort rc %d\n", arc);
    }
    return rc;
}
/* Fetch the per-table sampling threshold saved in llmeta; overwrite
 * *sampling_threshold only when a positive value is stored.
 * Fixes: the DEBUG printfs used %d for long long values (undefined
 * behavior per the C standard's printf contract), and the #ifdef spanned
 * the if/else braces, making the structure fragile to edit. */
static void get_sampling_threshold(char *table, long long *sampling_threshold)
{
    int bdberr = 0;
    long long threshold = 0;
    bdb_get_analyzethreshold_table(NULL, table, &threshold, &bdberr);
#ifdef DEBUG
    printf("retrieving from llmeta saved threshold for table '%s': %lld\n",
           table, threshold);
#endif
    if (threshold > 0) {
        *sampling_threshold = threshold;
#ifdef DEBUG
        printf("Using llmetasaved threshold %lld\n", *sampling_threshold);
#endif
    }
#ifdef DEBUG
    else {
        printf("Using default threshold %lld\n", *sampling_threshold);
    }
#endif
}
/* Fetch the coverage percentage saved in llmeta for this table.
 * *scale is updated only when a valid value (0..100) is stored; otherwise
 * the caller's value is left untouched. */
static void get_saved_scale(char *table, int *scale)
{
    int bdberr = 0;
    int saved = 0;
    bdb_get_analyzecoverage_table(NULL, table, &saved, &bdberr);
#ifdef DEBUG
    printf("retrieving from llmeta saved coverage for table '%s': %d\n", table,
           saved);
#endif
    if (0 <= saved && saved <= 100)
        *scale = saved;
}
/* Delete the saved ('cdb2.<table>.sav') rows from sqlite_stat<stat_tbl>
 * inside the current transaction.  Returns the sql rc (0 on success).
 * (sb is unused but kept for interface compatibility with callers.)
 * Fixes: the error logmsg was missing its newline; removed the dead `ii`
 * counter that was incremented and never read. */
int delete_sav(sqlite3 *sqldb, struct sqlclntstate *client, SBUF2 *sb,
               int stat_tbl, const char *table)
{
    char sql[256];
    int ndeleted = 1;
    int rc = 0;
    snprintf(sql, sizeof(sql),
             "delete from sqlite_stat%d where tbl='cdb2.%s.sav'", stat_tbl,
             table);
#ifdef DEBUG
    printf("query '%s'\n", sql);
#endif
    if ((rc = run_sql_part_trans(sqldb, client, sql, &ndeleted)) != 0) {
        logmsg(LOGMSG_ERROR, "delete sav failed\n");
        return rc;
    }
#ifdef DEBUG
    printf("deleted %d from tbl='cdb2.%s.sav'\n", ndeleted, table);
#endif
    return 0;
}
/* Rename the live rows of sqlite_stat<stat_tbl> for `table` to the saved
 * name 'cdb2.<table>.sav' inside the current transaction.  Returns the sql
 * rc (0 on success).  (sb is unused but kept for interface compatibility.)
 * Fixes: the error logmsg was missing its newline; removed the dead `ii`
 * counter that was incremented and never read. */
int update_sav(sqlite3 *sqldb, struct sqlclntstate *client, SBUF2 *sb,
               int stat_tbl, const char *table)
{
    char sql[256];
    int nupdated = 1;
    int rc = 0;
    snprintf(sql, sizeof(sql),
             "update sqlite_stat%d set tbl='cdb2.%s.sav' where tbl='%s'",
             stat_tbl, table, table);
#ifdef DEBUG
    printf("query '%s'\n", sql);
#endif
    if ((rc = run_sql_part_trans(sqldb, client, sql, &nupdated)) != 0) {
        logmsg(LOGMSG_ERROR, "update sav failed\n");
        return rc;
    }
#ifdef DEBUG
    printf("updated %d from tbl='cdb2.%s.sav'\n", nupdated, table);
#endif
    return 0;
}
/* Run analyze for one table inside a single transaction: back the current
 * sqlite_stat rows up under 'cdb2.<tbl>.sav', optionally sample large
 * indices, then run "analyzesqlite" and COMMIT (ROLLBACK on any failure).
 * Returns 0 on success, TABLE_SKIPPED when coverage is 0, nonzero on error.
 * Fixes: on the first failure point (BEGIN), the original `goto cleanup`
 * jumped over the initializations of `sql` and `sampled_table`, which were
 * then read at cleanup — reading indeterminate values is undefined
 * behavior.  Both are now initialized before the first goto. */
static int analyze_table_int(table_descriptor_t *td,
                             struct thr_handle *thr_self)
{
#ifdef DEBUG
    printf("analyze_table_int() table '%s': scale %d\n", td->table, td->scale);
#endif
    /* make sure we can find this table */
    struct dbtable *tbl = get_dbtable_by_name(td->table);
    if (!tbl) {
        sbuf2printf(td->sb, "?Cannot find table '%s'\n", td->table);
        return -1;
    }
    if (td->override_llmeta == 0) /* user did not specify override parameter */
        get_saved_scale(td->table, &td->scale);
    if (td->scale == 0) {
        sbuf2printf(td->sb, "?Coverage for table '%s' is 0, skipping analyze\n",
                    td->table);
        logmsg(LOGMSG_INFO, "coverage for table '%s' is 0, skipping analyze\n",
               td->table);
        return TABLE_SKIPPED;
    }
    /* pass flush_resp fsql_write_response in sqlinterfaces.c
     * to catch where write to stdout is occurring put in gdb:
     * b write if 1==$rdi
     */
    SBUF2 *sb2 = sbuf2open(fileno(stdout), 0);
    struct sqlclntstate clnt;
    start_internal_sql_clnt(&clnt);
    clnt.osql_max_trans = 0; /* allow large transactions */
    clnt.sb = sb2;
    sbuf2settimeout(clnt.sb, 0, 0);
    logmsg(LOGMSG_INFO, "Analyze thread starting, table %s (%d%%)\n", td->table,
           td->scale);
    /* initialized before any goto: both are read on the cleanup path */
    int sampled_table = 0;
    char sql[256];
    snprintf(sql, sizeof(sql), "BEGIN");
    int rc = run_internal_sql_clnt(&clnt, sql);
    if (rc)
        goto cleanup;
    /* save current stat1 rows under the .sav name */
    snprintf(sql, sizeof(sql),
             "delete from sqlite_stat1 where tbl='cdb2.%s.sav'", td->table);
    rc = run_internal_sql_clnt(&clnt, sql);
    if (rc)
        goto error;
    snprintf(sql, sizeof(sql),
             "update sqlite_stat1 set tbl='cdb2.%s.sav' where tbl='%s'",
             td->table, td->table);
    rc = run_internal_sql_clnt(&clnt, sql);
    if (rc)
        goto error;
    if (get_dbtable_by_name("sqlite_stat2")) {
        snprintf(sql, sizeof(sql),
                 "delete from sqlite_stat2 where tbl='cdb2.%s.sav'", td->table);
        rc = run_internal_sql_clnt(&clnt, sql);
        if (rc)
            goto error;
        snprintf(sql, sizeof(sql),
                 "update sqlite_stat2 set tbl='cdb2.%s.sav' where tbl='%s'",
                 td->table, td->table);
        rc = run_internal_sql_clnt(&clnt, sql);
        if (rc)
            goto error;
    }
    if (get_dbtable_by_name("sqlite_stat4")) {
        snprintf(sql, sizeof(sql),
                 "delete from sqlite_stat4 where tbl='cdb2.%s.sav'", td->table);
        rc = run_internal_sql_clnt(&clnt, sql);
        if (rc)
            goto error;
        snprintf(sql, sizeof(sql),
                 "update sqlite_stat4 set tbl='cdb2.%s.sav' where tbl='%s'",
                 td->table, td->table);
        rc = run_internal_sql_clnt(&clnt, sql);
        if (rc)
            goto error;
    }
    /* grab the size of the table */
    int64_t totsiz = calc_table_size_analyze(tbl);
    if (sampled_tables_enabled)
        get_sampling_threshold(td->table, &sampling_threshold);
    /* sample if enabled & large */
    if (sampled_tables_enabled && totsiz > sampling_threshold) {
        logmsg(LOGMSG_INFO, "Sampling table '%s' at %d%% coverage\n", td->table,
               td->scale);
        sampled_table = 1;
        rc = sample_indicies(td, &clnt, tbl, td->scale, td->sb);
        if (rc) {
            snprintf(sql, sizeof(sql), "Sampling table '%s'", td->table);
            goto error;
        }
    }
    clnt.is_analyze = 1;
    /* run analyze as sql query */
    snprintf(sql, sizeof(sql), "analyzesqlite main.\"%s\"", td->table);
    rc = run_internal_sql_clnt(&clnt, sql);
    clnt.is_analyze = 0;
    if (rc)
        goto error;
    snprintf(sql, sizeof(sql), "COMMIT");
    rc = run_internal_sql_clnt(&clnt, sql);
cleanup:
    sbuf2flush(sb2);
    sbuf2free(sb2);
    if (rc) { /* send error (and the failing statement) to the client */
        sbuf2printf(td->sb, "?Analyze table %s. Error occurred with: %s\n",
                    td->table, sql);
    } else {
        sbuf2printf(td->sb, "?Analyze completed table %s\n", td->table);
        logmsg(LOGMSG_INFO, "Analyze completed, table %s\n", td->table);
    }
    end_internal_sql_clnt(&clnt);
    if (sampled_table) {
        cleanup_sampled_indicies(&clnt, tbl);
    }
    return rc;
error:
    run_internal_sql_clnt(&clnt, "ROLLBACK");
    goto cleanup;
}
/* Entry point for one analyze-table thread: sets up thread-local memory,
 * runs analyze_table_int(), publishes the resulting TABLE_* state, notifies
 * the master (or resets the auto-analyze counter directly when we are the
 * master), then releases its slot in analyze_cur_table_threads. */
static void *table_thread(void *arg)
{
    int rc;
    table_descriptor_t *td = (table_descriptor_t *)arg;
    struct thr_handle *thd_self;
    /* register thread */
    thd_self = thrman_register(THRTYPE_ANALYZE);
    backend_thread_event(thedb, COMDB2_THR_EVENT_START_RDWR);
    stat4dump(1, td->table, 1); /* dump stats in trc file */
    sql_mem_init(NULL);
    thread_memcreate(analyze_thread_memory);
    /* update state */
    td->table_state = TABLE_RUNNING;
    /* analyze the table */
    rc = analyze_table_int(td, thd_self);
    ctrace("analyze_table_int: Table %s, rc = %d\n", td->table, rc);
    /* map rc to the published state; analyze_table_int can also return
     * TABLE_SKIPPED to signal a coverage-0 skip */
    if (0 == rc) {
        td->table_state = TABLE_COMPLETE;
        if (thedb->master == gbl_mynode) { /* reset aa counter directly */
            void reset_aa_counter(char *tblname);
            ctrace("analyze: Analyzed Table %s, reseting counter to 0\n",
                   td->table);
            reset_aa_counter(td->table);
        } else { /* ask the master to reset it */
            ctrace("analyze: Analyzed Table %s, msg to master to reset counter to 0\n",
                   td->table);
            bdb_send_analysed_table_to_master(thedb->bdb_env, td->table);
        }
    } else if (TABLE_SKIPPED == rc) {
        td->table_state = TABLE_SKIPPED;
    } else {
        td->table_state = TABLE_FAILED;
    }
    /* release the thread slot and wake both slot-waiters and state-waiters */
    pthread_mutex_lock(&table_thd_mutex);
    analyze_cur_table_threads--;
    pthread_cond_broadcast(&table_thd_cond);
    pthread_mutex_unlock(&table_thd_mutex);
    backend_thread_event(thedb, COMDB2_THR_EVENT_DONE_RDWR);
    thread_memdestroy();
    sql_mem_shutdown(NULL);
    return NULL;
}
/* Reserve a table-thread slot (blocking until one is free) and start a
 * detached thread running table_thread() for this descriptor.
 * Returns the pthread_create rc (0 on success).
 * Fix: if pthread_create() fails, the reserved slot must be given back;
 * the original leaked the counter, which could eventually wedge every
 * future dispatcher at analyze_max_table_threads. */
static int dispatch_table_thread(table_descriptor_t *td)
{
    int rc;
    /* wait for and reserve a table-thread slot */
    pthread_mutex_lock(&table_thd_mutex);
    while (analyze_cur_table_threads >= analyze_max_table_threads) {
        pthread_cond_wait(&table_thd_cond, &table_thd_mutex);
    }
    analyze_cur_table_threads++;
    pthread_mutex_unlock(&table_thd_mutex);
    /* dispatch */
    rc = pthread_create(&td->thread_id, &gbl_pthread_attr_detached,
                        table_thread, td);
    if (rc != 0) {
        /* thread never started: release the slot and wake waiters */
        pthread_mutex_lock(&table_thd_mutex);
        analyze_cur_table_threads--;
        pthread_cond_broadcast(&table_thd_cond);
        pthread_mutex_unlock(&table_thd_mutex);
    }
    return rc;
}
/* Block until this table's analyze leaves the STARTUP/RUNNING states, then
 * report the outcome to the client.  Returns 0 for COMPLETE or SKIPPED,
 * -1 for any failure state. */
static int wait_for_table(table_descriptor_t *td)
{
    pthread_mutex_lock(&table_thd_mutex);
    while (td->table_state == TABLE_STARTUP || td->table_state == TABLE_RUNNING)
        pthread_cond_wait(&table_thd_cond, &table_thd_mutex);
    pthread_mutex_unlock(&table_thd_mutex);
    switch (td->table_state) {
    case TABLE_COMPLETE:
        sbuf2printf(td->sb, ">Analyze table '%s' is complete\n", td->table);
        return 0;
    case TABLE_SKIPPED:
        sbuf2printf(td->sb, ">Analyze table '%s' skipped\n", td->table);
        return 0;
    default:
        sbuf2printf(td->sb, ">Analyze table '%s' failed\n", td->table);
        return -1;
    }
}
/* Require sqlite_stat1 to exist -- without it analyze is pointless.
 * On failure the reason and "FAILED" are sent to the client and -1 is
 * returned; 0 on success. */
static inline int check_stat1(SBUF2 *sb)
{
    if (get_dbtable_by_name("sqlite_stat1") != NULL)
        return 0;
    sbuf2printf(sb, ">%s: analyze requires sqlite_stat1 to run\n", __func__);
    sbuf2printf(sb, "FAILED\n");
    logmsg(LOGMSG_ERROR, "%s: analyze requires sqlite_stat1 to run\n",
           __func__);
    return -1;
}
/* set analyze running with atomic ops
 * this makes sure two analyze requests don't race past
 * each-other both setting the flag.
 * Returns 0 when this caller won the flag, -1 when analyze was already
 * running (after reporting FAILED to the client). */
static inline int set_analyze_running(SBUF2 *sb)
{
    /* a new run starts with no pending abort request */
    analyze_abort_requested = 0;
    int old = XCHANGE(analyze_running_flag, 1); // atomically set the flag
    if (1 == old) // analyze_running_flag was already 1, so bail out
    {
        sbuf2printf(sb, ">%s: analyze is already running\n", __func__);
        sbuf2printf(sb, "FAILED\n");
        logmsg(LOGMSG_ERROR, "%s: analyze is already running\n", __func__);
        return -1;
    }
    return 0;
}
/* Request that the currently-running analyze abort; a no-op when no
 * analyze is running. */
void set_analyze_abort_requested()
{
    if (analyze_running_flag)
        analyze_abort_requested = 1;
}
/* Return nonzero when an abort of the running analyze has been requested. */
int get_analyze_abort_requested() { return analyze_abort_requested; }
/* Analyze a single table: dispatch one table thread and block until it
 * finishes, reporting SUCCESS/FAILED on sb.  Returns 0 on success, -1 or
 * the wait rc on failure.
 * Fix: strncpy() does not NUL-terminate when the source fills the buffer;
 * td.table is now guaranteed terminated for a maximum-length table name. */
int analyze_table(char *table, SBUF2 *sb, int scale, int override_llmeta)
{
    if (check_stat1(sb))
        return -1;
    if (gbl_schema_change_in_progress) {
        logmsg(LOGMSG_ERROR,
               "%s: Aborting Analyze because schema_change_in_progress\n",
               __func__);
        return -1;
    }
    if (set_analyze_running(sb))
        return -1;
    table_descriptor_t td = {0};
    /* initialize table sync structure */
    td.table_state = TABLE_STARTUP;
    td.sb = sb;
    td.scale = scale;
    td.override_llmeta = override_llmeta;
    strncpy(td.table, table, sizeof(td.table) - 1);
    td.table[sizeof(td.table) - 1] = '\0';
    /* dispatch */
    int rc = dispatch_table_thread(&td);
    if (0 != rc) {
        logmsg(LOGMSG_ERROR,
               "Analyze: Couldn't start table-thread for table '%s' rc=%d\n",
               table, rc);
        logmsg(LOGMSG_ERROR, "Analyze FAILED\n");
        analyze_running_flag = 0;
        return -1;
    }
    /* block waiting for analyze to complete */
    rc = wait_for_table(&td);
    if (rc == 0)
        sbuf2printf(sb, "SUCCESS\n");
    else
        sbuf2printf(sb, "FAILED\n");
    sbuf2flush(sb);
    /* no-longer running */
    analyze_running_flag = 0;
    return rc;
}
/* Analyze every (non sqlite_stat) table in the database: dispatch one table
 * thread per table, wait for all of them, then report SUCCESS/FAILED and
 * clean stale stat rows on success.  Returns the last dispatch rc.
 * Fixes: the calloc() result was dereferenced unchecked; strncpy() did not
 * guarantee NUL-termination of td[idx].table. */
int analyze_database(SBUF2 *sb, int scale, int override_llmeta)
{
    int rc = 0;
    int i;
    int idx = 0;
    int failed = 0;
    table_descriptor_t *td;
    if (check_stat1(sb))
        return -1;
    if (set_analyze_running(sb))
        return -1;
    /* allocate descriptor */
    td = calloc(thedb->num_dbs, sizeof(table_descriptor_t));
    if (!td) {
        logmsg(LOGMSG_ERROR, "%s: out of memory\n", __func__);
        sbuf2printf(sb, "FAILED\n");
        sbuf2flush(sb);
        analyze_running_flag = 0;
        return -1;
    }
    /* start analyzing each table */
    for (i = 0; i < thedb->num_dbs; i++) {
        /* skip sqlite_stat */
        if (is_sqlite_stat(thedb->dbs[i]->tablename)) {
            continue;
        }
        /* initialize table-descriptor */
        td[idx].table_state = TABLE_STARTUP;
        td[idx].sb = sb;
        td[idx].scale = scale;
        td[idx].override_llmeta = override_llmeta;
        strncpy(td[idx].table, thedb->dbs[i]->tablename,
                sizeof(td[idx].table) - 1);
        td[idx].table[sizeof(td[idx].table) - 1] = '\0';
        /* dispatch analyze table thread */
        rc = dispatch_table_thread(&td[idx]);
        if (0 != rc) {
            failed = 1;
            logmsg(LOGMSG_ERROR,
                   "Couldn't start a table-thread for table '%s' rc=%d\n",
                   td[idx].table, rc);
            break;
        }
        idx++;
    }
    /* wait for every dispatched thread */
    for (i = 0; i < idx; i++) {
        int lrc = wait_for_table(&td[i]);
        if (lrc)
            failed = 1;
    }
    /* tell comdb2sc the results */
    if (failed) {
        sbuf2printf(sb, "FAILED\n");
    } else {
        sbuf2printf(sb, "SUCCESS\n");
        cleanup_stats(sb);
    }
    sbuf2flush(sb);
    /* free descriptor */
    free(td);
    /* reset running flag */
    analyze_running_flag = 0;
    return rc;
}
/* Dump current analyze configuration and thread counters to the log. */
int analyze_dump_stats(void)
{
    const char *sampled = sampled_tables_enabled ? "Enabled" : "Disabled";
    logmsg(LOGMSG_USER, "Sampled tables: %s\n", sampled);
    logmsg(LOGMSG_USER, "Sampling threshold: %lld bytes\n", sampling_threshold);
    logmsg(LOGMSG_USER, "Max Analyze table-threads: %d threads\n",
           analyze_max_table_threads);
    logmsg(LOGMSG_USER, "Current Analyze table-threads: %d threads\n",
           analyze_cur_table_threads);
    logmsg(LOGMSG_USER, "Max Analyze sampling-threads: %d threads\n",
           analyze_max_comp_threads);
    logmsg(LOGMSG_USER, "Current Analyze sampling-threads: %d threads\n",
           analyze_cur_comp_threads);
    logmsg(LOGMSG_USER, "Current Analyze counter: %d \n", gbl_analyze_gen);
    return 0;
}
/* enable use of sampled (summarized) indices during analyze */
void analyze_enable_sampled_indicies(void) { sampled_tables_enabled = 1; }
/* disable use of sampled (summarized) indices during analyze */
void analyze_disable_sampled_indicies(void) { sampled_tables_enabled = 0; }
/* set sampling threshold (tunable callback); rejects negative values.
 * NOTE(review): the payload is read as an int even though
 * sampling_threshold is long long -- presumably the tunable is registered
 * with an int value; confirm against the tunables registration. */
int analyze_set_sampling_threshold(void *context, void *thresh)
{
    long long _thresh = *(int *)thresh;
    if (_thresh < 0) {
        logmsg(LOGMSG_ERROR, "%s: Invalid value for sampling threshold\n",
               __func__);
        return 1;
    }
    sampling_threshold = _thresh;
    return 0;
}
/* get the table-size threshold (bytes) above which indices are sampled */
long long analyze_get_sampling_threshold(void) { return sampling_threshold; }
/* Set the maximum number of concurrent index-sampling (compression)
 * threads.  The value must be at least 1 and no more than the hard
 * maximum; returns 0 on success, -1 on a rejected value. */
int analyze_set_max_comp_threads(int maxthd)
{
    if (maxthd < 1) {
        printf("%s: invalid value for maxthd\n", __func__);
        return -1;
    }
    if (maxthd > analyze_hard_max_comp_threads) {
        printf("%s: hard-maximum is %d\n", __func__,
               analyze_hard_max_comp_threads);
        return -1;
    }
    analyze_max_comp_threads = maxthd;
    return 0;
}
/* Set the maximum number of concurrent analyze-table threads (tunable
 * callback).  The value must be at least 1 and no more than the hard
 * maximum; returns 0 on success, 1 on a rejected value. */
int analyze_set_max_table_threads(void *context, void *maxthd)
{
    int requested = *(int *)maxthd;
    if (requested < 1) {
        logmsg(LOGMSG_ERROR, "%s: invalid value for maxthd\n", __func__);
        return 1;
    }
    if (requested > analyze_hard_max_table_threads) {
        logmsg(LOGMSG_ERROR, "%s: hard-maximum is %d\n", __func__,
               analyze_hard_max_table_threads);
        return 1;
    }
    analyze_max_table_threads = requested;
    return 0;
}
/* Set the maximum number of concurrent index-sampling threads (tunable
 * callback).  The value must be at least 1 and no more than the hard
 * maximum; returns 0 on success, 1 on a rejected value. */
int analyze_set_max_sampling_threads(void *context, void *maxthd)
{
    int requested = *(int *)maxthd;
    if (requested < 1) {
        logmsg(LOGMSG_ERROR, "%s: invalid value for maxthd\n", __func__);
        return 1;
    }
    if (requested > analyze_hard_max_comp_threads) {
        logmsg(LOGMSG_ERROR, "%s: hard-maximum is %d\n", __func__,
               analyze_hard_max_comp_threads);
        return 1;
    }
    analyze_max_comp_threads = requested;
    return 0;
}
/* return 1 if an analyze is currently running, 0 otherwise */
int analyze_is_running(void) { return analyze_running_flag; }
/* Message-trap thread entry: run a full-database analyze with the default
 * coverage percentage, writing status to stdout. */
void *message_trap_td(void *args)
{
    /* sbuf wrapping stdout (fd 1) */
    SBUF2 *out = sbuf2open(1, 0);
    /* analyze the database; scale override disabled (use llmeta values) */
    analyze_database(out,
                     bdb_attr_get(thedb->bdb_attr,
                                  BDB_ATTR_DEFAULT_ANALYZE_PERCENT),
                     0);
    sbuf2flush(out);
    sbuf2free(out);
    return NULL;
}
/* Restore the previous ('cdb2.<tbl>.sav') analyze stats for one table
 * inside a single transaction; sqlite_stat tables themselves are skipped.
 * Returns 0 on success, the first failing rc otherwise (after ROLLBACK). */
static inline int analyze_backout_table(struct sqlclntstate *clnt, char *table)
{
    if (is_sqlite_stat(table))
        return 0;
    int rc = run_internal_sql_clnt(clnt, "BEGIN");
    if (rc)
        return rc;
    rc = backout_stats_frm_tbl(clnt, table, 1);
    if (rc == 0 && get_dbtable_by_name("sqlite_stat2"))
        rc = backout_stats_frm_tbl(clnt, table, 2);
    if (rc == 0 && get_dbtable_by_name("sqlite_stat4"))
        rc = backout_stats_frm_tbl(clnt, table, 4);
    if (rc == 0)
        return run_internal_sql_clnt(clnt, "COMMIT");
    logmsg(LOGMSG_ERROR, "backout error, rolling back transaction\n");
    run_internal_sql_clnt(clnt, "ROLLBACK");
    return rc;
}
/* Restore previous analyze stats for one table (or, when table is NULL,
 * every table in the db), holding the schema read-lock throughout.
 * Reports SUCCESS/FAILED on sb.
 * Fix: user-facing typo "occured" -> "occurred". */
void handle_backout(SBUF2 *sb, char *table)
{
    if (check_stat1_and_flag(sb))
        return;
    struct sqlclntstate clnt;
    start_internal_sql_clnt(&clnt);
    SBUF2 *sb2 = sbuf2open(fileno(stdout), 0);
    int rc = 0;
    clnt.sb = sb2;
    rdlock_schema_lk();
    if (table) {
        rc = analyze_backout_table(&clnt, table);
    } else {
        /* back out every table, stopping at the first failure */
        int i = 0;
        while (i < thedb->num_dbs && rc == 0) {
            rc = analyze_backout_table(&clnt, thedb->dbs[i]->tablename);
            i++;
        }
    }
    unlock_schema_lk();
    sbuf2flush(sb2);
    sbuf2free(sb2);
    if (rc == 0)
        sbuf2printf(sb, "SUCCESS\n");
    else {
        sbuf2printf(sb, "?Error occurred with query: '%s'\n", clnt.sql);
        sbuf2printf(sb, "FAILED\n");
    }
    end_internal_sql_clnt(&clnt);
    sbuf2flush(sb);
}
/* Add stats for indices of table newname by copying from entries for index
 * oldname.  Used when renaming an index/table; no-op when sqlite_stat1
 * does not exist, and stat2/stat4 are copied only when present. */
void add_idx_stats(const char *tbl, const char *oldname, const char *newname)
{
    char sql[256];
    if (get_dbtable_by_name("sqlite_stat1") == NULL)
        return; /* stat1 does not exist, nothing to do */
    snprintf(sql, sizeof(sql),
             "INSERT INTO sqlite_stat1 select tbl, '%s' as idx, stat FROM "
             "sqlite_stat1 WHERE tbl='%s' and idx='%s' \n",
             newname, tbl, oldname);
    run_internal_sql(sql);
    if (get_dbtable_by_name("sqlite_stat2") != NULL) {
        snprintf(sql, sizeof(sql),
                 "INSERT INTO sqlite_stat2 select tbl, '%s' as idx, sampleno, "
                 "sample FROM sqlite_stat2 WHERE tbl='%s' and idx='%s' \n",
                 newname, tbl, oldname);
        run_internal_sql(sql);
    }
    if (get_dbtable_by_name("sqlite_stat4") != NULL) {
        snprintf(sql, sizeof(sql),
                 "INSERT INTO sqlite_stat4 select tbl, '%s' as idx, neq, nlt, "
                 "ndlt, sample FROM sqlite_stat4 WHERE tbl='%s' and idx='%s' "
                 "\n",
                 newname, tbl, oldname);
        run_internal_sql(sql);
    }
}
/* Run analyze on one table (or the whole database when tbl is NULL).
 * percent == 0 means "use the configured default percentage and do not
 * override the llmeta-saved scale".  Status goes to stdout; returns the
 * analyze rc. */
int do_analyze(char *tbl, int percent)
{
    SBUF2 *out = sbuf2open(fileno(stdout), 0);
    int override_llmeta = 1;
    if (percent == 0) {
        override_llmeta = 0;
        percent =
            bdb_attr_get(thedb->bdb_attr, BDB_ATTR_DEFAULT_ANALYZE_PERCENT);
    }
    int rc = (tbl == NULL)
                 ? analyze_database(out, percent, override_llmeta)
                 : analyze_table(tbl, out, percent, override_llmeta);
    sbuf2flush(out);
    sbuf2free(out);
    return rc;
}
|
13649.c | /*
* Copyright (c) 2020 Demant
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/types.h>
#include <bluetooth/hci.h>
#include <sys/byteorder.h>
#include <sys/slist.h>
#include <sys/util.h>
#include "hal/ccm.h"
#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/dbuf.h"
#include "pdu.h"
#include "ll.h"
#include "ll_settings.h"
#include "lll.h"
#include "ll_feat.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "ull_tx_queue.h"
#include "ull_conn_types.h"
#include "ull_chan_internal.h"
#include "ull_llcp.h"
#include "ull_conn_internal.h"
#include "ull_internal.h"
#include "ull_llcp_features.h"
#include "ull_llcp_internal.h"
#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_DRIVER)
#define LOG_MODULE_NAME bt_ctlr_ull_llcp_common
#include "common/log.h"
#include <soc.h>
#include "hal/debug.h"
/* LLCP Local Procedure FSM states */
enum {
    /* No procedure in progress */
    LP_COMMON_STATE_IDLE,
    /* Waiting for a tx buffer to become available */
    LP_COMMON_STATE_WAIT_TX,
    /* Waiting for the peer to ack our PDU */
    LP_COMMON_STATE_WAIT_TX_ACK,
    /* Waiting for the peer's response PDU */
    LP_COMMON_STATE_WAIT_RX,
    /* Waiting for a notification buffer towards the host */
    LP_COMMON_STATE_WAIT_NTF,
};
/* LLCP Local Procedure Common FSM events */
enum {
    /* Procedure run */
    LP_COMMON_EVT_RUN,
    /* Response received */
    LP_COMMON_EVT_RESPONSE,
    /* Reject response received */
    LP_COMMON_EVT_REJECT,
    /* Unknown response received */
    LP_COMMON_EVT_UNKNOWN,
    /* Instant collision detected */
    LP_COMMON_EVT_COLLISION,
    /* Ack received */
    LP_COMMON_EVT_ACK,
};
/* LLCP Remote Procedure Common FSM states */
enum {
    /* No remote request being served */
    RP_COMMON_STATE_IDLE,
    /* Waiting for the peer's request PDU */
    RP_COMMON_STATE_WAIT_RX,
    /* Waiting for a tx buffer for our response */
    RP_COMMON_STATE_WAIT_TX,
    /* Waiting for the peer to ack our response */
    RP_COMMON_STATE_WAIT_TX_ACK,
    /* Waiting for a notification buffer towards the host */
    RP_COMMON_STATE_WAIT_NTF,
};
/* LLCP Remote Procedure Common FSM events */
enum {
    /* Procedure run */
    RP_COMMON_EVT_RUN,
    /* Ack received */
    RP_COMMON_EVT_ACK,
    /* Request received */
    RP_COMMON_EVT_REQUEST,
};
/*
* LLCP Local Procedure Common FSM
*/
/* Encode and enqueue the request PDU for the locally initiated procedure in
 * ctx->proc, record the opcode we expect back in ctx->rx_opcode, and reload
 * the procedure-response timeout. For procedures with no response
 * (MIN_USED_CHANS, TERMINATE) the tx node is remembered in ctx->tx_ack so the
 * procedure completes on tx-ack instead.
 */
static void lp_comm_tx(struct ll_conn *conn, struct proc_ctx *ctx)
{
    struct node_tx *tx;
    struct pdu_data *pdu;
    /* Allocate tx node; caller must have checked llcp_tx_alloc_peek() first */
    tx = llcp_tx_alloc(conn, ctx);
    LL_ASSERT(tx);
    pdu = (struct pdu_data *)tx->pdu;
    /* Encode LL Control PDU */
    switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
    case PROC_LE_PING:
        llcp_pdu_encode_ping_req(pdu);
        ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_PING_RSP;
        break;
#endif /* CONFIG_BT_CTLR_LE_PING */
    case PROC_FEATURE_EXCHANGE:
        llcp_pdu_encode_feature_req(conn, pdu);
        ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_FEATURE_RSP;
        break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
    case PROC_MIN_USED_CHANS:
        llcp_pdu_encode_min_used_chans_ind(ctx, pdu);
        /* No response PDU exists; complete on the peer's ack */
        ctx->tx_ack = tx;
        ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
        break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
    case PROC_VERSION_EXCHANGE:
        llcp_pdu_encode_version_ind(pdu);
        ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_VERSION_IND;
        break;
    case PROC_TERMINATE:
        llcp_pdu_encode_terminate_ind(ctx, pdu);
        /* No response PDU exists; complete on the peer's ack */
        ctx->tx_ack = tx;
        ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
        break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
    case PROC_DATA_LENGTH_UPDATE:
        llcp_pdu_encode_length_req(conn, pdu);
        ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_LENGTH_RSP;
        break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
    case PROC_CTE_REQ:
        llcp_pdu_encode_cte_req(ctx, pdu);
        ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_CTE_RSP;
        break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
    default:
        /* Unknown procedure */
        LL_ASSERT(0);
    }
    ctx->tx_opcode = pdu->llctrl.opcode;
    /* Enqueue LL Control PDU towards LLL */
    llcp_tx_enqueue(conn, tx);
    /* Update procedure timeout. For TERMINATE supervision_timeout is used */
    ull_conn_prt_reload(conn, (ctx->proc != PROC_TERMINATE) ? conn->procedure_reload :
                        conn->supervision_reload);
}
/* Fill the host notification PDU for a completed feature exchange, based on
 * which response opcode terminated the procedure.
 */
static void lp_comm_ntf_feature_exchange(struct ll_conn *conn, struct proc_ctx *ctx,
                                         struct pdu_data *pdu)
{
    switch (ctx->response_opcode) {
    case PDU_DATA_LLCTRL_TYPE_FEATURE_RSP:
        llcp_ntf_encode_feature_rsp(conn, pdu);
        break;
    case PDU_DATA_LLCTRL_TYPE_PER_INIT_FEAT_XCHG:
    case PDU_DATA_LLCTRL_TYPE_FEATURE_REQ:
        /*
         * No notification on feature-request or periph-feature request
         * TODO: probably handle as an unexpected call
         */
        break;
    case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
        /* Peer does not support the procedure; tell the host */
        llcp_ntf_encode_unknown_rsp(ctx, pdu);
        break;
    default:
        /* TODO: define behaviour for unexpected PDU */
        LL_ASSERT(0);
    }
}
/* Fill the host notification PDU for a completed version exchange.
 * Only LL_VERSION_IND is a legal terminating opcode here.
 */
static void lp_comm_ntf_version_ind(struct ll_conn *conn, struct proc_ctx *ctx,
                                    struct pdu_data *pdu)
{
    if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_VERSION_IND) {
        llcp_ntf_encode_version_ind(conn, pdu);
        return;
    }
    /* TODO: define behaviour for unexpected PDU */
    LL_ASSERT(0);
}
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* Fill the host notification PDU after a data length update (local side). */
static void lp_comm_ntf_length_change(struct ll_conn *conn, struct proc_ctx *ctx,
                                      struct pdu_data *pdu)
{
    llcp_ntf_encode_length_change(conn, pdu);
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
/* Final bookkeeping for a finished local CTE request procedure: un-pause the
 * remote-procedure command gate, mark the local procedure complete and clear
 * the active flag.
 */
static void lp_comm_complete_cte_req_finalize(struct ll_conn *conn)
{
    llcp_rr_set_paused_cmd(conn, PROC_NONE);
    llcp_lr_complete(conn);
    conn->llcp.cte_req.is_active = 0U;
    /* If disable_cb is not NULL then there is waiting CTE REQ disable request
     * from host. Execute the callback to notify waiting thread that the
     * procedure is inactive.
     */
    if (conn->llcp.cte_req.disable_cb) {
        conn->llcp.cte_req.disable_cb(conn->llcp.cte_req.disable_param);
    }
}
/* Fill the host notification PDU for a completed CTE request procedure:
 * either a CTE response that carried no CTE, or a reject from the peer.
 */
static void lp_comm_ntf_cte_req(struct ll_conn *conn, struct proc_ctx *ctx, struct pdu_data *pdu)
{
    switch (ctx->response_opcode) {
    case PDU_DATA_LLCTRL_TYPE_CTE_RSP:
        /* Notify host that received LL_CTE_RSP does not have CTE */
        if (!ctx->data.cte_remote_rsp.has_cte) {
            llcp_ntf_encode_cte_req(pdu);
        }
        break;
    case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
        llcp_ntf_encode_reject_ext_ind(ctx, pdu);
        break;
    default:
        /* TODO (ppryga): Update when behavior for unexpected PDU is defined */
        LL_ASSERT(0);
    }
}
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
/* Allocate a notification node, encode the procedure-specific result into it
 * and hand it to the LL rx path towards the host. Caller must have verified
 * llcp_ntf_alloc_is_available() beforehand (allocation is asserted here).
 */
static void lp_comm_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
    struct node_rx_pdu *ntf;
    struct pdu_data *pdu;
    /* Allocate ntf node */
    ntf = llcp_ntf_alloc();
    LL_ASSERT(ntf);
    ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
    ntf->hdr.handle = conn->lll.handle;
    pdu = (struct pdu_data *)ntf->pdu;
    /* Dispatch to the per-procedure encoder */
    switch (ctx->proc) {
    case PROC_FEATURE_EXCHANGE:
        lp_comm_ntf_feature_exchange(conn, ctx, pdu);
        break;
    case PROC_VERSION_EXCHANGE:
        lp_comm_ntf_version_ind(conn, ctx, pdu);
        break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
    case PROC_DATA_LENGTH_UPDATE:
        lp_comm_ntf_length_change(conn, ctx, pdu);
        break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
    case PROC_CTE_REQ:
        lp_comm_ntf_cte_req(conn, ctx, pdu);
        break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
    default:
        LL_ASSERT(0);
        break;
    }
    /* Enqueue notification towards LL */
    ll_rx_put(ntf->hdr.link, ntf);
    ll_rx_sched();
}
/* Complete a locally initiated common procedure: generate the host
 * notification where required (possibly parking in WAIT_NTF until a buffer is
 * free), mark the local request done and return the FSM to IDLE. Procedure-
 * specific side effects (termination reason, DLE bookkeeping, CTE finalize)
 * happen here as well.
 */
static void lp_comm_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
    switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
    case PROC_LE_PING:
        /* No host notification for ping; both RSP and UNKNOWN_RSP finish it */
        if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP ||
            ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_PING_RSP) {
            llcp_lr_complete(conn);
            ctx->state = LP_COMMON_STATE_IDLE;
        } else {
            /* Illegal response opcode */
            LL_ASSERT(0);
        }
        break;
#endif /* CONFIG_BT_CTLR_LE_PING */
    case PROC_FEATURE_EXCHANGE:
        /* Host must be notified; wait for a ntf buffer if none is free */
        if (!llcp_ntf_alloc_is_available()) {
            ctx->state = LP_COMMON_STATE_WAIT_NTF;
        } else {
            lp_comm_ntf(conn, ctx);
            llcp_lr_complete(conn);
            ctx->state = LP_COMMON_STATE_IDLE;
        }
        break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
    case PROC_MIN_USED_CHANS:
        /* Indication only; completes on tx-ack with no notification */
        llcp_lr_complete(conn);
        ctx->state = LP_COMMON_STATE_IDLE;
        break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
    case PROC_VERSION_EXCHANGE:
        if (!llcp_ntf_alloc_is_available()) {
            ctx->state = LP_COMMON_STATE_WAIT_NTF;
        } else {
            lp_comm_ntf(conn, ctx);
            llcp_lr_complete(conn);
            ctx->state = LP_COMMON_STATE_IDLE;
        }
        break;
    case PROC_TERMINATE:
        /* No notification */
        llcp_lr_complete(conn);
        ctx->state = LP_COMMON_STATE_IDLE;
        /* Mark the connection for termination */
        conn->llcp_terminate.reason_final = BT_HCI_ERR_LOCALHOST_TERM_CONN;
        break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
    case PROC_DATA_LENGTH_UPDATE:
        if (ctx->response_opcode != PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP) {
            /* Apply changes in data lengths/times */
            uint8_t dle_changed = ull_dle_update_eff(conn);
            if (dle_changed && !llcp_ntf_alloc_is_available()) {
                /* We need to generate NTF but no buffers avail so wait for one */
                ctx->state = LP_COMMON_STATE_WAIT_NTF;
            } else {
                if (dle_changed) {
                    lp_comm_ntf(conn, ctx);
                }
                llcp_lr_complete(conn);
                ctx->state = LP_COMMON_STATE_IDLE;
            }
        } else {
            /* Peer does not accept DLU, so disable on current connection */
            feature_unmask_features(conn, LL_FEAT_BIT_DLE);
            llcp_lr_complete(conn);
            ctx->state = LP_COMMON_STATE_IDLE;
        }
        if (!ull_cp_remote_dle_pending(conn)) {
            /* Resume data, but only if there is no remote procedure pending RSP
             * in which case, the RSP tx-ACK will resume data
             */
            llcp_tx_resume_data(conn);
        }
        break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
    case PROC_CTE_REQ:
        if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_CTE_RSP) {
            if (ctx->data.cte_remote_rsp.has_cte) {
                /* Got a CTE: re-arm periodic request timer if configured */
                if (conn->llcp.cte_req.req_interval != 0U) {
                    conn->llcp.cte_req.req_expire =
                        conn->llcp.cte_req.req_interval;
                }
                ctx->state = LP_COMMON_STATE_IDLE;
            } else if (llcp_ntf_alloc_is_available()) {
                /* RSP without CTE: notify host and stop requesting */
                lp_comm_ntf(conn, ctx);
                ull_cp_cte_req_set_disable(conn);
                ctx->state = LP_COMMON_STATE_IDLE;
            } else {
                ctx->state = LP_COMMON_STATE_WAIT_NTF;
            }
        } else if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND &&
                   ctx->reject_ext_ind.reject_opcode == PDU_DATA_LLCTRL_TYPE_CTE_REQ) {
            if (llcp_ntf_alloc_is_available()) {
                lp_comm_ntf(conn, ctx);
                ull_cp_cte_req_set_disable(conn);
                ctx->state = LP_COMMON_STATE_IDLE;
            } else {
                ctx->state = LP_COMMON_STATE_WAIT_NTF;
            }
        } else if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_UNUSED) {
            /* This path is related with handling disable the CTE REQ when PHY
             * has been changed to CODED PHY. BT 5.3 Core Vol 4 Part E 7.8.85
             * says CTE REQ has to be automatically disabled as if it had been requested
             * by Host. There is no notification send to Host.
             */
            ull_cp_cte_req_set_disable(conn);
            ctx->state = LP_COMMON_STATE_IDLE;
        }
        if (ctx->state == LP_COMMON_STATE_IDLE) {
            lp_comm_complete_cte_req_finalize(conn);
        }
        break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
    default:
        /* Unknown procedure */
        LL_ASSERT(0);
    }
}
/* Try to transmit the request PDU for the current local procedure. If tx is
 * not possible right now (procedure paused, no tx buffer, or a procedure-
 * specific gate) the FSM parks in WAIT_TX and retries on the next RUN event;
 * otherwise the PDU is sent and the FSM moves to WAIT_RX or WAIT_TX_ACK as
 * appropriate for the procedure.
 */
static void lp_comm_send_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
    switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
    case PROC_LE_PING:
        if (ctx->pause || !llcp_tx_alloc_peek(conn, ctx)) {
            ctx->state = LP_COMMON_STATE_WAIT_TX;
        } else {
            lp_comm_tx(conn, ctx);
            ctx->state = LP_COMMON_STATE_WAIT_RX;
        }
        break;
#endif /* CONFIG_BT_CTLR_LE_PING */
    case PROC_FEATURE_EXCHANGE:
        if (ctx->pause || !llcp_tx_alloc_peek(conn, ctx)) {
            ctx->state = LP_COMMON_STATE_WAIT_TX;
        } else {
            lp_comm_tx(conn, ctx);
            conn->llcp.fex.sent = 1;
            ctx->state = LP_COMMON_STATE_WAIT_RX;
        }
        break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
    case PROC_MIN_USED_CHANS:
        if (ctx->pause || !llcp_tx_alloc_peek(conn, ctx)) {
            ctx->state = LP_COMMON_STATE_WAIT_TX;
        } else {
            /* Indication only: completes on tx-ack, not on a response */
            lp_comm_tx(conn, ctx);
            ctx->state = LP_COMMON_STATE_WAIT_TX_ACK;
        }
        break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
    case PROC_VERSION_EXCHANGE:
        /* The Link Layer shall only queue for transmission a maximum of
         * one LL_VERSION_IND PDU during a connection.
         */
        if (!conn->llcp.vex.sent) {
            if (ctx->pause || !llcp_tx_alloc_peek(conn, ctx)) {
                ctx->state = LP_COMMON_STATE_WAIT_TX;
            } else {
                lp_comm_tx(conn, ctx);
                conn->llcp.vex.sent = 1;
                ctx->state = LP_COMMON_STATE_WAIT_RX;
            }
        } else {
            /* Already exchanged: complete immediately from cached data */
            ctx->response_opcode = PDU_DATA_LLCTRL_TYPE_VERSION_IND;
            lp_comm_complete(conn, ctx, evt, param);
        }
        break;
    case PROC_TERMINATE:
        /* Note: termination is not gated on ctx->pause */
        if (!llcp_tx_alloc_peek(conn, ctx)) {
            ctx->state = LP_COMMON_STATE_WAIT_TX;
        } else {
            lp_comm_tx(conn, ctx);
            ctx->state = LP_COMMON_STATE_WAIT_TX_ACK;
        }
        break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
    case PROC_DATA_LENGTH_UPDATE:
        if (!ull_cp_remote_dle_pending(conn)) {
            if (ctx->pause || !llcp_tx_alloc_peek(conn, ctx)) {
                ctx->state = LP_COMMON_STATE_WAIT_TX;
            } else {
                /* Pause data tx, to ensure we can later (on RSP rx-ack)
                 * update DLE without conflicting with out-going LL Data PDUs
                 * See BT Core 5.2 Vol6: B-4.5.10 & B-5.1.9
                 */
                llcp_tx_pause_data(conn);
                lp_comm_tx(conn, ctx);
                ctx->state = LP_COMMON_STATE_WAIT_RX;
            }
        } else {
            /* REQ was received from peer and RSP not yet sent
             * lets piggy-back on RSP instead af sending REQ
             * thus we can complete local req
             */
            llcp_lr_complete(conn);
            ctx->state = LP_COMMON_STATE_IDLE;
        }
        break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
    case PROC_CTE_REQ:
#if defined(CONFIG_BT_CTLR_PHY)
        if (conn->lll.phy_rx != PHY_CODED) {
#else
        if (1) {
#endif /* CONFIG_BT_CTLR_PHY */
            if (ctx->pause || !llcp_tx_alloc_peek(conn, ctx) ||
                (llcp_rr_get_paused_cmd(conn) == PROC_CTE_REQ)) {
                ctx->state = LP_COMMON_STATE_WAIT_TX;
            } else {
                lp_comm_tx(conn, ctx);
                ctx->state = LP_COMMON_STATE_WAIT_RX;
            }
        } else {
            /* The PHY was changed to CODED when the request was waiting in a local
             * request queue.
             *
             * Use of pair: proc PROC_CTE_REQ and rx_opcode PDU_DATA_LLCTRL_TYPE_UNUSED
             * to complete the procedure before sending a request to peer.
             * This is a special complete execution path to disable the procedure
             * due to change of RX PHY to CODED.
             */
            ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
            ctx->state = LP_COMMON_STATE_IDLE;
            llcp_lr_complete(conn);
        }
        break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
    default:
        /* Unknown procedure */
        LL_ASSERT(0);
    }
}
/* IDLE state handler: a RUN event kicks off the request (or parks in WAIT_TX
 * while the procedure is paused); every other event is ignored.
 */
static void lp_comm_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
    if (evt != LP_COMMON_EVT_RUN) {
        /* Ignore other evts */
        return;
    }
    if (ctx->pause) {
        ctx->state = LP_COMMON_STATE_WAIT_TX;
    } else {
        lp_comm_send_req(conn, ctx, evt, param);
    }
}
/* WAIT_TX state handler: each RUN event retries sending the request; other
 * events are ignored.
 */
static void lp_comm_st_wait_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
    if (evt == LP_COMMON_EVT_RUN) {
        lp_comm_send_req(conn, ctx, evt, param);
    }
    /* Ignore other evts */
}
/* WAIT_TX_ACK state handler: procedures without a response PDU
 * (MIN_USED_CHANS, TERMINATE) complete when the peer acks our transmission.
 */
static void lp_comm_st_wait_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
                                   void *param)
{
    switch (evt) {
    case LP_COMMON_EVT_ACK:
        switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
        case PROC_MIN_USED_CHANS:
            ctx->tx_ack = NULL;
            lp_comm_complete(conn, ctx, evt, param);
            break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
        case PROC_TERMINATE:
            ctx->tx_ack = NULL;
            lp_comm_complete(conn, ctx, evt, param);
            break;
        default:
            /* Ignore for other procedures */
            break;
        }
        break;
    default:
        /* Ignore other evts */
        break;
    }
    /* TODO */
}
/* Decode a response PDU received while a local common procedure is running:
 * record the opcode in ctx->response_opcode and unpack any payload into
 * ctx/conn state. Unexpected opcodes assert.
 */
static void lp_comm_rx_decode(struct ll_conn *conn, struct proc_ctx *ctx, struct pdu_data *pdu)
{
    ctx->response_opcode = pdu->llctrl.opcode;
    switch (pdu->llctrl.opcode) {
#if defined(CONFIG_BT_CTLR_LE_PING)
    case PDU_DATA_LLCTRL_TYPE_PING_RSP:
        /* ping_rsp has no data */
        break;
#endif /* CONFIG_BT_CTLR_LE_PING */
    case PDU_DATA_LLCTRL_TYPE_FEATURE_RSP:
        llcp_pdu_decode_feature_rsp(conn, pdu);
        break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
    case PDU_DATA_LLCTRL_TYPE_MIN_USED_CHAN_IND:
        /* No response expected */
        break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */
    case PDU_DATA_LLCTRL_TYPE_VERSION_IND:
        llcp_pdu_decode_version_ind(conn, pdu);
        break;
    case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
        /* Peer does not know our request opcode */
        llcp_pdu_decode_unknown_rsp(ctx, pdu);
        break;
    case PDU_DATA_LLCTRL_TYPE_TERMINATE_IND:
        /* No response expected */
        LL_ASSERT(0);
        break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
    case PDU_DATA_LLCTRL_TYPE_LENGTH_RSP:
        llcp_pdu_decode_length_rsp(conn, pdu);
        break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
    case PDU_DATA_LLCTRL_TYPE_CTE_RSP:
        llcp_pdu_decode_cte_rsp(ctx, pdu);
        break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
    case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
        llcp_pdu_decode_reject_ext_ind(ctx, pdu);
        break;
    default:
        /* Unknown opcode */
        LL_ASSERT(0);
    }
}
/* WAIT_RX state handler: a RESPONSE event carries the peer's PDU in 'param';
 * decode it and run the completion logic. Anything else is ignored.
 */
static void lp_comm_st_wait_rx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
    if (evt != LP_COMMON_EVT_RESPONSE) {
        /* Ignore other evts */
        return;
    }
    lp_comm_rx_decode(conn, ctx, (struct pdu_data *)param);
    lp_comm_complete(conn, ctx, evt, param);
}
/* WAIT_NTF state handler: on each RUN event retry delivering the pending host
 * notification; once a ntf buffer is available, finish the procedure.
 */
static void lp_comm_st_wait_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
                                void *param)
{
    /* TODO */
    switch (evt) {
    case LP_COMMON_EVT_RUN:
        switch (ctx->proc) {
        case PROC_FEATURE_EXCHANGE:
        case PROC_VERSION_EXCHANGE:
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
        case PROC_DATA_LENGTH_UPDATE:
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
            if (llcp_ntf_alloc_is_available()) {
                lp_comm_ntf(conn, ctx);
                llcp_lr_complete(conn);
                ctx->state = LP_COMMON_STATE_IDLE;
            }
            break;
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
        case PROC_CTE_REQ:
            if (llcp_ntf_alloc_is_available()) {
                lp_comm_ntf(conn, ctx);
                ctx->state = LP_COMMON_STATE_IDLE;
                /* CTE REQ has extra finalization (unpause, disable-cb) */
                lp_comm_complete_cte_req_finalize(conn);
            }
            break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
        default:
            break;
        }
        break;
    default:
        break;
    }
}
/* Dispatch an event to the handler for the local FSM's current state. */
static void lp_comm_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
                                void *param)
{
    switch (ctx->state) {
    case LP_COMMON_STATE_IDLE:
        lp_comm_st_idle(conn, ctx, evt, param);
        break;
    case LP_COMMON_STATE_WAIT_TX:
        lp_comm_st_wait_tx(conn, ctx, evt, param);
        break;
    case LP_COMMON_STATE_WAIT_TX_ACK:
        lp_comm_st_wait_tx_ack(conn, ctx, evt, param);
        break;
    case LP_COMMON_STATE_WAIT_RX:
        lp_comm_st_wait_rx(conn, ctx, evt, param);
        break;
    case LP_COMMON_STATE_WAIT_NTF:
        lp_comm_st_wait_ntf(conn, ctx, evt, param);
        break;
    default:
        /* Unknown state */
        LL_ASSERT(0);
    }
}
/* Public entry: peer acked one of our control PDUs. */
void llcp_lp_comm_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, struct node_tx *tx)
{
    lp_comm_execute_fsm(conn, ctx, LP_COMMON_EVT_ACK, tx->pdu);
}
/* Public entry: a response PDU arrived for the running local procedure. */
void llcp_lp_comm_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx)
{
    lp_comm_execute_fsm(conn, ctx, LP_COMMON_EVT_RESPONSE, rx->pdu);
}
/* Public entry: reset a procedure context before (re)use. */
void llcp_lp_comm_init_proc(struct proc_ctx *ctx)
{
    ctx->state = LP_COMMON_STATE_IDLE;
}
/* Public entry: give the local FSM a chance to make progress. */
void llcp_lp_comm_run(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
{
    lp_comm_execute_fsm(conn, ctx, LP_COMMON_EVT_RUN, param);
}
/*
* LLCP Remote Procedure Common FSM
*/
/* Decode a request PDU received from the peer for a remotely initiated common
 * procedure: record the opcode and unpack the payload. Unexpected opcodes
 * assert.
 */
static void rp_comm_rx_decode(struct ll_conn *conn, struct proc_ctx *ctx, struct pdu_data *pdu)
{
    ctx->response_opcode = pdu->llctrl.opcode;
    switch (pdu->llctrl.opcode) {
#if defined(CONFIG_BT_CTLR_LE_PING)
    case PDU_DATA_LLCTRL_TYPE_PING_REQ:
        /* ping_req has no data */
        break;
#endif /* CONFIG_BT_CTLR_LE_PING */
#if defined(CONFIG_BT_PERIPHERAL)
    case PDU_DATA_LLCTRL_TYPE_FEATURE_REQ:
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) && defined(CONFIG_BT_CENTRAL)
    case PDU_DATA_LLCTRL_TYPE_PER_INIT_FEAT_XCHG:
#endif /* CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG && CONFIG_BT_CENTRAL */
        llcp_pdu_decode_feature_req(conn, pdu);
        break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_CENTRAL)
    case PDU_DATA_LLCTRL_TYPE_MIN_USED_CHAN_IND:
        llcp_pdu_decode_min_used_chans_ind(conn, pdu);
        break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_CENTRAL */
    case PDU_DATA_LLCTRL_TYPE_VERSION_IND:
        llcp_pdu_decode_version_ind(conn, pdu);
        break;
    case PDU_DATA_LLCTRL_TYPE_TERMINATE_IND:
        llcp_pdu_decode_terminate_ind(ctx, pdu);
        break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
    case PDU_DATA_LLCTRL_TYPE_LENGTH_REQ:
        llcp_pdu_decode_length_req(conn, pdu);
        /* On reception of REQ mark RSP open for local piggy-back
         * Pause data tx, to ensure we can later (on RSP tx ack) update DLE without
         * conflicting with out-going LL Data PDUs
         * See BT Core 5.2 Vol6: B-4.5.10 & B-5.1.9
         */
        llcp_tx_pause_data(conn);
        break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
    case PDU_DATA_LLCTRL_TYPE_CTE_REQ:
        llcp_pdu_decode_cte_req(ctx, pdu);
        break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
    default:
        /* Unknown opcode */
        LL_ASSERT(0);
    }
}
/* Encode and enqueue the response PDU for the remotely initiated procedure in
 * ctx->proc. For responses whose ack matters (LENGTH_RSP, CTE_RSP/reject) the
 * tx node is stored in ctx->tx_ack so completion can wait for the peer's ack.
 */
static void rp_comm_tx(struct ll_conn *conn, struct proc_ctx *ctx)
{
    struct node_tx *tx;
    struct pdu_data *pdu;
    /* Allocate tx node; caller must have checked llcp_tx_alloc_peek() first */
    tx = llcp_tx_alloc(conn, ctx);
    LL_ASSERT(tx);
    pdu = (struct pdu_data *)tx->pdu;
    /* Encode LL Control PDU */
    switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
    case PROC_LE_PING:
        llcp_pdu_encode_ping_rsp(pdu);
        ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_PING_RSP;
        break;
#endif /* CONFIG_BT_CTLR_LE_PING */
    case PROC_FEATURE_EXCHANGE:
        llcp_pdu_encode_feature_rsp(conn, pdu);
        ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_FEATURE_RSP;
        break;
    case PROC_VERSION_EXCHANGE:
        llcp_pdu_encode_version_ind(pdu);
        ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_VERSION_IND;
        break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
    case PROC_DATA_LENGTH_UPDATE:
        llcp_pdu_encode_length_rsp(conn, pdu);
        /* DLE is applied only after the peer acks this RSP */
        ctx->tx_ack = tx;
        ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_LENGTH_RSP;
        break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
    case PROC_CTE_REQ: {
        uint8_t err_code = 0;
        if (conn->llcp.cte_rsp.is_enabled == 0) {
            err_code = BT_HCI_ERR_UNSUPP_LL_PARAM_VAL;
        }
#if defined(CONFIG_BT_PHY_UPDATE)
        /* If the PHY update is not possible, then PHY1M is used.
         * CTE is supported for PHY1M.
         */
        if (conn->lll.phy_tx == PHY_CODED) {
            err_code = BT_HCI_ERR_INVALID_LL_PARAM;
        }
#endif /* CONFIG_BT_PHY_UPDATE */
        /* Reject if the requested CTE type/length is not enabled locally */
        if (!(conn->llcp.cte_rsp.cte_types & BIT(ctx->data.cte_remote_req.cte_type)) ||
            conn->llcp.cte_rsp.max_cte_len < ctx->data.cte_remote_req.min_cte_len) {
            err_code = BT_HCI_ERR_UNSUPP_LL_PARAM_VAL;
        }
        if (!err_code) {
            llcp_pdu_encode_cte_rsp(ctx, pdu);
            ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_CTE_RSP;
        } else {
            llcp_pdu_encode_reject_ext_ind(pdu, PDU_DATA_LLCTRL_TYPE_CTE_REQ, err_code);
            ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND;
        }
        ctx->tx_ack = tx;
        break;
    }
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
    default:
        /* Unknown procedure */
        LL_ASSERT(0);
    }
    ctx->tx_opcode = pdu->llctrl.opcode;
    /* Enqueue LL Control PDU towards LLL */
    llcp_tx_enqueue(conn, tx);
}
/* IDLE state handler (remote FSM): a RUN event arms the FSM to receive the
 * peer's request; all other events are ignored.
 */
static void rp_comm_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
    if (evt == RP_COMMON_EVT_RUN) {
        ctx->state = RP_COMMON_STATE_WAIT_RX;
    }
    /* Ignore other evts */
}
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* Fill the host notification PDU after a data length update (remote side). */
static void rp_comm_ntf_length_change(struct ll_conn *conn, struct proc_ctx *ctx,
                                      struct pdu_data *pdu)
{
    llcp_ntf_encode_length_change(conn, pdu);
}
/* Allocate a notification node for a completed remote procedure and hand it
 * to the LL rx path towards the host. Caller must have verified buffer
 * availability via llcp_ntf_alloc_is_available().
 */
static void rp_comm_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
    struct node_rx_pdu *ntf;
    struct pdu_data *pdu;
    ARG_UNUSED(pdu);
    /* Allocate ntf node */
    ntf = llcp_ntf_alloc();
    LL_ASSERT(ntf);
    ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
    ntf->hdr.handle = conn->lll.handle;
    pdu = (struct pdu_data *)ntf->pdu;
    switch (ctx->proc) {
    /* Note: the 'double' ifdef in case this switch case expands
     * in the future and the function is re-instated
     */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
    case PROC_DATA_LENGTH_UPDATE:
        rp_comm_ntf_length_change(conn, ctx, pdu);
        break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
    default:
        LL_ASSERT(0);
        break;
    }
    /* Enqueue notification towards LL */
    ll_rx_put(ntf->hdr.link, ntf);
    ll_rx_sched();
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
/* Act on a decoded remote request: send the response PDU where one exists
 * (parking in WAIT_TX if paused or no buffer), or complete immediately for
 * indication-style procedures (MIN_USED_CHANS, TERMINATE).
 */
static void rp_comm_send_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
    switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
    case PROC_LE_PING:
        /* Always respond on remote ping */
        if (ctx->pause || !llcp_tx_alloc_peek(conn, ctx)) {
            ctx->state = RP_COMMON_STATE_WAIT_TX;
        } else {
            rp_comm_tx(conn, ctx);
            llcp_rr_complete(conn);
            ctx->state = RP_COMMON_STATE_IDLE;
        }
        break;
#endif /* CONFIG_BT_CTLR_LE_PING */
    case PROC_FEATURE_EXCHANGE:
        /* Always respond on remote feature exchange */
        if (ctx->pause || !llcp_tx_alloc_peek(conn, ctx)) {
            ctx->state = RP_COMMON_STATE_WAIT_TX;
        } else {
            rp_comm_tx(conn, ctx);
            conn->llcp.fex.sent = 1;
            llcp_rr_complete(conn);
            ctx->state = RP_COMMON_STATE_IDLE;
        }
        break;
    case PROC_VERSION_EXCHANGE:
        /* The Link Layer shall only queue for transmission a maximum of one
         * LL_VERSION_IND PDU during a connection.
         */
        if (!conn->llcp.vex.sent) {
            if (ctx->pause || !llcp_tx_alloc_peek(conn, ctx)) {
                ctx->state = RP_COMMON_STATE_WAIT_TX;
            } else {
                rp_comm_tx(conn, ctx);
                conn->llcp.vex.sent = 1;
                llcp_rr_complete(conn);
                ctx->state = RP_COMMON_STATE_IDLE;
            }
        } else {
            /* Protocol Error.
             *
             * A procedure already sent a LL_VERSION_IND and received a LL_VERSION_IND.
             */
            /* TODO */
            LL_ASSERT(0);
        }
        break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_CENTRAL)
    case PROC_MIN_USED_CHANS:
        /*
         * Spec says (5.2, Vol.6, Part B, Section 5.1.11):
         * The procedure has completed when the Link Layer acknowledgment of the
         * LL_MIN_USED_CHANNELS_IND PDU is sent or received.
         * In effect, for this procedure, this is equivalent to RX of PDU
         */
        /* Inititate a chmap update, but only if acting as central, just in case ... */
        if (conn->lll.role == BT_HCI_ROLE_CENTRAL &&
            ull_conn_lll_phy_active(conn, conn->llcp.muc.phys)) {
            uint8_t chmap[5];
            ull_chan_map_get((uint8_t *const)chmap);
            ull_cp_chan_map_update(conn, chmap);
            /* TODO - what to do on failure of ull_cp_chan_map_update() */
        }
        /* No response */
        llcp_rr_complete(conn);
        ctx->state = RP_COMMON_STATE_IDLE;
        break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_CENTRAL */
    case PROC_TERMINATE:
        /* No response */
        llcp_rr_complete(conn);
        ctx->state = RP_COMMON_STATE_IDLE;
        /* Mark the connection for termination */
        conn->llcp_terminate.reason_final = ctx->data.term.error_code;
        break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
    case PROC_DATA_LENGTH_UPDATE:
        if (ctx->pause || !llcp_tx_alloc_peek(conn, ctx)) {
            ctx->state = RP_COMMON_STATE_WAIT_TX;
        } else {
            /* On RSP tx close the window for possible local req piggy-back */
            rp_comm_tx(conn, ctx);
            /* Wait for the peer to have ack'ed the RSP before updating DLE */
            ctx->state = RP_COMMON_STATE_WAIT_TX_ACK;
        }
        break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
    case PROC_CTE_REQ:
        if (ctx->pause || !llcp_tx_alloc_peek(conn, ctx) ||
            (llcp_rr_get_paused_cmd(conn) == PROC_CTE_REQ)) {
            ctx->state = RP_COMMON_STATE_WAIT_TX;
        } else {
            /* Block PHY update while the CTE response is in flight */
            llcp_rr_set_paused_cmd(conn, PROC_PHY_UPDATE);
            rp_comm_tx(conn, ctx);
            ctx->state = RP_COMMON_STATE_WAIT_TX_ACK;
        }
        break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
    default:
        /* Unknown procedure */
        LL_ASSERT(0);
    }
}
/* WAIT_RX state handler (remote FSM): a REQUEST event carries the peer's PDU
 * in 'param'; decode it and issue our response. Other events are ignored.
 */
static void rp_comm_st_wait_rx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
    if (evt != RP_COMMON_EVT_REQUEST) {
        /* Ignore other evts */
        return;
    }
    rp_comm_rx_decode(conn, ctx, (struct pdu_data *)param);
    rp_comm_send_rsp(conn, ctx, evt, param);
}
static void rp_comm_st_wait_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
switch (evt) {
case LP_COMMON_EVT_RUN:
rp_comm_send_rsp(conn, ctx, evt, param);
break;
default:
/* Ignore other evts */
break;
}
}
/* WAIT_TX_ACK state handler (remote FSM): the peer acked our response.
 * For DLE, apply the new lengths and notify the host (or wait for a ntf
 * buffer); for CTE, un-pause PHY update and complete.
 */
static void rp_comm_st_wait_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
                                   void *param)
{
    switch (evt) {
    case RP_COMMON_EVT_ACK:
        switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
        case PROC_DATA_LENGTH_UPDATE: {
            /* Apply changes in data lengths/times */
            uint8_t dle_changed = ull_dle_update_eff(conn);
            llcp_tx_resume_data(conn);
            if (dle_changed && !llcp_ntf_alloc_is_available()) {
                ctx->state = RP_COMMON_STATE_WAIT_NTF;
            } else {
                if (dle_changed) {
                    rp_comm_ntf(conn, ctx);
                }
                llcp_rr_complete(conn);
                ctx->state = RP_COMMON_STATE_IDLE;
            }
            break;
        }
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
        case PROC_CTE_REQ: {
            /* add PHY update pause = false here */
            ctx->tx_ack = NULL;
            llcp_rr_set_paused_cmd(conn, PROC_NONE);
            llcp_rr_complete(conn);
            ctx->state = RP_COMMON_STATE_IDLE;
            /* Fix: the original case had no break and silently fell through
             * into default. Harmless today (default only breaks), but an
             * explicit break prevents surprises if default ever gains code.
             */
            break;
        }
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
        default:
            /* Ignore other procedures */
            break;
        }
        break;
    default:
        /* Ignore other evts */
        break;
    }
}
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* WAIT_NTF state handler (remote FSM): retry delivering the DLE notification
 * whenever the FSM is run; complete once a ntf buffer is available.
 * Note: unlike other handlers this one does not inspect 'evt'.
 */
static void rp_comm_st_wait_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
                                void *param)
{
    if (llcp_ntf_alloc_is_available()) {
        rp_comm_ntf(conn, ctx);
        llcp_rr_complete(conn);
        ctx->state = RP_COMMON_STATE_IDLE;
    }
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
/* Dispatch an event to the handler for the remote FSM's current state. */
static void rp_comm_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
                                void *param)
{
    switch (ctx->state) {
    case RP_COMMON_STATE_IDLE:
        rp_comm_st_idle(conn, ctx, evt, param);
        break;
    case RP_COMMON_STATE_WAIT_RX:
        rp_comm_st_wait_rx(conn, ctx, evt, param);
        break;
    case RP_COMMON_STATE_WAIT_TX:
        rp_comm_st_wait_tx(conn, ctx, evt, param);
        break;
    case RP_COMMON_STATE_WAIT_TX_ACK:
        rp_comm_st_wait_tx_ack(conn, ctx, evt, param);
        break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
    case RP_COMMON_STATE_WAIT_NTF:
        rp_comm_st_wait_ntf(conn, ctx, evt, param);
        break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
    default:
        /* Unknown state */
        LL_ASSERT(0);
    }
}
/* Public entry: a request PDU arrived from the peer. */
void llcp_rp_comm_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx)
{
    rp_comm_execute_fsm(conn, ctx, RP_COMMON_EVT_REQUEST, rx->pdu);
}
/* Public entry: peer acked our response PDU. */
void llcp_rp_comm_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, struct node_tx *tx)
{
    rp_comm_execute_fsm(conn, ctx, RP_COMMON_EVT_ACK, tx->pdu);
}
/* Public entry: reset a remote procedure context before (re)use. */
void llcp_rp_comm_init_proc(struct proc_ctx *ctx)
{
    ctx->state = RP_COMMON_STATE_IDLE;
}
/* Public entry: give the remote FSM a chance to make progress. */
void llcp_rp_comm_run(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
{
    rp_comm_execute_fsm(conn, ctx, RP_COMMON_EVT_RUN, param);
}
|
564639.c | // Modify programming exercise 11 so that the program keeps adding terms until the current term is smaller than eps, where eps is a small (floating-point) number entered by the user.
#include <stdio.h>
/* Approximate e = 2 + 1/2! + 1/3! + ... by adding terms until the current
 * term is <= eps (eps is read from the user).
 *
 * Fixes vs. the original:
 *  - the factorial was accumulated in an 'unsigned int', which overflows at
 *    13! (6227020800 > UINT_MAX), producing wrong terms for small eps; the
 *    term is now computed incrementally in double (term /= i), which is both
 *    overflow-free and O(1) per iteration instead of O(i);
 *  - the scanf() result is checked so garbage input does not leave eps
 *    uninitialized (undefined behavior).
 */
int main() {
    double e = 2, eps;
    printf("请输入eps: ");
    if (scanf("%lf", &eps) != 1) {
        return 1; /* invalid input */
    }
    double term = 1; /* term for i-1; becomes 1/i! after the division below */
    for (unsigned i = 2; term > eps; ++i) {
        term /= i; /* 1/(i-1)! -> 1/i!, same sequence as the original loop */
        e += term;
    }
    printf("e: %f\n", e);
    return 0;
}
|
105453.c | /* Created RJudd December 14, 1997 */
/* SPAWARSYSCEN */
/**********************************************************************
// For TASP VSIPL Documentation and Code neither the United States /
// Government, the United States Navy, nor any of their employees, /
// makes any warranty, express or implied, including the warranties /
// of merchantability and fitness for a particular purpose, or /
// assumes any legal liability or responsibility for the accuracy, /
// completeness, or usefulness of any information, apparatus, /
// product, or process disclosed, or represents that its use would /
// not infringe privately owned rights /
**********************************************************************/
/* $Id: vsip_vsumval_i.c,v 2.0 2003/02/22 15:19:19 judd Exp $ */
/* Modified RJudd March 20, 1998 */
/* to vsip_vsumval_i.c */
/* Removed Tisdale error checking Sept 00 */
#include<vsip.h>
#include<vsip_vviewattributes_i.h>
vsip_scalar_i (vsip_vsumval_i)(
const vsip_vview_i* a) {
{
/*define variables*/
vsip_length n = a->length;
vsip_stride ast = a->stride;
vsip_scalar_i *ap = (a->block->array) + a->offset;
vsip_scalar_i t = 0;
/* do sum */
while(n-- > 0){
t += *ap;
ap += ast;
}
/* return sum */
return t;
}
}
|
122315.c | #include "tor4iot.h"
#include "tor_dtls.h"
#include "connection.h"
#include "tor_delegation.h"
#include "contiki-net.h"
/* tinyDTLS "read" callback: invoked with decrypted application data from the
 * peer; copies it into the shared 'buffer' (defined elsewhere in this module)
 * and forwards it to the connection layer. Always returns 0.
 * NOTE(review): the memcpy length is not checked against the capacity of
 * 'buffer' — presumably len is bounded by the DTLS record size and the buffer
 * is sized accordingly; confirm, otherwise this can overflow.
 */
static int read_from_peer(struct dtls_context_t *ctx, session_t *session,
                          uint8_t *data, size_t len) {
    struct uip_udp_conn *conn = (struct uip_udp_conn *) dtls_get_app_data(ctx);
    LOG_DBG("Got data of length %zd from ", len);
    LOG_DBG_6ADDR(&conn->ripaddr);
    LOG_DBG_(".%u\n", uip_ntohs(conn->rport));
    memcpy(buffer, data, len);
    DUMP_MEMORY("msg", buffer, len);
    conn_handle_input(session->conn, buffer, len);
    return 0;
}
/* tinyDTLS "write" callback: temporarily binds the shared UDP connection to
 * the session's address/port, sends the encrypted record, then clears the
 * remote endpoint again so the connection keeps accepting from any node.
 * Returns the number of bytes handed to uIP.
 */
static int send_to_peer(struct dtls_context_t *ctx, session_t *session,
                        uint8_t *data, size_t len) {
    struct uip_udp_conn *conn = (struct uip_udp_conn *) dtls_get_app_data(ctx);
    uip_ipaddr_copy(&conn->ripaddr, &session->addr);
    conn->rport = session->port;
    LOG_DBG("\nsend_to_peer\n"
            "DTLS Context: %p\n"
            "Session: %p\n"
            "UDP Connection: %p\n"
            "IP Address (%p): ", ctx, session, conn, &session->addr);
    LOG_DBG_6ADDR(&session->addr);
    LOG_DBG_("\n");
    LOG_DBG("Sending data of length %zd to ", len);
    LOG_DBG_6ADDR(&conn->ripaddr);
    LOG_DBG_(":%u\n", uip_ntohs(conn->rport));
    uip_udp_packet_send(conn, data, len);
    /* Restore server connection to allow data from any node */
    /* FIXME: do we want this at all? */
    memset(&conn->ripaddr, 0, sizeof(conn->ripaddr));
    memset(&conn->rport, 0, sizeof(conn->rport));
    return len;
}
/* tinyDTLS event callback.  Alert-level events (level > 0) are only
 * logged; level 0 carries connection life-cycle codes.  On the first
 * DTLS_EVENT_CONNECTED of a session the connection layer is notified
 * exactly once (guarded by already_connected). */
static int handle_event(struct dtls_context_t *ctx, session_t *session,
        dtls_alert_level_t level, unsigned short code) {
    if (level > 0) {
        LOG_WARN("Received DTLS alert message with code %d\n", code);
    } else if (level == 0) {
        switch (code) {
        case DTLS_EVENT_CONNECTED:
            if (!session->conn->already_connected) {
                LOG_DBG("Connected to Tor Relay.\n");
                handle_connected(session->conn);
                session->conn->already_connected = 1;
            }
            break;
        case DTLS_EVENT_CONNECT:
            LOG_DBG("Start to connect to Tor Relay.\n");
            break;
        case DTLS_EVENT_RENEGOTIATE:
            /* Fixed typo in log message ("Renegotioate"). */
            LOG_DBG("Renegotiate with Tor Relay.\n");
            break;
        default:
            /* Unknown event codes are silently ignored. */
            break;
        }
    }
    return 0;
}
#ifdef DTLS_PSK
static unsigned char psk_id[PSK_ID_MAXLEN] = PSK_DEFAULT_IDENTITY;
static size_t psk_id_length = sizeof(PSK_DEFAULT_IDENTITY) - 1;
static unsigned char psk_key[PSK_MAXLEN] = PSK_DEFAULT_KEY;
static size_t psk_key_length = sizeof(PSK_DEFAULT_KEY) - 1;
#ifdef __GNUC__
#define UNUSED_PARAM __attribute__((unused))
#else
#define UNUSED_PARAM
#endif /* __GNUC__ */
/* This function is the "key store" for tinyDTLS. It is called to
* retrieve a key for the given identity within this particular
* session. */
/* tinyDTLS key store: returns either our PSK identity or, given a
 * matching identity, the pre-shared key itself.  Copies at most
 * `result_length` bytes into `result`; returns the number of bytes
 * written or a fatal-alert error code. */
static int
get_psk_info(struct dtls_context_t *ctx UNUSED_PARAM,
             const session_t *session UNUSED_PARAM,
             dtls_credentials_type_t type,
             const unsigned char *id, size_t id_len,
             unsigned char *result, size_t result_length)
{
    switch (type)
    {
    case DTLS_PSK_IDENTITY:
        if (result_length < psk_id_length)
        {
            LOG_INFO("cannot set psk_identity -- buffer too small\n");
            return dtls_alert_fatal_create(DTLS_ALERT_INTERNAL_ERROR);
        }
        memcpy(result, psk_id, psk_id_length);
        return psk_id_length;
    case DTLS_PSK_KEY:
        /* Only hand out the key for our own configured identity. */
        if (id_len != psk_id_length || memcmp(psk_id, id, id_len) != 0)
        {
            LOG_INFO("PSK for unknown id requested, exiting\n");
            return dtls_alert_fatal_create(DTLS_ALERT_ILLEGAL_PARAMETER);
        }
        else if (result_length < psk_key_length)
        {
            LOG_INFO("cannot set psk -- buffer too small\n");
            return dtls_alert_fatal_create(DTLS_ALERT_INTERNAL_ERROR);
        }
        memcpy(result, psk_key, psk_key_length);
        return psk_key_length;
    default:
        LOG_INFO("unsupported request type: %d\n", type);
    }
    return dtls_alert_fatal_create(DTLS_ALERT_INTERNAL_ERROR);
}
#endif /* DTLS_PSK */
#ifdef DTLS_ECC
/* tinyDTLS ECC callback: supplies our static secp256r1 key pair
 * (ecdsa_priv_key / ecdsa_pub_key_* are defined elsewhere). */
static int
get_ecdsa_key(struct dtls_context_t *ctx,
              const session_t *session,
              const dtls_ecdsa_key_t **result)
{
    static const dtls_ecdsa_key_t ecdsa_key =
    {
        .curve = DTLS_ECDH_CURVE_SECP256R1,
        .priv_key = ecdsa_priv_key,
        .pub_key_x = ecdsa_pub_key_x,
        .pub_key_y = ecdsa_pub_key_y
    };
    *result = &ecdsa_key;
    return 0;
}
/* tinyDTLS ECC callback: validate the peer's public key.
 * SECURITY NOTE(review): unconditionally returns 0 (accept), i.e. no
 * peer authentication is performed -- any key is trusted.  Confirm
 * this is intended (e.g. authentication happens at the Tor layer). */
static int
verify_ecdsa_key(struct dtls_context_t *ctx,
                 const session_t *session,
                 const unsigned char *other_pub_x,
                 const unsigned char *other_pub_y,
                 size_t key_size)
{
    return 0;
}
#endif /* DTLS_ECC */
/* One-time initialization of the tinyDTLS library. */
void tor_dtls_init() {
    dtls_init();
}
uint16_t our_port = 10000;
/* Open a fresh UDP socket and start a DTLS handshake with the relay
 * addressed by conn->session.addr.  Each connection binds its own
 * local port; `our_port` is a monotonically increasing counter. */
void tor_dtls_connect(connection_t *conn) {
    dtls_context_t *new_ctx;
    struct uip_udp_conn *new_udp_conn;
    /* Callback table shared by all DTLS contexts. */
    static dtls_handler_t cb = { .write = send_to_peer, .read = read_from_peer,
        .event = handle_event,
#ifdef DTLS_PSK
        .get_psk_info = get_psk_info,
#endif /* DTLS_PSK */
#ifdef DTLS_ECC
        .get_ecdsa_key = get_ecdsa_key,
        .verify_ecdsa_key = verify_ecdsa_key
#endif /* DTLS_ECC */
    };
    LOG_INFO("DTLS client started\n");
    conn->session.conn = conn;
    new_udp_conn = udp_new(&conn->session.addr, 0, NULL);
    if (!new_udp_conn) {
        LOG_WARN("Contiki was not able to open a new UDP socket.\n");
        /* Fix: bail out instead of passing the NULL connection to
         * udp_bind()/dtls_new_context() below. */
        return;
    }
    conn->udp_conn = new_udp_conn;
    udp_bind(conn->udp_conn, uip_htons(our_port));
    our_port++;
    new_ctx = dtls_new_context(conn->udp_conn);
    conn->ctx = new_ctx;
    LOG_DBG("\ntor_dtls_connect\n"
            "DTLS Context: %p\n"
            "Session: %p\n"
            "Tor Connection: %p\n"
            "UDP Connection: %p\n"
            "IP Address (%p): ", new_ctx, &conn->session, conn, new_udp_conn, &conn->session.addr);
    LOG_DBG_6ADDR(&conn->session.addr);
    LOG_DBG_("\n");
    if (conn->ctx) {
        dtls_set_handler(conn->ctx, &cb);
        LOG_DBG("Set handler\n");
        if (!dtls_connect(conn->ctx, &conn->session)) {
            LOG_INFO("Failed connecting to OR\n");
        }
    }
}
/* Close the DTLS session, free its context, and remove the UDP socket.
 * Returns the combined result of dtls_close() and uip_udp_remove().
 * NOTE(review): `r &= ...` mixes the two return values bitwise -- the
 * exact error semantics of the combined value should be confirmed. */
int tor_dtls_disconnect(connection_t *conn) {
    int r;
    r = dtls_close(conn->ctx, &conn->session);
    if (r) {
        LOG_ERR("Failed to close DTLS connection\n");
    }
    dtls_free_context(conn->ctx);
    r &= uip_udp_remove(conn->udp_conn);
    /* Allow a future reconnect to fire handle_connected() again. */
    conn->already_connected = 0;
    return r;
}
/* Hand an outgoing buffer to tinyDTLS for encryption and transmission
 * on this connection's session.  Returns dtls_write()'s result. */
int tor_dtls_send(connection_t *conn, const uint8_t *buf, size_t buflen) {
    DUMP_MEMORY("dtls_msg_out", buf, buflen);
    return dtls_write(conn->ctx, &conn->session, (uint8_t *) buf, buflen);
}
/* uIP input hook: feed a freshly received datagram to the DTLS state
 * machine (which will in turn invoke read_from_peer with plaintext).
 * NOTE(review): writes a NUL one byte past uip_datalen() -- assumes
 * the uip_appdata buffer always has at least one spare byte; confirm
 * against UIP_BUFSIZE. */
void tor_dtls_handle_read(connection_t *conn) {
    int len;
    if (uip_newdata()) {
        len = uip_datalen();
        ((char *) uip_appdata)[uip_datalen()] = 0;
        // DUMP_MEMORY("dtls_ctx", conn->ctx, sizeof(dtls_));
        DUMP_MEMORY("dtls_msg_in", uip_appdata, len);
        dtls_handle_message(conn->ctx, &conn->session, uip_appdata, len);
    }
}
|
383789.c | #include <gb/gb.h>
#include "./helpers.h"
#include "./text.h"
/* Blank the whole 32x32 background map with the space character.
 * Fix: the previous `UINT8 line[32] = {_TEXT_CHAR_SPACE};` only set
 * element 0; the remaining 31 tiles were zero-initialized, so every
 * row was filled with tile 0 instead of the space tile. */
void clearScreen(void) {
    UINT8 line[32];
    UINT8 x = 32;
    UINT8 y = 32;
    while (x) {
        x -= 1;
        line[x] = _TEXT_CHAR_SPACE;
    }
    while (y) {
        y -= 1;
        set_bkg_tiles(0, y, 32, 1, line);
    }
}
/* Wait up to `count` vblank frames.  Returns the joypad state as soon
 * as any button is pressed, or 0 once the frames have elapsed. */
UINT8 waitFramesOrKeys(INT8 count) {
    UINT8 pressed;
    for (; count; count -= 1) {
        pressed = joypad();
        if (pressed) {
            return pressed;
        }
        wait_vbl_done();
    }
    return 0;
}
|
362147.c | #define _GNU_SOURCE
#include <string.h>
#include <limits.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <zmq.h>
#include <errno.h>
#include <ctype.h>
#include "disk.h"
#include "main.h"
#include "zutils.h"
#include "log.h"
#include "http.h"
#include "resolve.h"
const char content_type[] = "Content-Type";
const char last_modified[] = "Last-Modified";
const char gzip_encoding[] = "Content-Encoding\000gzip";
/* Allocate a hash table with 4096 collision-chain heads plus an
 * obstack for interned key/value strings.  Returns NULL on OOM
 * (fix: the malloc result was previously used unchecked). */
static mime_table_t *mime_new() {
    int sz = 4096;
    mime_table_t *mt = malloc(sizeof(mime_table_t) + sizeof(mime_entry_t)*sz);
    if (!mt)
        return NULL;
    mt->size = sz;
    obstack_init(&mt->pieces);
    /* Zero the chain heads that live right after the header. */
    memset((char *)mt + sizeof(mime_table_t), 0, sizeof(mime_entry_t)*sz);
    return mt;
}
/* Release every interned key/value string, then the table itself. */
static void mime_free(mime_table_t *mt) {
    obstack_free(&mt->pieces, NULL);
    free(mt);
}
/* Insert extension->mime mapping `key` -> `value`.
 * If `key` is already present the table is left unchanged and the
 * existing mime string is returned (callers use this to warn about
 * conflicts); on successful insertion returns NULL.  Both strings are
 * copied into the table's obstack. */
static char *mime_add(mime_table_t *mt, char *key, char *value) {
    /* Same multiplicative/xor string hash as mime_find() -- keep in sync. */
    char *s = key;
    size_t h = 0;
    while (*s) {
        h += (h<<1) + (h<<4) + (h<<7) + (h<<8) + (h<<24);
        h ^= (size_t)*s++;
    }
    int cell = h % mt->size;
    mime_entry_t *entry = LIST_FIRST(&mt->entries[cell]);
    mime_entry_t *prev = NULL;
    while(entry) {
        if(!strcmp(entry->name, key)) {
            return entry->mime;   /* duplicate: report existing value */
        }
        prev = entry;
        entry = LIST_NEXT(entry, lst);
    }
    /* Single allocation: entry header, key, value, all contiguous. */
    int klen = strlen(key);
    int vlen = strlen(value);
    entry = obstack_alloc(&mt->pieces, sizeof(mime_entry_t) + klen+1 + vlen+1);
    memcpy(entry->name, key, klen+1);
    entry->mime = entry->name + klen + 1;
    memcpy(entry->mime, value, vlen+1);
    /* Append at the tail of the chain (or become the head). */
    if(prev) {
        LIST_INSERT_AFTER(prev, entry, lst);
    } else {
        LIST_INSERT_HEAD(&mt->entries[cell], entry, lst);
    }
    return NULL;
}
/* Look up the mime type for extension `key`.
 * Returns the interned mime string, or NULL if unknown. */
static char *mime_find(mime_table_t *mt, char *key) {
    /* Same hash as mime_add() -- keep in sync. */
    char *s = key;
    size_t h = 0;
    while (*s) {
        h += (h<<1) + (h<<4) + (h<<7) + (h<<8) + (h<<24);
        h ^= (size_t)*s++;
    }
    int cell = h % mt->size;
    mime_entry_t *entry;
    LIST_FOREACH(entry, &mt->entries[cell], lst) {
        if(!strcmp(entry->name, key)) {
            return entry->mime;
        }
    }
    return NULL;
}
/* Validate the request path against the route's deny lists and return
 * the mime type to serve it with, or NULL if access is denied.
 * The query string (everything from '?') is ignored throughout.
 * Fix: `ext` was read uninitialized when the route has an index_file
 * whose name contains no '.' (strrchr returns NULL) -- now both
 * pointers start as NULL. */
static char *check_base(disk_request_t *req) {
    char *path = req->path;
    char *pathend = strchrnul(req->path, '?');
    int pathlen = pathend - path;
    config_Route_t *route = req->route;
    /* Reject paths ending in a denied suffix. */
    CONFIG_STRING_LOOP(suffix, route->static_.deny_suffixes) {
        if(pathlen >= suffix->value_len
            && !memcmp(pathend - suffix->value_len,
                       suffix->value, suffix->value_len)) {
            return NULL;
        }
    }
    /* Find the basename (component after the last slash). */
    char *base = pathend;
    for(;base != path && *base != '/'; --base);
    ++base; // need part right after the slash
    int baselen = pathend - base;
    /* Reject basenames starting with a denied prefix (e.g. dotfiles). */
    CONFIG_STRING_LOOP(prefix, route->static_.deny_prefixes) {
        if(baselen >= prefix->value_len
            && !memcmp(base, prefix->value, prefix->value_len)) {
            return NULL;
        }
    }
    config_main_t *config = root.config;
    char *ext = NULL, *ext0 = NULL;
    if(!*base || pathend[-1] == '/') {
        /* Directory request: use the index file's extension if any. */
        if(route->static_.index_file) {
            ext0 = strrchr(route->static_.index_file, '.');
            if(ext0) {
                ext = ext0 = ext0+1;
            }
        } else if(route->static_.dir_index) {
            return "text/html";
        }
    } else {
        /* Regular file: find the extension within the basename. */
        ext = pathend;
        while(*--ext != '.' && ext > base);
        if(ext <= base)
            ext = NULL;
    }
    if(!ext)
        return config->Server.mime_types.no_extension;
    ++ext; // need next after '.' character
    char *mime;
    if(!ext0) {
        /* Copy the extension into a NUL-terminated stack buffer. */
        ext0 = alloca(pathend - ext + 1);
        memcpy(ext0, ext, pathend - ext);
        ext0[pathend - ext] = 0;
    }
    mime = mime_find(root.disk.mime_table, ext0);
    if(mime) return mime;
    return config->Server.mime_types.default_type;
}
/* After symlink resolution, check that `realpath` is still confined
 * to the route's document root and/or one of its allowed directories.
 * Returns TRUE when the path may be served. */
static bool check_path(disk_request_t *req, char *realpath) {
    int plen = strlen(realpath);
    config_Route_t *route = req->route;
    if(route->static_.restrict_root) {
        /* Must be strictly inside root: "<root>/..." */
        if(plen < route->static_.root_len+1)
            return FALSE;
        if(memcmp(realpath, route->static_.root, route->static_.root_len))
            return FALSE;
        if(realpath[route->static_.root_len] != '/')
            return FALSE;
    }
    /* No directory whitelist configured: root check was enough. */
    if(!route->static_.restrict_dirs_len)
        return TRUE;
    CONFIG_DIR_LOOP(dir, route->static_.restrict_dirs) {
        if(plen >= dir->value_len+1
            && !memcmp(realpath, dir->value, dir->value_len)
            && realpath[dir->value_len] == '/')
            return TRUE;
    }
    return FALSE;
}
/* Build "<root>/<stripped request path>[<index_file>]" and resolve it
 * with realpath(3).  Returns a malloc'd canonical path (caller frees)
 * or NULL on resolution failure.
 * Fix: the VLA was sized `fulllen` but `fullpath[fulllen] = 0` wrote
 * one byte past its end -- the buffer now reserves the terminator. */
static char *join_paths(disk_request_t *req) {
    char *path = req->path;
    char *pathend = strchrnul(path, '?');
    /* Skip the configured number of leading directory components. */
    int nstrip = req->route->static_.strip_dirs+1; // always strip first slash
    for(; *path; ++path) {
        if(*path == '/') {
            for(;*path && *path == '/'; ++path);
            --nstrip;
            if(!nstrip) break;
        }
    }
    int fulllen = req->route->static_.root_len + pathend - path + 1;
    bool index = FALSE;
    /* Directory request: append the route's index file. */
    if((!*path || *(pathend-1) == '/') && req->route->static_.index_file) {
        fulllen += req->route->static_.index_file_len;
        index = TRUE;
    }
    char fullpath[fulllen + 1];   /* +1 for the NUL terminator */
    memcpy(fullpath, req->route->static_.root, req->route->static_.root_len);
    fullpath[req->route->static_.root_len] = '/';
    memcpy(fullpath + req->route->static_.root_len + 1, path, pathend - path);
    if(index) {
        memcpy(fullpath + req->route->static_.root_len + (pathend - path) + 1,
            req->route->static_.index_file, req->route->static_.index_file_len);
    }
    fullpath[fulllen] = 0;
    LDEBUG("Fullpath ``%s''", fullpath);
    return realpath(fullpath, NULL);
}
/* Read a whole file into `msg`, preferring "<path>.gz" when *gzip is
 * requested (falling back and clearing *gzip if absent).  Writes the
 * file's mtime into `lastmod` (RFC 1123).  Returns 1 with an empty
 * msg when `if_mod` matches (304 case), 0 on success with the body in
 * msg, -1 on error (msg left for the caller to close on rc != 0/1).
 * Fix: the read loop's error test was inverted -- it returned failure
 * on EAGAIN/EINTR (which should retry) and fell through on real
 * errors, advancing `data` by a negative count; an early EOF also
 * looped forever.  Both are handled now. */
static int get_file(char *path, zmq_msg_t *msg,
    char *if_mod, char *lastmod, int *gzip) {
    int fd = -1;
    if(*gzip) {
        /* Try the pre-compressed sibling first. */
        char *npath = alloca(strlen(path) + 4);
        strcpy(npath, path);
        strcat(npath, ".gz");
        fd = open(npath, O_RDONLY);
    }
    if(fd < 0) {
        *gzip = FALSE;
    }
    while(fd < 0) {
        fd = open(path, O_RDONLY);
        if(fd < 0) {
            if(errno != EINTR) {
                TWARN("Can't open file ``%s''", path);
                return -1;
            }
        }
    };
    struct stat statinfo;
    if(fstat(fd, &statinfo)) {
        TWARN("Can't stat file ``%s''", path);
        SNIMPL(close(fd));
        return -1;
    }
    if(S_ISDIR(statinfo.st_mode)) {
        TWARN("Path ``%s'' is a directory", path);
        SNIMPL(close(fd));
        return -1;
    }
    struct tm tmstruct;
    gmtime_r(&statinfo.st_mtime, &tmstruct);
    strftime(lastmod, 32, "%a, %d %b %Y %T GMT", &tmstruct);
    if(if_mod && !strcmp(if_mod, lastmod)) {
        zmq_msg_init(msg); // empty body for 304 reply
        SNIMPL(close(fd));
        return 1;
    }
    size_t to_read = statinfo.st_size;
    if(zmq_msg_init_size(msg, to_read)) {
        TWARN("Can't allocate buffer for file");
        SNIMPL(close(fd));
        return -1;
    }
    void *data = zmq_msg_data(msg);
    while(to_read) {
        ssize_t bytes = read(fd, data, to_read);
        if(bytes < 0) {
            if(errno == EAGAIN || errno == EINTR)
                continue;        /* transient: retry the read */
            TWARN("Can't read file");
            SNIMPL(close(fd));
            return -1;
        }
        if(bytes == 0) {
            /* File shrank under us; don't spin forever. */
            TWARN("Unexpected end of file ``%s''", path);
            SNIMPL(close(fd));
            return -1;
        }
        data += bytes;
        to_read -= bytes;
    }
    SNIMPL(close(fd));
    return 0;
}
/* Disk worker thread body.  Connects a REP socket to inproc://disk and
 * serves disk_request_t frames: validates the path, loads the file and
 * replies with either a bare error-code frame ("402"/"404"/"500 ..."),
 * a "304 Not Modified" + empty body, or "200 OK" + a headers frame
 * (NUL-separated name/value pairs) + the body.  An 8-byte "shutdown"
 * frame terminates the loop.
 * NOTE(review): "402" is the internal code that disk_process() maps to
 * the *forbidden* response (HTTP would use 403) -- confirm intended. */
void *disk_loop(void *_) {
    void *sock = zmq_socket(root.zmq, ZMQ_REP);
    SNIMPL(zmq_connect(sock, "inproc://disk"));
    while(1) {
        disk_request_t *req;
        zmq_msg_t msg;
        zmq_msg_init(&msg);
        if(zmq_recv(sock, &msg, 0) < 0) {
            if(errno == EINTR || errno == EAGAIN) {
                continue;
            }
            SNIMPL(-1);
        }
        /* Requests are single-part: assert there is no "more" flag. */
        int64_t opt;
        size_t optlen = sizeof(opt);
        SNIMPL(zmq_getsockopt(sock, ZMQ_RCVMORE, &opt, &optlen));
        ANIMPL(optlen == sizeof(opt) && !opt);
        req = zmq_msg_data(&msg);
        size_t reqlen = zmq_msg_size(&msg);
        if(reqlen == 8 && !memcmp(req, "shutdown", 8)) break;
        LDEBUG("Got disk request for ``%s''", req->path);
        char *mime = check_base(req);
        if(!mime) {
            /* Denied by suffix/prefix rules: internal "402" code.
             * Sends 4 bytes so the trailing NUL is included. */
            LDEBUG("Path ``%s'' denied", req->path);
            SNIMPL(zmq_msg_close(&msg));
            SNIMPL(zmq_msg_init_data(&msg, "402", 4, NULL, NULL));
            SNIMPL(zmq_send(sock, &msg, 0));
            continue;
        }
        char *realpath = join_paths(req);
        if(!realpath) {
            SWARN2("Can't resolve ``%s''", req->path);
            SNIMPL(zmq_msg_close(&msg));
            SNIMPL(zmq_msg_init_data(&msg, "404", 4, NULL, NULL));
            SNIMPL(zmq_send(sock, &msg, 0));
            continue;
        }
        LDEBUG("Resolved ``%s'' -> ``%s''", req->path, realpath);
        if(!check_path(req, realpath)) {
            /* Escaped the document root after symlink resolution. */
            LDEBUG("Path ``%s''(``%s'') denied", req->path, realpath);
            free(realpath);
            SNIMPL(zmq_msg_close(&msg));
            SNIMPL(zmq_msg_init_data(&msg, "402", 4, NULL, NULL));
            SNIMPL(zmq_send(sock, &msg, 0));
            continue;
        }
        zmq_msg_t result;
        zmq_msg_init(&result);
        char lastmod[64];
        int gz = req->gzipped;
        int rc = get_file(realpath, &result, req->if_modified, lastmod, &gz);
        free(realpath);
        zmq_msg_close(&msg); // frees req
        if(rc == 1) {
            /* If-Modified-Since matched: status + empty body. */
            zmq_msg_close(&result);
            SNIMPL(zmq_msg_init_data(&result,
                "304 Not Modified", strlen("304 Not Modified"), NULL, NULL));
            SNIMPL(zmq_send(sock, &result, ZMQ_SNDMORE));
            SNIMPL(zmq_msg_init(&result));
            SNIMPL(zmq_send(sock, &result, 0));
            continue;
        } else if(rc) {
            zmq_msg_close(&result);
            SNIMPL(zmq_msg_init_data(&result,
                "500 Internal Server Error",
                strlen("500 Internal Server Error"), NULL, NULL));
            SNIMPL(zmq_send(sock, &result, ZMQ_SNDMORE));
            SNIMPL(zmq_msg_init(&result));
            SNIMPL(zmq_send(sock, &result, 0));
            continue;
        }
        /* Success: build the headers frame as NUL-terminated pairs
         * (Content-Type, Last-Modified, optionally Content-Encoding). */
        int mimelen = strlen(mime)+1;
        int modlen = strlen(lastmod)+1;
        int totsize = sizeof(content_type) + mimelen
            + sizeof(last_modified) + modlen;
        if(gz) totsize += sizeof(gzip_encoding);
        SNIMPL(zmq_msg_init_data(&msg, "200 OK", 6, NULL, NULL));
        SNIMPL(zmq_send(sock, &msg, ZMQ_SNDMORE));
        SNIMPL(zmq_msg_init_size(&msg, totsize));
        void *data = zmq_msg_data(&msg);
        memcpy(data, content_type, sizeof(content_type));
        data += sizeof(content_type);
        memcpy(data, mime, mimelen);
        data += mimelen;
        memcpy(data, last_modified, sizeof(last_modified));
        data += sizeof(last_modified);
        memcpy(data, lastmod, modlen);
        data += modlen;
        if(gz) {
            memcpy(data, gzip_encoding, sizeof(gzip_encoding));
            data += sizeof(gzip_encoding);
        }
        SNIMPL(zmq_send(sock, &msg, ZMQ_SNDMORE));
        SNIMPL(zmq_send(sock, &result, 0));
        continue;
    }
    SNIMPL(zmq_close(sock));
    LDEBUG("Disk thread shut down");
}
/* Queue an HTTP request for the disk worker pool.
 * Sends a 3-part message on root.disk.socket: (uid frame keeping a
 * request refcount, empty sentinel, disk_request_t payload).  Replies
 * come back asynchronously through disk_process().  Always returns 0;
 * failures are answered inline with a static error response. */
int disk_request(request_t *req) {
    if(!root.disk.socket) {
        TWARN("Configured static route with non-positive `disk-io-threads`");
        http_static_response(req,
            &req->route->responses.internal_error);
        return 0;
    }
    // Must wake up reading and on each send, because the way zmq sockets work
    ev_feed_event(root.loop, &root.disk.watch, EV_READ);
    zmq_msg_t msg;
    /* Register the request in the sieve so the reply can find it. */
    make_hole_uid(req, req->uid, root.request_sieve, FALSE);
    req->flags |= REQ_IN_SIEVE;
    root.stat.disk_requests += 1;
    REQ_INCREF(req);   /* released by request_decref when zmq frees the frame */
    SNIMPL(zmq_msg_init_data(&msg, req->uid, UID_LEN, request_decref, req));
    while(zmq_send(root.disk.socket, &msg, ZMQ_SNDMORE|ZMQ_NOBLOCK) < 0) {
        if(errno == EAGAIN) {
            /* Queue full: answer 503 instead of blocking the loop. */
            zmq_msg_close(&msg);
            http_static_response(req,
                &req->route->responses.service_unavailable);
            return 0;
        } else if(errno == EINTR) {
            continue;
        } else {
            zmq_msg_close(&msg);
            http_static_response(req,
                &req->route->responses.internal_error);
            return 0;
        }
    }
    SNIMPL(zmq_msg_init(&msg));
    SNIMPL(zmq_send(root.disk.socket, &msg, ZMQ_SNDMORE));
    SNIMPL(zmq_msg_init_size(&msg, sizeof(disk_request_t)));
    disk_request_t *dreq = zmq_msg_data(&msg);
    dreq->route = req->route;
    if(req->ws.headerindex[root.disk.IF_MODIFIED]) {
        /* NOTE(review): strncpy may leave if_modified unterminated if
         * the header is >= sizeof(dreq->if_modified) -- confirm the
         * reader tolerates that. */
        strncpy(dreq->if_modified, req->ws.headerindex[root.disk.IF_MODIFIED],
            sizeof(dreq->if_modified));
    } else {
        dreq->if_modified[0] = 0;
    }
    if(req->route->static_.single_uri_len) {
        dreq->path = req->route->static_.single_uri;
    } else {
        // TODO(tailhook) probably it's not save to use path from request
        // we should copy it
        dreq->path = req->path;
    }
    /* Honor gzip only when the client lists it in Accept-Encoding. */
    dreq->gzipped = FALSE;
    if(req->route->static_.gzip_enabled) {
        char *ae = req->ws.headerindex[root.disk.ACCEPT_ENCODING];
        if(ae) {
            char *next = ae;
            while(*next) {
                char *start = next;
                char *end = strchrnul(next, ',');
                next = *end ? end+1 : end;
                --end;
                /* Trim surrounding whitespace of this token. */
                while(start < end && isspace(*start)) ++ start;
                while(end > start && isspace(*end)) -- end;
                if(start == end) {
                    continue;
                }
                if(!strncmp(start, "gzip", end-start+1)) {
                    dreq->gzipped = TRUE;
                    break;
                } // TODO(tailhook) implement gzip; q=0.xx
            }
        }
    }
    SNIMPL(zmq_send(root.disk.socket, &msg, 0));
    return 0;
}
/* libev callback: drain replies from the disk worker pool.
 * Each reply is (uid frame, empty sentinel, status line[, headers frame,
 * body]).  A status-only reply is an internal error code ("402"/"404")
 * mapped to a static response; a full reply is streamed to the client.
 * The Z_* macros wrap the zmq receive sequence; `msg_opt` signals
 * whether more parts follow. */
static void disk_process(struct ev_loop *loop, struct ev_io *watch, int revents) {
    ANIMPL(!(revents & EV_ERROR));
    while(TRUE) {
        Z_SEQ_INIT(msg, root.disk.socket);
        LDEBUG("Checking disk...");
        Z_RECV_START(msg, break);
        LDEBUG("Got something from disk");
        if(zmq_msg_size(&msg) != UID_LEN) {
            TWARN("Wrong uid length %d", zmq_msg_size(&msg));
            goto msg_error;
        }
        /* Recover the originating request from the uid sieve. */
        request_t *req = sieve_get(root.request_sieve,
            UID_HOLE(zmq_msg_data(&msg)));
        ANIMPL(req && UID_EQ(req->uid, zmq_msg_data(&msg)));
        REQ_INCREF(req); // own a reference immediately, before we
        // free a message which refers to our request
        Z_RECV_NEXT(msg);
        ANIMPL(!zmq_msg_size(&msg)); // The sentinel of routing data
        Z_RECV(msg);
        //first is a status-line
        char *data = zmq_msg_data(&msg);
        char *tail;
        int dlen = zmq_msg_size(&msg);
        LDEBUG("Disk status line: [%d] %.*s", dlen, dlen, data);
        if(!msg_opt) { // if there are no subsequent parts
            // then it's error response
            int code = atoi(data);
            if(code == 404) {
                http_static_response(req,
                    &REQRCONFIG(req)->responses.not_found);
            } else if(code == 402) {
                /* Internal "denied" code -> forbidden response. */
                http_static_response(req,
                    &REQRCONFIG(req)->responses.forbidden);
            } else {
                http_static_response(req,
                    &REQRCONFIG(req)->responses.internal_error);
            }
            request_finish(req);
            goto msg_error;
        } else {
            ws_statusline(&req->ws, data);
            Z_RECV(msg);
            if(msg_opt) { //second is headers if its not last
                /* Headers frame: alternating NUL-terminated name and
                 * value strings, parsed with a two-phase scan. */
                char *data = zmq_msg_data(&msg);
                char *name = data;
                char *value = NULL;
                int dlen = zmq_msg_size(&msg);
                char *end = data + dlen;
                int state = 0;
                for(char *cur = data; cur < end; ++cur) {
                    /* Skip to the NUL ending the name; value follows. */
                    for(; cur < end; ++cur) {
                        if(!*cur) {
                            value = cur + 1;
                            ++cur;
                            break;
                        }
                    }
                    /* Skip to the NUL ending the value; emit the pair. */
                    for(; cur < end; ++cur) {
                        if(!*cur) {
                            ws_add_header(&req->ws, name, value);
                            name = cur + 1;
                            break;
                        }
                    }
                }
            if(name < end) {
                TWARN("Some garbage at end of headers. "
                      "Please finish each name and each value "
                      "with '\\0' character");
            }
            Z_RECV(msg);
            if(msg_opt) {
                TWARN("Too many message parts");
                http_static_response(req,
                    &REQRCONFIG(req)->responses.internal_error);
                request_finish(req);
                goto msg_error;
            }
            }
        }
        /* Append route-configured headers and finish the header block. */
        http_common_headers(req);
        CONFIG_STRING_STRING_LOOP(line, req->route->headers) {
            SNIMPL(ws_add_header(&req->ws, line->key, line->value));
        }
        ws_finish_headers(&req->ws);
        root.stat.disk_reads += 1;
        root.stat.disk_bytes_read += zmq_msg_size(&msg);
        // the last part is always a body
        ANIMPL(!(req->flags & REQ_HAS_MESSAGE));
        SNIMPL(zmq_msg_init(&req->response_msg));
        req->flags |= REQ_HAS_MESSAGE;
        SNIMPL(zmq_msg_move(&req->response_msg, &msg));
        SNIMPL(ws_reply_data(&req->ws, zmq_msg_data(&req->response_msg),
            zmq_msg_size(&req->response_msg)));
        req->flags |= REQ_REPLIED;
        request_finish(req);
    msg_finish:
        REQ_DECREF(req);
        Z_SEQ_FINISH(msg);
        continue;
    msg_error:
        REQ_DECREF(req);
        Z_SEQ_ERROR(msg);
        continue;
    }
    LDEBUG("Out of disk...");
}
/* Parse a mime.types file (``type ext ext ...`` lines, '#' comments)
 * and register each extension in `matcher`.  Mime strings are interned
 * in `buf`.  Returns 0 on success, -1 if the file can't be opened.
 * Fix: the `matcher` parameter was ignored -- additions went straight
 * to the global root.disk.mime_table.  Callers pass that same table,
 * so behavior is unchanged, but the function now honors its contract. */
static int read_mime_types(struct obstack *buf, mime_table_t *matcher,
    char *filename) {
    FILE *file = fopen(filename, "r");
    if(!file) return -1;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;
    while ((read = getline(&line, &len, file)) != -1) {
        char *tokptr = NULL;
        char *tok = strtok_r(line, " \t\r\n", &tokptr);
        if(!tok || tok[0] == '#')
            continue;
        char *mtype = tok;
        tok = strtok_r(NULL, " \t\r\n", &tokptr);
        if(!tok) continue;  /* type with no extensions */
        mtype = obstack_copy0(buf, mtype, strlen(mtype));
        do {
            LDEBUG("Adding mime ``%s'' -> ``%s''", tok, mtype);
            char *old = mime_add(matcher, tok, mtype);
            if(old && !root.config->Server.mime_types.no_warnings) {
                LWARN("Conflicting mime for ``%s'' using ``%s''", tok, old);
            }
            tok = strtok_r(NULL, " \t\r\n", &tokptr);
        } while(tok);
    }
    free(line);
    fclose(file);
    return 0;
}
/* Start the disk subsystem: bind the inproc dealer socket, hook its
 * readiness fd into the libev loop, spawn the worker threads, and load
 * the mime table (user overrides first, then the mime.types file).
 * Returns 0; disabled entirely when disk_io_threads <= 0.
 * NOTE(review): ZMQ_FD is read into an int64_t; the option's natural
 * size is platform-dependent (int on POSIX) -- confirm zmq accepts
 * the larger buffer here. */
int prepare_disk(config_main_t *config) {
    if(config->Server.disk_io_threads <= 0) {
        root.disk.socket = NULL;
        return 0;
    }
    /* Cache header indexes used on the hot request path. */
    root.disk.IF_MODIFIED = ws_index_header(&root.ws, "If-Modified-Since");
    root.disk.ACCEPT_ENCODING = ws_index_header(&root.ws, "Accept-Encoding");
    root.disk.socket = zmq_socket(root.zmq, ZMQ_XREQ);
    SNIMPL(root.disk.socket == NULL);
    SNIMPL(zmq_bind(root.disk.socket, "inproc://disk"));
    int64_t fd;
    size_t fdsize = sizeof(fd);
    SNIMPL(zmq_getsockopt(root.disk.socket, ZMQ_FD, &fd, &fdsize));
    ev_io_init(&root.disk.watch, disk_process, fd, EV_READ);
    ev_io_start(root.loop, &root.disk.watch);
    root.disk.threads =malloc(sizeof(pthread_t)*config->Server.disk_io_threads);
    ANIMPL(root.disk.threads);
    for(int i = 0; i < config->Server.disk_io_threads; ++i) {
        SNIMPL(pthread_create(&root.disk.threads[i], NULL, disk_loop, NULL));
    }
    LWARN("%d disk threads ready", config->Server.disk_io_threads);
    root.disk.mime_table = mime_new();
    // User-specified values override mime.types
    CONFIG_STRING_STRING_LOOP(item, config->Server.mime_types.extra) {
        LDEBUG("Adding mime ``%s'' -> ``%s''", item->key, item->value);
        char *old = mime_add(root.disk.mime_table, item->key, item->value);
        if(old && !root.config->Server.mime_types.no_warnings) {
            LWARN("Conflicting mime for ``%s'' using ``%s''", item->key, old);
        }
    }
    SNIMPL(read_mime_types(&config->head.pieces,
           root.disk.mime_table,
           config->Server.mime_types.file));
    return 0;
}
/* Shut the disk subsystem down: flood "shutdown" frames until the
 * socket would block (so every worker's zmq_recv sees one), join the
 * threads, then release socket, watcher, thread array and mime table.
 * Each shutdown message is an (empty routing frame, "shutdown") pair
 * matching the format disk_loop() expects. */
int release_disk(config_main_t *config) {
    while(TRUE) {
        zmq_msg_t msg;
        SNIMPL(zmq_msg_init(&msg));
        if(zmq_send(root.disk.socket, &msg, ZMQ_NOBLOCK|ZMQ_SNDMORE) < 0) {
            if(errno == EAGAIN) {
                /* Queue full: workers have enough shutdown frames. */
                zmq_msg_close(&msg);
                break;
            }
            SNIMPL(-1);
        }
        SNIMPL(zmq_msg_init_size(&msg, 8));
        memcpy(zmq_msg_data(&msg), "shutdown", 8);
        zmq_send(root.disk.socket, &msg, ZMQ_NOBLOCK); // don't care if fails
        zmq_msg_close(&msg);
    }
    for(int i = 0; i < config->Server.disk_io_threads; ++i) {
        SNIMPL(pthread_join(root.disk.threads[i], NULL));
    }
    ev_io_stop(root.loop, &root.disk.watch);
    SNIMPL(zmq_close(root.disk.socket));
    free(root.disk.threads);
    mime_free(root.disk.mime_table);
    return 0;
}
|
345601.c | #include "../svcomp.h"
/* SV-COMP verification harness: x and y are deliberately left
 * uninitialized so the verifier treats them as nondeterministic
 * inputs (do NOT "fix" this).  x starts at 4 (else return), each
 * iteration adds a y in [-3,-1], and the property checked is that
 * the loop can only exit at x == 0 or x == -2. */
void main() { int x;
    if(x!=4) return;
    while(x>0)
    {
        int y;
        if(-3>y || y>-1) return;   /* constrain y to [-3,-1] */
        x += y;
    }
    __VERIFIER_assert(x==0 || x==-2);
}
|
812897.c | #include <strings.h>
#include "sway/commands.h"
#include "sway/config.h"
/* Parse the `output <name> mode [--custom] <mode>` subcommand.
 * Accepts either "WIDTHxHEIGHT[@RATEHz]" in one argument or
 * "WIDTH HEIGHT" in two.  Consumed arguments are reported via
 * handler_context.leftovers.  Returns NULL on success or an error
 * result. */
struct cmd_results *output_cmd_mode(int argc, char **argv) {
    if (!config->handler_context.output_config) {
        return cmd_results_new(CMD_FAILURE, "Missing output config");
    }
    if (!argc) {
        return cmd_results_new(CMD_INVALID, "Missing mode argument.");
    }
    struct output_config *output = config->handler_context.output_config;
    if (strcmp(argv[0], "--custom") == 0) {
        argv++;
        argc--;
        output->custom_mode = 1;
    } else {
        output->custom_mode = 0;
    }
    // Reset custom modeline, if any
    output->drm_mode.type = 0;
    char *end;
    output->width = strtol(*argv, &end, 10);
    if (*end) {
        // Format is 1234x4321
        if (*end != 'x') {
            return cmd_results_new(CMD_INVALID, "Invalid mode width.");
        }
        ++end;
        output->height = strtol(end, &end, 10);
        if (*end) {
            /* Optional refresh-rate suffix: "@60Hz". */
            if (*end != '@') {
                return cmd_results_new(CMD_INVALID, "Invalid mode height.");
            }
            ++end;
            output->refresh_rate = strtof(end, &end);
            if (strcasecmp("Hz", end) != 0) {
                return cmd_results_new(CMD_INVALID,
                    "Invalid mode refresh rate.");
            }
        }
    } else {
        // Format is 1234 4321
        argc--; argv++;
        if (!argc) {
            return cmd_results_new(CMD_INVALID,
                "Missing mode argument (height).");
        }
        output->height = strtol(*argv, &end, 10);
        if (*end) {
            return cmd_results_new(CMD_INVALID, "Invalid mode height.");
        }
    }
    /* Hand the unconsumed arguments back to the output dispatcher. */
    config->handler_context.leftovers.argc = argc - 1;
    config->handler_context.leftovers.argv = argv + 1;
    return NULL;
}
/* Parse an 11-token X-style modeline into a DRM user-defined mode:
 * clock(MHz) hdisp hsyncstart hsyncend htotal vdisp vsyncstart
 * vsyncend vtotal (+|-)hsync (+|-)vsync.  Returns false when either
 * sync-polarity token is malformed; numeric tokens are not validated
 * (strtol/strtof with NULL endptr). */
static bool parse_modeline(char **argv, drmModeModeInfo *mode) {
    mode->type = DRM_MODE_TYPE_USERDEF;
    mode->clock = strtof(argv[0], NULL) * 1000;   /* MHz -> kHz */
    mode->hdisplay = strtol(argv[1], NULL, 10);
    mode->hsync_start = strtol(argv[2], NULL, 10);
    mode->hsync_end = strtol(argv[3], NULL, 10);
    mode->htotal = strtol(argv[4], NULL, 10);
    mode->vdisplay = strtol(argv[5], NULL, 10);
    mode->vsync_start = strtol(argv[6], NULL, 10);
    mode->vsync_end = strtol(argv[7], NULL, 10);
    mode->vtotal = strtol(argv[8], NULL, 10);
    /* Derived refresh in mHz: clock[kHz] * 1e6 / htotal / vtotal. */
    mode->vrefresh = mode->clock * 1000.0 * 1000.0
        / mode->htotal / mode->vtotal;
    if (strcasecmp(argv[9], "+hsync") == 0) {
        mode->flags |= DRM_MODE_FLAG_PHSYNC;
    } else if (strcasecmp(argv[9], "-hsync") == 0) {
        mode->flags |= DRM_MODE_FLAG_NHSYNC;
    } else {
        return false;
    }
    if (strcasecmp(argv[10], "+vsync") == 0) {
        mode->flags |= DRM_MODE_FLAG_PVSYNC;
    } else if (strcasecmp(argv[10], "-vsync") == 0) {
        mode->flags |= DRM_MODE_FLAG_NVSYNC;
    } else {
        return false;
    }
    snprintf(mode->name, sizeof(mode->name), "%dx%d@%d",
        mode->hdisplay, mode->vdisplay, mode->vrefresh / 1000);
    return true;
}
/* Parse the `output <name> modeline <11 tokens>` subcommand.
 * Fix: leftovers were computed as argc - 12 / argv + 12 even though
 * argc == 11 is enforced above, yielding argc == -1 and a pointer
 * past the argument array; a modeline consumes exactly its 11
 * tokens, so use 11. */
struct cmd_results *output_cmd_modeline(int argc, char **argv) {
    if (!config->handler_context.output_config) {
        return cmd_results_new(CMD_FAILURE, "Missing output config");
    }
    if (!argc) {
        return cmd_results_new(CMD_INVALID, "Missing modeline argument.");
    }
    struct output_config *output = config->handler_context.output_config;
    if (argc != 11 || !parse_modeline(argv, &output->drm_mode)) {
        return cmd_results_new(CMD_INVALID, "Invalid modeline");
    }
    config->handler_context.leftovers.argc = argc - 11;
    config->handler_context.leftovers.argv = argv + 11;
    return NULL;
}
|
789031.c | /**
Onion HTTP server library
Copyright (C) 2010-2018 David Moreno Montero and others
This library is free software; you can redistribute it and/or
modify it under the terms of, at your choice:
a. the Apache License Version 2.0.
b. the GNU General Public License as published by the
Free Software Foundation; either version 2.0 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of both licenses, if not see
<http://www.gnu.org/licenses/> and
<http://www.apache.org/licenses/LICENSE-2.0>.
*/
#include <stdio.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdlib.h>
#include <assert.h>
#include <pty.h>
#include <poll.h>
#include <pthread.h>
#include <onion/request.h>
#include <onion/response.h>
#include <onion/handler.h>
#include <onion/log.h>
#ifdef __DEBUG__
#include <onion/handlers/exportlocal.h>
#endif
#include <onion/handlers/opack.h>
#include <onion/shortcuts.h>
#include <pwd.h>
#include <onion/dict.h>
#include "oterm_handler.h"
#include <onion/onion.h>
#include <onion/poller.h>
/// Time to wait for output, or just return.
#define TIMEOUT 60000
/// Max data to store. This is, more or less a window of 230*70, which is not so extrange
#define BUFFER_SIZE 4096*4
/**
* @short Information about a process
*
* The process has a circular buffer. The pos is mod with BUFFER_SIZE.
*
* First time clients just ask data, and as a custom control command we send the latest position.
*
* Then on next request the client sets that want from that position onwards. If data avaliable,
* it is sent, if not, then the first blocks on the fd, and the followings on a pthread_condition.
*/
typedef struct process_t {
int fd; ///< fd that serves as communication channel with the pty.
pid_t pid; ///< PID of the command process
char *title; ///< Title of the process, normally from path, but is set using xterm commands
char *buffer; ///< Circular buffer, keeps las BUFFER_SIZE characters emmited
int16_t buffer_pos; ///< Position on the circular buffer, 0-BUFFER_SIZE
pthread_cond_t dataReady; ///< All but one will wait on the condition
pthread_mutex_t mutex; ///< Mutex for this process, as several threads can access it.
char uuid[37]; ///< UUID for this process. May be used to access it.
struct process_t *next;
} process;
/**
* @short Information about all the processes.
*/
typedef struct {
pthread_mutex_t head_mutex;
process *head;
} oterm_session;
/**
* @short Data for the onion terminal handler itself
*/
struct oterm_data_t {
char *exec_command;
onion *onion;
onion_dict *processes;
onion_dict *sessions; // Each user (session['username']) has a session.
};
typedef struct oterm_data_t oterm_data;
process *oterm_new(oterm_data * d, oterm_session * s, const char *username,
char impersonate);
oterm_session *oterm_session_new();
static int oterm_status(oterm_session * o, onion_request * req,
onion_response * res);
static int oterm_resize(process * o, onion_request * req, onion_response * res);
static int oterm_title(process * o, onion_request * req, onion_response * res);
static int oterm_in(process * o, onion_request * req, onion_response * res);
static int oterm_out(process * o, onion_request * req, onion_response * res);
static int oterm_data_ready(process * o);
static onion_connection_status oterm_process(oterm_data * data, process * term,
const char *function,
onion_request * req,
onion_response * res);
/// Returns the term from the list of known terms. FIXME, make this structure a tree or something faster than linear search.
/// Returns the term from the list of known terms, matched either by its
/// UUID string or by its 1-based position in the session's list.
/// FIXME, make this structure a tree or something faster than linear search.
process *oterm_get_process(oterm_session * o, const char *id) {
    int wanted_index = atoi(id);
    process *found = NULL;
    pthread_mutex_lock(&o->head_mutex);
    int pos = 1;
    for (process * cur = o->head; cur; cur = cur->next, pos++) {
        if (pos == wanted_index || strcmp(cur->uuid, id) == 0) {
            found = cur;
            break;
        }
    }
    pthread_mutex_unlock(&o->head_mutex);
    return found;
}
/// Returns the term from the list of known terms. FIXME, make this structure a tree or something faster than linear search.
process *oterm_get_process_by_uuid(oterm_data * o, const char *id) {
return (process *) onion_dict_get(o->processes, id);
}
/* Handler for /uuid/<id>[/<function>] URLs.  Splits the path into a
 * terminal id and an optional subcommand; without a subcommand it
 * serves the terminal UI page, otherwise it advances the request path
 * and dispatches to oterm_process(). */
onion_connection_status oterm_uuid(void *data, onion_request * req,
                                   onion_response * res) {
    const char *path = onion_request_get_path(req);
    ONION_DEBUG("Ask path %s (%p)", path, data);
    // split id / function
    int l = strlen(path) + 1;
    char *id = alloca(l);   /* mutable copy of the path incl. NUL */
    char *function = NULL;
    int i;
    memcpy(id, path, l);
    int func_pos = 0;
    for (i = 0; i < l; i++) {
        if (id[i] == '/') {
            /* Text after the first slash (if any) is the subcommand. */
            if (!function && id[i + 1] != '\0')
                function = id + i + 1;
            id[i] = 0;      /* terminate the id part */
            func_pos = i;
            break;
        }
    }
    ONION_DEBUG("Id %s, function %s", id, function);
    process *term = oterm_get_process_by_uuid(data, id);
    if (!term)
        return OCS_INTERNAL_ERROR;
    if (!function)
        return onion_shortcut_internal_redirect("static/oterm.html", req, res);
    // do it
    onion_request_advance_path(req, func_pos);
    return oterm_process(data, term, function, req, res);
}
/// Plexes the request depending on arguments.
/* Session-level entry point: requires an authenticated session
 * ("username"), lazily creates the per-user oterm_session, and handles
 * the "new" (spawn a terminal) and "status" subcommands; anything else
 * falls through as OCS_NOT_PROCESSED.
 * NOTE(review): the session lookup happens before the write lock is
 * taken, so two concurrent first requests could each create a session
 * -- confirm onion serializes these. */
int oterm_get_data(oterm_data * data, onion_request * req, onion_response * res) {
    const char *username = onion_request_get_session(req, "username");
    if (!username) {
        ONION_WARNING("Trying to enter authenticated area without username.");
        return OCS_FORBIDDEN;
    }
    oterm_session *o = (oterm_session *) onion_dict_get(data->sessions,
                                                        onion_request_get_session
                                                        (req,
                                                         "username"));
    if (!o) {
        /* First request for this user: create and register a session. */
        o = oterm_session_new();
        onion_dict_lock_write(data->sessions);
        onion_dict_add(data->sessions, onion_request_get_session(req, "username"),
                       o, 0);
        onion_dict_unlock(data->sessions);
    }
    const char *path = onion_request_get_path(req);
    ONION_DEBUG("Ask path %s (%p)", path, data);
    if (strcmp(path, "new") == 0) {
        /* Optional POST "command" overrides the command to exec. */
        if (onion_request_get_post(req, "command")) {
            free(data->exec_command);
            data->exec_command = strdup(onion_request_get_post(req, "command"));
        }
        oterm_new(data, o, onion_request_get_session(req, "username"),
                  onion_request_get_session(req, "nopam") ? 0 : 1);
        return onion_shortcut_response("ok", 200, req, res);
    }
    if (strcmp(path, "status") == 0)
        return oterm_status(o, req, res);
    return OCS_NOT_PROCESSED;
}
/* Dispatch a per-terminal subcommand ("out"/"in"/"resize"/"title") to
 * its handler; 404 when the terminal is unknown, OCS_NOT_PROCESSED for
 * any other subcommand name. */
static onion_connection_status oterm_process(oterm_data * data, process * term,
                                             const char *function,
                                             onion_request * req,
                                             onion_response * res) {
    if (!term)
        return onion_shortcut_response("Terminal Id unknown", 404, req, res);
    static const struct {
        const char *name;
        int (*handler) (process *, onion_request *, onion_response *);
    } commands[] = {
        {"out", oterm_out},
        {"in", oterm_in},
        {"resize", oterm_resize},
        {"title", oterm_title},
    };
    for (size_t i = 0; i < sizeof(commands) / sizeof(commands[0]); i++) {
        if (strcmp(function, commands[i].name) == 0)
            return commands[i].handler(term, req, res);
    }
    return OCS_NOT_PROCESSED;
}
/// Variables that will be passed to the new environment.
// Host environment variables copied verbatim into each spawned terminal.
const char *onion_clearenvs[] = { "HOME", "TMP" };
// Fixed environment entries forced into every spawned terminal.
const char *onion_extraenvs[] = { "TERM=xterm" };
// Element counts for the two tables above.
#define ONION_CLEARENV_COUNT (sizeof(onion_clearenvs)/sizeof(onion_clearenvs[0]))
#define ONION_EXTRAENV_COUNT (sizeof(onion_extraenvs)/sizeof(onion_extraenvs[0]))
/// Creates a new oterm
// Spawns a new terminal process via forkpty, assigns it a UUID, registers it
// in the session's process list and in the global uuid->process dict, and
// hooks its pty fd into the onion poller.  Returns the new process, or NULL
// on allocation/fork failure.
//
// Fixes: malloc() of the process record was unchecked, and a forkpty()
// failure (-1) was previously indistinguishable from the parent branch and
// left an invalid fd registered with the poller.
process *oterm_new(oterm_data * data, oterm_session * session,
                   const char *username, char impersonate) {
  process *oterm = malloc(sizeof(process));
  if (!oterm) {
    ONION_ERROR("Out of memory allocating terminal");
    return NULL;
  }
  const char *command_name;
  int i;
  // Basename of the command to exec: text after the last '/', or the whole
  // string when there is no '/' (i ends at -1, so index 0).
  for (i = strlen(data->exec_command); i >= 0; i--)
    if (data->exec_command[i] == '/')
      break;
  command_name = &data->exec_command[i + 1];
  /// Get the UUID, linux nicely gives it.
  {
    int fd = open("/proc/sys/kernel/random/uuid", O_RDONLY);
    if (fd >= 0) {
      int r = read(fd, oterm->uuid, sizeof(oterm->uuid) - 1);
      close(fd);
      if (r != sizeof(oterm->uuid) - 1) // So we will use the pseudo random generator.
        fd = -1;
    }
    if (fd < 0) {
      // Fallback when /proc is unavailable.  NOTE(review): relies on rand();
      // assumes the PRNG was seeded elsewhere — TODO confirm.
      const char random_chars[] = "0123456789abcdef-";
      for (i = 0; i < sizeof(oterm->uuid) - 1; i++) {
        oterm->uuid[i] = random_chars[rand() % sizeof(random_chars)];
      }
    }
    oterm->uuid[sizeof(oterm->uuid) - 1] = 0;
    ONION_DEBUG("New UUID for this terminal is %s", oterm->uuid);
  }
  oterm->buffer = calloc(1, BUFFER_SIZE);
  oterm->buffer_pos = 0;
  pthread_mutex_init(&oterm->mutex, NULL);
  pthread_cond_init(&oterm->dataReady, NULL);
  ONION_DEBUG("Creating new terminal, exec %s (%s)", data->exec_command,
              command_name);
  oterm->pid = forkpty(&oterm->fd, NULL, NULL, NULL);
  if (oterm->pid < 0) {
    // forkpty failed; previously this fell through and -1 was kept as pid.
    ONION_ERROR("Could not create pty/fork for new terminal");
    pthread_cond_destroy(&oterm->dataReady);
    pthread_mutex_destroy(&oterm->mutex);
    free(oterm->buffer);
    free(oterm);
    return NULL;
  }
  if (oterm->pid == 0) { // on child
    // Copy env vars: whitelisted host vars plus the forced extras, NULL-ended.
    char **envs =
        malloc(sizeof(char *) *
               (1 + ONION_CLEARENV_COUNT + ONION_EXTRAENV_COUNT));
    int i, j = 0;
    for (i = 0; i < ONION_CLEARENV_COUNT; i++) {
      const char *env = onion_clearenvs[i];
      const char *val = getenv(env);
      if (val) {
        int l = strlen(env) + 1 + strlen(val) + 1;
        envs[j] = malloc(l);
        sprintf(envs[j], "%s=%s", env, val);
        j++;
      }
    }
    for (i = 0; i < ONION_EXTRAENV_COUNT; i++) {
      envs[j] = strdup(onion_extraenvs[i]);
      j++;
    }
    envs[j] = NULL;
    // Change personality to that user
    if (impersonate) {
      struct passwd *pw;
      pw = getpwnam(username);
      int error;
      if (!pw) {
        ONION_ERROR("Cant find user to drop priviledges: %s", username);
        exit(1);
      } else {
        // Drop group first, then user (order matters for privilege drops).
        error = setgid(pw->pw_gid);
        error |= setuid(pw->pw_uid);
      }
      if (error) {
        ONION_ERROR("Cant set the uid/gid for user %s", username);
        exit(1);
      }
    }
    for (i = 3; i < 256; i++) // Force close file descriptors. Dirty but it works.
      close(i);
    int ok = execle(data->exec_command, command_name, NULL, envs);
    fprintf(stderr, "%s:%d Could not exec shell: %d\n", __FILE__, __LINE__, ok);
    perror("");
    exit(1);
  }
  oterm->title = strdup(data->exec_command);
  ONION_DEBUG("Default title is %s", oterm->title);
  oterm->next = NULL;
  // I set myself at end
  pthread_mutex_lock(&session->head_mutex);
  if (!session->head)
    session->head = oterm;
  else {
    process *next = session->head;
    while (next->next)
      next = next->next;
    next->next = oterm;
  }
  // Wake us up (oterm_data_ready) whenever the pty has output.
  onion_poller_slot *sl =
      onion_poller_slot_new(oterm->fd, (void *)oterm_data_ready, oterm);
  onion_poller_add(onion_get_poller(data->onion), sl);
  pthread_mutex_unlock(&session->head_mutex);
  onion_dict_add(data->processes, oterm->uuid, oterm, 0);
  return oterm;
}
/// Checks if the process is running and if it stopped, set name
// Non-blocking reap: if waitpid reports any change (exit, or an error such as
// the child already being reaped), mark the terminal as finished.
static void oterm_check_running(process * n) {
  if (n->pid == -1)
    return;                     // Already marked dead.
  if (waitpid(n->pid, NULL, WNOHANG) == 0)
    return;                     // Still running; nothing to update.
  free(n->title);
  n->title = strdup("- Finished -");
  n->pid = -1;
}
/// Returns the status of all known terminals.
// Builds a JSON object { "1": {title, uuid}, "2": ... } describing every
// terminal in the user's session and sends it as the response.
int oterm_status(oterm_session * session, onion_request * req,
                 onion_response * res) {
  onion_dict *status = onion_dict_new();
  if (session) {
    pthread_mutex_lock(&session->head_mutex);
    process *n = session->head;
    int i = 1;
    while (n) {
      oterm_check_running(n);
      // 12 bytes holds any 32-bit int; the previous malloc(6)+sprintf
      // overflowed once the index reached six digits.
      char *id = malloc(12);
      snprintf(id, 12, "%d", i);
      onion_dict *term = onion_dict_new();
      onion_dict_add(term, "title", n->title, OD_DUP_VALUE);
      onion_dict_add(term, "uuid", n->uuid, 0);
      // NOTE(review): OD_FREE_ALL presumably takes ownership of `id` — verify
      // against onion_dict documentation.
      onion_dict_add(status, id, term, OD_DICT | OD_FREE_ALL);
      // Just a check, here is ok, no hanging childs
      n = n->next;
      i++;
    }
    pthread_mutex_unlock(&session->head_mutex);
  }
  return onion_shortcut_response_json(status, req, res);
}
/// Input data to the process
// Forwards the POSTed "type" field as keyboard input to the terminal's pty.
// Responds "OK" on full write (or when there is nothing to write), "Error"
// on a failed or short write.
int oterm_in(process * p, onion_request * req, onion_response * res) {
  oterm_check_running(p);
  const char *data = onion_request_get_post(req, "type");
  if (data) {
    size_t r = strlen(data);
    ssize_t w = write(p->fd, data, r);
    // Check errors and short writes explicitly; the old code printed a
    // ssize_t with %d, which is undefined behavior on LP64 platforms.
    if (w < 0 || (size_t) w != r) {
      ONION_WARNING
          ("Error writing data to process. Not all data written. (%d).",
           (int) w);
      return onion_shortcut_response("Error", HTTP_INTERNAL_ERROR, req, res);
    }
  }
  return onion_shortcut_response("OK", HTTP_OK, req, res);
}
/// Resize the window.
// Applies the POSTed "width"/"height" to the pty via TIOCSWINSZ so the child
// process sees the new terminal geometry (it receives SIGWINCH from the tty
// layer).
//
// Fix: width was stored into ws_row and height into ws_col — swapped.  The
// defaults confirm the intent: 80 columns by 25 rows.
int oterm_resize(process * p, onion_request * req, onion_response * res) {
  struct winsize winSize;
  memset(&winSize, 0, sizeof(winSize));
  const char *t = onion_request_get_post(req, "width");
  winSize.ws_col = (unsigned short)atoi(t ? t : "80");  // width = columns
  t = onion_request_get_post(req, "height");
  winSize.ws_row = (unsigned short)atoi(t ? t : "25");  // height = rows
  int ok = ioctl(p->fd, TIOCSWINSZ, (char *)&winSize) == 0;
  if (ok)
    return onion_shortcut_response("OK", HTTP_OK, req, res);
  else
    return onion_shortcut_response("Error", HTTP_INTERNAL_ERROR, req, res);
}
/// Sets internally the window title, for reference.
// Replaces the terminal's stored title with the POSTed "title" value; the
// title is only used by oterm_status, not pushed to the client.
int oterm_title(process * p, onion_request * req, onion_response * res) {
  const char *title = onion_request_get_post(req, "title");
  if (!title)
    return onion_shortcut_response("Error, must set title", HTTP_INTERNAL_ERROR,
                                   req, res);
  free(p->title);               // free(NULL) is a no-op, so no guard needed.
  p->title = strdup(title);
  ONION_DEBUG("Set term %d title %s", p->pid, p->title);
  return onion_shortcut_response("OK", HTTP_OK, req, res);
}
/// Gets the output data
// Streams the terminal's circular output buffer to the HTTP client.
// With ?initial set it dumps the whole buffer (oldest bytes first when the
// buffer has wrapped); otherwise it blocks on the condition variable until
// the write position moves past the client-supplied "pos", then sends only
// the new bytes.  Both paths end with an in-band "\033]oterm;<pos>;" marker
// telling the client where to resume from.
int oterm_out(process * o, onion_request * req, onion_response * res) {
pthread_mutex_lock(&o->mutex);
if (onion_request_get_query(req, "initial")) {
if (o->buffer[BUFFER_SIZE - 1] != 0) { // If 0 then never wrote on it. So if not, write from pos to end too, first.
onion_response_write(res, &o->buffer[o->buffer_pos],
BUFFER_SIZE - o->buffer_pos);
}
onion_response_write(res, o->buffer, o->buffer_pos);
onion_response_printf(res, "\033]oterm;%d;", o->buffer_pos);
onion_response_printf(res, "\033]url;https://localhost:8080/uuid/%s/;",
o->uuid);
pthread_mutex_unlock(&o->mutex);
return OCS_PROCESSED;
}
// NOTE(review): pos is parsed into int16_t; assumes BUFFER_SIZE fits in a
// signed 16-bit range — confirm against the BUFFER_SIZE definition.
int16_t p = atoi(onion_request_get_queryd(req, "pos", "0")); //o->buffer_pos;
ONION_DEBUG("Wait for data at %d", p);
while (p == o->buffer_pos) // We need it to be diferent, if not does not make sense to wake up
pthread_cond_wait(&o->dataReady, &o->mutex);
ONION_DEBUG("Data ready at %d (waiting from %d)", o->buffer_pos, p);
// If the writer wrapped past us, first flush from p to the end of the
// buffer, then continue from the start.
if (o->buffer_pos < p) {
onion_response_write(res, &o->buffer[p], BUFFER_SIZE - p);
p = 0;
}
onion_response_write(res, &o->buffer[p], o->buffer_pos - p);
onion_response_printf(res, "\033]oterm;%d;", o->buffer_pos);
pthread_mutex_unlock(&o->mutex);
return OCS_PROCESSED;
}
/**
 * @short The onion main poller has some data ready.
 *
 * Reads whatever the pty has produced and appends it to the terminal's
 * circular buffer, keeping at most the last BUFFER_SIZE bytes, then wakes
 * any oterm_out() readers waiting on dataReady.  Returns 0 on success, or
 * the negative read() result on error (which makes the poller drop the fd).
 */
static int oterm_data_ready(process * o) {
// read data, if any. Else return inmediately empty.
char buffer[4096];
int n = 0; // -O2 complains of maybe used uninitialized
n = read(o->fd, buffer, sizeof(buffer));
if (n < 0)
return n;
pthread_mutex_lock(&o->mutex);
// Store on buffer
const char *data = buffer;
int sd = n;
// If one read exceeds the whole ring, keep only its last BUFFER_SIZE bytes.
if (sd > BUFFER_SIZE) {
data = &data[sd - BUFFER_SIZE];
sd = BUFFER_SIZE;
}
// Wrap-around: fill to the end of the ring, then restart at position 0.
if (o->buffer_pos + sd > BUFFER_SIZE) {
memcpy(&o->buffer[o->buffer_pos], data, BUFFER_SIZE - o->buffer_pos);
data = &data[BUFFER_SIZE - o->buffer_pos];
sd = sd - (BUFFER_SIZE - o->buffer_pos);
o->buffer_pos = 0;
}
memcpy(&o->buffer[o->buffer_pos], data, sd);
o->buffer_pos += sd;
pthread_mutex_unlock(&o->mutex);
// Wake all readers blocked in oterm_out().
pthread_cond_broadcast(&o->dataReady);
return 0;
}
/// Terminates all processes, and frees the memory.
// Sends SIGTERM to every still-running terminal and releases all per-process
// resources, then destroys the session itself.
//
// Fixes: a finished terminal has pid == -1 (set by oterm_check_running), and
// kill(-1, SIGTERM) signals *every* process the server may signal — dead
// entries are now skipped.  The strdup()ed title and the per-process
// mutex/condvar were also being leaked.
void oterm_session_free(oterm_session * o) {
  process *p = o->head;
  process *t;
  while (p) {
    if (p->pid != -1)
      kill(p->pid, SIGTERM);
    close(p->fd);
    pthread_mutex_destroy(&p->mutex);
    pthread_cond_destroy(&p->dataReady);
    free(p->buffer);
    free(p->title);
    t = p;
    p = p->next;
    free(t);
  }
  pthread_mutex_destroy(&o->head_mutex);
  free(o);
}
// onion_dict_preorder callback: frees one per-user session.  The dict key
// (username) and the extra data/flags arguments are unused.
void oterm_session_free_dict_helper(void *data, const char *key,
const void *value, int flags) {
oterm_session_free((oterm_session *) value);
}
/// Creates a new session, one per user at oterm_data->sessions.
// Allocates the per-user session record: an empty terminal list guarded by
// its own mutex.
oterm_session *oterm_session_new() {
  oterm_session *session = malloc(sizeof(oterm_session));
  session->head = NULL;
  pthread_mutex_init(&session->head_mutex, NULL);
  return session;
}
/// Frees memory used by the handler
// Tears down the whole handler: every user session (and its terminals), both
// lookup dictionaries, the command string, and the handler data itself.
void oterm_free(oterm_data * data) {
  onion_dict_free(data->processes);
  // Free all sessions before dropping the sessions dict that owns them.
  onion_dict_preorder(data->sessions, oterm_session_free_dict_helper, NULL);
  onion_dict_free(data->sessions);
  free(data->exec_command);
  free(data);
}
/// Prepares the oterm handler
// Builds the handler's shared state (session and process dictionaries plus
// the command to exec for each new terminal) and wraps it in an onion
// handler whose destructor is oterm_free.
onion_handler *oterm_handler(onion * o, const char *exec_command) {
  oterm_data *data = malloc(sizeof(oterm_data));
  data->onion = o;
  data->exec_command = strdup(exec_command);
  data->sessions = onion_dict_new();
  data->processes = onion_dict_new();
  return onion_handler_new((void *)oterm_get_data, data, (void *)oterm_free);
}
|
726873.c | /*
* FreeRTOS V202012.00
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
/*
BASIC INTERRUPT DRIVEN SERIAL PORT DRIVER FOR USART0.
This file contains all the serial port components that can be compiled to
either ARM or THUMB mode. Components that must be compiled to ARM mode are
contained in serialISR.c.
*/
/* Standard includes. */
#include <stdlib.h>
/* Scheduler includes. */
#include "FreeRTOS.h"
#include "queue.h"
#include "task.h"
/* Demo application includes. */
#include "serial.h"
#include "AT91R40008.h"
#include "usart.h"
#include "pio.h"
#include "aic.h"
/*-----------------------------------------------------------*/
/* Constants to setup and access the UART. */
#define portUSART0_AIC_CHANNEL ( ( unsigned long ) 2 )
#define serINVALID_QUEUE ( ( QueueHandle_t ) 0 )
#define serHANDLE ( ( xComPortHandle ) 1 )
#define serNO_BLOCK ( ( TickType_t ) 0 )
/*-----------------------------------------------------------*/
/* Queues used to hold received characters, and characters waiting to be
transmitted. */
static QueueHandle_t xRxedChars;
static QueueHandle_t xCharsForTx;
/*-----------------------------------------------------------*/
/*
* The queues are created in serialISR.c as they are used from the ISR.
* Obtain references to the queues and THRE Empty flag.
*/
extern void vSerialISRCreateQueues( unsigned portBASE_TYPE uxQueueLength, QueueHandle_t *pxRxedChars, QueueHandle_t *pxCharsForTx );
/*-----------------------------------------------------------*/
/* Initialise USART0 for interrupt-driven serial I/O: create the Rx/Tx
queues (in serialISR.c), program the AT91R40008 USART registers for the
requested baud rate (8N1, normal mode), install the ISR wrapper in the AIC,
and enable the receiver/transmitter.  Returns serHANDLE on success or 0 if
the queues could not be created or the baud rate is zero.  Register writes
happen inside a critical section so the sequence cannot be interrupted. */
xComPortHandle xSerialPortInitMinimal( unsigned long ulWantedBaud, unsigned portBASE_TYPE uxQueueLength )
{
unsigned long ulSpeed;
unsigned long ulCD;
xComPortHandle xReturn = serHANDLE;
extern void ( vUART_ISR_Wrapper )( void );
/* The queues are used in the serial ISR routine, so are created from
serialISR.c (which is always compiled to ARM mode. */
vSerialISRCreateQueues( uxQueueLength, &xRxedChars, &xCharsForTx );
if(
( xRxedChars != serINVALID_QUEUE ) &&
( xCharsForTx != serINVALID_QUEUE ) &&
( ulWantedBaud != ( unsigned long ) 0 )
)
{
portENTER_CRITICAL();
{
/* Enable clock to USART0... */
AT91C_BASE_PS->PS_PCER = AT91C_PS_US0;
/* Disable all USART0 interrupt sources to begin... */
AT91C_BASE_US0->US_IDR = 0xFFFFFFFF;
/* Reset various status bits (just in case)... */
AT91C_BASE_US0->US_CR = US_RSTSTA;
AT91C_BASE_PIO->PIO_PDR = TXD0 | RXD0; /* Enable RXD and TXD pins */
AT91C_BASE_US0->US_CR = US_RSTRX | US_RSTTX | US_RXDIS | US_TXDIS;
/* Clear Transmit and Receive Counters */
AT91C_BASE_US0->US_RCR = 0;
AT91C_BASE_US0->US_TCR = 0;
/* Input clock to baud rate generator is MCK */
/* Divisor is computed in x10 fixed point (MCK / 16 / baud) so it can be
rounded to the nearest integer rather than truncated. */
ulSpeed = configCPU_CLOCK_HZ * 10;
ulSpeed = ulSpeed / 16;
ulSpeed = ulSpeed / ulWantedBaud;
/* compute the error */
ulCD = ulSpeed / 10;
if ((ulSpeed - (ulCD * 10)) >= 5)
ulCD++;
/* Define the baud rate divisor register */
AT91C_BASE_US0->US_BRGR = ulCD;
/* Define the USART mode */
AT91C_BASE_US0->US_MR = US_CLKS_MCK | US_CHRL_8 | US_PAR_NO | US_NBSTOP_1 | US_CHMODE_NORMAL;
/* Write the Timeguard Register */
AT91C_BASE_US0->US_TTGR = 0;
/* Setup the interrupt for USART0.
Store interrupt handler function address in USART0 vector register... */
AT91C_BASE_AIC->AIC_SVR[ portUSART0_AIC_CHANNEL ] = (unsigned long)vUART_ISR_Wrapper;
/* USART0 interrupt level-sensitive, priority 1... */
AT91C_BASE_AIC->AIC_SMR[ portUSART0_AIC_CHANNEL ] = AIC_SRCTYPE_INT_LEVEL_SENSITIVE | 1;
/* Clear some pending USART0 interrupts (just in case)... */
AT91C_BASE_US0->US_CR = US_RSTSTA;
/* Enable USART0 interrupt sources (but not Tx for now)... */
AT91C_BASE_US0->US_IER = US_RXRDY;
/* Enable USART0 interrupts in the AIC... */
AT91C_BASE_AIC->AIC_IECR = ( 1 << portUSART0_AIC_CHANNEL );
/* Enable receiver and transmitter... */
AT91C_BASE_US0->US_CR = US_RXEN | US_TXEN;
}
portEXIT_CRITICAL();
}
else
{
xReturn = ( xComPortHandle ) 0;
}
return xReturn;
}
/*-----------------------------------------------------------*/
/* Receive one character from UART0, blocking for at most xBlockTime ticks.
Returns pdTRUE with the character in *pcRxedChar, or pdFALSE on timeout. */
signed portBASE_TYPE xSerialGetChar( xComPortHandle pxPort, signed char *pcRxedChar, TickType_t xBlockTime )
{
	/* Only USART0 is supported by this driver, so the handle is unused. */
	( void ) pxPort;

	/* The Rx ISR feeds xRxedChars; just wait for the next entry. */
	return ( xQueueReceive( xRxedChars, pcRxedChar, xBlockTime ) != pdFALSE ) ? pdTRUE : pdFALSE;
}
/*-----------------------------------------------------------*/
/* Queue a NUL-terminated string for transmission, one character at a time. */
void vSerialPutString( xComPortHandle pxPort, const signed char * const pcString, unsigned short usStringLength )
{
const signed char *pxChar;

	/* NOTE: This implementation does not handle the queue being full as no
	block time is used! */

	/* Only USART0 is supported; the handle and explicit length are unused —
	transmission stops at the NUL terminator instead. */
	( void ) pxPort;
	( void ) usStringLength;

	for( pxChar = pcString; *pxChar != ( signed char ) 0; pxChar++ )
	{
		xSerialPutChar( pxPort, *pxChar, serNO_BLOCK );
	}
}
/*-----------------------------------------------------------*/
/* Queue one character for transmission and enable the Tx interrupt so the
ISR drains the queue.  Returns pdPASS, or pdFAIL if the queue was full for
the whole xBlockTime. */
signed portBASE_TYPE xSerialPutChar( xComPortHandle pxPort, signed char cOutChar, TickType_t xBlockTime )
{
	( void ) pxPort;

	if( xQueueSend( xCharsForTx, &cOutChar, xBlockTime ) == pdPASS )
	{
		/* Turn on the Tx interrupt so the ISR will remove the character from the
		queue and send it. This does not need to be in a critical section as
		if the interrupt has already removed the character the next interrupt
		will simply turn off the Tx interrupt again. */
		AT91C_BASE_US0->US_IER = US_TXRDY;
		return pdPASS;
	}

	return pdFAIL;
}
/*-----------------------------------------------------------*/
/* Closing the port is intentionally a no-op in this demo driver. */
void vSerialClose( xComPortHandle xPort )
{
/* Not supported as not required by the demo application. */
( void ) xPort;
}
/*-----------------------------------------------------------*/
|
99555.c | inherit "room/room";
/* Room setup: exits, short/long descriptions, and the examinable items
   ("hole", "snow") stored as name/description pairs in the items array. */
reset(arg) {
add_exit("east", "105");
add_exit("southwest", "102");
short_desc = "A small snowy tunnel";
long_desc = "Soft breeze of cold air floats through this part of the tunnel system.\n" +
"The freezing air seems to be flooding into the room through a tiny hole\n" +
"near the ceiling. Some light is also luminating into the room through the\n" +
"hole. The ceiling of the tunnel is concave-shaped, and from the center, it\n" +
"reaches in the height of eight feet. Some icicles are hanging from the\n" +
"ceiling. All around the tunnel floor can be seen lying lots of thin snow.\n" +
"The small tunnel continues to east. In southwest can be seen a small opening.\n";
/* items holds alternating [name, description] entries. */
items = allocate(4);
items[0] = "hole";
items[1] = "A small hole near the ceiling of the tunnel seems to penetrate the mountain\n" +
"all the way to the surface. From the hole is shimmering a weak light and\n" +
"some breezes of cold air flood into this part of the tunnel through it";
items[2] = "snow";
items[3] = "The snow in this part of the tunnel seems to be very fine and thin. The soft\n" +
"snow feels comfortable under your feet and no footprints can be recognized\n" +
"from the fine mass of snow even when examining more carefully";
}
/* Called when a living enters the room: start the icicle hazard timer. */
init() {
::init();
call_out("icedrop", 1);
}
/* Periodic icicle hazard: while a player is present the call_out reschedules
   itself every 3 seconds; roughly 1 tick in 200 drops an icicle, which then
   has a 50% chance of hitting the player for 1-31 points of spell damage,
   otherwise it shatters harmlessly. */
icedrop() {
if(present(this_player())) call_out("icedrop", 3);
else return 1;
/* random(200) is non-zero 199 times out of 200 -> usually nothing happens. */
if(random(200)) return;
tell_room(this_object(), "A sharp icicle falls down from the ceiling.\n");
if(!random(2)) {
tell_object(this_player(), "You grunt in pain as the icicle hits you!\n");
this_player()->hit_with_spell(random(31) + 1);
say("An icicle hits " + this_player()->query_name() + "!\n");
}
else {
tell_room(this_object(), "The icicle breaks into pieces as it lands on the ground.\n");
}
return 1;
}
7287.c | #ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <numa.h>
#include <sched.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <assert.h>
#include "mtcp_api.h"
#define MAX_FILE_NAME 1024
/*----------------------------------------------------------------------------*/
/* Number of processors currently online (not the configured maximum). */
int
GetNumCPUs()
{
	return (int)sysconf(_SC_NPROCESSORS_ONLN);
}
/*----------------------------------------------------------------------------*/
/* Kernel thread id of the calling thread; raw syscall because older glibc
 * versions ship no gettid() wrapper. */
pid_t
Gettid()
{
	return (pid_t)syscall(__NR_gettid);
}
/*----------------------------------------------------------------------------*/
/* Pin the calling thread to the given cpu and, on NUMA machines, bind its
 * memory allocation policy to that cpu's physical package.
 *
 * Returns 0 (or sched_setaffinity's result) on success, -1 with errno set on
 * failure.  Review fixes: errno was being set to -EINVAL (errno values are
 * positive); the numa bitmask leaked when the sysfs file could not be opened;
 * and a failed fscanf left phy_id uninitialized yet still used. */
int
mtcp_core_affinitize(int cpu)
{
	cpu_set_t cpus;
	struct bitmask *bmask;
	FILE *fp;
	char sysfname[MAX_FILE_NAME];
	int phy_id;
	size_t n;
	int ret;

	n = GetNumCPUs();
	if (cpu < 0 || cpu >= (int) n) {
		errno = EINVAL;		/* was -EINVAL */
		return -1;
	}

	CPU_ZERO(&cpus);
	CPU_SET((unsigned)cpu, &cpus);
	ret = sched_setaffinity(Gettid(), sizeof(cpus), &cpus);

	/* Single NUMA node: no memory binding required. */
	if (numa_max_node() == 0)
		return ret;

	bmask = numa_bitmask_alloc(n);
	assert(bmask);

	/* read physical id of the core from sys information */
	snprintf(sysfname, MAX_FILE_NAME - 1,
		 "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
	fp = fopen(sysfname, "r");
	if (!fp) {
		perror(sysfname);
		numa_bitmask_free(bmask);	/* was leaked on this path */
		errno = EFAULT;
		return -1;
	}
	if (fscanf(fp, "%d", &phy_id) != 1) {
		/* Unparseable topology file: clean up and fail instead of
		 * binding to an uninitialized node id. */
		fclose(fp);
		numa_bitmask_free(bmask);
		errno = EFAULT;
		return -1;
	}
	fclose(fp);

	numa_bitmask_setbit(bmask, phy_id);
	numa_set_membind(bmask);
	numa_bitmask_free(bmask);

	return ret;
}
|
972128.c | /*
* Generated by asn1c-0.9.24 (http://lionet.info/asn1c)
* From ASN.1 module "S1AP-IEs"
* found in "/root/openair-cn/SRC/S1AP/MESSAGES/ASN1/R10.5/S1AP-IEs.asn"
* `asn1c -gen-PER`
*/
#include "S1ap-CompletedCellinEAI-Item.h"
/* NOTE: asn1c-generated type tables for the S1ap-CompletedCellinEAI-Item
   SEQUENCE (members: mandatory eCGI, optional iE-Extensions).  Do not edit
   by hand; regenerate from the ASN.1 module instead. */
static asn_TYPE_member_t asn_MBR_S1ap_CompletedCellinEAI_Item_1[] = {
{ ATF_NOFLAGS, 0, offsetof(struct S1ap_CompletedCellinEAI_Item, eCGI),
(ASN_TAG_CLASS_CONTEXT | (0 << 2)),
-1, /* IMPLICIT tag at current level */
&asn_DEF_S1ap_EUTRAN_CGI,
0, /* Defer constraints checking to the member type */
0, /* No PER visible constraints */
0,
"eCGI"
},
{ ATF_POINTER, 1, offsetof(struct S1ap_CompletedCellinEAI_Item, iE_Extensions),
(ASN_TAG_CLASS_CONTEXT | (1 << 2)),
-1, /* IMPLICIT tag at current level */
&asn_DEF_S1ap_IE_Extensions,
0, /* Defer constraints checking to the member type */
0, /* No PER visible constraints */
0,
"iE-Extensions"
},
};
/* Indices of OPTIONAL members (iE-Extensions). */
static int asn_MAP_S1ap_CompletedCellinEAI_Item_oms_1[] = { 1 };
static ber_tlv_tag_t asn_DEF_S1ap_CompletedCellinEAI_Item_tags_1[] = {
(ASN_TAG_CLASS_UNIVERSAL | (16 << 2))
};
/* Tag -> member index lookup used by the BER decoder. */
static asn_TYPE_tag2member_t asn_MAP_S1ap_CompletedCellinEAI_Item_tag2el_1[] = {
{ (ASN_TAG_CLASS_CONTEXT | (0 << 2)), 0, 0, 0 }, /* eCGI at 418 */
{ (ASN_TAG_CLASS_CONTEXT | (1 << 2)), 1, 0, 0 } /* iE-Extensions at 419 */
};
static asn_SEQUENCE_specifics_t asn_SPC_S1ap_CompletedCellinEAI_Item_specs_1 = {
sizeof(struct S1ap_CompletedCellinEAI_Item),
offsetof(struct S1ap_CompletedCellinEAI_Item, _asn_ctx),
asn_MAP_S1ap_CompletedCellinEAI_Item_tag2el_1,
2, /* Count of tags in the map */
asn_MAP_S1ap_CompletedCellinEAI_Item_oms_1, /* Optional members */
1, 0, /* Root/Additions */
1, /* Start extensions */
3 /* Stop extensions */
};
/* Public type descriptor wiring the generic SEQUENCE codecs to the tables
   above; referenced by containing S1AP message definitions. */
asn_TYPE_descriptor_t asn_DEF_S1ap_CompletedCellinEAI_Item = {
"S1ap-CompletedCellinEAI-Item",
"S1ap-CompletedCellinEAI-Item",
SEQUENCE_free,
SEQUENCE_print,
SEQUENCE_constraint,
SEQUENCE_decode_ber,
SEQUENCE_encode_der,
SEQUENCE_decode_xer,
SEQUENCE_encode_xer,
SEQUENCE_decode_uper,
SEQUENCE_encode_uper,
SEQUENCE_decode_aper,
SEQUENCE_encode_aper,
SEQUENCE_compare,
0, /* Use generic outmost tag fetcher */
asn_DEF_S1ap_CompletedCellinEAI_Item_tags_1,
sizeof(asn_DEF_S1ap_CompletedCellinEAI_Item_tags_1)
/sizeof(asn_DEF_S1ap_CompletedCellinEAI_Item_tags_1[0]), /* 1 */
asn_DEF_S1ap_CompletedCellinEAI_Item_tags_1, /* Same as above */
sizeof(asn_DEF_S1ap_CompletedCellinEAI_Item_tags_1)
/sizeof(asn_DEF_S1ap_CompletedCellinEAI_Item_tags_1[0]), /* 1 */
0, /* No PER visible constraints */
asn_MBR_S1ap_CompletedCellinEAI_Item_1,
2, /* Elements count */
&asn_SPC_S1ap_CompletedCellinEAI_Item_specs_1 /* Additional specs */
};
|
680044.c | /*
* Crystals (working title)
*
* Copyright (c) 2010, 2011 Matt Windsor, Michael Walker and Alexander
* Preisinger.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * The names of contributors may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* AFOREMENTIONED COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file src/field/object.c
* @author Matt Windsor
* @brief Low-level object functions.
*/
#include "../crystals.h"
/* -- STATIC DECLARATIONS -- */
/**
* Check to see whether the given object falls within the given dirty
* rectangle and, if so, mark the object as dirty.
*
* @param object The object to test.
* @param rect_pointer Pointer to the dirty rectangle to test.
*/
static void dirty_object_test_post_check (object_t *object,
dirty_rectangle_t *rect);
/* -- DEFINITIONS -- */
/* Changes the tag associated with an object.
 *
 * @param object The object to modify (must be non-NULL).
 * @param tag    The new layer tag; a tag of 0 prevents the object from
 *               being queued for rendering in set_object_dirty.
 */
void
set_object_tag (object_t *object, layer_tag_t tag)
{
g_assert (object != NULL);
object->tag = tag;
}
/* Gets the graphic associated with an object.
 *
 * @param object The object to query (must be non-NULL).
 * @return The object's image record; ownership stays with the object.
 */
object_image_t *
get_object_image (object_t *object)
{
g_assert (object != NULL);
return object->image;
}
/* Changes the graphic associated with an object.
 *
 * Replaces the image's source filename (copied) and its sub-rectangle
 * within that source sheet.
 *
 * @param object   The object to modify (must have an image record).
 * @param filename Path of the image file; duplicated internally.
 * @param image_x  X offset of the sprite within the source image.
 * @param image_y  Y offset of the sprite within the source image.
 * @param width    Sprite width in pixels.
 * @param height   Sprite height in pixels.
 */
void
set_object_image (object_t *object,
                  const char filename[],
                  int16_t image_x,
                  int16_t image_y, uint16_t width, uint16_t height)
{
  g_assert (object && object->image && filename);

  /* g_free (NULL) is a no-op, so the former NULL guard was redundant. */
  g_free (object->image->filename);

  object->image->filename = g_strdup (filename);
  g_assert (object->image->filename != NULL);

  object->image->image_x = image_x;
  object->image->image_y = image_y;
  object->image->width = width;
  object->image->height = height;
}
/* Retrieves the object's co-ordinates on-map.
 *
 * @param object    The object to query (must have an image record).
 * @param x_pointer Receives the on-map X co-ordinate.
 * @param y_pointer Receives the on-map Y co-ordinate, adjusted to the
 *                  bottom-left corner when reference is BOTTOM_LEFT.
 * @param reference Which corner the returned Y refers to.
 */
void
get_object_coordinates (object_t *object,
                        int32_t *x_pointer,
                        int32_t *y_pointer,
                        reference_point_t reference)
{
  int32_t y;

  g_assert (object && object->image);

  *x_pointer = object->image->map_x;

  /* Stored co-ordinates are top-left; translate on the way out if asked. */
  y = object->image->map_y;
  if (reference == BOTTOM_LEFT)
    y += object->image->height - 1;

  *y_pointer = y;
}
/* Sets the object's co-ordinates on map.
 *
 * Stored co-ordinates are always top-left; a BOTTOM_LEFT reference is
 * translated before storing.
 *
 * Fix: the no-change early return previously compared the *raw* incoming y
 * against the stored (already top-left-normalised) y, so a BOTTOM_LEFT call
 * could be wrongly skipped (or needlessly applied) when the two coincided.
 * The reference point is now normalised before the comparison.
 *
 * @param object    The object to move (must have an image record).
 * @param x         New on-map X co-ordinate.
 * @param y         New on-map Y co-ordinate, in the given reference frame.
 * @param reference Which corner (x, y) refers to.
 */
void
set_object_coordinates (object_t *object,
                        int32_t x, int32_t y,
                        reference_point_t reference)
{
  int32_t new_y;

  g_assert (object && object->image);

  new_y = y;
  if (reference == BOTTOM_LEFT)
    {
      /* Check to see if the offset will send the object off the map. */
      g_assert (y >= object->image->height - 1);
      new_y = y - (object->image->height - 1);
    }

  /* No point setting coordinates if they're the same. */
  if (object->image->map_x == x && object->image->map_y == new_y)
    return;

  object->image->map_x = x;
  object->image->map_y = new_y;
}
/* Marks an object as being dirty on the given map view.
 *
 * Queues the object's image for redraw via add_object_image and flags it so
 * repeated dirtying within one frame is a no-op.  Objects with no image
 * filename are ignored; objects with a zero tag are never queued.
 */
void
set_object_dirty (object_t *object, mapview_t *mapview)
{
g_assert (object && mapview);
/* If we're already dirty, no need to run this again. */
if (object->is_dirty)
return;
/* If the object has no image (the filename is NULL) then ignore the
dirty request. */
if (object->image->filename == NULL)
return;
/* Ensure the object's co-ordinates don't go over the map
width/height! */
g_assert ((object->image->map_x + object->image->width
<= mapview->map->width * TILE_W)
&& (object->image->map_y + object->image->height
<= mapview->map->height * TILE_H));
/* And now, the business end. */
if (object->tag != 0)
{
add_object_image (mapview, object);
object->is_dirty = true;
}
}
/* Frees an object and all associated data.
 *
 * Safe to call with NULL.  Releases the object's name, script filename and
 * image record before the object itself.
 */
void
free_object (object_t *object)
{
  if (object == NULL)
    return;

  /* g_free tolerates NULL, so the former per-field guards were redundant. */
  g_free (object->name);
  g_free (object->script_filename);

  if (object->image)
    free_object_image (object->image);

  free (object);
}
/* Check to see whether the given object falls within the given dirty
 * rectangle and, if so, mark the object as dirty (if it has not been
 * marked before).
 *
 * Hash-table iteration callback: the key is unused.
 */
void
dirty_object_test (gpointer key_ptr,
                   gpointer object_ptr, gpointer rect_ptr)
{
  object_t *obj = (object_t *) object_ptr;
  dirty_rectangle_t *drect = (dirty_rectangle_t *) rect_ptr;

  (void) key_ptr;               /* Avoid unused warnings */

  g_assert (obj && drect);

  /* Objects already marked dirty need no further testing. */
  if (!obj->is_dirty)
    dirty_object_test_post_check (obj, drect);
}
/* Check to see whether the given object falls within the given dirty
 * rectangle and, if so, mark the object as dirty.
 *
 * NOTE(review): the overlap test deliberately mixes <= and > on opposite
 * edges; preserve this asymmetry if refactoring, as the rest of the dirty
 * system may rely on its exact edge behavior.
 */
static void
dirty_object_test_post_check (object_t *object,
dirty_rectangle_t *rect)
{
mapview_t *mapview = rect->parent;
int32_t rect_left = rect->start_x;
int32_t rect_top = rect->start_y;
int32_t rect_right = rect->start_x + rect->width - 1;
int32_t rect_bottom = rect->start_y + rect->height - 1;
int32_t object_left = object->image->map_x;
int32_t object_top = object->image->map_y;
int32_t object_right = object->image->map_x + object->image->width - 1;
int32_t object_bottom =
object->image->map_y + object->image->height - 1;
/* Use separating axis theorem, sort of, to decide whether the
 * object rect and the dirty rect intersect.
 */
if ((object_left <= rect_right && object_right > rect_left)
&& (object_top <= rect_bottom && object_bottom > rect_top))
{
set_object_dirty (object, mapview);
}
}
|
309495.c | /** @file
Sample platform variable cleanup library implementation.
Copyright (c) 2015 - 2017, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "PlatVarCleanup.h"
VAR_ERROR_FLAG mLastVarErrorFlag = VAR_ERROR_FLAG_NO_ERROR;
EDKII_VAR_CHECK_PROTOCOL *mVarCheck = NULL;
///
/// The flag to indicate whether the platform has left the DXE phase of execution.
///
BOOLEAN mEndOfDxe = FALSE;
EFI_EVENT mPlatVarCleanupLibEndOfDxeEvent = NULL;
LIST_ENTRY mUserVariableList = INITIALIZE_LIST_HEAD_VARIABLE (mUserVariableList);
UINT16 mUserVariableCount = 0;
UINT16 mMarkedUserVariableCount = 0;
EFI_GUID mVariableCleanupHiiGuid = VARIABLE_CLEANUP_HII_GUID;
CHAR16 mVarStoreName[] = L"VariableCleanup";
HII_VENDOR_DEVICE_PATH mVarCleanupHiiVendorDevicePath = {
{
{
HARDWARE_DEVICE_PATH,
HW_VENDOR_DP,
{
(UINT8) (sizeof (VENDOR_DEVICE_PATH)),
(UINT8) ((sizeof (VENDOR_DEVICE_PATH)) >> 8)
}
},
VARIABLE_CLEANUP_HII_GUID
},
{
END_DEVICE_PATH_TYPE,
END_ENTIRE_DEVICE_PATH_SUBTYPE,
{
(UINT8) (sizeof (EFI_DEVICE_PATH_PROTOCOL)),
(UINT8) ((sizeof (EFI_DEVICE_PATH_PROTOCOL)) >> 8)
}
}
};
/**
  Internal get variable error flag.

  Reads the VAR_ERROR_FLAG_NAME variable; when it is absent or unreadable,
  VAR_ERROR_FLAG_NO_ERROR is reported instead.

  @return Variable error flag.

**/
VAR_ERROR_FLAG
InternalGetVarErrorFlag (
  VOID
  )
{
  VAR_ERROR_FLAG  ErrorFlag;
  UINTN           DataSize;
  EFI_STATUS      Status;

  DataSize = sizeof (ErrorFlag);
  Status = gRT->GetVariable (
                  VAR_ERROR_FLAG_NAME,
                  &gEdkiiVarErrorFlagGuid,
                  NULL,
                  &DataSize,
                  &ErrorFlag
                  );
  if (EFI_ERROR (Status)) {
    DEBUG ((EFI_D_INFO, "%s - not found\n", VAR_ERROR_FLAG_NAME));
    ErrorFlag = VAR_ERROR_FLAG_NO_ERROR;
  }

  return ErrorFlag;
}
/**
  Is user variable?

  A variable is considered a user variable when the VarCheck protocol has no
  property record registered for its name/GUID pair; platform/system
  variables are expected to have such records.

  @param[in] Name Pointer to variable name.
  @param[in] Guid Pointer to vendor guid.
  @retval TRUE User variable.
  @retval FALSE System variable.
**/
BOOLEAN
IsUserVariable (
IN CHAR16 *Name,
IN EFI_GUID *Guid
)
{
EFI_STATUS Status;
VAR_CHECK_VARIABLE_PROPERTY Property;
//
// Lazily locate the VarCheck protocol on first use; the result is cached
// in mVarCheck for subsequent calls.
//
if (mVarCheck == NULL) {
gBS->LocateProtocol (
&gEdkiiVarCheckProtocolGuid,
NULL,
(VOID **) &mVarCheck
);
}
ASSERT (mVarCheck != NULL);
ZeroMem (&Property, sizeof (Property));
Status = mVarCheck->VariablePropertyGet (
Name,
Guid,
&Property
);
if (EFI_ERROR (Status)) {
//
// No property, it is user variable.
//
DEBUG ((EFI_D_INFO, "PlatformVarCleanup - User variable: %g:%s\n", Guid, Name));
return TRUE;
}
// DEBUG ((EFI_D_INFO, "PlatformVarCleanup - Variable Property: %g:%s\n", Guid, Name));
// DEBUG ((EFI_D_INFO, " Revision - 0x%04x\n", Property.Revision));
// DEBUG ((EFI_D_INFO, " Property - 0x%04x\n", Property.Property));
// DEBUG ((EFI_D_INFO, " Attribute - 0x%08x\n", Property.Attributes));
// DEBUG ((EFI_D_INFO, " MinSize - 0x%x\n", Property.MinSize));
// DEBUG ((EFI_D_INFO, " MaxSize - 0x%x\n", Property.MaxSize));
return FALSE;
}
/**
  Find user variable node by variable GUID.

  Searches mUserVariableList for a node with a matching vendor GUID; when no
  match exists, a new node (with a " %g"-formatted prompt string and an empty
  name list) is allocated and appended to the list.

  @param[in] Guid Pointer to vendor guid.
  @return Pointer to user variable node.
**/
USER_VARIABLE_NODE *
FindUserVariableNodeByGuid (
IN EFI_GUID *Guid
)
{
USER_VARIABLE_NODE *UserVariableNode;
LIST_ENTRY *Link;
for (Link = mUserVariableList.ForwardLink
;Link != &mUserVariableList
;Link = Link->ForwardLink) {
UserVariableNode = USER_VARIABLE_FROM_LINK (Link);
if (CompareGuid (Guid, &UserVariableNode->Guid)) {
//
// Found it.
//
return UserVariableNode;
}
}
//
// Create new one if not found.
//
UserVariableNode = AllocateZeroPool (sizeof (*UserVariableNode));
ASSERT (UserVariableNode != NULL);
UserVariableNode->Signature = USER_VARIABLE_NODE_SIGNATURE;
CopyGuid (&UserVariableNode->Guid, Guid);
//
// (36 chars of "########-####-####-####-############" + 1 space + 1 terminator) * sizeof (CHAR16).
//
UserVariableNode->PromptString = AllocatePool ((36 + 2) * sizeof (CHAR16));
ASSERT (UserVariableNode->PromptString != NULL);
UnicodeSPrint (UserVariableNode->PromptString, (36 + 2) * sizeof (CHAR16), L" %g", &UserVariableNode->Guid);
InitializeListHead (&UserVariableNode->NameLink);
InsertTailList (&mUserVariableList, &UserVariableNode->Link);
return UserVariableNode;
}
/**
  Create user variable nodes.

  Walks the entire variable store via GetNextVariableName()/GetVariable().
  Every non-volatile variable classified by IsUserVariable() is recorded in a
  USER_VARIABLE_NAME_NODE attached to the per-GUID node returned by
  FindUserVariableNodeByGuid().  On completion mUserVariableCount holds the
  total number of user variables found.

**/
VOID
CreateUserVariableNode (
  VOID
  )
{
  EFI_STATUS               Status;
  EFI_STATUS               GetVariableStatus;
  CHAR16                   *VarName;
  UINTN                    MaxVarNameSize;
  UINTN                    VarNameSize;
  UINTN                    MaxDataSize;
  UINTN                    DataSize;
  VOID                     *Data;
  UINT32                   Attributes;
  EFI_GUID                 Guid;
  USER_VARIABLE_NODE       *UserVariableNode;
  USER_VARIABLE_NAME_NODE  *UserVariableNameNode;
  UINT16                   Index;
  UINTN                    StringSize;

  //
  // Initialize 128 * sizeof (CHAR16) variable name size.
  //
  MaxVarNameSize = 128 * sizeof (CHAR16);
  VarName = AllocateZeroPool (MaxVarNameSize);
  ASSERT (VarName != NULL);

  //
  // Initialize 0x1000 variable data size.
  //
  MaxDataSize = 0x1000;
  Data = AllocateZeroPool (MaxDataSize);
  ASSERT (Data != NULL);

  Index = 0;
  do {
    VarNameSize = MaxVarNameSize;
    Status = gRT->GetNextVariableName (&VarNameSize, VarName, &Guid);
    if (Status == EFI_BUFFER_TOO_SMALL) {
      //
      // Name buffer too small: grow it and retry once.
      //
      VarName = ReallocatePool (MaxVarNameSize, VarNameSize, VarName);
      ASSERT (VarName != NULL);
      MaxVarNameSize = VarNameSize;
      Status = gRT->GetNextVariableName (&VarNameSize, VarName, &Guid);
    }

    if (!EFI_ERROR (Status)) {
      if (IsUserVariable (VarName, &Guid)) {
        DataSize = MaxDataSize;
        GetVariableStatus = gRT->GetVariable (VarName, &Guid, &Attributes, &DataSize, Data);
        if (GetVariableStatus == EFI_BUFFER_TOO_SMALL) {
          //
          // Data buffer too small: grow it and retry once.
          //
          Data = ReallocatePool (MaxDataSize, DataSize, Data);
          ASSERT (Data != NULL);
          MaxDataSize = DataSize;
          GetVariableStatus = gRT->GetVariable (VarName, &Guid, &Attributes, &DataSize, Data);
        }
        ASSERT_EFI_ERROR (GetVariableStatus);

        //
        // Only non-volatile variables are tracked for cleanup.
        //
        if ((Attributes & EFI_VARIABLE_NON_VOLATILE) != 0) {
          UserVariableNode = FindUserVariableNodeByGuid (&Guid);
          ASSERT (UserVariableNode != NULL);

          //
          // Different variables that have same variable GUID share same user variable node.
          //
          UserVariableNameNode = AllocateZeroPool (sizeof (*UserVariableNameNode));
          ASSERT (UserVariableNameNode != NULL);
          UserVariableNameNode->Signature = USER_VARIABLE_NAME_NODE_SIGNATURE;
          UserVariableNameNode->Name = AllocateCopyPool (VarNameSize, VarName);
          //
          // Check the copy like every other allocation in this function.
          //
          ASSERT (UserVariableNameNode->Name != NULL);
          UserVariableNameNode->Attributes = Attributes;
          UserVariableNameNode->DataSize = DataSize;
          UserVariableNameNode->Index = Index;
          UserVariableNameNode->QuestionId = (EFI_QUESTION_ID) (USER_VARIABLE_QUESTION_ID + Index);

          //
          // 2 space * sizeof (CHAR16) + StrSize.
          //
          StringSize = 2 * sizeof (CHAR16) + StrSize (UserVariableNameNode->Name);
          UserVariableNameNode->PromptString = AllocatePool (StringSize);
          ASSERT (UserVariableNameNode->PromptString != NULL);
          UnicodeSPrint (UserVariableNameNode->PromptString, StringSize, L" %s", UserVariableNameNode->Name);

          //
          // (33 chars of "Attributes = 0x and DataSize = 0x" + 1 terminator + (sizeof (UINT32) + sizeof (UINTN)) * 2) * sizeof (CHAR16).
          //
          StringSize = (33 + 1 + (sizeof (UINT32) + sizeof (UINTN)) * 2) * sizeof (CHAR16);
          UserVariableNameNode->HelpString = AllocatePool (StringSize);
          ASSERT (UserVariableNameNode->HelpString != NULL);
          //
          // Typo fix: "Attribtues" -> "Attributes" in the user-visible help
          // string.  Both spellings are 10 characters, so the StringSize
          // calculation above is unaffected.
          //
          UnicodeSPrint (UserVariableNameNode->HelpString, StringSize, L"Attributes = 0x%08x and DataSize = 0x%x", UserVariableNameNode->Attributes, UserVariableNameNode->DataSize);
          UserVariableNameNode->Deleted = FALSE;
          InsertTailList (&UserVariableNode->NameLink, &UserVariableNameNode->Link);
          Index++;
        }
      }
    }
  } while (Status != EFI_NOT_FOUND);

  mUserVariableCount = Index;
  ASSERT (mUserVariableCount <= MAX_USER_VARIABLE_COUNT);
  DEBUG ((EFI_D_INFO, "PlatformVarCleanup - User variable count: 0x%04x\n", mUserVariableCount));

  FreePool (VarName);
  FreePool (Data);
}
/**
Destroy user variable nodes.
**/
VOID
DestroyUserVariableNode (
VOID
)
{
USER_VARIABLE_NODE *UserVariableNode;
LIST_ENTRY *Link;
USER_VARIABLE_NAME_NODE *UserVariableNameNode;
LIST_ENTRY *NameLink;
while (mUserVariableList.ForwardLink != &mUserVariableList) {
Link = mUserVariableList.ForwardLink;
UserVariableNode = USER_VARIABLE_FROM_LINK (Link);
RemoveEntryList (&UserVariableNode->Link);
while (UserVariableNode->NameLink.ForwardLink != &UserVariableNode->NameLink) {
NameLink = UserVariableNode->NameLink.ForwardLink;
UserVariableNameNode = USER_VARIABLE_NAME_FROM_LINK (NameLink);
RemoveEntryList (&UserVariableNameNode->Link);
FreePool (UserVariableNameNode->Name);
FreePool (UserVariableNameNode->PromptString);
FreePool (UserVariableNameNode->HelpString);
FreePool (UserVariableNameNode);
}
FreePool (UserVariableNode->PromptString);
FreePool (UserVariableNode);
}
}
/**
  Create a time based data payload by concatenating the EFI_VARIABLE_AUTHENTICATION_2
  descriptor with the input data. NO authentication is required in this function.

  @param[in, out] DataSize       On input, the size of Data buffer in bytes.
                                 On output, the size of data returned in Data
                                 buffer in bytes.
  @param[in, out] Data           On input, Pointer to data buffer to be wrapped or
                                 pointer to NULL to wrap an empty payload.
                                 On output, Pointer to the new payload data buffer allocated from pool,
                                 it's caller's responsibility to free the memory after using it.

  @retval EFI_SUCCESS            Create time based payload successfully.
  @retval EFI_OUT_OF_RESOURCES   There are not enough memory resources to create time based payload.
  @retval EFI_INVALID_PARAMETER  The parameter is invalid.
  @retval Others                 Unexpected error happens.

**/
EFI_STATUS
CreateTimeBasedPayload (
  IN OUT UINTN  *DataSize,
  IN OUT UINT8  **Data
  )
{
  EFI_STATUS                     Status;
  UINT8                          *NewData;
  UINT8                          *Payload;
  UINTN                          PayloadSize;
  EFI_VARIABLE_AUTHENTICATION_2  *DescriptorData;
  UINTN                          DescriptorSize;
  EFI_TIME                       Time;

  if (Data == NULL || DataSize == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // At user physical presence, the variable does not need to be signed but the
  // parameters to the SetVariable() call still need to be prepared as authenticated
  // variable. So we create EFI_VARIABLE_AUTHENTICATED_2 descriptor without certificate
  // data in it.
  //
  Payload = *Data;
  PayloadSize = *DataSize;
  //
  // Descriptor size = AUTHENTICATION_2 header up to AuthInfo plus the
  // WIN_CERTIFICATE_UEFI_GUID header with zero-length CertData.
  //
  DescriptorSize = OFFSET_OF (EFI_VARIABLE_AUTHENTICATION_2, AuthInfo) + OFFSET_OF (WIN_CERTIFICATE_UEFI_GUID, CertData);
  NewData = (UINT8 *) AllocateZeroPool (DescriptorSize + PayloadSize);
  if (NewData == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }

  //
  // Place the original payload right after the descriptor.
  //
  if ((Payload != NULL) && (PayloadSize != 0)) {
    CopyMem (NewData + DescriptorSize, Payload, PayloadSize);
  }

  DescriptorData = (EFI_VARIABLE_AUTHENTICATION_2 *) (NewData);

  ZeroMem (&Time, sizeof (EFI_TIME));
  Status = gRT->GetTime (&Time, NULL);
  if (EFI_ERROR (Status)) {
    FreePool (NewData);
    return Status;
  }
  //
  // Zero the pad/nanosecond/timezone/daylight fields of the timestamp,
  // as required for the EFI_VARIABLE_AUTHENTICATION_2 descriptor.
  //
  Time.Pad1 = 0;
  Time.Nanosecond = 0;
  Time.TimeZone = 0;
  Time.Daylight = 0;
  Time.Pad2 = 0;
  CopyMem (&DescriptorData->TimeStamp, &Time, sizeof (EFI_TIME));

  DescriptorData->AuthInfo.Hdr.dwLength = OFFSET_OF (WIN_CERTIFICATE_UEFI_GUID, CertData);
  DescriptorData->AuthInfo.Hdr.wRevision = 0x0200;
  DescriptorData->AuthInfo.Hdr.wCertificateType = WIN_CERT_TYPE_EFI_GUID;
  CopyGuid (&DescriptorData->AuthInfo.CertType, &gEfiCertPkcs7Guid);

  //
  // The old buffer is consumed here; the caller receives (and must free)
  // the newly wrapped buffer.
  //
  if (Payload != NULL) {
    FreePool (Payload);
  }

  *DataSize = DescriptorSize + PayloadSize;
  *Data = NewData;
  return EFI_SUCCESS;
}
/**
  Create a counter based data payload by concatenating the EFI_VARIABLE_AUTHENTICATION
  descriptor with the input data. NO authentication is required in this function.

  @param[in, out] DataSize       On input, the size of Data buffer in bytes.
                                 On output, the size of data returned in Data
                                 buffer in bytes.
  @param[in, out] Data           On input, Pointer to data buffer to be wrapped or
                                 pointer to NULL to wrap an empty payload.
                                 On output, Pointer to the new payload data buffer allocated from pool,
                                 it's caller's responsibility to free the memory after using it.

  @retval EFI_SUCCESS            Create counter based payload successfully.
  @retval EFI_OUT_OF_RESOURCES   There are not enough memory resources to create time based payload.
  @retval EFI_INVALID_PARAMETER  The parameter is invalid.
  @retval Others                 Unexpected error happens.

**/
EFI_STATUS
CreateCounterBasedPayload (
  IN OUT UINTN  *DataSize,
  IN OUT UINT8  **Data
  )
{
  EFI_STATUS                   Status;
  UINT8                        *NewData;
  UINT8                        *Payload;
  UINTN                        PayloadSize;
  EFI_VARIABLE_AUTHENTICATION  *DescriptorData;
  UINTN                        DescriptorSize;
  UINT64                       MonotonicCount;

  if (Data == NULL || DataSize == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // At user physical presence, the variable does not need to be signed but the
  // parameters to the SetVariable() call still need to be prepared as authenticated
  // variable. So we create EFI_VARIABLE_AUTHENTICATED descriptor without certificate
  // data in it.
  //
  Payload = *Data;
  PayloadSize = *DataSize;
  //
  // Unlike the time based descriptor, the counter based one carries a
  // (zeroed) EFI_CERT_BLOCK_RSA_2048_SHA256 block after the cert header.
  //
  DescriptorSize = (OFFSET_OF (EFI_VARIABLE_AUTHENTICATION, AuthInfo)) + \
                   (OFFSET_OF (WIN_CERTIFICATE_UEFI_GUID, CertData)) + \
                   sizeof (EFI_CERT_BLOCK_RSA_2048_SHA256);
  NewData = (UINT8 *) AllocateZeroPool (DescriptorSize + PayloadSize);
  if (NewData == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }

  //
  // Place the original payload right after the descriptor.
  //
  if ((Payload != NULL) && (PayloadSize != 0)) {
    CopyMem (NewData + DescriptorSize, Payload, PayloadSize);
  }

  DescriptorData = (EFI_VARIABLE_AUTHENTICATION *) (NewData);

  Status = gBS->GetNextMonotonicCount (&MonotonicCount);
  if (EFI_ERROR (Status)) {
    FreePool (NewData);
    return Status;
  }
  DescriptorData->MonotonicCount = MonotonicCount;

  DescriptorData->AuthInfo.Hdr.dwLength = OFFSET_OF (WIN_CERTIFICATE_UEFI_GUID, CertData) + sizeof (EFI_CERT_BLOCK_RSA_2048_SHA256);
  DescriptorData->AuthInfo.Hdr.wRevision = 0x0200;
  DescriptorData->AuthInfo.Hdr.wCertificateType = WIN_CERT_TYPE_EFI_GUID;
  CopyGuid (&DescriptorData->AuthInfo.CertType, &gEfiCertTypeRsa2048Sha256Guid);

  //
  // The old buffer is consumed here; the caller receives (and must free)
  // the newly wrapped buffer.
  //
  if (Payload != NULL) {
    FreePool (Payload);
  }

  *DataSize = DescriptorSize + PayloadSize;
  *Data = NewData;
  return EFI_SUCCESS;
}
/**
  Delete user variable.

  Walks every recorded user variable name node and deletes the selected ones:
  all of them when DeleteAll is TRUE, otherwise those ticked in
  VariableCleanupData. Authenticated variables are deleted by writing an
  empty, properly wrapped authenticated payload; plain variables by a
  zero-attribute, zero-size SetVariable(). Successfully deleted nodes are
  marked Deleted so they are skipped on later passes.

  @param[in] DeleteAll            Delete all user variables.
  @param[in] VariableCleanupData  Pointer to variable cleanup data.

**/
VOID
DeleteUserVariable (
  IN BOOLEAN                DeleteAll,
  IN VARIABLE_CLEANUP_DATA  *VariableCleanupData OPTIONAL
  )
{
  EFI_STATUS               Status;
  USER_VARIABLE_NODE       *UserVariableNode;
  LIST_ENTRY               *Link;
  USER_VARIABLE_NAME_NODE  *UserVariableNameNode;
  LIST_ENTRY               *NameLink;
  UINTN                    DataSize;
  UINT8                    *Data;

  for (Link = mUserVariableList.ForwardLink
      ;Link != &mUserVariableList
      ;Link = Link->ForwardLink) {
    UserVariableNode = USER_VARIABLE_FROM_LINK (Link);

    for (NameLink = UserVariableNode->NameLink.ForwardLink
        ;NameLink != &UserVariableNode->NameLink
        ;NameLink = NameLink->ForwardLink) {
      UserVariableNameNode = USER_VARIABLE_NAME_FROM_LINK (NameLink);

      //
      // Skip already-deleted entries; otherwise delete when DeleteAll is set
      // or this entry's checkbox is ticked in the cleanup data.
      //
      if (!UserVariableNameNode->Deleted && (DeleteAll || ((VariableCleanupData != NULL) && (VariableCleanupData->UserVariable[UserVariableNameNode->Index] == TRUE)))) {
        DEBUG ((EFI_D_INFO, "PlatformVarCleanup - Delete variable: %g:%s\n", &UserVariableNode->Guid, UserVariableNameNode->Name));
        if ((UserVariableNameNode->Attributes & EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS) != 0) {
          //
          // Time based authenticated variable: wrap an empty payload in an
          // EFI_VARIABLE_AUTHENTICATION_2 descriptor before SetVariable().
          //
          DataSize = 0;
          Data = NULL;
          Status = CreateTimeBasedPayload (&DataSize, &Data);
          if (!EFI_ERROR (Status)) {
            Status = gRT->SetVariable (UserVariableNameNode->Name, &UserVariableNode->Guid, UserVariableNameNode->Attributes, DataSize, Data);
            FreePool (Data);
          }
        } else if ((UserVariableNameNode->Attributes & EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS) != 0) {
          //
          // Counter based authenticated variable: wrap an empty payload in an
          // EFI_VARIABLE_AUTHENTICATION descriptor before SetVariable().
          //
          DataSize = 0;
          Data = NULL;
          Status = CreateCounterBasedPayload (&DataSize, &Data);
          if (!EFI_ERROR (Status)) {
            Status = gRT->SetVariable (UserVariableNameNode->Name, &UserVariableNode->Guid, UserVariableNameNode->Attributes, DataSize, Data);
            FreePool (Data);
          }
        } else {
          //
          // Ordinary variable: zero attributes and zero size delete it.
          //
          Status = gRT->SetVariable (UserVariableNameNode->Name, &UserVariableNode->Guid, 0, 0, NULL);
        }
        if (!EFI_ERROR (Status)) {
          UserVariableNameNode->Deleted = TRUE;
        } else {
          DEBUG ((EFI_D_INFO, "PlatformVarCleanup - Delete variable fail: %g:%s\n", &UserVariableNode->Guid, UserVariableNameNode->Name));
        }
      }
    }
  }
}
/**
  This function allows a caller to extract the current configuration for one
  or more named elements from the target driver.

  @param[in] This       Points to the EFI_HII_CONFIG_ACCESS_PROTOCOL.
  @param[in] Request    A null-terminated Unicode string in <ConfigRequest> format.
  @param[out] Progress  On return, points to a character in the Request string.
                        Points to the string's null terminator if request was successful.
                        Points to the most recent '&' before the first failing name/value
                        pair (or the beginning of the string if the failure is in the
                        first name/value pair) if the request was not successful.
  @param[out] Results   A null-terminated Unicode string in <ConfigAltResp> format which
                        has all values filled in for the names in the Request string.
                        String to be allocated by the called function.

  @retval EFI_SUCCESS            The Results is filled with the requested values.
  @retval EFI_OUT_OF_RESOURCES   Not enough memory to store the results.
  @retval EFI_INVALID_PARAMETER  Request is illegal syntax, or unknown name.
  @retval EFI_NOT_FOUND          Routing data doesn't match any storage in this driver.

**/
EFI_STATUS
EFIAPI
VariableCleanupHiiExtractConfig (
  IN CONST EFI_HII_CONFIG_ACCESS_PROTOCOL  *This,
  IN CONST EFI_STRING                      Request,
  OUT EFI_STRING                           *Progress,
  OUT EFI_STRING                           *Results
  )
{
  EFI_STATUS                         Status;
  VARIABLE_CLEANUP_HII_PRIVATE_DATA  *Private;
  UINTN                              BufferSize;
  EFI_STRING                         ConfigRequestHdr;
  EFI_STRING                         ConfigRequest;
  BOOLEAN                            AllocatedRequest;
  UINTN                              Size;

  if (Progress == NULL || Results == NULL) {
    return EFI_INVALID_PARAMETER;
  }
  *Progress = Request;
  //
  // Reject requests whose <ConfigHdr> does not route to our storage.
  //
  if ((Request != NULL) && !HiiIsConfigHdrMatch (Request, &mVariableCleanupHiiGuid, mVarStoreName)) {
    return EFI_NOT_FOUND;
  }
  ConfigRequestHdr = NULL;
  ConfigRequest = NULL;
  AllocatedRequest = FALSE;
  Size = 0;
  Private = VARIABLE_CLEANUP_HII_PRIVATE_FROM_THIS (This);

  //
  // Convert buffer data to <ConfigResp> by helper function BlockToConfig().
  //
  BufferSize = sizeof (VARIABLE_CLEANUP_DATA);
  ConfigRequest = Request;
  if ((Request == NULL) || (StrStr (Request, L"OFFSET") == NULL)) {
    //
    // Request has no request element, construct full request string.
    // Allocate and fill a buffer large enough to hold the <ConfigHdr> template
    // followed by "&OFFSET=0&WIDTH=WWWWWWWWWWWWWWWW" followed by a Null-terminator.
    //
    ConfigRequestHdr = HiiConstructConfigHdr (&mVariableCleanupHiiGuid, mVarStoreName, Private->HiiHandle);
    Size = (StrLen (ConfigRequestHdr) + 32 + 1) * sizeof (CHAR16);
    ConfigRequest = AllocateZeroPool (Size);
    ASSERT (ConfigRequest != NULL);
    AllocatedRequest = TRUE;
    UnicodeSPrint (ConfigRequest, Size, L"%s&OFFSET=0&WIDTH=%016LX", ConfigRequestHdr, (UINT64)BufferSize);
    FreePool (ConfigRequestHdr);
  }
  Status = Private->ConfigRouting->BlockToConfig (
                                     Private->ConfigRouting,
                                     ConfigRequest,
                                     (UINT8 *) &Private->VariableCleanupData,
                                     BufferSize,
                                     Results,
                                     Progress
                                     );
  ASSERT_EFI_ERROR (Status);

  //
  // Free the allocated config request string.
  //
  if (AllocatedRequest) {
    FreePool (ConfigRequest);
    ConfigRequest = NULL;
  }
  //
  // Set Progress string to the original request string or the string's null terminator.
  //
  if (Request == NULL) {
    *Progress = NULL;
  } else if (StrStr (Request, L"OFFSET") == NULL) {
    *Progress = Request + StrLen (Request);
  }
  return Status;
}
/**
  Update user variable form.

  Rebuilds the dynamic portion of the variable-cleanup form: one subtitle per
  variable GUID, one checkbox per non-deleted user variable, plus the
  "Apply changes" / "Discard changes" action opcodes.

  @param[in] Private  Points to the VARIABLE_CLEANUP_HII_PRIVATE_DATA.

**/
VOID
UpdateUserVariableForm (
  IN VARIABLE_CLEANUP_HII_PRIVATE_DATA  *Private
  )
{
  EFI_STRING_ID            PromptStringToken;
  EFI_STRING_ID            HelpStringToken;
  VOID                     *StartOpCodeHandle;
  VOID                     *EndOpCodeHandle;
  EFI_IFR_GUID_LABEL       *StartLabel;
  EFI_IFR_GUID_LABEL       *EndLabel;
  USER_VARIABLE_NODE       *UserVariableNode;
  LIST_ENTRY               *Link;
  USER_VARIABLE_NAME_NODE  *UserVariableNameNode;
  LIST_ENTRY               *NameLink;
  BOOLEAN                  Created;

  //
  // Init OpCode Handle.
  //
  StartOpCodeHandle = HiiAllocateOpCodeHandle ();
  ASSERT (StartOpCodeHandle != NULL);
  EndOpCodeHandle = HiiAllocateOpCodeHandle ();
  ASSERT (EndOpCodeHandle != NULL);

  //
  // Create Hii Extend Label OpCode as the start opcode.
  //
  StartLabel = (EFI_IFR_GUID_LABEL *) HiiCreateGuidOpCode (StartOpCodeHandle, &gEfiIfrTianoGuid, NULL, sizeof (EFI_IFR_GUID_LABEL));
  StartLabel->ExtendOpCode = EFI_IFR_EXTEND_OP_LABEL;
  StartLabel->Number = LABEL_START;

  //
  // Create Hii Extend Label OpCode as the end opcode.
  //
  EndLabel = (EFI_IFR_GUID_LABEL *) HiiCreateGuidOpCode (EndOpCodeHandle, &gEfiIfrTianoGuid, NULL, sizeof (EFI_IFR_GUID_LABEL));
  EndLabel->ExtendOpCode = EFI_IFR_EXTEND_OP_LABEL;
  EndLabel->Number = LABEL_END;

  //
  // NOTE(review): this first HiiUpdateForm call is made while the handles
  // contain only the labels — presumably to clear any previously published
  // opcodes between LABEL_START and LABEL_END before rebuilding; confirm
  // against HiiUpdateForm semantics.
  //
  HiiUpdateForm (
    Private->HiiHandle,
    &mVariableCleanupHiiGuid,
    FORM_ID_VARIABLE_CLEANUP,
    StartOpCodeHandle, // LABEL_START
    EndOpCodeHandle    // LABEL_END
    );

  for (Link = mUserVariableList.ForwardLink
      ;Link != &mUserVariableList
      ;Link = Link->ForwardLink) {
    UserVariableNode = USER_VARIABLE_FROM_LINK (Link);

    //
    // Create checkbox opcode for variables in the same variable GUID space.
    //
    Created = FALSE;
    for (NameLink = UserVariableNode->NameLink.ForwardLink
        ;NameLink != &UserVariableNode->NameLink
        ;NameLink = NameLink->ForwardLink) {
      UserVariableNameNode = USER_VARIABLE_NAME_FROM_LINK (NameLink);

      if (!UserVariableNameNode->Deleted) {
        if (!Created) {
          //
          // Create subtitle opcode for variable GUID.
          //
          PromptStringToken = HiiSetString (Private->HiiHandle, 0, UserVariableNode->PromptString, NULL);
          HiiCreateSubTitleOpCode (StartOpCodeHandle, PromptStringToken, 0, 0, 0);
          Created = TRUE;
        }

        //
        // Only create opcode for the non-deleted variables.
        //
        PromptStringToken = HiiSetString (Private->HiiHandle, 0, UserVariableNameNode->PromptString, NULL);
        HelpStringToken = HiiSetString (Private->HiiHandle, 0, UserVariableNameNode->HelpString, NULL);
        HiiCreateCheckBoxOpCode (
          StartOpCodeHandle,
          UserVariableNameNode->QuestionId,
          VARIABLE_CLEANUP_VARSTORE_ID,
          (UINT16) (USER_VARIABLE_VAR_OFFSET + UserVariableNameNode->Index),
          PromptStringToken,
          HelpStringToken,
          EFI_IFR_FLAG_CALLBACK,
          Private->VariableCleanupData.UserVariable[UserVariableNameNode->Index],
          NULL
          );
      }
    }
  }

  HiiCreateSubTitleOpCode (
    StartOpCodeHandle,
    STRING_TOKEN (STR_NULL_STRING),
    0,
    0,
    0
    );

  //
  // Create the "Apply changes" and "Discard changes" tags.
  //
  HiiCreateActionOpCode (
    StartOpCodeHandle,
    SAVE_AND_EXIT_QUESTION_ID,
    STRING_TOKEN (STR_SAVE_AND_EXIT),
    STRING_TOKEN (STR_NULL_STRING),
    EFI_IFR_FLAG_CALLBACK,
    0
    );
  HiiCreateActionOpCode (
    StartOpCodeHandle,
    NO_SAVE_AND_EXIT_QUESTION_ID,
    STRING_TOKEN (STR_NO_SAVE_AND_EXIT),
    STRING_TOKEN (STR_NULL_STRING),
    EFI_IFR_FLAG_CALLBACK,
    0
    );

  //
  // Publish the rebuilt opcodes into the form.
  //
  HiiUpdateForm (
    Private->HiiHandle,
    &mVariableCleanupHiiGuid,
    FORM_ID_VARIABLE_CLEANUP,
    StartOpCodeHandle, // LABEL_START
    EndOpCodeHandle    // LABEL_END
    );

  HiiFreeOpCodeHandle (StartOpCodeHandle);
  HiiFreeOpCodeHandle (EndOpCodeHandle);
}
/**
  This function applies changes in a driver's configuration.
  Input is a Configuration, which has the routing data for this
  driver followed by name / value configuration pairs. The driver
  must apply those pairs to its configurable storage. If the
  driver's configuration is stored in a linear block of data
  and the driver's name / value pairs are in <BlockConfig>
  format, it may use the ConfigToBlock helper function (above) to
  simplify the job. Currently not implemented.

  @param[in] This           Points to the EFI_HII_CONFIG_ACCESS_PROTOCOL.
  @param[in] Configuration  A null-terminated Unicode string in
                            <ConfigString> format.
  @param[out] Progress      A pointer to a string filled in with the
                            offset of the most recent '&' before the
                            first failing name / value pair (or the
                            beginning of the string if the failure
                            is in the first name / value pair) or
                            the terminating NULL if all was
                            successful.

  @retval EFI_SUCCESS             The results have been distributed or are
                                  awaiting distribution.
  @retval EFI_OUT_OF_RESOURCES    Not enough memory to store the
                                  parts of the results that must be
                                  stored awaiting possible future
                                  protocols.
  @retval EFI_INVALID_PARAMETERS  Passing in a NULL for the
                                  Results parameter would result
                                  in this type of error.
  @retval EFI_NOT_FOUND           Target for the specified routing data
                                  was not found.

**/
EFI_STATUS
EFIAPI
VariableCleanupHiiRouteConfig (
  IN CONST EFI_HII_CONFIG_ACCESS_PROTOCOL  *This,
  IN CONST EFI_STRING                      Configuration,
  OUT EFI_STRING                           *Progress
  )
{
  EFI_STATUS                         Status;
  VARIABLE_CLEANUP_HII_PRIVATE_DATA  *Private;
  UINTN                              BufferSize;

  if (Progress == NULL) {
    return EFI_INVALID_PARAMETER;
  }
  *Progress = Configuration;

  if (Configuration == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Check routing data in <ConfigHdr>.
  // Note: there is no name for Name/Value storage, only GUID will be checked.
  //
  if (!HiiIsConfigHdrMatch (Configuration, &mVariableCleanupHiiGuid, mVarStoreName)) {
    return EFI_NOT_FOUND;
  }

  Private = VARIABLE_CLEANUP_HII_PRIVATE_FROM_THIS (This);

  //
  // Get Buffer Storage data.
  //
  BufferSize = sizeof (VARIABLE_CLEANUP_DATA);
  //
  // Convert <ConfigResp> to buffer data by helper function ConfigToBlock().
  //
  Status = Private->ConfigRouting->ConfigToBlock (
                                     Private->ConfigRouting,
                                     Configuration,
                                     (UINT8 *) &Private->VariableCleanupData,
                                     &BufferSize,
                                     Progress
                                     );
  ASSERT_EFI_ERROR (Status);

  //
  // Apply the submitted selections: delete the ticked user variables.
  //
  DeleteUserVariable (FALSE, &Private->VariableCleanupData);

  //
  // For "F10" hotkey to refresh the form.
  //
  // UpdateUserVariableForm (Private);

  return EFI_SUCCESS;
}
/**
  This function is called to provide results data to the driver.
  This data consists of a unique key that is used to identify
  which data is either being passed back or being asked for.

  @param[in] This            Points to the EFI_HII_CONFIG_ACCESS_PROTOCOL.
  @param[in] Action          Specifies the type of action taken by the browser.
  @param[in] QuestionId      A unique value which is sent to the original
                             exporting driver so that it can identify the type
                             of data to expect. The format of the data tends to
                             vary based on the opcode that generated the callback.
  @param[in] Type            The type of value for the question.
  @param[in] Value           A pointer to the data being sent to the original
                             exporting driver.
  @param[out] ActionRequest  On return, points to the action requested by the
                             callback function.

  @retval EFI_SUCCESS            The callback successfully handled the action.
  @retval EFI_OUT_OF_RESOURCES   Not enough storage is available to hold the
                                 variable and its data.
  @retval EFI_DEVICE_ERROR       The variable could not be saved.
  @retval EFI_UNSUPPORTED        The specified Action is not supported by the
                                 callback.

**/
EFI_STATUS
EFIAPI
VariableCleanupHiiCallback (
  IN CONST EFI_HII_CONFIG_ACCESS_PROTOCOL  *This,
  IN EFI_BROWSER_ACTION                    Action,
  IN EFI_QUESTION_ID                       QuestionId,
  IN UINT8                                 Type,
  IN EFI_IFR_TYPE_VALUE                    *Value,
  OUT EFI_BROWSER_ACTION_REQUEST           *ActionRequest
  )
{
  VARIABLE_CLEANUP_HII_PRIVATE_DATA  *Private;
  VARIABLE_CLEANUP_DATA              *VariableCleanupData;

  Private = VARIABLE_CLEANUP_HII_PRIVATE_FROM_THIS (This);

  if ((Action != EFI_BROWSER_ACTION_CHANGING) && (Action != EFI_BROWSER_ACTION_CHANGED)) {
    //
    // All other action return unsupported.
    //
    return EFI_UNSUPPORTED;
  }

  //
  // Retrieve uncommitted data from Form Browser.
  //
  VariableCleanupData = &Private->VariableCleanupData;
  HiiGetBrowserData (&mVariableCleanupHiiGuid, mVarStoreName, sizeof (VARIABLE_CLEANUP_DATA), (UINT8 *) VariableCleanupData);
  if (Action == EFI_BROWSER_ACTION_CHANGING) {
    if (Value == NULL) {
      return EFI_INVALID_PARAMETER;
    }
  } else if (Action == EFI_BROWSER_ACTION_CHANGED) {
    if ((Value == NULL) || (ActionRequest == NULL)) {
      return EFI_INVALID_PARAMETER;
    }
    //
    // Per-variable checkboxes occupy a contiguous QuestionId range starting
    // at USER_VARIABLE_QUESTION_ID; everything else is a named control.
    //
    if ((QuestionId >= USER_VARIABLE_QUESTION_ID) && (QuestionId < USER_VARIABLE_QUESTION_ID + MAX_USER_VARIABLE_COUNT)) {
      if (Value->b){
        //
        // Means one user variable checkbox is marked to delete but not press F10 or "Commit Changes and Exit" menu.
        //
        mMarkedUserVariableCount++;
        ASSERT (mMarkedUserVariableCount <= mUserVariableCount);
        if (mMarkedUserVariableCount == mUserVariableCount) {
          //
          // All user variables have been marked, then also mark the SelectAll checkbox.
          //
          VariableCleanupData->SelectAll = TRUE;
        }
      } else {
        //
        // Means one user variable checkbox is unmarked.
        //
        mMarkedUserVariableCount--;
        //
        // Also unmark the SelectAll checkbox.
        //
        VariableCleanupData->SelectAll = FALSE;
      }
    } else {
      switch (QuestionId) {
        case SELECT_ALL_QUESTION_ID:
          if (Value->b){
            //
            // Means the SelectAll checkbox is marked to delete all user variables but not press F10 or "Commit Changes and Exit" menu.
            //
            SetMem (VariableCleanupData->UserVariable, sizeof (VariableCleanupData->UserVariable), TRUE);
            mMarkedUserVariableCount = mUserVariableCount;
          } else {
            //
            // Means the SelectAll checkbox is unmarked.
            //
            SetMem (VariableCleanupData->UserVariable, sizeof (VariableCleanupData->UserVariable), FALSE);
            mMarkedUserVariableCount = 0;
          }
          break;
        case SAVE_AND_EXIT_QUESTION_ID:
          DeleteUserVariable (FALSE, VariableCleanupData);
          *ActionRequest = EFI_BROWSER_ACTION_REQUEST_FORM_SUBMIT_EXIT;
          break;
        case NO_SAVE_AND_EXIT_QUESTION_ID:
          //
          // Restore local maintain data.
          //
          *ActionRequest = EFI_BROWSER_ACTION_REQUEST_FORM_DISCARD_EXIT;
          break;
        default:
          break;
      }
    }
  }

  //
  // Pass changed uncommitted data back to Form Browser.
  //
  HiiSetBrowserData (&mVariableCleanupHiiGuid, mVarStoreName, sizeof (VARIABLE_CLEANUP_DATA), (UINT8 *) VariableCleanupData, NULL);
  return EFI_SUCCESS;
}
/**
  Platform variable cleanup.

  @param[in] Flag  Variable error flag.
  @param[in] Type  Variable cleanup type.
                   If it is VarCleanupManually, the interface must be called after console connected.

  @retval EFI_SUCCESS            No error or error processed.
  @retval EFI_UNSUPPORTED        The specified Flag or Type is not supported.
                                 For example, system error may be not supported to process and Platform should have mechanism to reset system to manufacture mode.
                                 Another, if system and user variables are wanted to be distinguished to process, the interface must be called after EndOfDxe.
  @retval EFI_OUT_OF_RESOURCES   Not enough resource to process the error.
  @retval EFI_INVALID_PARAMETER  The specified Flag or Type is an invalid value.
  @retval Others                 Other failure occurs.

**/
EFI_STATUS
EFIAPI
PlatformVarCleanup (
  IN VAR_ERROR_FLAG    Flag,
  IN VAR_CLEANUP_TYPE  Type
  )
{
  EFI_STATUS                         Status;
  EFI_FORM_BROWSER2_PROTOCOL         *FormBrowser2;
  VARIABLE_CLEANUP_HII_PRIVATE_DATA  *Private;

  if (!mEndOfDxe) {
    //
    // This implementation must be called after EndOfDxe.
    //
    return EFI_UNSUPPORTED;
  }

  //
  // NOTE(review): VAR_ERROR_FLAG uses inverted (cleared-bit) encoding, so
  // ANDing the two error-flag constants yields a mask with both error bits
  // clear; a Flag with no valid bits set in that mask is rejected — confirm
  // against the VAR_ERROR_FLAG definitions.
  //
  if ((Type >= VarCleanupMax) || ((Flag & ((VAR_ERROR_FLAG) (VAR_ERROR_FLAG_SYSTEM_ERROR & VAR_ERROR_FLAG_USER_ERROR))) == 0)) {
    return EFI_INVALID_PARAMETER;
  }

  if (Flag == VAR_ERROR_FLAG_NO_ERROR) {
    //
    // Just return success if no error.
    //
    return EFI_SUCCESS;
  }

  if ((Flag & (~((VAR_ERROR_FLAG) VAR_ERROR_FLAG_SYSTEM_ERROR))) == 0) {
    //
    // This sample does not support system variables cleanup.
    //
    DEBUG ((EFI_D_ERROR, "NOTICE - VAR_ERROR_FLAG_SYSTEM_ERROR\n"));
    DEBUG ((EFI_D_ERROR, "Platform should have mechanism to reset system to manufacture mode\n"));
    return EFI_UNSUPPORTED;
  }

  //
  // Continue to process VAR_ERROR_FLAG_USER_ERROR.
  //

  //
  // Create user variable nodes for the following processing.
  //
  CreateUserVariableNode ();

  switch (Type) {
    case VarCleanupAll:
      DeleteUserVariable (TRUE, NULL);
      //
      // Destroyed the created user variable nodes
      //
      DestroyUserVariableNode ();
      return EFI_SUCCESS;
      break;

    case VarCleanupManually:
      //
      // Locate FormBrowser2 protocol.
      //
      Status = gBS->LocateProtocol (&gEfiFormBrowser2ProtocolGuid, NULL, (VOID **) &FormBrowser2);
      if (EFI_ERROR (Status)) {
        return Status;
      }

      Private = AllocateZeroPool (sizeof (VARIABLE_CLEANUP_HII_PRIVATE_DATA));
      if (Private == NULL) {
        return EFI_OUT_OF_RESOURCES;
      }

      Private->Signature = VARIABLE_CLEANUP_HII_PRIVATE_SIGNATURE;
      Private->ConfigAccess.ExtractConfig = VariableCleanupHiiExtractConfig;
      Private->ConfigAccess.RouteConfig = VariableCleanupHiiRouteConfig;
      Private->ConfigAccess.Callback = VariableCleanupHiiCallback;

      Status = gBS->LocateProtocol (
                      &gEfiHiiConfigRoutingProtocolGuid,
                      NULL,
                      (VOID **) &Private->ConfigRouting
                      );
      if (EFI_ERROR (Status)) {
        goto Done;
      }

      //
      // Install Device Path Protocol and Config Access protocol to driver handle.
      //
      Status = gBS->InstallMultipleProtocolInterfaces (
                      &Private->DriverHandle,
                      &gEfiDevicePathProtocolGuid,
                      &mVarCleanupHiiVendorDevicePath,
                      &gEfiHiiConfigAccessProtocolGuid,
                      &Private->ConfigAccess,
                      NULL
                      );
      if (EFI_ERROR (Status)) {
        goto Done;
      }

      //
      // Publish our HII data.
      //
      Private->HiiHandle = HiiAddPackages (
                             &mVariableCleanupHiiGuid,
                             Private->DriverHandle,
                             PlatformVarCleanupLibStrings,
                             PlatVarCleanupBin,
                             NULL
                             );
      if (Private->HiiHandle == NULL) {
        Status = EFI_OUT_OF_RESOURCES;
        goto Done;
      }

      UpdateUserVariableForm (Private);

      Status = FormBrowser2->SendForm (
                               FormBrowser2,
                               &Private->HiiHandle,
                               1,
                               NULL,
                               0,
                               NULL,
                               NULL
                               );
      break;

    default:
      return EFI_UNSUPPORTED;
      break;
  }

Done:
  //
  // Done is only reached from the VarCleanupManually path, after Private has
  // been successfully allocated; tear everything down in reverse order.
  //
  if (Private->DriverHandle != NULL) {
    gBS->UninstallMultipleProtocolInterfaces (
           Private->DriverHandle,
           &gEfiDevicePathProtocolGuid,
           &mVarCleanupHiiVendorDevicePath,
           &gEfiHiiConfigAccessProtocolGuid,
           &Private->ConfigAccess,
           NULL
           );
  }
  if (Private->HiiHandle != NULL) {
    HiiRemovePackages (Private->HiiHandle);
  }
  FreePool (Private);

  //
  // Destroyed the created user variable nodes
  //
  DestroyUserVariableNode ();
  return Status;
}
/**
  Get last boot variable error flag.

  mLastVarErrorFlag is captured once by the library constructor
  (PlatformVarCleanupLibConstructor), so this simply returns the cached value.

  @return Last boot variable error flag.

**/
VAR_ERROR_FLAG
EFIAPI
GetLastBootVarErrorFlag (
  VOID
  )
{
  //
  // Declared with (VOID): an empty parameter list () is a K&R-style
  // non-prototype declaration in C and disables argument checking.
  //
  return mLastVarErrorFlag;
}
/**
  Notification function of END_OF_DXE.

  This is a notification function registered on END_OF_DXE event.
  It only latches mEndOfDxe, which gates PlatformVarCleanup().

  @param[in] Event    Event whose notification function is being invoked.
  @param[in] Context  Pointer to the notification function's context.

**/
VOID
EFIAPI
PlatformVarCleanupEndOfDxeEvent (
  IN EFI_EVENT  Event,
  IN VOID       *Context
  )
{
  mEndOfDxe = TRUE;
}
/**
  The constructor function caches the pointer to VarCheck protocol and last boot variable error flag.

  The constructor function locates VarCheck protocol from protocol database.
  It will ASSERT() if that operation fails and it will always return EFI_SUCCESS.

  @param  ImageHandle   The firmware allocated handle for the EFI image.
  @param  SystemTable   A pointer to the EFI System Table.

  @retval EFI_SUCCESS   The constructor always returns EFI_SUCCESS.

**/
EFI_STATUS
EFIAPI
PlatformVarCleanupLibConstructor (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  EFI_STATUS  Status;

  //
  // Snapshot the error flag from last boot before anything can change it.
  //
  mLastVarErrorFlag = InternalGetVarErrorFlag ();
  DEBUG ((EFI_D_INFO, "mLastVarErrorFlag - 0x%02x\n", mLastVarErrorFlag));

  //
  // Register EFI_END_OF_DXE_EVENT_GROUP_GUID event.
  //
  Status = gBS->CreateEventEx (
                  EVT_NOTIFY_SIGNAL,
                  TPL_CALLBACK,
                  PlatformVarCleanupEndOfDxeEvent,
                  NULL,
                  &gEfiEndOfDxeEventGroupGuid,
                  &mPlatVarCleanupLibEndOfDxeEvent
                  );
  ASSERT_EFI_ERROR (Status);

  return EFI_SUCCESS;
}
/**
  The destructor function closes the End of DXE event.

  @param  ImageHandle   The firmware allocated handle for the EFI image.
  @param  SystemTable   A pointer to the EFI System Table.

  @retval EFI_SUCCESS   The destructor completed successfully.

**/
EFI_STATUS
EFIAPI
PlatformVarCleanupLibDestructor (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  EFI_STATUS  Status;

  //
  // Close the End of DXE event registered by the constructor.
  //
  Status = gBS->CloseEvent (mPlatVarCleanupLibEndOfDxeEvent);
  ASSERT_EFI_ERROR (Status);

  return EFI_SUCCESS;
}
|
769577.c | /*++
Copyright (c) 1989-2000 Microsoft Corporation
Module Name:
DevCtrl.c
Abstract:
This module implements the File System Device Control routines for Udfs
called by the dispatch driver.
// @@BEGIN_DDKSPLIT
Author:
Dan Lovinger (DanLo) 28-Jan-1997
Revision History:
// @@END_DDKSPLIT
--*/
#include "UdfProcs.h"
//
// The Bug check file id for this module
//
#define BugCheckFileId (UDFS_BUG_CHECK_DEVCTRL)
//
// The local debug trace level
//
#define Dbg (UDFS_DEBUG_LEVEL_DEVCTRL)
//
// Local support routines
//
NTSTATUS
UdfDvdReadStructure (
IN PIRP_CONTEXT IrpContext,
IN PIRP Irp,
IN PFCB Fcb
);
NTSTATUS
UdfDvdTransferKey (
IN PIRP_CONTEXT IrpContext,
IN PIRP Irp,
IN PFCB Fcb
);
NTSTATUS
UdfDevCtrlCompletionRoutine (
IN PDEVICE_OBJECT DeviceObject,
IN PIRP Irp,
IN PVOID Contxt
);
#ifdef ALLOC_PRAGMA
#pragma alloc_text(PAGE, UdfCommonDevControl)
#pragma alloc_text(PAGE, UdfDvdReadStructure)
#pragma alloc_text(PAGE, UdfDvdTransferKey)
#endif
NTSTATUS
UdfCommonDevControl (
    IN PIRP_CONTEXT IrpContext,
    IN PIRP Irp
    )

/*++

Routine Description:

    This is the common routine for doing Device control operations called
    by both the fsd and fsp threads.  File-opens get a small set of DVD/
    storage IOCTLs translated from file-relative to device-relative form;
    volume-opens are mostly passed straight down to the target device.

Arguments:

    Irp - Supplies the Irp to process

Return Value:

    NTSTATUS - The return status for the operation

--*/

{
    NTSTATUS Status;

    TYPE_OF_OPEN TypeOfOpen;
    PFCB Fcb;
    PCCB Ccb;

    PIO_STACK_LOCATION IrpSp;
    // NOTE(review): TargetBuffer is declared but never used in this routine.
    PVOID TargetBuffer;

    PAGED_CODE();

    //
    // Extract and decode the file object.
    //

    IrpSp = IoGetCurrentIrpStackLocation( Irp );
    TypeOfOpen = UdfDecodeFileObject( IrpSp->FileObject,
                                      &Fcb,
                                      &Ccb );

    //
    // A few IOCTLs actually require some intervention on our part to
    // translate some information from file-based to device-based units.
    //

    if (TypeOfOpen == UserFileOpen) {

        UdfAcquireFileShared( IrpContext, Fcb );

        //
        // SEH try/finally: the finally clause guarantees the shared Fcb
        // lock is dropped even if verification raises.
        //
        try {

            UdfVerifyFcbOperation( IrpContext, Fcb);

            switch (IrpSp->Parameters.DeviceIoControl.IoControlCode) {

            case IOCTL_DVD_READ_KEY:
            case IOCTL_DVD_SEND_KEY:

                // Helpers below complete the Irp (or forward it) themselves.
                Status = UdfDvdTransferKey( IrpContext, Irp, Fcb );
                break;

            case IOCTL_DVD_READ_STRUCTURE:

                Status = UdfDvdReadStructure( IrpContext, Irp, Fcb );
                break;

            case IOCTL_STORAGE_SET_READ_AHEAD:

                //
                // We're just going to no-op this for now.
                //
                Status = STATUS_SUCCESS;
                UdfCompleteRequest( IrpContext, Irp, Status );
                break;

            default:

                Status = STATUS_INVALID_PARAMETER;
                UdfCompleteRequest( IrpContext, Irp, Status );
                break;
            }
        }
        finally {

            UdfReleaseFile( IrpContext, Fcb);
        }

        return Status;
    }

    //
    // Now the only type of opens we accept are user volume opens.
    //

    if (TypeOfOpen != UserVolumeOpen) {

        UdfCompleteRequest( IrpContext, Irp, STATUS_INVALID_PARAMETER );
        return STATUS_INVALID_PARAMETER;
    }

    //
    // Handle the case of the disk type ourselves.  We're really just going to
    // lie about this, but it is a good lie.
    //

    if (IrpSp->Parameters.DeviceIoControl.IoControlCode == IOCTL_CDROM_DISK_TYPE) {

        //
        // Verify the Vcb in this case to detect if the volume has changed.
        //

        UdfVerifyVcb( IrpContext, Fcb->Vcb );

        //
        // Check the size of the output buffer.
        //

        if (IrpSp->Parameters.DeviceIoControl.OutputBufferLength < sizeof( CDROM_DISK_DATA )) {

            UdfCompleteRequest( IrpContext, Irp, STATUS_BUFFER_TOO_SMALL );
            return STATUS_BUFFER_TOO_SMALL;
        }

        //
        // Copy the data from the Vcb.
        //

        ((PCDROM_DISK_DATA) Irp->AssociatedIrp.SystemBuffer)->DiskData = CDROM_DISK_DATA_TRACK;

        Irp->IoStatus.Information = sizeof( CDROM_DISK_DATA );
        UdfCompleteRequest( IrpContext, Irp, STATUS_SUCCESS );
        return STATUS_SUCCESS;
    }

    //
    // Copy the arguments and set up the completion routine
    //

    IoCopyCurrentIrpStackLocationToNext( Irp );
    IoSetCompletionRoutine( Irp,
                            UdfDevCtrlCompletionRoutine,
                            NULL,
                            TRUE,
                            TRUE,
                            TRUE );

    //
    // Send the request.
    //

    Status = IoCallDriver( IrpContext->Vcb->TargetDeviceObject, Irp );

    //
    // Cleanup our Irp Context.  The driver has completed the Irp.
    // Passing NULL here releases only the IrpContext; the Irp itself is
    // now owned by the lower driver.
    //

    UdfCompleteRequest( IrpContext, NULL, STATUS_SUCCESS );

    return Status;
}
NTSTATUS
UdfDvdTransferKey (
    IN PIRP_CONTEXT IrpContext,
    IN PIRP Irp,
    IN PFCB Fcb
    )

/*++

Routine Description:

    This routine handles the special form of the Dvd key negotiation IOCTLs
    performed in the context of a file.  For these IOCTLs, the incoming parameter
    is in file-relative form, which must be translated to a device-relative form
    before it can continue.

Arguments:

    Irp - Supplies the Irp to process

    Fcb - Supplies the file being operated with

Return Value:

    NTSTATUS - The return status for the operation

--*/

{
    NTSTATUS Status = STATUS_INVALID_PARAMETER;

    PDVD_COPY_PROTECT_KEY TransferKey;
    LARGE_INTEGER Offset;
    BOOLEAN Result;
    PIO_STACK_LOCATION IrpSp;

    //
    // Grab the input buffer and confirm basic validity: the buffer must be
    // at least a DVD_COPY_PROTECT_KEY (trailing key data may follow, hence
    // "<" rather than "!=") and the title offset must lie inside the file.
    //

    IrpSp = IoGetCurrentIrpStackLocation( Irp );
    TransferKey = (PDVD_COPY_PROTECT_KEY) Irp->AssociatedIrp.SystemBuffer;

    if ((IrpSp->Parameters.DeviceIoControl.InputBufferLength < sizeof(DVD_COPY_PROTECT_KEY)) ||
        (TransferKey->Parameters.TitleOffset.QuadPart >= Fcb->FileSize.QuadPart)) {

        UdfCompleteRequest( IrpContext, Irp, Status );
        return Status;
    }

    //
    // Now, convert the file byte offset in the structure to a physical sector.
    //

    Result = FsRtlLookupLargeMcbEntry( &Fcb->Mcb,
                                       LlSectorsFromBytes( Fcb->Vcb, TransferKey->Parameters.TitleOffset.QuadPart ),
                                       &Offset.QuadPart,
                                       NULL,
                                       NULL,
                                       NULL,
                                       NULL );

    //
    // If we failed the lookup, we know that this must be some form of unrecorded
    // extent on the media.  This IOCTL is ill-defined at this point, so we have
    // to give up.  (-1 is the Mcb "hole" marker.)
    //

    if (!Result || Offset.QuadPart == -1) {

        UdfCompleteRequest( IrpContext, Irp, Status );
        return Status;
    }

    //
    // The input is buffered from user space, so we know we can just rewrite it.
    //

    TransferKey->Parameters.TitleOffset.QuadPart = LlBytesFromSectors( Fcb->Vcb, Offset.QuadPart );

    //
    // Copy the arguments and set up the completion routine
    //

    IoCopyCurrentIrpStackLocationToNext( Irp );
    IoSetCompletionRoutine( Irp,
                            UdfDevCtrlCompletionRoutine,
                            NULL,
                            TRUE,
                            TRUE,
                            TRUE );

    //
    // Send the request.
    //

    Status = IoCallDriver( IrpContext->Vcb->TargetDeviceObject, Irp );

    //
    // Cleanup our Irp Context.  The driver has completed the Irp.
    //

    UdfCompleteRequest( IrpContext, NULL, STATUS_SUCCESS );

    return Status;
}
NTSTATUS
UdfDvdReadStructure (
    IN PIRP_CONTEXT IrpContext,
    IN PIRP Irp,
    IN PFCB Fcb
    )

/*++

Routine Description:

    This routine handles the special form of the Dvd structure reading IOCTLs
    performed in the context of a file.  For these IOCTLs, the incoming parameter
    is in file-relative form, which must be translated to a device-relative form
    before it can continue.

Arguments:

    Irp - Supplies the Irp to process

    Fcb - Supplies the file being operated with

Return Value:

    NTSTATUS - The return status for the operation

--*/

{
    NTSTATUS Status = STATUS_INVALID_PARAMETER;

    PDVD_READ_STRUCTURE ReadStructure;
    LARGE_INTEGER Offset;
    BOOLEAN Result;
    PIO_STACK_LOCATION IrpSp;

    //
    // Grab the input buffer and confirm basic validity.
    // NOTE(review): this uses an exact-size check ("!=") while the sibling
    // UdfDvdTransferKey uses a minimum-size check ("<") — presumably because
    // DVD_READ_STRUCTURE carries no variable-length tail; confirm intent.
    //

    IrpSp = IoGetCurrentIrpStackLocation( Irp );
    ReadStructure = (PDVD_READ_STRUCTURE) Irp->AssociatedIrp.SystemBuffer;

    if ((IrpSp->Parameters.DeviceIoControl.InputBufferLength != sizeof(DVD_READ_STRUCTURE)) ||
        (ReadStructure->BlockByteOffset.QuadPart >= Fcb->FileSize.QuadPart)) {

        UdfCompleteRequest( IrpContext, Irp, Status );
        return Status;
    }

    //
    // Now, convert the file byte offset in the structure to a physical sector.
    //

    Result = FsRtlLookupLargeMcbEntry( &Fcb->Mcb,
                                       LlSectorsFromBytes( Fcb->Vcb, ReadStructure->BlockByteOffset.QuadPart ),
                                       &Offset.QuadPart,
                                       NULL,
                                       NULL,
                                       NULL,
                                       NULL );

    //
    // If we failed the lookup, we know that this must be some form of unrecorded
    // extent on the media.  This IOCTL is ill-defined at this point, so we have
    // to give up.  (-1 is the Mcb "hole" marker.)
    //

    if (!Result || Offset.QuadPart == -1) {

        UdfCompleteRequest( IrpContext, Irp, Status );
        return Status;
    }

    //
    // The input is buffered from user space, so we know we can just rewrite it.
    //

    ReadStructure->BlockByteOffset.QuadPart = LlBytesFromSectors( Fcb->Vcb, Offset.QuadPart );

    //
    // Copy the arguments and set up the completion routine
    //

    IoCopyCurrentIrpStackLocationToNext( Irp );
    IoSetCompletionRoutine( Irp,
                            UdfDevCtrlCompletionRoutine,
                            NULL,
                            TRUE,
                            TRUE,
                            TRUE );

    //
    // Send the request.
    //

    Status = IoCallDriver( IrpContext->Vcb->TargetDeviceObject, Irp );

    //
    // Cleanup our Irp Context.  The driver has completed the Irp.
    //

    UdfCompleteRequest( IrpContext, NULL, STATUS_SUCCESS );

    return Status;
}
//
// Local support routine
//
//
// Completion routine attached before forwarding device-control Irps to the
// target device.  It only propagates the pending flag up the stack; the Irp
// itself continues to complete normally.
//
NTSTATUS
UdfDevCtrlCompletionRoutine (
    IN PDEVICE_OBJECT DeviceObject,
    IN PIRP Irp,
    IN PVOID Contxt
    )
{
    UNREFERENCED_PARAMETER( DeviceObject );
    UNREFERENCED_PARAMETER( Contxt );

    //
    // Propagate the pending bit so the I/O manager sees it at our level.
    //

    if (Irp->PendingReturned) {

        IoMarkIrpPending( Irp );
    }

    return STATUS_SUCCESS;
}
|
201768.c | /*****************************************************************************/
/*
* yam.c -- YAM radio modem driver.
*
* Copyright (C) 1998 Frederic Rible F1OAT ([email protected])
* Adapted from baycom.c driver written by Thomas Sailer ([email protected])
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Please note that the GPL allows you to use the driver, NOT the radio.
* In order to use the radio, you need a license from the communications
* authority of your country.
*
*
* History:
* 0.0 F1OAT 06.06.98 Begin of work with baycom.c source code V 0.3
* 0.1 F1OAT 07.06.98 Add timer polling routine for channel arbitration
* 0.2 F6FBB 08.06.98 Added delay after FPGA programming
* 0.3 F6FBB 29.07.98 Delayed PTT implementation for dupmode=2
* 0.4 F6FBB 30.07.98 Added TxTail, Slottime and Persistance
* 0.5 F6FBB 01.08.98 Shared IRQs, /proc/net and network statistics
* 0.6 F6FBB 25.08.98 Added 1200Bds format
* 0.7 F6FBB 12.09.98 Added to the kernel configuration
* 0.8 F6FBB 14.10.98 Fixed slottime/persistence timing bug
* OK1ZIA 2.09.01 Fixed "kfree_skb on hard IRQ"
* using dev_kfree_skb_any(). (important in 2.4 kernel)
*
*/
/*****************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/if.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/random.h>
#include <asm/io.h>
#include <asm/system.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/ax25.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/init.h>
#include <linux/yam.h>
#include "yam9600.h"
#include "yam1200.h"
/* --------------------------------------------------------------------- */
static const char yam_drvname[] = "yam";
static char yam_drvinfo[] __initdata = KERN_INFO "YAM driver version 0.8 by F1OAT/F6FBB\n";
/* --------------------------------------------------------------------- */
#define YAM_9600 1
#define YAM_1200 2
#define NR_PORTS 4
#define YAM_MAGIC 0xF10A7654
/* Transmitter states */
#define TX_OFF 0
#define TX_HEAD 1
#define TX_DATA 2
#define TX_CRC1 3
#define TX_CRC2 4
#define TX_TAIL 5
#define YAM_MAX_FRAME 1024
#define DEFAULT_BITRATE 9600 /* bps */
#define DEFAULT_HOLDD 10 /* sec */
#define DEFAULT_TXD 300 /* ms */
#define DEFAULT_TXTAIL 10 /* ms */
#define DEFAULT_SLOT 100 /* ms */
#define DEFAULT_PERS 64 /* 0->255 */
/* Per-device state for one YAM modem port. */
struct yam_port {
	int magic;		/* YAM_MAGIC when the structure is valid */
	int bitrate;		/* radio bit rate (selects the FPGA image) */
	int baudrate;		/* UART baud rate (divisor = 115200/baudrate) */
	int iobase;
	int irq;
	int dupmode;		/* 0 = simplex; nonzero = full duplex (2 adds hold delay) */

	struct net_device *dev;

	/* Stats section */

	struct net_device_stats stats;

	int nb_rxint;		/* receive interrupt counter (diagnostics) */
	int nb_mdint;		/* modem-status (tx-ready) interrupt counter */

	/* Parameters section */

	int txd;		/* tx delay */
	int holdd;		/* duplex ptt delay */
	int txtail;		/* txtail delay */
	int slot;		/* slottime */
	int pers;		/* persistence */

	/* Tx section */

	int tx_state;		/* one of TX_OFF/TX_HEAD/TX_DATA/TX_CRC1/TX_CRC2/TX_TAIL */
	int tx_count;		/* bytes remaining in the current tx sub-state */
	int slotcnt;		/* slot-time countdown used by channel arbitration */
	unsigned char tx_buf[YAM_MAX_FRAME];
	int tx_len;
	int tx_crcl, tx_crch;	/* running CRC (low/high) over the outgoing frame */
	struct sk_buff_head send_queue;	/* Packets awaiting transmission */

	/* Rx section */

	int dcd;		/* data-carrier-detect snapshot from the last IRQ */
	unsigned char rx_buf[YAM_MAX_FRAME];
	int rx_len;
	int rx_crcl, rx_crch;	/* running CRC over the incoming frame */
};
/* One cached FPGA bitstream image, keyed by bitrate; kept in a singly
 * linked list headed by yam_data. */
struct yam_mcs {
	unsigned char bits[YAM_FPGA_SIZE];
	int bitrate;
	struct yam_mcs *next;
};
static struct net_device *yam_devs[NR_PORTS];
static struct yam_mcs *yam_data;
static DEFINE_TIMER(yam_timer, NULL, 0, 0);
/* --------------------------------------------------------------------- */
#define RBR(iobase) (iobase+0)
#define THR(iobase) (iobase+0)
#define IER(iobase) (iobase+1)
#define IIR(iobase) (iobase+2)
#define FCR(iobase) (iobase+2)
#define LCR(iobase) (iobase+3)
#define MCR(iobase) (iobase+4)
#define LSR(iobase) (iobase+5)
#define MSR(iobase) (iobase+6)
#define SCR(iobase) (iobase+7)
#define DLL(iobase) (iobase+0)
#define DLM(iobase) (iobase+1)
#define YAM_EXTENT 8
/* Interrupt Identification Register Bit Masks */
#define IIR_NOPEND 1
#define IIR_MSR 0
#define IIR_TX 2
#define IIR_RX 4
#define IIR_LSR 6
#define IIR_TIMEOUT 12 /* Fifo mode only */
#define IIR_MASK 0x0F
/* Interrupt Enable Register Bit Masks */
#define IER_RX 1 /* enable rx interrupt */
#define IER_TX 2 /* enable tx interrupt */
#define IER_LSR 4 /* enable line status interrupts */
#define IER_MSR 8 /* enable modem status interrupts */
/* Modem Control Register Bit Masks */
#define MCR_DTR 0x01 /* DTR output */
#define MCR_RTS 0x02 /* RTS output */
#define MCR_OUT1 0x04 /* OUT1 output (not accessible in RS232) */
#define MCR_OUT2 0x08 /* Master Interrupt enable (must be set on PCs) */
#define MCR_LOOP 0x10 /* Loopback enable */
/* Modem Status Register Bit Masks */
#define MSR_DCTS 0x01 /* Delta CTS input */
#define MSR_DDSR 0x02 /* Delta DSR */
#define MSR_DRIN 0x04 /* Delta RI */
#define MSR_DDCD 0x08 /* Delta DCD */
#define MSR_CTS 0x10 /* CTS input */
#define MSR_DSR 0x20 /* DSR input */
#define MSR_RING 0x40 /* RI input */
#define MSR_DCD 0x80 /* DCD input */
/* line status register bit mask */
#define LSR_RXC 0x01
#define LSR_OE 0x02
#define LSR_PE 0x04
#define LSR_FE 0x08
#define LSR_BREAK 0x10
#define LSR_THRE 0x20
#define LSR_TSRE 0x40
/* Line Control Register Bit Masks */
#define LCR_DLAB 0x80
#define LCR_BREAK 0x40
#define LCR_PZERO 0x28
#define LCR_PEVEN 0x18
#define LCR_PODD 0x08
#define LCR_STOP1 0x00
#define LCR_STOP2 0x04
#define LCR_BIT5 0x00
#define LCR_BIT6 0x02
#define LCR_BIT7 0x01
#define LCR_BIT8 0x03
/* YAM Modem <-> UART Port mapping */
#define TX_RDY MSR_DCTS /* transmitter ready to send */
#define RX_DCD MSR_DCD /* carrier detect */
#define RX_FLAG MSR_RING /* hdlc flag received */
#define FPGA_DONE MSR_DSR /* FPGA is configured */
#define PTT_ON (MCR_RTS|MCR_OUT2) /* activate PTT */
#define PTT_OFF (MCR_DTR|MCR_OUT2) /* release PTT */
#define ENABLE_RXINT IER_RX /* enable uart rx interrupt during rx */
#define ENABLE_TXINT IER_MSR /* enable uart ms interrupt during tx */
#define ENABLE_RTXINT (IER_RX|IER_MSR) /* full duplex operations */
/*************************************************************************
* CRC Tables
************************************************************************/
static const unsigned char chktabl[256] =
{0x00, 0x89, 0x12, 0x9b, 0x24, 0xad, 0x36, 0xbf, 0x48, 0xc1, 0x5a, 0xd3, 0x6c, 0xe5, 0x7e,
0xf7, 0x81, 0x08, 0x93, 0x1a, 0xa5, 0x2c, 0xb7, 0x3e, 0xc9, 0x40, 0xdb, 0x52, 0xed, 0x64,
0xff, 0x76, 0x02, 0x8b, 0x10, 0x99, 0x26, 0xaf, 0x34, 0xbd, 0x4a, 0xc3, 0x58, 0xd1, 0x6e,
0xe7, 0x7c, 0xf5, 0x83, 0x0a, 0x91, 0x18, 0xa7, 0x2e, 0xb5, 0x3c, 0xcb, 0x42, 0xd9, 0x50,
0xef, 0x66, 0xfd, 0x74, 0x04, 0x8d, 0x16, 0x9f, 0x20, 0xa9, 0x32, 0xbb, 0x4c, 0xc5, 0x5e,
0xd7, 0x68, 0xe1, 0x7a, 0xf3, 0x85, 0x0c, 0x97, 0x1e, 0xa1, 0x28, 0xb3, 0x3a, 0xcd, 0x44,
0xdf, 0x56, 0xe9, 0x60, 0xfb, 0x72, 0x06, 0x8f, 0x14, 0x9d, 0x22, 0xab, 0x30, 0xb9, 0x4e,
0xc7, 0x5c, 0xd5, 0x6a, 0xe3, 0x78, 0xf1, 0x87, 0x0e, 0x95, 0x1c, 0xa3, 0x2a, 0xb1, 0x38,
0xcf, 0x46, 0xdd, 0x54, 0xeb, 0x62, 0xf9, 0x70, 0x08, 0x81, 0x1a, 0x93, 0x2c, 0xa5, 0x3e,
0xb7, 0x40, 0xc9, 0x52, 0xdb, 0x64, 0xed, 0x76, 0xff, 0x89, 0x00, 0x9b, 0x12, 0xad, 0x24,
0xbf, 0x36, 0xc1, 0x48, 0xd3, 0x5a, 0xe5, 0x6c, 0xf7, 0x7e, 0x0a, 0x83, 0x18, 0x91, 0x2e,
0xa7, 0x3c, 0xb5, 0x42, 0xcb, 0x50, 0xd9, 0x66, 0xef, 0x74, 0xfd, 0x8b, 0x02, 0x99, 0x10,
0xaf, 0x26, 0xbd, 0x34, 0xc3, 0x4a, 0xd1, 0x58, 0xe7, 0x6e, 0xf5, 0x7c, 0x0c, 0x85, 0x1e,
0x97, 0x28, 0xa1, 0x3a, 0xb3, 0x44, 0xcd, 0x56, 0xdf, 0x60, 0xe9, 0x72, 0xfb, 0x8d, 0x04,
0x9f, 0x16, 0xa9, 0x20, 0xbb, 0x32, 0xc5, 0x4c, 0xd7, 0x5e, 0xe1, 0x68, 0xf3, 0x7a, 0x0e,
0x87, 0x1c, 0x95, 0x2a, 0xa3, 0x38, 0xb1, 0x46, 0xcf, 0x54, 0xdd, 0x62, 0xeb, 0x70, 0xf9,
0x8f, 0x06, 0x9d, 0x14, 0xab, 0x22, 0xb9, 0x30, 0xc7, 0x4e, 0xd5, 0x5c, 0xe3, 0x6a, 0xf1,
0x78};
static const unsigned char chktabh[256] =
{0x00, 0x11, 0x23, 0x32, 0x46, 0x57, 0x65, 0x74, 0x8c, 0x9d, 0xaf, 0xbe, 0xca, 0xdb, 0xe9,
0xf8, 0x10, 0x01, 0x33, 0x22, 0x56, 0x47, 0x75, 0x64, 0x9c, 0x8d, 0xbf, 0xae, 0xda, 0xcb,
0xf9, 0xe8, 0x21, 0x30, 0x02, 0x13, 0x67, 0x76, 0x44, 0x55, 0xad, 0xbc, 0x8e, 0x9f, 0xeb,
0xfa, 0xc8, 0xd9, 0x31, 0x20, 0x12, 0x03, 0x77, 0x66, 0x54, 0x45, 0xbd, 0xac, 0x9e, 0x8f,
0xfb, 0xea, 0xd8, 0xc9, 0x42, 0x53, 0x61, 0x70, 0x04, 0x15, 0x27, 0x36, 0xce, 0xdf, 0xed,
0xfc, 0x88, 0x99, 0xab, 0xba, 0x52, 0x43, 0x71, 0x60, 0x14, 0x05, 0x37, 0x26, 0xde, 0xcf,
0xfd, 0xec, 0x98, 0x89, 0xbb, 0xaa, 0x63, 0x72, 0x40, 0x51, 0x25, 0x34, 0x06, 0x17, 0xef,
0xfe, 0xcc, 0xdd, 0xa9, 0xb8, 0x8a, 0x9b, 0x73, 0x62, 0x50, 0x41, 0x35, 0x24, 0x16, 0x07,
0xff, 0xee, 0xdc, 0xcd, 0xb9, 0xa8, 0x9a, 0x8b, 0x84, 0x95, 0xa7, 0xb6, 0xc2, 0xd3, 0xe1,
0xf0, 0x08, 0x19, 0x2b, 0x3a, 0x4e, 0x5f, 0x6d, 0x7c, 0x94, 0x85, 0xb7, 0xa6, 0xd2, 0xc3,
0xf1, 0xe0, 0x18, 0x09, 0x3b, 0x2a, 0x5e, 0x4f, 0x7d, 0x6c, 0xa5, 0xb4, 0x86, 0x97, 0xe3,
0xf2, 0xc0, 0xd1, 0x29, 0x38, 0x0a, 0x1b, 0x6f, 0x7e, 0x4c, 0x5d, 0xb5, 0xa4, 0x96, 0x87,
0xf3, 0xe2, 0xd0, 0xc1, 0x39, 0x28, 0x1a, 0x0b, 0x7f, 0x6e, 0x5c, 0x4d, 0xc6, 0xd7, 0xe5,
0xf4, 0x80, 0x91, 0xa3, 0xb2, 0x4a, 0x5b, 0x69, 0x78, 0x0c, 0x1d, 0x2f, 0x3e, 0xd6, 0xc7,
0xf5, 0xe4, 0x90, 0x81, 0xb3, 0xa2, 0x5a, 0x4b, 0x79, 0x68, 0x1c, 0x0d, 0x3f, 0x2e, 0xe7,
0xf6, 0xc4, 0xd5, 0xa1, 0xb0, 0x82, 0x93, 0x6b, 0x7a, 0x48, 0x59, 0x2d, 0x3c, 0x0e, 0x1f,
0xf7, 0xe6, 0xd4, 0xc5, 0xb1, 0xa0, 0x92, 0x83, 0x7b, 0x6a, 0x58, 0x49, 0x3d, 0x2c, 0x1e,
0x0f};
/*************************************************************************
* FPGA functions
************************************************************************/
/* Busy-wait for roughly @ms milliseconds.  Used only during the slow FPGA
 * configuration sequence, so burning CPU here is acceptable. */
static void delay(int ms)
{
	unsigned long deadline;

	deadline = jiffies + (ms * HZ) / 1000;
	for (;;) {
		if (!time_before(jiffies, deadline))
			break;
		cpu_relax();
	}
}
/*
* reset FPGA
*/
/* Reset the FPGA attached behind the UART: disable interrupts, program a
 * divisor of 1 at 5 data bits, flush status registers, then power-cycle the
 * FPGA supply via the MCR outputs.  The register write order matters
 * (DLAB must be set before touching DLL/DLM). */
static void fpga_reset(int iobase)
{
	outb(0, IER(iobase));
	/* DLAB on: the next two writes hit the divisor latch, not RBR/IER */
	outb(LCR_DLAB | LCR_BIT5, LCR(iobase));
	outb(1, DLL(iobase));
	outb(0, DLM(iobase));
	outb(LCR_BIT5, LCR(iobase));
	/* reads clear any stale line/modem status */
	inb(LSR(iobase));
	inb(MSR(iobase));
	/* turn off FPGA supply voltage */
	outb(MCR_OUT1 | MCR_OUT2, MCR(iobase));
	delay(100);
	/* turn on FPGA supply voltage again */
	outb(MCR_DTR | MCR_RTS | MCR_OUT1 | MCR_OUT2, MCR(iobase));
	delay(100);
}
/*
* send one byte to FPGA
*/
/* Clock one configuration byte into the FPGA, MSB first: each bit is driven
 * on DTR/RTS and strobed by transmitting a dummy byte through the UART.
 * Returns 0 on success, -1 if the transmitter does not drain within ~100 ms. */
static int fpga_write(int iobase, unsigned char wrd)
{
	unsigned long deadline = jiffies + HZ / 10;
	int i;

	for (i = 0; i < 8; i++) {
		unsigned char mcr_bits;

		mcr_bits = (wrd & 0x80) ? (MCR_RTS | MCR_DTR) : MCR_DTR;
		outb(mcr_bits | MCR_OUT1 | MCR_OUT2, MCR(iobase));
		wrd <<= 1;
		/* dummy byte acts as the clock strobe */
		outb(0xfc, THR(iobase));
		while ((inb(LSR(iobase)) & LSR_TSRE) == 0) {
			if (time_after(jiffies, deadline))
				return -1;
		}
	}

	return 0;
}
/* Cache an FPGA bitstream image for @bitrate.  If an entry for the bitrate
 * already exists its data is replaced in place; otherwise a new entry is
 * pushed onto the yam_data list.  Returns the cached copy of the bits, or
 * NULL on allocation failure. */
static unsigned char *add_mcs(unsigned char *bits, int bitrate)
{
	struct yam_mcs *entry;

	/* Replace the image in place if this bitrate is already cached */
	for (entry = yam_data; entry != NULL; entry = entry->next) {
		if (entry->bitrate == bitrate) {
			memcpy(entry->bits, bits, YAM_FPGA_SIZE);
			return entry->bits;
		}
	}

	/* Not cached yet: allocate and link a fresh entry at the head */
	entry = kmalloc(sizeof(struct yam_mcs), GFP_KERNEL);
	if (entry == NULL) {
		printk(KERN_WARNING "YAM: no memory to allocate mcs\n");
		return NULL;
	}
	memcpy(entry->bits, bits, YAM_FPGA_SIZE);
	entry->bitrate = bitrate;
	entry->next = yam_data;
	yam_data = entry;

	return entry->bits;
}
static unsigned char *get_mcs(int bitrate)
{
struct yam_mcs *p;
p = yam_data;
while (p) {
if (p->bitrate == bitrate)
return p->bits;
p = p->next;
}
/* Load predefined mcs data */
switch (bitrate) {
case 1200:
return add_mcs(bits_1200, bitrate);
default:
return add_mcs(bits_9600, bitrate);
}
}
/*
* download bitstream to FPGA
* data is contained in bits[] array in yam1200.h resp. yam9600.h
*/
/*
 * download bitstream to FPGA
 * data is contained in bits[] array in yam1200.h resp. yam9600.h
 * Returns 0 on success, -1 on write timeout or if the FPGA DONE
 * signal (DSR) is not asserted afterwards.
 */
static int fpga_download(int iobase, int bitrate)
{
	int i, rc;
	unsigned char *pbits;

	pbits = get_mcs(bitrate);
	if (pbits == NULL)
		return -1;

	fpga_reset(iobase);
	for (i = 0; i < YAM_FPGA_SIZE; i++) {
		if (fpga_write(iobase, pbits[i])) {
			printk(KERN_ERR "yam: error in write cycle\n");
			return -1;	/* write... */
		}
	}

	/* trailing flush byte; its return value is deliberately ignored */
	fpga_write(iobase, 0xFF);
	rc = inb(MSR(iobase));	/* check DONE signal */

	/* Needed for some hardwares */
	delay(50);

	return (rc & MSR_DSR) ? 0 : -1;
}
/************************************************************************
* Serial port init
************************************************************************/
/* Program the UART for normal (post-FPGA-download) operation: 8 data bits,
 * divisor derived from the configured baudrate, PTT released, FIFOs off,
 * stale interrupts flushed, then rx + modem-status interrupts enabled. */
static void yam_set_uart(struct net_device *dev)
{
	struct yam_port *yp = netdev_priv(dev);
	int divisor = 115200 / yp->baudrate;

	outb(0, IER(dev->base_addr));
	/* DLAB on so DLL/DLM writes reach the divisor latch */
	outb(LCR_DLAB | LCR_BIT8, LCR(dev->base_addr));
	outb(divisor, DLL(dev->base_addr));
	outb(0, DLM(dev->base_addr));
	outb(LCR_BIT8, LCR(dev->base_addr));
	outb(PTT_OFF, MCR(dev->base_addr));
	outb(0x00, FCR(dev->base_addr));

	/* Flush pending irq */
	inb(RBR(dev->base_addr));
	inb(MSR(dev->base_addr));

	/* Enable rx irq */
	outb(ENABLE_RTXINT, IER(dev->base_addr));
}
/* --------------------------------------------------------------------- */
enum uart {
c_uart_unknown, c_uart_8250,
c_uart_16450, c_uart_16550, c_uart_16550A
};
static const char *uart_str[] =
{"unknown", "8250", "16450", "16550", "16550A"};
/* Probe the UART at @iobase and classify it.  Uses the classic detection
 * sequence: loopback the modem-control outputs to check a UART is present,
 * read the FIFO-enable echo in IIR to distinguish 16550/16550A, and use the
 * scratch register to tell a 16450 from an 8250. */
static enum uart yam_check_uart(unsigned int iobase)
{
	unsigned char b1, b2, b3;
	enum uart u;
	enum uart uart_tab[] =
	{c_uart_16450, c_uart_unknown, c_uart_16550, c_uart_16550A};

	b1 = inb(MCR(iobase));
	outb(b1 | 0x10, MCR(iobase));	/* loopback mode */
	b2 = inb(MSR(iobase));
	outb(0x1a, MCR(iobase));
	b3 = inb(MSR(iobase)) & 0xf0;
	outb(b1, MCR(iobase));	/* restore old values */
	outb(b2, MSR(iobase));
	if (b3 != 0x90)
		return c_uart_unknown;
	inb(RBR(iobase));
	inb(RBR(iobase));
	outb(0x01, FCR(iobase));	/* enable FIFOs */
	/* top two IIR bits encode the FIFO capability -> index into uart_tab */
	u = uart_tab[(inb(IIR(iobase)) >> 6) & 3];
	if (u == c_uart_16450) {
		/* scratch register exists on 16450 but not on 8250 */
		outb(0x5a, SCR(iobase));
		b1 = inb(SCR(iobase));
		outb(0xa5, SCR(iobase));
		b2 = inb(SCR(iobase));
		if ((b1 != 0x5a) || (b2 != 0xa5))
			u = c_uart_8250;
	}
	return u;
}
/******************************************************************************
* Rx Section
******************************************************************************/
/* Called when the modem signals an HDLC flag: the frame accumulated in
 * rx_buf is complete.  If DCD is up, the frame has a plausible length and
 * the CRC checks out, deliver it to the AX.25 stack with a leading KISS
 * command byte; in all cases reset the rx accumulator for the next frame. */
static inline void yam_rx_flag(struct net_device *dev, struct yam_port *yp)
{
	if (yp->dcd && yp->rx_len >= 3 && yp->rx_len < YAM_MAX_FRAME) {
		int pkt_len = yp->rx_len - 2 + 1;	/* -CRC + kiss */
		struct sk_buff *skb;

		if ((yp->rx_crch & yp->rx_crcl) != 0xFF) {
			/* Bad crc */
		} else {
			if (!(skb = dev_alloc_skb(pkt_len))) {
				printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name);
				++yp->stats.rx_dropped;
			} else {
				unsigned char *cp;
				cp = skb_put(skb, pkt_len);
				/* prepend a zero KISS "data" command byte */
				*cp++ = 0;	/* KISS kludge */
				memcpy(cp, yp->rx_buf, pkt_len - 1);
				skb->protocol = ax25_type_trans(skb, dev);
				netif_rx(skb);
				dev->last_rx = jiffies;
				++yp->stats.rx_packets;
			}
		}
	}
	/* restart frame accumulation with the CRC preset values */
	yp->rx_len = 0;
	yp->rx_crcl = 0x21;
	yp->rx_crch = 0xf3;
}
/* Accumulate one received byte into the rx buffer, updating the running
 * CRC; bytes beyond YAM_MAX_FRAME are silently dropped. */
static inline void yam_rx_byte(struct net_device *dev, struct yam_port *yp, unsigned char rxb)
{
	unsigned char prev_crcl;

	if (yp->rx_len >= YAM_MAX_FRAME)
		return;		/* over-long frame: discard further bytes */

	prev_crcl = yp->rx_crcl;
	yp->rx_crcl = chktabl[prev_crcl] ^ yp->rx_crch;
	yp->rx_crch = chktabh[prev_crcl] ^ rxb;
	yp->rx_buf[yp->rx_len++] = rxb;
}
/********************************************************************************
* TX Section
********************************************************************************/
/* Key the transmitter (assert PTT via the modem-control register). */
static void ptt_on(struct net_device *dev)
{
	outb(PTT_ON, MCR(dev->base_addr));
}
/* Unkey the transmitter (release PTT via the modem-control register). */
static void ptt_off(struct net_device *dev)
{
	outb(PTT_OFF, MCR(dev->base_addr));
}
/* net_device hard_start_xmit hook: queue the skb for the timer/irq driven
 * transmit state machine; actual transmission happens in yam_tx_byte(). */
static int yam_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct yam_port *yp = netdev_priv(dev);

	skb_queue_tail(&yp->send_queue, skb);
	dev->trans_start = jiffies;
	return 0;
}
/* Begin a transmission: compute the key-up (TX delay) byte count and key
 * the PTT.  When we are already tailing a previous frame, or no TX delay is
 * configured, a single head byte suffices. */
static void yam_start_tx(struct net_device *dev, struct yam_port *yp)
{
	int head_count = 1;

	if (yp->tx_state != TX_TAIL && yp->txd != 0)
		head_count = (yp->bitrate * yp->txd) / 8000;

	yp->tx_count = head_count;
	yp->tx_state = TX_HEAD;
	ptt_on(dev);
}
/* Channel arbitration, run from the 10ms timer: decide whether this port
 * may start transmitting.  Implements p-persistent CSMA: wait for DCD to
 * drop, then for a slot time, then transmit with probability pers/256. */
static void yam_arbitrate(struct net_device *dev)
{
	struct yam_port *yp = netdev_priv(dev);

	if (yp->magic != YAM_MAGIC || yp->tx_state != TX_OFF ||
	    skb_queue_empty(&yp->send_queue))
		return;
	/* tx_state is TX_OFF and there is data to send */

	if (yp->dupmode) {
		/* Full duplex mode, don't wait */
		yam_start_tx(dev, yp);
		return;
	}
	if (yp->dcd) {
		/* DCD on, wait slotime ... */
		yp->slotcnt = yp->slot / 10;
		return;
	}
	/* Is slottime passed ? */
	if ((--yp->slotcnt) > 0)
		return;

	yp->slotcnt = yp->slot / 10;

	/* is random > persist ? */
	if ((random32() % 256) > yp->pers)
		return;

	yam_start_tx(dev, yp);
}
/* 10ms periodic timer: run channel arbitration on every running port,
 * then re-arm itself. */
static void yam_dotimer(unsigned long dummy)
{
	int port;

	for (port = 0; port < NR_PORTS; port++) {
		struct net_device *ndev = yam_devs[port];

		if (!ndev || !netif_running(ndev))
			continue;
		yam_arbitrate(ndev);
	}

	yam_timer.expires = jiffies + HZ / 100;
	add_timer(&yam_timer);
}
/* Transmit state machine, advanced one byte per TX_RDY interrupt:
 * TX_HEAD  - key-up delay; when it expires, dequeue and stage a frame
 * TX_DATA  - stream frame bytes, updating the running CRC
 * TX_CRC1/2- append the two CRC bytes
 * TX_TAIL  - hold PTT for the tail delay, then unkey */
static void yam_tx_byte(struct net_device *dev, struct yam_port *yp)
{
	struct sk_buff *skb;
	unsigned char b, temp;

	switch (yp->tx_state) {
	case TX_OFF:
		break;
	case TX_HEAD:
		if (--yp->tx_count <= 0) {
			if (!(skb = skb_dequeue(&yp->send_queue))) {
				/* nothing queued: unkey and go idle */
				ptt_off(dev);
				yp->tx_state = TX_OFF;
				break;
			}
			/* NOTE(review): tx_state is set to TX_DATA before the
			 * KISS-byte/length validation below; on the early
			 * "break" paths the machine is left in TX_DATA with
			 * stale tx_buf/tx_len — looks questionable, confirm
			 * against upstream behavior. */
			yp->tx_state = TX_DATA;
			if (skb->data[0] != 0) {
				/* do_kiss_params(s, skb->data, skb->len); */
				dev_kfree_skb_any(skb);
				break;
			}
			yp->tx_len = skb->len - 1;	/* strip KISS byte */
			if (yp->tx_len >= YAM_MAX_FRAME || yp->tx_len < 2) {
				dev_kfree_skb_any(skb);
				break;
			}
			memcpy(yp->tx_buf, skb->data + 1, yp->tx_len);
			dev_kfree_skb_any(skb);
			yp->tx_count = 0;
			/* CRC preset values */
			yp->tx_crcl = 0x21;
			yp->tx_crch = 0xf3;
			yp->tx_state = TX_DATA;
		}
		break;
	case TX_DATA:
		b = yp->tx_buf[yp->tx_count++];
		outb(b, THR(dev->base_addr));
		temp = yp->tx_crcl;
		yp->tx_crcl = chktabl[temp] ^ yp->tx_crch;
		yp->tx_crch = chktabh[temp] ^ b;
		if (yp->tx_count >= yp->tx_len) {
			yp->tx_state = TX_CRC1;
		}
		break;
	case TX_CRC1:
		/* finalize and send the first (low) CRC byte */
		yp->tx_crch = chktabl[yp->tx_crcl] ^ yp->tx_crch;
		yp->tx_crcl = chktabh[yp->tx_crcl] ^ chktabl[yp->tx_crch] ^ 0xff;
		outb(yp->tx_crcl, THR(dev->base_addr));
		yp->tx_state = TX_CRC2;
		break;
	case TX_CRC2:
		outb(chktabh[yp->tx_crch] ^ 0xFF, THR(dev->base_addr));
		if (skb_queue_empty(&yp->send_queue)) {
			/* last frame: compute tail (and optional duplex hold) time */
			yp->tx_count = (yp->bitrate * yp->txtail) / 8000;
			if (yp->dupmode == 2)
				yp->tx_count += (yp->bitrate * yp->holdd) / 8;
			if (yp->tx_count == 0)
				yp->tx_count = 1;
			yp->tx_state = TX_TAIL;
		} else {
			/* more frames queued: back-to-back with minimal head */
			yp->tx_count = 1;
			yp->tx_state = TX_HEAD;
		}
		++yp->stats.tx_packets;
		break;
	case TX_TAIL:
		if (--yp->tx_count <= 0) {
			yp->tx_state = TX_OFF;
			ptt_off(dev);
		}
		break;
	}
}
/***********************************************************************************
* ISR routine
************************************************************************************/
/* Shared interrupt handler: poll every port (IRQs may be shared between
 * them), draining the UART interrupt-identification register until no
 * interrupt is pending.  A byte budget of 100 guards against a stuck IRQ. */
static irqreturn_t yam_interrupt(int irq, void *dev_id)
{
	struct net_device *dev;
	struct yam_port *yp;
	unsigned char iir;
	int counter = 100;
	int i;
	int handled = 0;

	for (i = 0; i < NR_PORTS; i++) {
		/* assumes yam_devs[i] is always non-NULL here — TODO confirm
		 * against the module init path */
		dev = yam_devs[i];
		yp = netdev_priv(dev);
		if (!netif_running(dev))
			continue;
		while ((iir = IIR_MASK & inb(IIR(dev->base_addr))) != IIR_NOPEND) {
			unsigned char msr = inb(MSR(dev->base_addr));
			unsigned char lsr = inb(LSR(dev->base_addr));
			unsigned char rxb;

			handled = 1;

			if (lsr & LSR_OE)
				++yp->stats.rx_fifo_errors;

			yp->dcd = (msr & RX_DCD) ? 1 : 0;

			if (--counter <= 0) {
				printk(KERN_ERR "%s: too many irq iir=%d\n",
					dev->name, iir);
				goto out;
			}
			if (msr & TX_RDY) {
				++yp->nb_mdint;
				yam_tx_byte(dev, yp);
			}
			if (lsr & LSR_RXC) {
				++yp->nb_rxint;
				rxb = inb(RBR(dev->base_addr));
				if (msr & RX_FLAG)
					yam_rx_flag(dev, yp);
				else
					yam_rx_byte(dev, yp, rxb);
			}
		}
	}
out:
	return IRQ_RETVAL(handled);
}
#ifdef CONFIG_PROC_FS
/* seq_file start: hand back the device at position *pos, or NULL past the
 * last port. */
static void *yam_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= NR_PORTS)
		return NULL;
	return yam_devs[*pos];
}
/* seq_file next: advance the cursor and return the following device, or
 * NULL when the port table is exhausted. */
static void *yam_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= NR_PORTS)
		return NULL;
	return yam_devs[*pos];
}
/* seq_file stop: nothing to release — iteration holds no locks. */
static void yam_seq_stop(struct seq_file *seq, void *v)
{
}
/* seq_file show: dump one port's configuration, state and counters into
 * /proc/net in a human-readable key/value form. */
static int yam_seq_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = v;
	const struct yam_port *yp = netdev_priv(dev);

	seq_printf(seq, "Device %s\n", dev->name);
	seq_printf(seq, "  Up       %d\n", netif_running(dev));
	seq_printf(seq, "  Speed    %u\n", yp->bitrate);
	seq_printf(seq, "  IoBase   0x%x\n", yp->iobase);
	seq_printf(seq, "  BaudRate %u\n", yp->baudrate);
	seq_printf(seq, "  IRQ      %u\n", yp->irq);
	seq_printf(seq, "  TxState  %u\n", yp->tx_state);
	seq_printf(seq, "  Duplex   %u\n", yp->dupmode);
	seq_printf(seq, "  HoldDly  %u\n", yp->holdd);
	seq_printf(seq, "  TxDelay  %u\n", yp->txd);
	seq_printf(seq, "  TxTail   %u\n", yp->txtail);
	seq_printf(seq, "  SlotTime %u\n", yp->slot);
	seq_printf(seq, "  Persist  %u\n", yp->pers);
	seq_printf(seq, "  TxFrames %lu\n", yp->stats.tx_packets);
	seq_printf(seq, "  RxFrames %lu\n", yp->stats.rx_packets);
	seq_printf(seq, "  TxInt    %u\n", yp->nb_mdint);
	seq_printf(seq, "  RxInt    %u\n", yp->nb_rxint);
	seq_printf(seq, "  RxOver   %lu\n", yp->stats.rx_fifo_errors);
	seq_printf(seq, "\n");
	return 0;
}
/* seq_file iterator over the yam_devs[] port table. */
static struct seq_operations yam_seqops = {
	.start = yam_seq_start,
	.next = yam_seq_next,
	.stop = yam_seq_stop,
	.show = yam_seq_show,
};
/* open() handler for the /proc/net entry: bind the seq_file iterator. */
static int yam_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &yam_seqops);
}
/* file_operations for the /proc/net entry, backed by seq_file. */
static const struct file_operations yam_info_fops = {
	.owner = THIS_MODULE,
	.open = yam_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
#endif
/* --------------------------------------------------------------------- */
/* Return the port's statistics block, or NULL if the device pointer is
 * missing or its private data fails the magic check.  Safe to call whether
 * the interface is up or down. */
static struct net_device_stats *yam_get_stats(struct net_device *dev)
{
	struct yam_port *port;

	if (dev == NULL)
		return NULL;

	port = netdev_priv(dev);
	if (port->magic != YAM_MAGIC)
		return NULL;

	return &port->stats;
}
/* --------------------------------------------------------------------- */
/* Bring the interface up: validate the configuration, claim the I/O region
 * and IRQ, identify the UART, download the FPGA bitstream and start the
 * queue.  Returns 0 on success or a negative errno.
 *
 * Fix: the original dereferenced dev (printk of dev->name/base_addr/irq and
 * netdev_priv) before its "!dev" validity check; the check now comes first.
 * The inner reset loop also shadowed dev/yp, which is resolved by renaming. */
static int yam_open(struct net_device *dev)
{
	struct yam_port *yp;
	enum uart u;
	int i;
	int ret = 0;

	if (!dev)
		return -ENXIO;
	yp = netdev_priv(dev);

	printk(KERN_INFO "Trying %s at iobase 0x%lx irq %u\n", dev->name, dev->base_addr, dev->irq);

	if (!yp->bitrate)
		return -ENXIO;
	if (!dev->base_addr || dev->base_addr > 0x1000 - YAM_EXTENT ||
		dev->irq < 2 || dev->irq > 15) {
		return -ENXIO;
	}
	if (!request_region(dev->base_addr, YAM_EXTENT, dev->name))
	{
		printk(KERN_ERR "%s: cannot 0x%lx busy\n", dev->name, dev->base_addr);
		return -EACCES;
	}
	if ((u = yam_check_uart(dev->base_addr)) == c_uart_unknown) {
		printk(KERN_ERR "%s: cannot find uart type\n", dev->name);
		ret = -EIO;
		goto out_release_base;
	}
	if (fpga_download(dev->base_addr, yp->bitrate)) {
		printk(KERN_ERR "%s: cannot init FPGA\n", dev->name);
		ret = -EIO;
		goto out_release_base;
	}
	outb(0, IER(dev->base_addr));
	if (request_irq(dev->irq, yam_interrupt, IRQF_DISABLED | IRQF_SHARED, dev->name, dev)) {
		printk(KERN_ERR "%s: irq %d busy\n", dev->name, dev->irq);
		ret = -EBUSY;
		goto out_release_base;
	}

	yam_set_uart(dev);

	netif_start_queue(dev);

	yp->slotcnt = yp->slot / 10;

	/* Reset overruns for all ports - FPGA programming makes overruns */
	for (i = 0; i < NR_PORTS; i++) {
		struct net_device *other_dev = yam_devs[i];
		struct yam_port *other_yp = netdev_priv(other_dev);

		inb(LSR(other_dev->base_addr));
		other_yp->stats.rx_fifo_errors = 0;
	}

	printk(KERN_INFO "%s at iobase 0x%lx irq %u uart %s\n", dev->name, dev->base_addr, dev->irq,
		   uart_str[u]);
	return 0;

out_release_base:
	release_region(dev->base_addr, YAM_EXTENT);
	return ret;
}
/* --------------------------------------------------------------------- */
/* Bring the interface down: mask UART interrupts, release the IRQ and I/O
 * region, stop the queue and drop any frames still awaiting transmission.
 *
 * Fix: the original computed netdev_priv(dev) before its "!dev" validity
 * check; the check is now performed first. */
static int yam_close(struct net_device *dev)
{
	struct sk_buff *skb;
	struct yam_port *yp;

	if (!dev)
		return -EINVAL;
	yp = netdev_priv(dev);

	/*
	 * disable interrupts
	 */
	outb(0, IER(dev->base_addr));
	outb(1, MCR(dev->base_addr));
	/* Remove IRQ handler if last */
	free_irq(dev->irq, dev);
	release_region(dev->base_addr, YAM_EXTENT);
	netif_stop_queue(dev);

	/* flush frames that never made it onto the air */
	while ((skb = skb_dequeue(&yp->send_queue)))
		dev_kfree_skb(skb);

	printk(KERN_INFO "%s: close yam at iobase 0x%lx irq %u\n",
		   yam_drvname, dev->base_addr, dev->irq);
	return 0;
}
/* --------------------------------------------------------------------- */
/*
 * yam_ioctl - private device ioctl handler (SIOCDEVPRIVATE).
 *
 * Sub-commands (first int of ifr->ifr_data selects):
 *   SIOCYAMSMCS - upload an FPGA bitstream for a bitrate (device down only)
 *   SIOCYAMSCFG - set iobase/irq/bitrate/baudrate/timing parameters
 *   SIOCYAMGCFG - read back the full configuration
 *
 * Fix: SIOCYAMGCFG previously copied a partially-initialized stack
 * struct to userspace; padding/holes leaked kernel stack memory
 * (CVE-2014-1446). The struct is now zeroed before being filled.
 */
static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct yam_port *yp = netdev_priv(dev);
	struct yamdrv_ioctl_cfg yi;
	struct yamdrv_ioctl_mcs *ym;
	int ioctl_cmd;

	if (copy_from_user(&ioctl_cmd, ifr->ifr_data, sizeof(int)))
		return -EFAULT;
	if (yp->magic != YAM_MAGIC)
		return -EINVAL;
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	if (cmd != SIOCDEVPRIVATE)
		return -EINVAL;
	switch (ioctl_cmd) {
	case SIOCYAMRESERVED:
		return -EINVAL;			/* unused */
	case SIOCYAMSMCS:
		if (netif_running(dev))
			return -EINVAL;		/* Cannot change this parameter when up */
		if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL)
			return -ENOBUFS;
		ym->bitrate = 9600;
		if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) {
			kfree(ym);
			return -EFAULT;
		}
		if (ym->bitrate > YAM_MAXBITRATE) {
			kfree(ym);
			return -EINVAL;
		}
		add_mcs(ym->bits, ym->bitrate);
		kfree(ym);
		break;
	case SIOCYAMSCFG:
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
			return -EFAULT;
		/* iobase/irq/bitrate/baudrate may only change while down. */
		if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
			return -EINVAL;		/* Cannot change this parameter when up */
		if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
			return -EINVAL;		/* Cannot change this parameter when up */
		if ((yi.cfg.mask & YAM_BITRATE) && netif_running(dev))
			return -EINVAL;		/* Cannot change this parameter when up */
		if ((yi.cfg.mask & YAM_BAUDRATE) && netif_running(dev))
			return -EINVAL;		/* Cannot change this parameter when up */
		if (yi.cfg.mask & YAM_IOBASE) {
			yp->iobase = yi.cfg.iobase;
			dev->base_addr = yi.cfg.iobase;
		}
		if (yi.cfg.mask & YAM_IRQ) {
			if (yi.cfg.irq > 15)
				return -EINVAL;
			yp->irq = yi.cfg.irq;
			dev->irq = yi.cfg.irq;
		}
		if (yi.cfg.mask & YAM_BITRATE) {
			if (yi.cfg.bitrate > YAM_MAXBITRATE)
				return -EINVAL;
			yp->bitrate = yi.cfg.bitrate;
		}
		if (yi.cfg.mask & YAM_BAUDRATE) {
			if (yi.cfg.baudrate > YAM_MAXBAUDRATE)
				return -EINVAL;
			yp->baudrate = yi.cfg.baudrate;
		}
		if (yi.cfg.mask & YAM_MODE) {
			if (yi.cfg.mode > YAM_MAXMODE)
				return -EINVAL;
			yp->dupmode = yi.cfg.mode;
		}
		if (yi.cfg.mask & YAM_HOLDDLY) {
			if (yi.cfg.holddly > YAM_MAXHOLDDLY)
				return -EINVAL;
			yp->holdd = yi.cfg.holddly;
		}
		if (yi.cfg.mask & YAM_TXDELAY) {
			if (yi.cfg.txdelay > YAM_MAXTXDELAY)
				return -EINVAL;
			yp->txd = yi.cfg.txdelay;
		}
		if (yi.cfg.mask & YAM_TXTAIL) {
			if (yi.cfg.txtail > YAM_MAXTXTAIL)
				return -EINVAL;
			yp->txtail = yi.cfg.txtail;
		}
		if (yi.cfg.mask & YAM_PERSIST) {
			if (yi.cfg.persist > YAM_MAXPERSIST)
				return -EINVAL;
			yp->pers = yi.cfg.persist;
		}
		if (yi.cfg.mask & YAM_SLOTTIME) {
			if (yi.cfg.slottime > YAM_MAXSLOTTIME)
				return -EINVAL;
			yp->slot = yi.cfg.slottime;
			yp->slotcnt = yp->slot / 10;
		}
		break;
	case SIOCYAMGCFG:
		/* Zero first so struct padding cannot leak kernel stack
		 * memory to userspace through copy_to_user() below. */
		memset(&yi, 0, sizeof(yi));
		yi.cfg.mask = 0xffffffff;
		yi.cfg.iobase = yp->iobase;
		yi.cfg.irq = yp->irq;
		yi.cfg.bitrate = yp->bitrate;
		yi.cfg.baudrate = yp->baudrate;
		yi.cfg.mode = yp->dupmode;
		yi.cfg.txdelay = yp->txd;
		yi.cfg.holddly = yp->holdd;
		yi.cfg.txtail = yp->txtail;
		yi.cfg.persist = yp->pers;
		yi.cfg.slottime = yp->slot;
		if (copy_to_user(ifr->ifr_data, &yi, sizeof(struct yamdrv_ioctl_cfg)))
			 return -EFAULT;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/* --------------------------------------------------------------------- */
/* Install a new link-level address; the payload is an AX.25
 * shifted-ASCII callsign copied verbatim into dev->dev_addr. */
static int yam_set_mac_address(struct net_device *dev, void *addr)
{
	memcpy(dev->dev_addr, ((struct sockaddr *)addr)->sa_data,
	       dev->addr_len);
	return 0;
}
/* --------------------------------------------------------------------- */
/* yam_setup - alloc_netdev() setup callback: initialize per-port modem
 * defaults and wire up the net_device callbacks and AX.25 framing. */
static void yam_setup(struct net_device *dev)
{
	struct yam_port *yp = netdev_priv(dev);

	/* Per-port modem defaults. */
	yp->magic = YAM_MAGIC;
	yp->bitrate = DEFAULT_BITRATE;
	yp->baudrate = DEFAULT_BITRATE * 2;
	yp->iobase = 0;
	yp->irq = 0;
	yp->dupmode = 0;
	yp->holdd = DEFAULT_HOLDD;
	yp->txd = DEFAULT_TXD;
	yp->txtail = DEFAULT_TXTAIL;
	yp->slot = DEFAULT_SLOT;
	yp->pers = DEFAULT_PERS;
	yp->dev = dev;

	dev->base_addr = yp->iobase;
	dev->irq = yp->irq;

	/* Driver entry points (pre-net_device_ops kernel API). */
	dev->open = yam_open;
	dev->stop = yam_close;
	dev->do_ioctl = yam_ioctl;
	dev->hard_start_xmit = yam_send_packet;
	dev->get_stats = yam_get_stats;

	skb_queue_head_init(&yp->send_queue);

	/* AX.25 link-layer header handling and addressing. */
	dev->hard_header = ax25_hard_header;
	dev->rebuild_header = ax25_rebuild_header;
	dev->set_mac_address = yam_set_mac_address;

	dev->type = ARPHRD_AX25;
	dev->hard_header_len = AX25_MAX_HEADER_LEN;
	dev->mtu = AX25_MTU;
	dev->addr_len = AX25_ADDR_LEN;
	memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
	memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
}
/*
 * yam_init_driver - module init: allocate and register NR_PORTS
 * devices, start the polling timer and create the /proc entry.
 *
 * Fixes: (1) the allocation-failure printk dereferenced dev, which is
 * NULL on that path - report the attempted name instead; (2) a device
 * that failed register_netdev() was leaked - free it before unwinding.
 */
static int __init yam_init_driver(void)
{
	struct net_device *dev;
	int i, err;
	char name[IFNAMSIZ];

	printk(yam_drvinfo);
	for (i = 0; i < NR_PORTS; i++) {
		sprintf(name, "yam%d", i);
		dev = alloc_netdev(sizeof(struct yam_port), name,
				   yam_setup);
		if (!dev) {
			/* dev is NULL here: print the name we tried. */
			printk(KERN_ERR "yam: cannot allocate net device %s\n",
			       name);
			err = -ENOMEM;
			goto error;
		}
		err = register_netdev(dev);
		if (err) {
			printk(KERN_WARNING "yam: cannot register net device %s\n", dev->name);
			free_netdev(dev);	/* don't leak the unregistered device */
			goto error;
		}
		yam_devs[i] = dev;
	}
	yam_timer.function = yam_dotimer;
	yam_timer.expires = jiffies + HZ / 100;
	add_timer(&yam_timer);
	proc_net_fops_create("yam", S_IRUGO, &yam_info_fops);
	return 0;
error:
	/* Unwind every device that was fully registered. */
	while (--i >= 0) {
		unregister_netdev(yam_devs[i]);
		free_netdev(yam_devs[i]);
	}
	return err;
}
/* --------------------------------------------------------------------- */
/* yam_cleanup_driver - module exit: undo everything yam_init_driver did. */
static void __exit yam_cleanup_driver(void)
{
	struct yam_mcs *p;
	int i;

	/* Stop the polling timer before the devices go away. */
	del_timer(&yam_timer);
	for (i = 0; i < NR_PORTS; i++) {
		struct net_device *dev = yam_devs[i];
		if (dev) {
			unregister_netdev(dev);
			free_netdev(dev);
		}
	}

	/* Free the cached FPGA bitstream list (built by add_mcs). */
	while (yam_data) {
		p = yam_data;
		yam_data = yam_data->next;
		kfree(p);
	}

	proc_net_remove("yam");
}
/* --------------------------------------------------------------------- */
MODULE_AUTHOR("Frederic Rible F1OAT [email protected]");
MODULE_DESCRIPTION("Yam amateur radio modem driver");
MODULE_LICENSE("GPL");
module_init(yam_init_driver);
module_exit(yam_cleanup_driver);
/* --------------------------------------------------------------------- */
|
79013.c | /*-
* BSD LICENSE
*
* Copyright(c) 2010-2013 Tilera Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Tilera Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdarg.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>
#include "testpmd.h"
/* hardcoded configuration (for now) */
static unsigned cfg_n_flows = 1024;
static unsigned cfg_pkt_size = 300;
static uint32_t cfg_ip_src = IPv4(10, 254, 0, 0);
static uint32_t cfg_ip_dst = IPv4(10, 253, 0, 0);
static uint16_t cfg_udp_src = 1000;
static uint16_t cfg_udp_dst = 1001;
static struct ether_addr cfg_ether_src =
{{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x00 }};
static struct ether_addr cfg_ether_dst =
{{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x01 }};
#define IP_DEFTTL 64 /* from RFC 1340. */
#define IP_VERSION 0x40
#define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
/* Grab a raw (uninitialized) mbuf from the pool; the caller fills in
 * every header field itself, so the cheap raw alloc path is used. */
static inline struct rte_mbuf *
tx_mbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m = __rte_mbuf_raw_alloc(mp);

	__rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
	return m;
}
/*
 * One's-complement checksum over hdr_len bytes, RFC 1071 style,
 * returned ready to drop into the IPv4 header checksum field.
 * A trailing odd byte is ignored; IPv4 headers are always a multiple
 * of four bytes so that case never arises here.
 */
static inline uint16_t
ip_sum(const uint16_t *hdr, int hdr_len)
{
	uint32_t acc = 0;
	int words = hdr_len / 2;
	int i;

	for (i = 0; i < words; i++) {
		acc += hdr[i];
		/* Fold eagerly so the 32-bit accumulator never overflows. */
		if (acc & 0x80000000)
			acc = (acc & 0xFFFF) + (acc >> 16);
	}
	/* Fold any remaining carries into the low 16 bits. */
	while (acc >> 16)
		acc = (acc & 0xFFFF) + (acc >> 16);
	return (uint16_t)~acc;
}
/*
* Multi-flow generation mode.
*
* We originate a bunch of flows (varying destination IP addresses), and
* terminate receive traffic. Received traffic is simply discarded, but we
* still do so in order to maintain traffic statistics.
*/
/*
 * pkt_burst_flow_gen - flowgen forwarding engine body.
 *
 * Drains and discards one RX burst (to keep statistics flowing), then
 * synthesizes a TX burst of UDP/IPv4 packets whose destination IP
 * cycles through cfg_n_flows consecutive addresses.
 *
 * Fix: the two ether_addr_copy() calls contained HTML-entity mojibake
 * ("ð_hdr") where the original "&eth_hdr" had been mangled; the
 * correct address-of expressions are restored.
 */
static void
pkt_burst_flow_gen(struct fwd_stream *fs)
{
	unsigned pkt_size = cfg_pkt_size - 4;	/* Adjust FCS */
	struct rte_mbuf  *pkts_burst[MAX_PKT_BURST];
	struct rte_mempool *mbp;
	struct rte_mbuf  *pkt;
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ip_hdr;
	struct udp_hdr *udp_hdr;
	uint16_t vlan_tci;
	uint16_t ol_flags;
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t nb_pkt;
	uint16_t i;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif
	/* NOTE(review): function-static, so flow progression is shared by
	 * every lcore running this engine - racy with multiple TX queues. */
	static int next_flow = 0;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif
	/* Receive a burst of packets and discard them. */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	fs->rx_packets += nb_rx;
	for (i = 0; i < nb_rx; i++)
		rte_pktmbuf_free(pkts_burst[i]);

	mbp = current_fwd_lcore()->mbp;
	vlan_tci = ports[fs->tx_port].tx_vlan_id;
	ol_flags = ports[fs->tx_port].tx_ol_flags;
	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
		pkt = tx_mbuf_alloc(mbp);
		if (!pkt)
			break;
		pkt->pkt.data_len = pkt_size;
		pkt->pkt.next = NULL;

		/* Initialize Ethernet header. */
		eth_hdr = (struct ether_hdr *)pkt->pkt.data;
		ether_addr_copy(&cfg_ether_dst, &eth_hdr->d_addr);
		ether_addr_copy(&cfg_ether_src, &eth_hdr->s_addr);
		eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);

		/* Initialize IP header; dst address varies per flow. */
		ip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
		memset(ip_hdr, 0, sizeof(*ip_hdr));
		ip_hdr->version_ihl = IP_VHL_DEF;
		ip_hdr->type_of_service = 0;
		ip_hdr->fragment_offset = 0;
		ip_hdr->time_to_live = IP_DEFTTL;
		ip_hdr->next_proto_id = IPPROTO_UDP;
		ip_hdr->packet_id = 0;
		ip_hdr->src_addr = rte_cpu_to_be_32(cfg_ip_src);
		ip_hdr->dst_addr = rte_cpu_to_be_32(cfg_ip_dst +
						    next_flow);
		ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_size -
							sizeof(*eth_hdr));
		ip_hdr->hdr_checksum = ip_sum((uint16_t *)ip_hdr,
					      sizeof(*ip_hdr));

		/* Initialize UDP header. */
		udp_hdr = (struct udp_hdr *)(ip_hdr + 1);
		udp_hdr->src_port = rte_cpu_to_be_16(cfg_udp_src);
		udp_hdr->dst_port = rte_cpu_to_be_16(cfg_udp_dst);
		udp_hdr->dgram_cksum = 0; /* No UDP checksum. */
		udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_size -
						      sizeof(*eth_hdr) -
						      sizeof(*ip_hdr));
		pkt->pkt.nb_segs = 1;
		pkt->pkt.pkt_len = pkt_size;
		pkt->ol_flags = ol_flags;
		pkt->pkt.vlan_macip.f.vlan_tci = vlan_tci;
		pkt->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
		pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
		pkts_burst[nb_pkt] = pkt;
		next_flow = (next_flow + 1) % cfg_n_flows;
	}

	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
	fs->tx_packets += nb_tx;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	if (unlikely(nb_tx < nb_pkt)) {
		/* Back out the flow counter. */
		next_flow -= (nb_pkt - nb_tx);
		while (next_flow < 0)
			next_flow += cfg_n_flows;
		/* Free the packets that could not be transmitted. */
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_pkt);
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}
/* Engine descriptor exposing pkt_burst_flow_gen under the name
 * "flowgen"; no per-port begin/end hooks are needed. */
struct fwd_engine flow_gen_engine = {
	.fwd_mode_name  = "flowgen",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_flow_gen,
};
|
529725.c | //*********************************************************************************************************************
// All Winner Tech, All Right Reserved. 2014-2015 Copyright (c)
//
// File name : de_peak.c
//
// Description : display engine 2.0 peaking basic function definition
//
// History : 2014/03/27 vito cheng v0.1 Initial version
//
//*********************************************************************************************************************
#include "de_peak_type.h"
#include "de_rtmx.h"
#include "de_enhance.h"
#define PEAK_OFST 0xA6000 //PEAKING offset based on RTMX
static volatile __peak_reg_t *peak_dev[DEVICE_NUM][CHN_NUM];
static de_reg_blocks peak_block[DEVICE_NUM][CHN_NUM];
//*********************************************************************************************************************
// function : de_peak_set_reg_base(unsigned int sel, unsigned int chno, unsigned int base)
// description : set peak reg base
// parameters :
// sel <rtmx select>
// chno <overlay select>
// base <reg base>
// return :
// success
//*********************************************************************************************************************
int de_peak_set_reg_base(unsigned int sel, unsigned int chno, unsigned int base)
{
__inf("sel=%d, chno=%d, base=0x%x\n", sel, chno, base);
peak_dev[sel][chno] = (__peak_reg_t *)base;
return 0;
}
int de_peak_update_regs(unsigned int sel, unsigned int chno)
{
if(peak_block[sel][chno].dirty == 0x1){
memcpy((void *)peak_block[sel][chno].off,peak_block[sel][chno].val,peak_block[sel][chno].size);
peak_block[sel][chno].dirty = 0x0;}
return 0;
}
/* Allocate the shadow register block for one peaking unit and register
 * it so later de_peak_* setters write the shadow, not the hardware. */
int de_peak_init(unsigned int sel, unsigned int chno, unsigned int reg_base)
{
	unsigned int base;
	void *memory;

	base = reg_base + (sel+1)*0x00100000 + PEAK_OFST;	//FIXME: chno is not considered
	__inf("sel %d, peak_base[%d]=0x%x\n", sel, chno, base);

	/* Shadow copy of the peaking registers; de_peak_update_regs()
	 * copies it to hardware when the dirty flag is set. */
	memory = disp_sys_malloc(sizeof(__peak_reg_t));
	if(NULL == memory) {
		__wrn("malloc peak[%d][%d] memory fail! size=0x%x\n", sel, chno, sizeof(__peak_reg_t));
		return -1;
	}

	peak_block[sel][chno].off = base;
	peak_block[sel][chno].val = memory;
	/* Only the first 0x30 bytes are ever flushed to hardware. */
	peak_block[sel][chno].size = 0x30;
	peak_block[sel][chno].dirty = 0;

	/* NOTE(review): the pointer is truncated to unsigned int here,
	 * which is only safe on 32-bit targets - confirm for 64-bit. */
	de_peak_set_reg_base(sel, chno, (unsigned int)memory);
	return 0;
}
//*********************************************************************************************************************
// function : de_peak_enable(unsigned int sel, unsigned int chno, unsigned int en)
// description : enable/disable peak
// parameters :
// sel <rtmx select>
// chno <overlay select>
// en <enable: 0-diable; 1-enable>
// return :
// success
//*********************************************************************************************************************
int de_peak_enable(unsigned int sel, unsigned int chno, unsigned int en)
{
__inf("sel=%d, chno=%d, en=%d\n", sel, chno, en);
peak_dev[sel][chno]->ctrl.bits.en = en;
peak_block[sel][chno].dirty = 1;
return 0;
}
//*********************************************************************************************************************
// function : de_peak_set_size(unsigned int sel, unsigned int chno, unsigned int width, unsigned int height)
// description : set peak size
// parameters :
// sel <rtmx select>
// chno <overlay select>
// width <input width>
// height <input height>
// return :
// success
//*********************************************************************************************************************
int de_peak_set_size(unsigned int sel, unsigned int chno, unsigned int width, unsigned int height)
{
peak_dev[sel][chno]->size.bits.width = width - 1;
peak_dev[sel][chno]->size.bits.height = height - 1;
peak_block[sel][chno].dirty = 1;
return 0;
}
//*********************************************************************************************************************
// function : de_peak_set_window(unsigned int sel, unsigned int chno, unsigned int win_enable, de_rect window)
// description : set peak window
// parameters :
// sel <rtmx select>
// chno <overlay select>
// win_enable <enable: 0-window mode diable; 1-window mode enable>
// window <window rectangle>
// return :
// success
//*********************************************************************************************************************
int de_peak_set_window(unsigned int sel, unsigned int chno, unsigned int win_enable, de_rect window)
{
	volatile __peak_reg_t *reg = peak_dev[sel][chno];

	/* Restrict peaking to a sub-rectangle when window mode is on. */
	reg->ctrl.bits.win_en = win_enable;
	if (win_enable) {
		reg->win0.bits.win_left = window.x;
		reg->win0.bits.win_top = window.y;
		/* Right/bottom registers hold inclusive coordinates. */
		reg->win1.bits.win_right = window.x + window.w - 1;
		reg->win1.bits.win_bot = window.y + window.h - 1;
	}
	peak_block[sel][chno].dirty = 1;
	return 0;
}
//*********************************************************************************************************************
// function : de_peak_set_para(unsigned int sel, unsigned int chno, unsigned int gain)
// description : set peak para
// parameters :
// sel <rtmx select>
// chno <overlay select>
// gain <peak gain: normal setting 36-42>
// return :
// success
//*********************************************************************************************************************
/* Program the fixed peaking filter/gain tuning set for one unit.
 * NOTE(review): the 'gain' parameter is ignored and a constant 36 is
 * written instead - the trailing "//gain" comment suggests the
 * parameter was meant to be used; confirm whether the fixed tuning is
 * intentional before changing it. */
int de_peak_set_para(unsigned int sel, unsigned int chno, unsigned int gain)
{
	peak_dev[sel][chno]->gain.bits.gain = 36; //gain
	peak_dev[sel][chno]->filter.bits.filter_sel = 0;
	peak_dev[sel][chno]->filter.bits.hp_ratio = 4;
	peak_dev[sel][chno]->filter.bits.bp0_ratio = 12;
	peak_dev[sel][chno]->filter.bits.bp1_ratio = 0;
	peak_dev[sel][chno]->gainctrl.bits.beta = 0;
	peak_dev[sel][chno]->gainctrl.bits.dif_up = 128;
	peak_dev[sel][chno]->shootctrl.bits.neg_gain = 31;
	peak_dev[sel][chno]->coring.bits.corthr = 4;
	peak_block[sel][chno].dirty = 1;
	return 0;
}
//*********************************************************************************************************************
// function : de_peak_info2para(unsigned int sharp, de_rect window, __peak_config_data *para)
// description : info->para conversion
// parameters :
// sharp <info from user>
// window <window info>
// para <bsp para>
// return :
// success
//*********************************************************************************************************************
int de_peak_info2para(unsigned int sharp, de_rect window, __peak_config_data *para)
{
	/* Peaking is enabled only for sharpness levels 1 and 3; the
	 * window argument is currently unused (window programming is
	 * handled elsewhere). */
	if (sharp == 1 || sharp == 3)
		para->peak_en = 1;
	else
		para->peak_en = 0;
	return 0;
}
|
407016.c | int printf(const char *, ...);
int tofloat(long double ld) {
float a = ld;
double b = ld;
return printf("%f, %f\n", a, b);
}
/* Convert a long double to every basic integer width (signed and
 * unsigned), print the results, and return printf's character count.
 * Callers must pass values representable in each target type, since
 * out-of-range floating-to-integer conversion is undefined in ISO C. */
int toint(long double ld) {
	char as_char = ld;
	unsigned char as_uchar = ld;
	int as_int = ld;
	unsigned int as_uint = ld;
	long as_long = ld;
	unsigned long as_ulong = ld;
	return printf("%d, %d, %d, %u, %ld, %lu\n",
	              as_char, as_uchar, as_int, as_uint, as_long, as_ulong);
}
int main(void) {
	/* Exercise long double -> integer and -> float conversions.
	 * NOTE(review): e and f are far outside the range of char/int,
	 * so several conversions inside toint() are undefined behavior
	 * in ISO C - this looks like a deliberate compiler/backend
	 * conversion test rather than portable code. */
	long double d = 3.14, e = 145260912182745.12486L, f = -972316.70L;
	return toint(d) + toint(e) + toint(f)
	    + tofloat(d) + tofloat(e) + tofloat(f);
}
|
380819.c | /******************************************************************************
* @file main.c
* @version V3.00
* @brief Demonstrate how to transfer data between USB device and PC through USB HID interface.
* A windows tool is also included in this sample code to connect with a USB device.
*
* @copyright SPDX-License-Identifier: Apache-2.0
* @copyright Copyright (C) 2021 Nuvoton Technology Corp. All rights reserved.
******************************************************************************/
#include <stdio.h>
#include "NuMicro.h"
#include "hid_transfer.h"
#define CRYSTAL_LESS 1
#define TRIM_INIT (SYS_BASE+0x10C)
void SYS_Init(void);
void UART0_Init(void);
void PowerDown(void);
int IsDebugFifoEmpty(void);
/**
 * @brief  Configure system clocks (192 MHz core, HIRC-sourced HCLK/UART,
 *         crystal-less or PLL-based USB clock depending on CRYSTAL_LESS),
 *         enable peripheral clocks, and set up the multi-function pins
 *         for UART0 and the USB device port.
 */
void SYS_Init(void)
{
    /*---------------------------------------------------------------------------------------------------------*/
    /* Init System Clock                                                                                       */
    /*---------------------------------------------------------------------------------------------------------*/
    /* Enable HIRC clock */
    CLK_EnableXtalRC(CLK_PWRCTL_HIRCEN_Msk);
    /* Wait for HIRC clock ready */
    CLK_WaitClockReady(CLK_STATUS_HIRCSTB_Msk);
    /* Select HCLK clock source as HIRC and HCLK clock divider as 1 */
    CLK_SetHCLK(CLK_CLKSEL0_HCLKSEL_HIRC, CLK_CLKDIV0_HCLK(1));
    /* Set PCLK0 and PCLK1 to HCLK/2 */
    CLK->PCLKDIV = (CLK_PCLKDIV_APB0DIV_DIV2 | CLK_PCLKDIV_APB1DIV_DIV2);
    /* Enable all GPIO clock */
    CLK->AHBCLK0 |= CLK_AHBCLK0_GPACKEN_Msk | CLK_AHBCLK0_GPBCKEN_Msk | CLK_AHBCLK0_GPCCKEN_Msk | CLK_AHBCLK0_GPDCKEN_Msk |
                    CLK_AHBCLK0_GPECKEN_Msk | CLK_AHBCLK0_GPFCKEN_Msk | CLK_AHBCLK0_GPGCKEN_Msk | CLK_AHBCLK0_GPHCKEN_Msk;
    CLK->AHBCLK1 |= CLK_AHBCLK1_GPICKEN_Msk | CLK_AHBCLK1_GPJCKEN_Msk;
#if (!CRYSTAL_LESS)
    /* Enable HXT clock */
    CLK_EnableXtalRC(CLK_PWRCTL_HXTEN_Msk);
    /* Wait for HXT clock ready */
    CLK_WaitClockReady(CLK_STATUS_HXTSTB_Msk);
    /* Set core clock to 192MHz */
    CLK_SetCoreClock(FREQ_192MHZ);
    /* Select USB clock source as PLL/2 and USB clock divider as 2 */
    CLK_SetModuleClock(USBD_MODULE, CLK_CLKSEL0_USBSEL_PLL_DIV2, CLK_CLKDIV0_USB(2));
#else
    /* Enable HIRC48M clock */
    CLK_EnableXtalRC(CLK_PWRCTL_HIRC48MEN_Msk);
    /* Waiting for HIRC48M clock ready */
    CLK_WaitClockReady(CLK_STATUS_HIRC48MSTB_Msk);
    /* Set core clock to 192MHz */
    CLK_SetCoreClock(FREQ_192MHZ);
    /* Select USB clock source as HIRC48M and USB clock divider as 1 */
    CLK_SetModuleClock(USBD_MODULE, CLK_CLKSEL0_USBSEL_HIRC48M, CLK_CLKDIV0_USB(1));
#endif
    /* Enable UART0 module clock */
    CLK_EnableModuleClock(UART0_MODULE);
    /* Select UART0 module clock source as HIRC and UART0 module clock divider as 1 */
    CLK_SetModuleClock(UART0_MODULE, CLK_CLKSEL1_UART0SEL_HIRC, CLK_CLKDIV0_UART0(1));
    /* Select USBD */
    SYS->USBPHY = (SYS->USBPHY & ~SYS_USBPHY_USBROLE_Msk) | SYS_USBPHY_USBEN_Msk | SYS_USBPHY_SBO_Msk;
    /* Enable USBD module clock */
    CLK_EnableModuleClock(USBD_MODULE);
    /*---------------------------------------------------------------------------------------------------------*/
    /* Init I/O Multi-function                                                                                 */
    /*---------------------------------------------------------------------------------------------------------*/
    /* Set multi-function pins for UART0 RXD and TXD */
    SET_UART0_RXD_PB12();
    SET_UART0_TXD_PB13();
    /* USBD multi-function pins for VBUS, D+, D-, and ID pins */
    SET_USB_VBUS_PA12();
    SET_USB_D_N_PA13();
    SET_USB_D_P_PA14();
    SET_USB_OTG_ID_PA15();
}
/** @brief  Reset UART0 and open it at 115200 baud for console printf. */
void UART0_Init(void)
{
    /*---------------------------------------------------------------------------------------------------------*/
    /* Init UART                                                                                               */
    /*---------------------------------------------------------------------------------------------------------*/
    /* Reset UART */
    SYS_ResetModule(UART0_RST);
    /* Configure UART and set UART Baudrate */
    UART_Open(UART0, 115200);
}
/**
 * @brief  Drain the debug UART (bounded wait), arm the USB wakeup
 *         interrupt, and enter chip power-down until bus activity
 *         wakes the core again.
 *
 * Improvement: the leftover PDEN flag is now cleared with an explicit
 * bit clear instead of an XOR toggle, so the write can never set the
 * flag by accident; the timeout loop is braced for clarity.
 */
void PowerDown(void)
{
    uint32_t u32TimeOutCnt;

    /* Unlock protected registers */
    SYS_UnlockReg();

    printf("Enter power down ...\n");

    /* Wait for the message to leave the debug FIFO, but never hang. */
    u32TimeOutCnt = SystemCoreClock; /* 1 second time-out */
    while(!IsDebugFifoEmpty())
    {
        if(--u32TimeOutCnt == 0)
            break;
    }

    /* Wakeup Enable */
    USBD_ENABLE_INT(USBD_INTEN_WKEN_Msk);

    CLK_PowerDown();

    /* Clear PWR_DOWN_EN if it is not clear by itself */
    if(CLK->PWRCTL & CLK_PWRCTL_PDEN_Msk)
        CLK->PWRCTL &= ~CLK_PWRCTL_PDEN_Msk;

    printf("device wakeup!\n");

    /* Lock protected registers */
    SYS_LockReg();
}
/*---------------------------------------------------------------------------------------------------------*/
/* Main Function */
/*---------------------------------------------------------------------------------------------------------*/
/* Main loop: bring up clocks, UART and the USB HID device, then manage
 * the crystal-less HIRC auto-trim against USB SOF and drop into
 * power-down whenever the host suspends the bus. */
int32_t main(void)
{
#if CRYSTAL_LESS
    uint32_t u32TrimInit;
#endif
    /* Unlock protected registers */
    SYS_UnlockReg();
    /* Init System, peripheral clock and multi-function I/O */
    SYS_Init();
    /* Init UART for printf */
    UART0_Init();
    printf("NuMicro USB HID Transfer via Control Transfer\n");
    printf("Windows tool will Read and Write one pair of reports(periodic exchanges of reports).\n");
    USBD_Open(&gsInfo, HID_ClassRequest, NULL);
    /* Endpoint configuration */
    HID_Init();
    USBD_Start();
    NVIC_EnableIRQ(USBD_IRQn);
#if CRYSTAL_LESS
    /* Backup default trim */
    u32TrimInit = M32(TRIM_INIT);
#endif
    /* Clear SOF */
    USBD->INTSTS = USBD_INTSTS_SOFIF_Msk;
    while(1)
    {
#if CRYSTAL_LESS
        /* Start USB trim if it is not enabled. */
        if((SYS->HIRCTCTL & SYS_HIRCTCTL_FREQSEL_Msk) != 1)
        {
            /* Start USB trim only when SOF */
            if(USBD->INTSTS & USBD_INTSTS_SOFIF_Msk)
            {
                /* Clear SOF */
                USBD->INTSTS = USBD_INTSTS_SOFIF_Msk;
                /* Re-enable crystal-less */
                SYS->HIRCTCTL = 0x01;
                SYS->HIRCTCTL |= SYS_HIRCTCTL_REFCKSEL_Msk | SYS_HIRCTCTL_BOUNDEN_Msk | (8 << SYS_HIRCTCTL_BOUNDARY_Pos);
            }
        }
        /* Disable USB Trim when error, restore the backed-up trim value
         * and fall back to the untrimmed HIRC until the next SOF. */
        if(SYS->HIRCTISTS & (SYS_HIRCTISTS_CLKERRIF_Msk | SYS_HIRCTISTS_TFAILIF_Msk))
        {
            /* Init TRIM */
            M32(TRIM_INIT) = u32TrimInit;
            /* Disable crystal-less */
            SYS->HIRCTCTL = 0;
            /* Clear error flags */
            SYS->HIRCTISTS = SYS_HIRCTISTS_CLKERRIF_Msk | SYS_HIRCTISTS_TFAILIF_Msk;
            /* Clear SOF */
            USBD->INTSTS = USBD_INTSTS_SOFIF_Msk;
        }
#endif
        /* Enter power down when USB suspend */
        if(g_u8Suspend)
            PowerDown();
    }
}
|
420750.c | #include "ct-head/test.h"
#include "geom/circle.h"
//extern CT_IArea __ct_iarea_circle2;
//extern CT_ICircumference __ct_icircumference_circle2;
CT_TEST_DECLS
/* Unit test: area, circumference and point classification for radius-1
 * and radius-2 circles. NOTE(review): the area/circumference checks
 * use exact float equality, which only holds if the library computes
 * the same single-rounded products of CT_PI/CT_TAU - confirm against
 * geom/circle.h before tightening or porting this test. */
int test_circle() {
  CT_Circle2f c, c2;
  ct_circle2f_initr(&c, 1);
  ct_circle2f_initr(&c2, 2);
  CT_IS(CT_PI == ct_circle2f_area(&c), "area c");
  CT_IS(CT_PI * 4 == ct_circle2f_area(&c2), "area c2");
  CT_IS(CT_TAU == ct_circle2f_circumference(&c), "circum c");
  CT_IS(CT_TAU * 2 == ct_circle2f_circumference(&c2), "circum c2");
  CT_Vec2f p = {.x = 0, .y = 0};
  /* classify: 1 = inside, 0 = on the circle, -1 = outside. */
  CT_IS(1 == ct_circle2f_classify_point(&c, &p), "classify 1");
  CT_IS(0 == ct_circle2f_classify_point(&c, ct_set2fxy(&p, 1, 0)),
        "classify 0");
  CT_IS(-1 == ct_circle2f_classify_point(&c, ct_set2fxy(&p, 1.001f, 0)),
        "classify -1");
  return 0;
}
|
660502.c | /* SPDX-License-Identifier: MIT */
/*
* Simple test case showing using send and recv through io_uring
*/
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <pthread.h>
#include "liburing.h"
static char str[] = "This is a test of send and recv over io_uring!";
#define MAX_MSG 128
#define PORT 10200
#define HOST "127.0.0.1"
#if 0
# define io_uring_prep_send io_uring_prep_write
# define io_uring_prep_recv io_uring_prep_read
#endif
/* Bind a UDP socket on PORT, queue and submit a single recv SQE for it
 * (optionally through a registered-file slot). On success *sock holds
 * the receiving fd; returns 0 on success, 1 on failure. */
static int recv_prep(struct io_uring *ring, struct iovec *iov, int *sock,
		     int registerfiles)
{
	struct sockaddr_in saddr;
	struct io_uring_sqe *sqe;
	int sockfd, ret, val, use_fd;

	memset(&saddr, 0, sizeof(saddr));
	saddr.sin_family = AF_INET;
	saddr.sin_addr.s_addr = htonl(INADDR_ANY);
	saddr.sin_port = htons(PORT);

	sockfd = socket(AF_INET, SOCK_DGRAM, 0);
	if (sockfd < 0) {
		perror("socket");
		return 1;
	}

	val = 1;
	setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));

	ret = bind(sockfd, (struct sockaddr *)&saddr, sizeof(saddr));
	if (ret < 0) {
		perror("bind");
		goto err;
	}

	if (registerfiles) {
		/* With registered files the SQE uses slot index 0, not the fd. */
		ret = io_uring_register_files(ring, &sockfd, 1);
		if (ret) {
			fprintf(stderr, "file reg failed\n");
			goto err;
		}
		use_fd = 0;
	} else {
		use_fd = sockfd;
	}

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv(sqe, use_fd, iov->iov_base, iov->iov_len, 0);
	if (registerfiles)
		sqe->flags |= IOSQE_FIXED_FILE;
	sqe->user_data = 2;

	ret = io_uring_submit(ring);
	if (ret <= 0) {
		fprintf(stderr, "submit failed: %d\n", ret);
		goto err;
	}

	*sock = sockfd;
	return 0;
err:
	close(sockfd);
	return 1;
}
/*
 * Wait for the recv completion queued by recv_prep() and validate that
 * the payload matches the test string (length includes the NUL).
 * Returns 0 on success or a supported-feature skip, 1 on failure.
 *
 * Fix: the CQE is now marked consumed with io_uring_cqe_seen() once we
 * are done reading it; the original never advanced the CQ ring.
 */
static int do_recv(struct io_uring *ring, struct iovec *iov)
{
	struct io_uring_cqe *cqe;
	int ret;

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret) {
		fprintf(stdout, "wait_cqe: %d\n", ret);
		goto err;
	}
	if (cqe->res == -EINVAL) {
		fprintf(stdout, "recv not supported, skipping\n");
		io_uring_cqe_seen(ring, cqe);
		return 0;
	}
	if (cqe->res < 0) {
		fprintf(stderr, "failed cqe: %d\n", cqe->res);
		goto err;
	}
	/* The sender transmits strlen(str)+1 bytes (string plus NUL). */
	if (cqe->res - 1 != strlen(str)) {
		fprintf(stderr, "got wrong length: %d/%d\n", cqe->res,
							(int) strlen(str) + 1);
		goto err;
	}
	if (strcmp(str, iov->iov_base)) {
		fprintf(stderr, "string mismatch\n");
		goto err;
	}
	io_uring_cqe_seen(ring, cqe);
	return 0;
err:
	return 1;
}
struct recv_data {
	/* Handshake: test() holds this locked until the receiver thread
	 * has armed its recv SQE and unlocks it. */
	pthread_mutex_t mutex;
	/* Non-zero: set up the ring with IORING_SETUP_SQPOLL. */
	int use_sqthread;
	/* Non-zero: register the socket via io_uring_register_files(). */
	int registerfiles;
};
/* Receiver thread: create a ring (optionally SQPOLL / registered
 * files), arm one recv via recv_prep(), release the handshake mutex so
 * the sender may proceed, then validate the received payload. */
static void *recv_fn(void *data)
{
	struct recv_data *rd = data;
	char buf[MAX_MSG + 1];
	struct iovec iov = {
		.iov_base = buf,
		.iov_len = sizeof(buf) - 1,
	};
	struct io_uring_params p = { };
	struct io_uring ring;
	int ret, sock;

	if (rd->use_sqthread)
		p.flags = IORING_SETUP_SQPOLL;
	ret = io_uring_queue_init_params(1, &ring, &p);
	if (ret) {
		/* SQPOLL requires privileges on older kernels: treat as a
		 * skip rather than a failure for unprivileged runs. */
		if (rd->use_sqthread && geteuid()) {
			fprintf(stdout, "Skipping SQPOLL variant\n");
			pthread_mutex_unlock(&rd->mutex);
			ret = 0;
			goto err;
		}
		fprintf(stderr, "queue init failed: %d\n", ret);
		goto err;
	}
	if (rd->use_sqthread && !rd->registerfiles) {
		if (!(p.features & IORING_FEAT_SQPOLL_NONFIXED)) {
			fprintf(stdout, "Non-registered SQPOLL not available, skipping\n");
			pthread_mutex_unlock(&rd->mutex);
			/* NOTE(review): the ring is initialized but never
			 * exited on this skip path, and ret still holds 0
			 * from queue_init - confirm this is intended. */
			goto err;
		}
	}
	ret = recv_prep(&ring, &iov, &sock, rd->registerfiles);
	if (ret) {
		/* NOTE(review): this failure path neither unlocks the
		 * handshake mutex (test() would block forever) nor exits
		 * the ring - worth confirming against upstream. */
		fprintf(stderr, "recv_prep failed: %d\n", ret);
		goto err;
	}
	/* Receiver armed: let the sender proceed. */
	pthread_mutex_unlock(&rd->mutex);
	ret = do_recv(&ring, &iov);
	close(sock);
	io_uring_queue_exit(&ring);
err:
	return (void *)(intptr_t)ret;
}
/*
 * Send the test string (including its NUL) over UDP to HOST:PORT via a
 * one-entry ring. Returns 0 on success or feature-skip, 1 on failure.
 *
 * Fixes: the socket and the ring are now released on every exit path
 * (connect failure previously leaked both, and the ring was never
 * exited at all); the io_uring_wait_cqe() return value is checked
 * before cqe is dereferenced; the CQE is marked seen; the sockaddr
 * pointer is cast properly and the res/len comparison made same-signed.
 */
static int do_send(void)
{
	struct sockaddr_in saddr;
	struct iovec iov = {
		.iov_base = str,
		.iov_len = sizeof(str),
	};
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int sockfd, ret;

	ret = io_uring_queue_init(1, &ring, 0);
	if (ret) {
		fprintf(stderr, "queue init failed: %d\n", ret);
		return 1;
	}

	memset(&saddr, 0, sizeof(saddr));
	saddr.sin_family = AF_INET;
	saddr.sin_port = htons(PORT);
	inet_pton(AF_INET, HOST, &saddr.sin_addr);

	sockfd = socket(AF_INET, SOCK_DGRAM, 0);
	if (sockfd < 0) {
		perror("socket");
		io_uring_queue_exit(&ring);
		return 1;
	}

	ret = connect(sockfd, (struct sockaddr *)&saddr, sizeof(saddr));
	if (ret < 0) {
		perror("connect");
		goto err;
	}

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_send(sqe, sockfd, iov.iov_base, iov.iov_len, 0);
	sqe->user_data = 1;

	ret = io_uring_submit(&ring);
	if (ret <= 0) {
		fprintf(stderr, "submit failed: %d\n", ret);
		goto err;
	}

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret) {
		fprintf(stderr, "wait_cqe: %d\n", ret);
		goto err;
	}
	if (cqe->res == -EINVAL) {
		fprintf(stdout, "send not supported, skipping\n");
		io_uring_cqe_seen(&ring, cqe);
		close(sockfd);
		io_uring_queue_exit(&ring);
		return 0;
	}
	if (cqe->res != (int) iov.iov_len) {
		fprintf(stderr, "failed cqe: %d\n", cqe->res);
		goto err;
	}
	io_uring_cqe_seen(&ring, cqe);

	close(sockfd);
	io_uring_queue_exit(&ring);
	return 0;
err:
	close(sockfd);
	io_uring_queue_exit(&ring);
	return 1;
}
/* Run one sender/receiver round trip with the given ring options.
 * The mutex implements a startup handshake: it is locked before the
 * receiver thread starts and taken again (blocking) before do_send(),
 * so the datagram cannot be sent before the recv SQE is armed. */
static int test(int use_sqthread, int regfiles)
{
	pthread_mutexattr_t attr;
	pthread_t recv_thread;
	struct recv_data rd;
	int ret;
	void *retval;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setpshared(&attr, 1);
	pthread_mutex_init(&rd.mutex, &attr);
	pthread_mutex_lock(&rd.mutex);
	rd.use_sqthread = use_sqthread;
	rd.registerfiles = regfiles;

	ret = pthread_create(&recv_thread, NULL, recv_fn, &rd);
	if (ret) {
		fprintf(stderr, "Thread create failed: %d\n", ret);
		return 1;
	}

	/* Blocks until recv_fn unlocks, i.e. the receiver is ready. */
	pthread_mutex_lock(&rd.mutex);
	do_send();
	pthread_join(recv_thread, &retval);
	/* NOTE(review): attr and rd.mutex are never destroyed - harmless
	 * in a short-lived test binary, but technically leaked. */
	return (int)(intptr_t)retval;
}
/* Drive the three ring configurations in order; stop at the first
 * failure and propagate its return code. */
int main(int argc, char *argv[])
{
	static const struct {
		int sqthread;
		int regfiles;
		const char *msg;
	} cases[] = {
		{ 0, 0, "test sqthread=0 failed\n" },
		{ 1, 1, "test sqthread=1 reg=1 failed\n" },
		{ 1, 0, "test sqthread=1 reg=0 failed\n" },
	};
	unsigned int i;
	int ret;

	if (argc > 1)
		return 0;

	for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		ret = test(cases[i].sqthread, cases[i].regfiles);
		if (ret) {
			fprintf(stderr, "%s", cases[i].msg);
			return ret;
		}
	}
	return 0;
}
|
346991.c | /* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE194_Unexpected_Sign_Extension__fscanf_memmove_51a.c
Label Definition File: CWE194_Unexpected_Sign_Extension.label.xml
Template File: sources-sink-51a.tmpl.c
*/
/*
* @description
* CWE: 194 Unexpected Sign Extension
* BadSource: fscanf Read data from the console using fscanf()
* GoodSource: Positive integer
* Sink: memmove
* BadSink : Copy strings using memmove() with the length of data
* Flow Variant: 51 Data flow: data passed as an argument from one function to another in different source files
*
* */
#include "std_testcase.h"
#ifndef OMITBAD
/* bad function declaration */
void CWE194_Unexpected_Sign_Extension__fscanf_memmove_51b_badSink(short data);
/*
 * Bad variant: reads a short from stdin and passes it, unvalidated, to the
 * memmove sink in the companion 51b file. The unchecked, possibly negative
 * value is the point of this CWE-194 (unexpected sign extension) testcase.
 */
void CWE194_Unexpected_Sign_Extension__fscanf_memmove_51_bad()
{
    short data;
    /* Initialize data */
    data = 0;
    /* FLAW: Use a value input from the console using fscanf() */
    /* NOTE: fscanf's return value is deliberately ignored; on a failed
     * read, data stays 0. Do not "fix" this — it is the testcase flaw. */
    fscanf (stdin, "%hd", &data);
    CWE194_Unexpected_Sign_Extension__fscanf_memmove_51b_badSink(data);
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* good function declarations */
void CWE194_Unexpected_Sign_Extension__fscanf_memmove_51b_goodG2BSink(short data);
/* goodG2B uses the GoodSource with the BadSink */
static void goodG2B()
{
    /* GoodSource: a fixed positive value instead of console input,
     * kept below the sink's buffer size. */
    short data = 0;

    data = 100-1;
    CWE194_Unexpected_Sign_Extension__fscanf_memmove_51b_goodG2BSink(data);
}
/* Public good-variant entry point: exercises the good source/sink pair. */
void CWE194_Unexpected_Sign_Extension__fscanf_memmove_51_good()
{
    goodG2B();
}
#endif /* OMITGOOD */
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
#ifdef INCLUDEMAIN
/*
 * Stand-alone driver (built only with INCLUDEMAIN): calls the good and/or
 * bad entry points depending on OMITGOOD/OMITBAD.
 */
int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    CWE194_Unexpected_Sign_Extension__fscanf_memmove_51_good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    CWE194_Unexpected_Sign_Extension__fscanf_memmove_51_bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}
#endif
|
647244.c | /*-
* Copyright (c) 1999-2002, 2007 Robert N. M. Watson
* Copyright (c) 2001-2003 Networks Associates Technology, Inc.
* All rights reserved.
*
* This software was developed by Robert Watson for the TrustedBSD Project.
*
* This software was developed for the FreeBSD Project in part by Network
* Associates Laboratories, the Security Research Division of Network
* Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"),
* as part of the DARPA CHATS research program.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Developed by the TrustedBSD Project.
*
* Sample policy implementing no entry points; for performance measurement
* purposes only. If you're looking for a stub policy to base new policies
* on, try mac_stub.
*/
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <security/mac/mac_policy.h>
/*
 * Deliberately empty ops vector: registering a policy with no entry
 * points measures the MAC framework's bare dispatch overhead (see the
 * file header comment).
 */
static struct mac_policy_ops none_ops =
{
};
MAC_POLICY_SET(&none_ops, mac_none, "TrustedBSD MAC/None",
    MPC_LOADTIME_FLAG_UNLOADOK, NULL);
|
40591.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <gsl/gsl_linalg.h>
int DEBUG = 1;
int DEBUG_SOLUTION = 1;
int STORE_TIMING = 0;
FILE* decompFile;
FILE* solveFile;
/*
 * Allocate a vector of `size` doubles, each uniformly drawn from [0, 1]
 * via rand(). The caller owns the returned buffer and must free() it.
 * Exits the program if the allocation fails.
 */
double* generateVector(int size)
{
    /* FIX: calloc(nmemb, size) — element count first, element size second */
    double* vec = calloc(size, sizeof(double));
    if (vec == NULL){
        /* FIX: the result was previously dereferenced unchecked */
        fprintf(stderr, "generateVector: out of memory\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < size; ++i){
        vec[i] = (double)rand() / RAND_MAX; // double [0, 1]
    }
    return vec;
}
/* Print a size x size row-major matrix, one row per line, then a blank line. */
void printMatrix(double* m, int size)
{
    for (int row = 0; row < size; ++row){
        const double* r = m + row * size;
        for (int col = 0; col < size; ++col){
            printf("%g ", r[col]);
        }
        printf("\n");
    }
    printf("\n");
}
/* Print a vector, one element per line, then a blank line. */
void printVector(double* v, int size)
{
    for (int idx = 0; idx < size; ++idx){
        printf("%g", v[idx]);
        printf("\n");
    }
    printf("\n");
}
/*
 * Return a newly allocated copy of the first `size` doubles of `src`.
 * The caller owns the returned buffer and must free() it.
 * Exits the program if the allocation fails.
 */
double* copyVector(double* src, int size)
{
    /* FIX: calloc(nmemb, size) — element count first, element size second */
    double* vec = calloc(size, sizeof(double));
    if (vec == NULL){
        /* FIX: the result was previously dereferenced unchecked */
        fprintf(stderr, "copyVector: out of memory\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < size; ++i){
        vec[i] = src[i];
    }
    return vec;
}
/*
 * Verify that x solves m * x = b, where m is the original (pre-LU)
 * size x size row-major matrix. Residuals larger than epsilon are counted
 * and, when DEBUG_SOLUTION is set, reported on stdout.
 */
void checkSolution(double* m, double* b, gsl_vector x, int size)
{
    const double epsilon = 0.0001;
    int failures = 0;
    for (int i = 0; i < size; ++i){
        double sum = 0;
        for (int j = 0; j < size; ++j){
            sum += m[j + i * size] * x.data[j];
        }
        /* FIX: abs() takes an int, so abs(sum - b[i]) truncated the
         * residual toward zero and errors below 1.0 were never flagged.
         * Compute the absolute value in double arithmetic instead. */
        double diff = sum - b[i];
        if (diff < 0) diff = -diff;
        if (diff > epsilon){
            failures++;
            if (DEBUG_SOLUTION){
                printf("Wrong solution was calculated: %g --- %g\n", sum, b[i]);
                printf("\n");
            }
        }
    }
    if (DEBUG_SOLUTION){
        if (failures == 0){
            printf("All calculated values were correct");
        } else {
            /* FIX: %n writes through a pointer argument (UB here);
             * %d prints the failure count as intended. */
            printf("Total %d errors in calculated value", failures);
        }
        printf("\n");
    }
}
/* LU-decompose A in place, timing the call; when STORE_TIMING is set,
 * append "size seconds" to decompFile. */
void decomp(gsl_matrix *A, gsl_permutation* p, int *signum, int size)
{
    clock_t begin = clock();
    gsl_linalg_LU_decomp(A, p, signum);
    double elapsed = (double)(clock() - begin) / CLOCKS_PER_SEC;
    if (STORE_TIMING) fprintf (decompFile,"%d %g\n", size, elapsed);
}
/* Solve LU x = b using the permutation p, timing the call; when
 * STORE_TIMING is set, append "size seconds" to solveFile. */
void solve(const gsl_matrix* LU, const gsl_permutation* p, const gsl_vector* b, gsl_vector* x, int size)
{
    clock_t begin = clock();
    gsl_linalg_LU_solve(LU, p, b, x);
    double elapsed = (double)(clock() - begin) / CLOCKS_PER_SEC;
    if (STORE_TIMING) fprintf (solveFile,"%d %g\n", size, elapsed);
}
/*
 * Build a random n x n system A x = b, solve it via GSL LU decomposition,
 * verify the solution against the original matrix, and release all
 * resources.
 */
void calculate(int n)
{
    double* a_data = generateVector(n * n);
    double* b_data = generateVector(n);
    /* Keep an untouched copy: the LU decomposition overwrites a_data */
    double* originalMatrix = copyVector(a_data, n * n);
    gsl_matrix_view m = gsl_matrix_view_array(a_data, n, n);
    gsl_vector_view b = gsl_vector_view_array(b_data, n);
    if (DEBUG){
        printf("Matrix M:\n");
        printMatrix(a_data, n);
        printf("Vector B:\n");
        printVector(b_data, n);
    }
    gsl_vector *x = gsl_vector_alloc(n);
    int s;
    gsl_permutation *p = gsl_permutation_alloc(n);
    decomp(&m.matrix, p, &s, n);
    solve(&m.matrix, p, &b.vector, x, n);
    if (DEBUG){
        printf("Solution X:\n");
        gsl_vector_fprintf(stdout, x, "%g");
        printf("\n");
    }
    checkSolution(originalMatrix, b_data, *x, n);
    gsl_permutation_free(p);
    gsl_vector_free(x);
    /* FIX: the three generated buffers were leaked on every call —
     * fatal for the benchmark loop that runs this 100 times. */
    free(a_data);
    free(b_data);
    free(originalMatrix);
}
/*
 * Usage: prog N
 *   N > 0 : solve a single random N x N system (verbose).
 *   N < 0 : benchmark sizes 10..1000 step 10, writing timings to
 *           decomp.txt / solve.txt.
 *   N == 0 or unparseable input: exit with -1.
 */
int main(int argc, char* argv[])
{
    if (argc != 2){
        return -1;
    }
    char *pEnd;
    int n = strtol(argv[1], &pEnd, 0);
    srand(time(NULL));
    decompFile = fopen("decomp.txt", "w");
    solveFile = fopen("solve.txt", "w");
    /* FIX: fopen() results were never checked before use */
    if (decompFile == NULL || solveFile == NULL){
        fprintf(stderr, "failed to open timing output files\n");
        if (decompFile) fclose(decompFile);
        if (solveFile) fclose(solveFile);
        return -1;
    }
    if (n == 0){
        /* FIX: close the files on this early-exit path too */
        fclose(decompFile);
        fclose(solveFile);
        return -1;
    } else if (n < 0){
        DEBUG = 0;
        DEBUG_SOLUTION = 0;
        STORE_TIMING = 1;
        for (int i = 10; i <= 1000; i+=10){
            calculate(i);
        }
    } else {
        calculate(n);
    }
    /* FIX: the timing files were never closed (buffered output could
     * be lost) */
    fclose(decompFile);
    fclose(solveFile);
    return 0;
}
86419.c | /*
* Copyright (C) 2017 Western Digital Corporation or its affiliates.
*
* This file is released under the GPL.
*/
#include "dm-zoned.h"
#include <linux/module.h>
#include <linux/crc32.h>
#define DM_MSG_PREFIX "zoned metadata"
/*
* Metadata version.
*/
#define DMZ_META_VER 1
/*
* On-disk super block magic.
*/
#define DMZ_MAGIC ((((unsigned int)('D')) << 24) | \
(((unsigned int)('Z')) << 16) | \
(((unsigned int)('B')) << 8) | \
((unsigned int)('D')))
/*
* On disk super block.
* This uses only 512 B but uses on disk a full 4KB block. This block is
* followed on disk by the mapping table of chunks to zones and the bitmap
* blocks indicating zone block validity.
* The overall resulting metadata format is:
* (1) Super block (1 block)
* (2) Chunk mapping table (nr_map_blocks)
* (3) Bitmap blocks (nr_bitmap_blocks)
* All metadata blocks are stored in conventional zones, starting from the
* the first conventional zone found on disk.
*/
/* On-disk super block layout: 512 B used of a full 4 KB metadata block. */
struct dmz_super {
	/* Magic number */
	__le32 magic; /* 4 */
	/* Metadata version number */
	__le32 version; /* 8 */
	/* Generation number */
	__le64 gen; /* 16 */
	/* This block number */
	__le64 sb_block; /* 24 */
	/* The number of metadata blocks, including this super block */
	__le32 nr_meta_blocks; /* 28 */
	/* The number of sequential zones reserved for reclaim */
	__le32 nr_reserved_seq; /* 32 */
	/* The number of entries in the mapping table */
	__le32 nr_chunks; /* 36 */
	/* The number of blocks used for the chunk mapping table */
	__le32 nr_map_blocks; /* 40 */
	/* The number of blocks used for the block bitmaps */
	__le32 nr_bitmap_blocks; /* 44 */
	/* Checksum: CRC32 of the whole 4 KB block, computed with this field
	 * zeroed and seeded with the generation number (see dmz_write_sb) */
	__le32 crc; /* 48 */
	/* Padding to full 512B sector */
	u8 reserved[464]; /* 512 */
};
/*
* Chunk mapping entry: entries are indexed by chunk number
* and give the zone ID (dzone_id) mapping the chunk on disk.
* This zone may be sequential or random. If it is a sequential
* zone, a second zone (bzone_id) used as a write buffer may
* also be specified. This second zone will always be a randomly
* writeable zone.
*/
/* One chunk-mapping table entry (see the comment above for semantics). */
struct dmz_map {
	__le32 dzone_id; /* data zone mapping the chunk */
	__le32 bzone_id; /* optional random-writable buffer zone */
};
/*
* Chunk mapping table metadata: 512 8-bytes entries per 4KB block.
*/
#define DMZ_MAP_ENTRIES (DMZ_BLOCK_SIZE / sizeof(struct dmz_map))
#define DMZ_MAP_ENTRIES_SHIFT (ilog2(DMZ_MAP_ENTRIES))
#define DMZ_MAP_ENTRIES_MASK (DMZ_MAP_ENTRIES - 1)
#define DMZ_MAP_UNMAPPED UINT_MAX
/*
* Meta data block descriptor (for cached metadata blocks).
*/
struct dmz_mblock {
	struct rb_node node;   /* node in zmd->mblk_rbtree, keyed by no */
	struct list_head link; /* linkage on the LRU or dirty list */
	sector_t no;           /* metadata block number */
	unsigned int ref;      /* reference count, protected by mblk_lock */
	unsigned long state;   /* DMZ_META_* state bits */
	struct page *page;     /* backing page holding the block data */
	void *data;            /* kernel address of page */
};
/*
* Metadata block state flags.
*/
enum {
	DMZ_META_DIRTY,   /* block has unwritten modifications */
	DMZ_META_READING, /* read BIO in flight */
	DMZ_META_WRITING, /* write BIO in flight */
	DMZ_META_ERROR,   /* last I/O on this block failed */
};
/*
* Super block information (one per metadata set).
*/
struct dmz_sb {
	sector_t block;          /* on-disk block number of this super block */
	struct dmz_mblock *mblk; /* cached metadata block holding the sb */
	struct dmz_super *sb;    /* convenience pointer into mblk->data */
};
/*
* In-memory metadata.
*/
struct dmz_metadata {
	struct dmz_dev *dev;

	/* Per-zone block-validity bitmap geometry */
	sector_t zone_bitmap_size;
	unsigned int zone_nr_bitmap_blocks;

	/* On-disk layout counters (mirroring the super block fields) */
	unsigned int nr_bitmap_blocks;
	unsigned int nr_map_blocks;

	unsigned int nr_useable_zones;
	unsigned int nr_meta_blocks;
	unsigned int nr_meta_zones;
	unsigned int nr_data_zones;
	unsigned int nr_rnd_zones;
	unsigned int nr_reserved_seq;
	unsigned int nr_chunks;

	/* Zone information array */
	struct dm_zone *zones;

	struct dm_zone *sb_zone;   /* zone holding the primary super block */
	struct dmz_sb sb[2];       /* the two on-disk metadata sets */
	unsigned int mblk_primary; /* index of the current primary set */
	u64 sb_gen;                /* current super block generation */

	/* Metadata block cache */
	unsigned int min_nr_mblks;
	unsigned int max_nr_mblks;
	atomic_t nr_mblks;
	struct rw_semaphore mblk_sem;  /* write-locked during flush */
	struct mutex mblk_flush_lock;  /* serializes flush executions */
	spinlock_t mblk_lock;          /* protects rbtree, lists, ref counts */
	struct rb_root mblk_rbtree;
	struct list_head mblk_lru_list;
	struct list_head mblk_dirty_list;
	struct shrinker mblk_shrinker;

	/* Zone allocation management */
	struct mutex map_lock;
	struct dmz_mblock **map_mblk;
	unsigned int nr_rnd;
	atomic_t unmap_nr_rnd;
	struct list_head unmap_rnd_list;
	struct list_head map_rnd_list;

	unsigned int nr_seq;
	atomic_t unmap_nr_seq;
	struct list_head unmap_seq_list;
	struct list_head map_seq_list;

	atomic_t nr_reserved_seq_zones;
	struct list_head reserved_seq_zones_list;

	wait_queue_head_t free_wq;
};
/*
* Various accessors
*/
/* Zone ID = index of the zone descriptor in the zones array. */
unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return (unsigned int)(zone - zmd->zones);
}
sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
{
return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_sectors_shift;
}
sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
{
return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_blocks_shift;
}
/* Number of chunks exposed by the target. */
unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
{
	return zmd->nr_chunks;
}
/* Total number of randomly writable data zones. */
unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd)
{
	return zmd->nr_rnd;
}
/* Number of currently unmapped randomly writable zones. */
unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd)
{
	return atomic_read(&zmd->unmap_nr_rnd);
}
/*
* Lock/unlock mapping table.
* The map lock also protects all the zone lists.
*/
/* Take the mapping-table mutex (also guards the zone lists). */
void dmz_lock_map(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->map_lock);
}
/* Release the mapping-table mutex. */
void dmz_unlock_map(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->map_lock);
}
/*
* Lock/unlock metadata access. This is a "read" lock on a semaphore
* that prevents metadata flush from running while metadata are being
* modified. The actual metadata write mutual exclusion is achieved with
* the map lock and zone styate management (active and reclaim state are
* mutually exclusive).
*/
/* Read-lock the metadata semaphore; blocks out a concurrent flush, which
 * takes the write side. */
void dmz_lock_metadata(struct dmz_metadata *zmd)
{
	down_read(&zmd->mblk_sem);
}
/* Release the metadata read lock. */
void dmz_unlock_metadata(struct dmz_metadata *zmd)
{
	up_read(&zmd->mblk_sem);
}
/*
* Lock/unlock flush: prevent concurrent executions
* of dmz_flush_metadata as well as metadata modification in reclaim
* while flush is being executed.
*/
/* Serialize metadata flush against reclaim-side metadata modification. */
void dmz_lock_flush(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->mblk_flush_lock);
}
/* Release the flush mutex. */
void dmz_unlock_flush(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->mblk_flush_lock);
}
/*
* Allocate a metadata block.
*/
/*
 * Allocate a metadata block descriptor and its backing page for block
 * number mblk_no. If the cache is above max_nr_mblks, first try to
 * recycle the least-recently-used clean block instead of allocating.
 * Returns NULL on allocation failure. GFP_NOIO: may be called on the
 * I/O path.
 */
static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
					   sector_t mblk_no)
{
	struct dmz_mblock *mblk = NULL;

	/* See if we can reuse cached blocks */
	if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) {
		spin_lock(&zmd->mblk_lock);
		mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
						struct dmz_mblock, link);
		if (mblk) {
			/* Recycle: detach from LRU and rbtree, then rekey */
			list_del_init(&mblk->link);
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			mblk->no = mblk_no;
		}
		spin_unlock(&zmd->mblk_lock);
		if (mblk)
			return mblk;
	}

	/* Allocate a new block */
	mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
	if (!mblk)
		return NULL;

	mblk->page = alloc_page(GFP_NOIO);
	if (!mblk->page) {
		kfree(mblk);
		return NULL;
	}

	RB_CLEAR_NODE(&mblk->node);
	INIT_LIST_HEAD(&mblk->link);
	mblk->ref = 0;
	mblk->state = 0;
	mblk->no = mblk_no;
	mblk->data = page_address(mblk->page);

	atomic_inc(&zmd->nr_mblks);

	return mblk;
}
/*
* Free a metadata block.
*/
/* Free a cached metadata block's page and descriptor, and drop it from
 * the cache count. */
static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	atomic_dec(&zmd->nr_mblks);
	__free_pages(mblk->page, 0);
	kfree(mblk);
}
/*
* Insert a metadata block in the rbtree.
*/
/*
 * Insert a metadata block in the rbtree.
 * NOTE: smaller block numbers descend to the left only when the existing
 * node's key is below the new one — i.e. the tree is kept in descending
 * order. dmz_get_mblock_fast() walks with the same comparison, so
 * lookups remain consistent with this ordering.
 */
static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct dmz_mblock *b;

	/* Figure out where to put the new node */
	while (*new) {
		b = container_of(*new, struct dmz_mblock, node);
		parent = *new;
		new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
	}

	/* Add new node and rebalance tree */
	rb_link_node(&mblk->node, parent, new);
	rb_insert_color(&mblk->node, root);
}
/*
* Lookup a metadata block in the rbtree. If the block is found, increment
* its reference count.
*/
/*
 * Lookup a metadata block in the rbtree. If the block is found, increment
 * its reference count. Caller must hold mblk_lock (all callers in this
 * file do).
 */
static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
					      sector_t mblk_no)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node *node = root->rb_node;
	struct dmz_mblock *mblk;

	while (node) {
		mblk = container_of(node, struct dmz_mblock, node);
		if (mblk->no == mblk_no) {
			/*
			 * If this is the first reference to the block,
			 * remove it from the LRU list.
			 */
			mblk->ref++;
			if (mblk->ref == 1 &&
			    !test_bit(DMZ_META_DIRTY, &mblk->state))
				list_del_init(&mblk->link);
			return mblk;
		}
		/* Same (descending) ordering as dmz_insert_mblock() */
		node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
	}

	return NULL;
}
/*
* Metadata block BIO end callback.
*/
/*
 * Completion handler for metadata block read/write BIOs: record any
 * error on the block, clear the in-flight state bit and wake waiters.
 */
static void dmz_mblock_bio_end_io(struct bio *bio)
{
	struct dmz_mblock *mblk = bio->bi_private;
	int flag = (bio_op(bio) == REQ_OP_WRITE) ?
		DMZ_META_WRITING : DMZ_META_READING;

	if (bio->bi_status)
		set_bit(DMZ_META_ERROR, &mblk->state);

	clear_bit_unlock(flag, &mblk->state);
	smp_mb__after_atomic();
	wake_up_bit(&mblk->state, flag);

	bio_put(bio);
}
/*
* Read an uncached metadata block from disk and add it to the cache.
*/
/*
 * Read an uncached metadata block from disk and add it to the cache.
 * Returns the block with a reference held and DMZ_META_READING set until
 * the read BIO completes; NULL on allocation failure.
 */
static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
					      sector_t mblk_no)
{
	struct dmz_mblock *mblk, *m;
	sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
	struct bio *bio;

	/* Get a new block and a BIO to read it */
	mblk = dmz_alloc_mblock(zmd, mblk_no);
	if (!mblk)
		return NULL;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		dmz_free_mblock(zmd, mblk);
		return NULL;
	}

	spin_lock(&zmd->mblk_lock);

	/*
	 * Make sure that another context did not start reading
	 * the block already.
	 */
	m = dmz_get_mblock_fast(zmd, mblk_no);
	if (m) {
		/* Lost the race: drop our copy and return the winner's
		 * block (already referenced by dmz_get_mblock_fast). */
		spin_unlock(&zmd->mblk_lock);
		dmz_free_mblock(zmd, mblk);
		bio_put(bio);
		return m;
	}

	mblk->ref++;
	set_bit(DMZ_META_READING, &mblk->state);
	dmz_insert_mblock(zmd, mblk);

	spin_unlock(&zmd->mblk_lock);

	/* Submit read BIO */
	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, zmd->dev->bdev);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);

	return mblk;
}
/*
* Free metadata blocks.
*/
/*
 * Free up to `limit` unused (LRU) metadata blocks, never shrinking below
 * min_nr_mblks. Returns the number of blocks freed. Caller holds
 * mblk_lock (both call sites in this file do).
 */
static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd,
					     unsigned long limit)
{
	struct dmz_mblock *mblk;
	unsigned long count = 0;

	/* max_nr_mblks == 0 means the cache is unbounded: never shrink */
	if (!zmd->max_nr_mblks)
		return 0;

	while (!list_empty(&zmd->mblk_lru_list) &&
	       atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks &&
	       count < limit) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
		count++;
	}

	return count;
}
/*
* For mblock shrinker: get the number of unused metadata blocks in the cache.
*/
/* Shrinker "count" callback: report the current cache population. */
static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
					       struct shrink_control *sc)
{
	struct dmz_metadata *zmd =
		container_of(shrink, struct dmz_metadata, mblk_shrinker);

	return atomic_read(&zmd->nr_mblks);
}
/*
* For mblock shrinker: scan unused metadata blocks and shrink the cache.
*/
/* Shrinker "scan" callback: trim the LRU under mblk_lock; SHRINK_STOP
 * when nothing could be freed. */
static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
					      struct shrink_control *sc)
{
	struct dmz_metadata *zmd =
		container_of(shrink, struct dmz_metadata, mblk_shrinker);
	unsigned long freed;

	spin_lock(&zmd->mblk_lock);
	freed = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan);
	spin_unlock(&zmd->mblk_lock);

	return freed ? freed : SHRINK_STOP;
}
/*
* Release a metadata block.
*/
/*
 * Drop a reference on a metadata block. On the last reference, an
 * errored block is removed and freed, while a clean one goes back on the
 * LRU list (possibly trimming the cache by one). A dirty block stays on
 * the dirty list untouched.
 */
static void dmz_release_mblock(struct dmz_metadata *zmd,
			       struct dmz_mblock *mblk)
{

	if (!mblk)
		return;

	spin_lock(&zmd->mblk_lock);

	mblk->ref--;
	if (mblk->ref == 0) {
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			dmz_free_mblock(zmd, mblk);
		} else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
			dmz_shrink_mblock_cache(zmd, 1);
		}
	}

	spin_unlock(&zmd->mblk_lock);
}
/*
* Get a metadata block from the rbtree. If the block
* is not present, read it from disk.
*/
/*
 * Get a metadata block from the rbtree. If the block is not present,
 * read it from disk. Returns a referenced block, or an ERR_PTR on
 * allocation (-ENOMEM) or I/O (-EIO) failure.
 */
static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
					 sector_t mblk_no)
{
	struct dmz_mblock *mblk;

	/* Check rbtree */
	spin_lock(&zmd->mblk_lock);
	mblk = dmz_get_mblock_fast(zmd, mblk_no);
	spin_unlock(&zmd->mblk_lock);

	if (!mblk) {
		/* Cache miss: read the block from disk */
		mblk = dmz_get_mblock_slow(zmd, mblk_no);
		if (!mblk)
			return ERR_PTR(-ENOMEM);
	}

	/* Wait for on-going read I/O and check for error */
	wait_on_bit_io(&mblk->state, DMZ_META_READING,
		       TASK_UNINTERRUPTIBLE);
	if (test_bit(DMZ_META_ERROR, &mblk->state)) {
		dmz_release_mblock(zmd, mblk);
		return ERR_PTR(-EIO);
	}

	return mblk;
}
/*
* Mark a metadata block dirty.
*/
/*
 * Mark a metadata block dirty. The block is queued on the dirty list
 * only on the clean->dirty transition, so it appears there at most once.
 */
static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	spin_lock(&zmd->mblk_lock);
	if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
		list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
	spin_unlock(&zmd->mblk_lock);
}
/*
* Issue a metadata block write BIO.
*/
/*
 * Issue an asynchronous write BIO for a metadata block to metadata set
 * `set`. DMZ_META_WRITING is set until the BIO completes (see
 * dmz_mblock_bio_end_io); callers wait on that bit and check
 * DMZ_META_ERROR afterwards. If the BIO cannot be allocated, only
 * DMZ_META_ERROR is set and no I/O is issued.
 */
static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
			     unsigned int set)
{
	sector_t block = zmd->sb[set].block + mblk->no;
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		set_bit(DMZ_META_ERROR, &mblk->state);
		return;
	}

	set_bit(DMZ_META_WRITING, &mblk->state);

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, zmd->dev->bdev);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);
}
/*
* Read/write a metadata block.
*/
/*
 * Synchronously read or write one metadata block at `block` using `page`
 * as the data buffer. Returns 0 on success or a negative error code.
 */
static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
			  struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rc;

	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, zmd->dev->bdev);
	bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
	bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);

	rc = submit_bio_wait(bio);
	bio_put(bio);

	return rc;
}
/*
* Write super block of the specified metadata set.
*/
/*
 * Write super block of the specified metadata set with generation
 * sb_gen + 1, then flush the device cache. The CRC covers the entire
 * 4 KB block, is computed with the crc field zeroed, and is seeded with
 * the new generation (dmz_check_sb verifies it the same way).
 */
static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
{
	sector_t block = zmd->sb[set].block;
	struct dmz_mblock *mblk = zmd->sb[set].mblk;
	struct dmz_super *sb = zmd->sb[set].sb;
	u64 sb_gen = zmd->sb_gen + 1;
	int ret;

	sb->magic = cpu_to_le32(DMZ_MAGIC);
	sb->version = cpu_to_le32(DMZ_META_VER);

	sb->gen = cpu_to_le64(sb_gen);

	sb->sb_block = cpu_to_le64(block);
	sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
	sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
	sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);

	sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks);
	sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks);

	sb->crc = 0;
	sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));

	ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
	if (ret == 0)
		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);

	return ret;
}
/*
* Write dirty metadata blocks to the specified set.
*/
/*
 * Write all blocks on write_list to metadata set `set`: issue the write
 * BIOs under a plug, then wait for each one and collect errors.
 * Finishes with a device cache flush. Returns 0 or -EIO if any block
 * failed.
 */
static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
				   struct list_head *write_list,
				   unsigned int set)
{
	struct dmz_mblock *mblk;
	struct blk_plug plug;
	int ret = 0;

	/* Issue writes */
	blk_start_plug(&plug);
	list_for_each_entry(mblk, write_list, link)
		dmz_write_mblock(zmd, mblk, set);
	blk_finish_plug(&plug);

	/* Wait for completion */
	list_for_each_entry(mblk, write_list, link) {
		wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
			       TASK_UNINTERRUPTIBLE);
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			/* Clear so the block can be retried on a later flush */
			clear_bit(DMZ_META_ERROR, &mblk->state);
			ret = -EIO;
		}
	}

	/* Flush drive cache (this will also sync data) */
	if (ret == 0)
		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);

	return ret;
}
/*
* Log dirty metadata blocks.
*/
/*
 * Log dirty metadata blocks: write them to the non-primary (log) set,
 * then validate the log by writing that set's super block with the new
 * generation number.
 */
static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd,
				 struct list_head *write_list)
{
	unsigned int log_set = zmd->mblk_primary ^ 0x1;
	int err;

	/* Write dirty blocks to the log set first */
	err = dmz_write_dirty_mblocks(zmd, write_list, log_set);
	if (err)
		return err;

	/* No error so far: validate the log via its super block generation */
	return dmz_write_sb(zmd, log_set);
}
/*
* Flush dirty metadata blocks.
*/
/*
 * Flush dirty metadata blocks: first write them to the secondary (log)
 * set, then update the primary in place, then bump the generation. On
 * any failure the dirty list is restored so a later flush can retry.
 * Returns 0 or a negative error code.
 */
int dmz_flush_metadata(struct dmz_metadata *zmd)
{
	struct dmz_mblock *mblk;
	struct list_head write_list;
	int ret;

	if (WARN_ON(!zmd))
		return 0;

	INIT_LIST_HEAD(&write_list);

	/*
	 * Make sure that metadata blocks are stable before logging: take
	 * the write lock on the metadata semaphore to prevent target BIOs
	 * from modifying metadata.
	 */
	down_write(&zmd->mblk_sem);

	/*
	 * This is called from the target flush work and reclaim work.
	 * Concurrent execution is not allowed.
	 */
	dmz_lock_flush(zmd);

	/* Get dirty blocks */
	spin_lock(&zmd->mblk_lock);
	list_splice_init(&zmd->mblk_dirty_list, &write_list);
	spin_unlock(&zmd->mblk_lock);

	/* If there are no dirty metadata blocks, just flush the device cache */
	if (list_empty(&write_list)) {
		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
		goto out;
	}

	/*
	 * The primary metadata set is still clean. Keep it this way until
	 * all updates are successful in the secondary set. That is, use
	 * the secondary set as a log.
	 */
	ret = dmz_log_dirty_mblocks(zmd, &write_list);
	if (ret)
		goto out;

	/*
	 * The log is on disk. It is now safe to update in place
	 * in the primary metadata set.
	 */
	ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
	if (ret)
		goto out;

	ret = dmz_write_sb(zmd, zmd->mblk_primary);
	if (ret)
		goto out;

	/* All on disk: mark the blocks clean, parking unreferenced ones
	 * back on the LRU list. */
	while (!list_empty(&write_list)) {
		mblk = list_first_entry(&write_list, struct dmz_mblock, link);
		list_del_init(&mblk->link);

		spin_lock(&zmd->mblk_lock);
		clear_bit(DMZ_META_DIRTY, &mblk->state);
		if (mblk->ref == 0)
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
		spin_unlock(&zmd->mblk_lock);
	}

	zmd->sb_gen++;
out:
	/* On failure, put the blocks back on the dirty list for retry */
	if (ret && !list_empty(&write_list)) {
		spin_lock(&zmd->mblk_lock);
		list_splice(&write_list, &zmd->mblk_dirty_list);
		spin_unlock(&zmd->mblk_lock);
	}

	dmz_unlock_flush(zmd);
	up_write(&zmd->mblk_sem);

	return ret;
}
/*
* Check super block.
*/
/*
 * Validate a super block: CRC (computed as in dmz_write_sb), magic,
 * version and geometry sanity. On success, cache the geometry counters
 * in zmd. Returns 0 or -ENXIO. Note: clears sb->crc as a side effect of
 * recomputing the checksum.
 */
static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb)
{
	unsigned int nr_meta_zones, nr_data_zones;
	struct dmz_dev *dev = zmd->dev;
	u32 crc, stored_crc;
	u64 gen;

	gen = le64_to_cpu(sb->gen);
	stored_crc = le32_to_cpu(sb->crc);
	sb->crc = 0;
	crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE);
	if (crc != stored_crc) {
		dmz_dev_err(dev, "Invalid checksum (needed 0x%08x, got 0x%08x)",
			    crc, stored_crc);
		return -ENXIO;
	}

	if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
		dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
			    DMZ_MAGIC, le32_to_cpu(sb->magic));
		return -ENXIO;
	}

	if (le32_to_cpu(sb->version) != DMZ_META_VER) {
		dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
			    DMZ_META_VER, le32_to_cpu(sb->version));
		return -ENXIO;
	}

	/* Round the metadata size up to whole zones */
	nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + dev->zone_nr_blocks - 1)
		>> dev->zone_nr_blocks_shift;
	if (!nr_meta_zones ||
	    nr_meta_zones >= zmd->nr_rnd_zones) {
		dmz_dev_err(dev, "Invalid number of metadata blocks");
		return -ENXIO;
	}

	if (!le32_to_cpu(sb->nr_reserved_seq) ||
	    le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) {
		dmz_dev_err(dev, "Invalid number of reserved sequential zones");
		return -ENXIO;
	}

	/* Both metadata sets consume nr_meta_zones each */
	nr_data_zones = zmd->nr_useable_zones -
		(nr_meta_zones * 2 + le32_to_cpu(sb->nr_reserved_seq));
	if (le32_to_cpu(sb->nr_chunks) > nr_data_zones) {
		dmz_dev_err(dev, "Invalid number of chunks %u / %u",
			    le32_to_cpu(sb->nr_chunks), nr_data_zones);
		return -ENXIO;
	}

	/* OK */
	zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks);
	zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq);
	zmd->nr_chunks = le32_to_cpu(sb->nr_chunks);
	zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks);
	zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks);
	zmd->nr_meta_zones = nr_meta_zones;
	zmd->nr_data_zones = nr_data_zones;

	return 0;
}
/*
* Read the first or second super block from disk.
*/
static int dmz_read_sb(struct dmz_metadata *zmd, unsigned int set)
{
return dmz_rdwr_block(zmd, REQ_OP_READ, zmd->sb[set].block,
zmd->sb[set].mblk->page);
}
/*
* Determine the position of the secondary super blocks on disk.
* This is used only if a corruption of the primary super block
* is detected.
*/
/*
 * Determine the position of the secondary super block when the primary
 * is corrupt: scan zone-start blocks for a block carrying DMZ_MAGIC.
 * On success zmd->sb[1] is populated; on failure the slot is cleared
 * and -EIO (or -ENOMEM) is returned. Full validation of the found block
 * is left to the caller (dmz_load_sb).
 */
static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
{
	unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
	struct dmz_mblock *mblk;
	int i;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	zmd->sb[1].mblk = mblk;
	zmd->sb[1].sb = mblk->data;

	/* Bad first super block: search for the second one */
	zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
	for (i = 0; i < zmd->nr_rnd_zones - 1; i++) {
		if (dmz_read_sb(zmd, 1) != 0)
			break;
		if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC)
			return 0;
		zmd->sb[1].block += zone_nr_blocks;
	}

	dmz_free_mblock(zmd, mblk);
	zmd->sb[1].mblk = NULL;

	return -EIO;
}
/*
* Read the first or second super block from disk.
*/
/*
 * Allocate a cache block for super block `set` and read it from disk.
 * On failure the sb slot is left cleared.
 */
static int dmz_get_sb(struct dmz_metadata *zmd, unsigned int set)
{
	struct dmz_mblock *mblk = dmz_alloc_mblock(zmd, 0);
	int err;

	if (!mblk)
		return -ENOMEM;

	zmd->sb[set].mblk = mblk;
	zmd->sb[set].sb = mblk->data;

	err = dmz_read_sb(zmd, set);
	if (!err)
		return 0;

	/* Read failed: release the block and clear the slot */
	dmz_free_mblock(zmd, mblk);
	zmd->sb[set].mblk = NULL;
	return err;
}
/*
* Recover a metadata set.
*/
/*
 * Recover metadata set dst_set by copying every metadata block from the
 * other (valid) set, then finalizing with a freshly written super block.
 * Block 0 (the super block itself) is intentionally skipped in the copy
 * loop and rewritten last so the set only becomes valid once complete.
 */
static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
{
	unsigned int src_set = dst_set ^ 0x1;
	struct page *page;
	int i, ret;

	dmz_dev_warn(zmd->dev, "Metadata set %u invalid: recovering", dst_set);

	if (dst_set == 0)
		zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
	else {
		zmd->sb[1].block = zmd->sb[0].block +
			(zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
	}

	page = alloc_page(GFP_NOIO);
	if (!page)
		return -ENOMEM;

	/* Copy metadata blocks */
	for (i = 1; i < zmd->nr_meta_blocks; i++) {
		ret = dmz_rdwr_block(zmd, REQ_OP_READ,
				     zmd->sb[src_set].block + i, page);
		if (ret)
			goto out;
		ret = dmz_rdwr_block(zmd, REQ_OP_WRITE,
				     zmd->sb[dst_set].block + i, page);
		if (ret)
			goto out;
	}

	/* Finalize with the super block */
	if (!zmd->sb[dst_set].mblk) {
		zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
		if (!zmd->sb[dst_set].mblk) {
			ret = -ENOMEM;
			goto out;
		}
		zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
	}

	ret = dmz_write_sb(zmd, dst_set);
out:
	__free_pages(page, 0);

	return ret;
}
/*
* Get super block from disk.
*/
/*
 * Load and validate both super blocks, recovering an invalid set from
 * the valid one, and select the set with the highest generation as the
 * primary. Returns 0 or a negative error code.
 */
static int dmz_load_sb(struct dmz_metadata *zmd)
{
	bool sb_good[2] = {false, false};
	u64 sb_gen[2] = {0, 0};
	int ret;

	/* Read and check the primary super block */
	zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
	ret = dmz_get_sb(zmd, 0);
	if (ret) {
		dmz_dev_err(zmd->dev, "Read primary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, zmd->sb[0].sb);

	/* Read and check secondary super block */
	if (ret == 0) {
		sb_good[0] = true;
		/* Secondary set starts right after the primary's zones */
		zmd->sb[1].block = zmd->sb[0].block +
			(zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
		ret = dmz_get_sb(zmd, 1);
	} else
		/* Primary corrupt: scan for the secondary's location */
		ret = dmz_lookup_secondary_sb(zmd);

	if (ret) {
		dmz_dev_err(zmd->dev, "Read secondary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, zmd->sb[1].sb);
	if (ret == 0)
		sb_good[1] = true;

	/* Use highest generation sb first */
	if (!sb_good[0] && !sb_good[1]) {
		dmz_dev_err(zmd->dev, "No valid super block found");
		return -EIO;
	}

	if (sb_good[0])
		sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
	else
		ret = dmz_recover_mblocks(zmd, 0);

	if (sb_good[1])
		sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
	else
		ret = dmz_recover_mblocks(zmd, 1);

	if (ret) {
		dmz_dev_err(zmd->dev, "Recovery failed");
		return -EIO;
	}

	if (sb_gen[0] >= sb_gen[1]) {
		zmd->sb_gen = sb_gen[0];
		zmd->mblk_primary = 0;
	} else {
		zmd->sb_gen = sb_gen[1];
		zmd->mblk_primary = 1;
	}

	dmz_dev_debug(zmd->dev, "Using super block %u (gen %llu)",
		      zmd->mblk_primary, zmd->sb_gen);

	return 0;
}
/*
* Initialize a zone descriptor.
*/
/*
 * Initialize one zone descriptor from a blk_zone report entry: classify
 * random vs sequential, record offline/read-only state and write
 * pointer, count usable zones, and remember the first usable random
 * zone as the super block zone. Returns 0, or -ENXIO for an unsupported
 * zone (a smaller last "runt" zone is silently ignored).
 */
static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
			 struct blk_zone *blkz)
{
	struct dmz_dev *dev = zmd->dev;

	/* Ignore the eventual last runt (smaller) zone */
	if (blkz->len != dev->zone_nr_sectors) {
		if (blkz->start + blkz->len == dev->capacity)
			return 0;
		return -ENXIO;
	}

	INIT_LIST_HEAD(&zone->link);
	atomic_set(&zone->refcount, 0);
	zone->chunk = DMZ_MAP_UNMAPPED;

	if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) {
		set_bit(DMZ_RND, &zone->flags);
		zmd->nr_rnd_zones++;
	} else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ ||
		   blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) {
		set_bit(DMZ_SEQ, &zone->flags);
	} else
		return -ENXIO;

	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz->cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);

	if (dmz_is_rnd(zone))
		zone->wp_block = 0;
	else
		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);

	if (!dmz_is_offline(zone) && !dmz_is_readonly(zone)) {
		zmd->nr_useable_zones++;
		if (dmz_is_rnd(zone)) {
			/* NOTE(review): usable random zones are counted here
			 * in addition to the conventional-type count above —
			 * confirm the double increment is intended. */
			zmd->nr_rnd_zones++;
			if (!zmd->sb_zone) {
				/* Super block zone */
				zmd->sb_zone = zone;
			}
		}
	}

	return 0;
}
/*
* Free zones descriptors.
*/
/* Free the zone descriptor array and mark it gone. */
static void dmz_drop_zones(struct dmz_metadata *zmd)
{
	kfree(zmd->zones);
	zmd->zones = NULL;
}
/*
* The size of a zone report in number of zones.
* This results in 4096*64B=256KB report zones commands.
*/
#define DMZ_REPORT_NR_ZONES 4096
/*
* Allocate and initialize zone descriptors using the zone
* information from disk.
*/
/*
 * Allocate and initialize zone descriptors using the zone report from
 * disk, reading up to DMZ_REPORT_NR_ZONES zones per command. On any
 * failure the zone array is dropped.
 */
static int dmz_init_zones(struct dmz_metadata *zmd)
{
	struct dmz_dev *dev = zmd->dev;
	struct dm_zone *zone;
	struct blk_zone *blkz;
	unsigned int nr_blkz;
	sector_t sector = 0;
	int i, ret = 0;

	/* Init */
	zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
	zmd->zone_nr_bitmap_blocks = zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT;

	/* Allocate zone array */
	zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
	if (!zmd->zones)
		return -ENOMEM;

	dmz_dev_info(dev, "Using %zu B for zone information",
		     sizeof(struct dm_zone) * dev->nr_zones);

	/* Get zone information */
	nr_blkz = DMZ_REPORT_NR_ZONES;
	blkz = kcalloc(nr_blkz, sizeof(struct blk_zone), GFP_KERNEL);
	if (!blkz) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Get zone information and initialize zone descriptors.
	 * At the same time, determine where the super block
	 * should be: first block of the first randomly writable
	 * zone.
	 */
	zone = zmd->zones;
	while (sector < dev->capacity) {
		/* Get zone information */
		nr_blkz = DMZ_REPORT_NR_ZONES;
		ret = blkdev_report_zones(dev->bdev, sector, blkz,
					  &nr_blkz, GFP_KERNEL);
		if (ret) {
			dmz_dev_err(dev, "Report zones failed %d", ret);
			goto out;
		}

		/* Process report */
		/* NOTE(review): if the report ever returns 0 zones with
		 * ret == 0, sector does not advance and this loop would
		 * spin forever — confirm the block layer guarantees
		 * progress here. */
		for (i = 0; i < nr_blkz; i++) {
			ret = dmz_init_zone(zmd, zone, &blkz[i]);
			if (ret)
				goto out;
			sector += dev->zone_nr_sectors;
			zone++;
		}
	}

	/* The entire zone configuration of the disk should now be known */
	if (sector < dev->capacity) {
		dmz_dev_err(dev, "Failed to get correct zone information");
		ret = -ENXIO;
	}
out:
	kfree(blkz);
	if (ret)
		dmz_drop_zones(zmd);

	return ret;
}
/*
 * Refresh a zone descriptor (condition and write pointer) from a
 * one-zone device report.
 */
static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
        struct blk_zone blkz;
        unsigned int nr_blkz = 1;
        int ret;
        /* Query the device for this single zone */
        ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
                                  &blkz, &nr_blkz, GFP_NOIO);
        if (ret) {
                dmz_dev_err(zmd->dev, "Get zone %u report failed",
                            dmz_id(zmd, zone));
                return ret;
        }
        /* Re-derive the offline/read-only state from the reported condition */
        clear_bit(DMZ_OFFLINE, &zone->flags);
        clear_bit(DMZ_READ_ONLY, &zone->flags);
        switch (blkz.cond) {
        case BLK_ZONE_COND_OFFLINE:
                set_bit(DMZ_OFFLINE, &zone->flags);
                break;
        case BLK_ZONE_COND_READONLY:
                set_bit(DMZ_READ_ONLY, &zone->flags);
                break;
        default:
                break;
        }
        /* Only sequential zones carry a meaningful write pointer */
        zone->wp_block = dmz_is_seq(zone) ?
                dmz_sect2blk(blkz.wp - blkz.start) : 0;
        return 0;
}
/*
 * Repair a zone marked with the sequential write error flag: re-read the
 * zone write pointer and invalidate any block past it.
 */
static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
                                    struct dm_zone *zone)
{
        unsigned int old_wp = zone->wp_block;
        int ret;
        ret = dmz_update_zone(zmd, zone);
        if (ret)
                return ret;
        dmz_dev_warn(zmd->dev, "Processing zone %u write error (zone wp %u/%u)",
                     dmz_id(zmd, zone), zone->wp_block, old_wp);
        /* Blocks beyond the rewound write pointer are lost */
        if (zone->wp_block < old_wp)
                dmz_invalidate_blocks(zmd, zone, zone->wp_block,
                                      old_wp - zone->wp_block);
        return 0;
}
/* Return the descriptor of the zone with ID @zone_id. */
static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
{
        return zmd->zones + zone_id;
}
/*
 * Reset a zone write pointer. No-op for offline, read-only and
 * random (conventional) zones.
 */
static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
        /* Offline, read-only and conventional zones cannot be reset */
        if (dmz_is_offline(zone) || dmz_is_readonly(zone) || dmz_is_rnd(zone))
                return 0;
        /* Only issue the reset command when it is actually needed */
        if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
                struct dmz_dev *dev = zmd->dev;
                int ret;
                ret = blkdev_reset_zones(dev->bdev,
                                         dmz_start_sect(zmd, zone),
                                         dev->zone_nr_sectors, GFP_NOIO);
                if (ret) {
                        dmz_dev_err(dev, "Reset zone %u failed %d",
                                    dmz_id(zmd, zone), ret);
                        return ret;
                }
        }
        /* Clear write error bit and rewind write pointer position */
        clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
        zone->wp_block = 0;
        return 0;
}
static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
/*
 * Initialize chunk mapping: read the on-disk chunk map blocks, mark the
 * data and buffer zones they reference, then finish initializing every
 * remaining zone as unmapped. Returns 0 on success, a negative errno on
 * invalid mapping entries or metadata block read failure.
 */
static int dmz_load_mapping(struct dmz_metadata *zmd)
{
        struct dmz_dev *dev = zmd->dev;
        struct dm_zone *dzone, *bzone;
        struct dmz_mblock *dmap_mblk = NULL;
        struct dmz_map *dmap;
        unsigned int i = 0, e = 0, chunk = 0;
        unsigned int dzone_id;
        unsigned int bzone_id;
        /* Metadata block array for the chunk mapping table */
        zmd->map_mblk = kcalloc(zmd->nr_map_blocks,
                                sizeof(struct dmz_mblk *), GFP_KERNEL);
        if (!zmd->map_mblk)
                return -ENOMEM;
        /* Get chunk mapping table blocks and initialize zone mapping */
        while (chunk < zmd->nr_chunks) {
                if (!dmap_mblk) {
                        /* Get the next mapping block (block 0 is the sb) */
                        dmap_mblk = dmz_get_mblock(zmd, i + 1);
                        if (IS_ERR(dmap_mblk))
                                return PTR_ERR(dmap_mblk);
                        zmd->map_mblk[i] = dmap_mblk;
                        dmap = (struct dmz_map *) dmap_mblk->data;
                        i++;
                        e = 0;
                }
                /* Check data zone */
                dzone_id = le32_to_cpu(dmap[e].dzone_id);
                if (dzone_id == DMZ_MAP_UNMAPPED)
                        goto next;
                if (dzone_id >= dev->nr_zones) {
                        dmz_dev_err(dev, "Chunk %u mapping: invalid data zone ID %u",
                                    chunk, dzone_id);
                        return -EIO;
                }
                /* Mark the data zone mapped and add it to its LRU list */
                dzone = dmz_get(zmd, dzone_id);
                set_bit(DMZ_DATA, &dzone->flags);
                dzone->chunk = chunk;
                dmz_get_zone_weight(zmd, dzone);
                if (dmz_is_rnd(dzone))
                        list_add_tail(&dzone->link, &zmd->map_rnd_list);
                else
                        list_add_tail(&dzone->link, &zmd->map_seq_list);
                /* Check buffer zone */
                bzone_id = le32_to_cpu(dmap[e].bzone_id);
                if (bzone_id == DMZ_MAP_UNMAPPED)
                        goto next;
                if (bzone_id >= dev->nr_zones) {
                        dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone ID %u",
                                    chunk, bzone_id);
                        return -EIO;
                }
                /* A buffer zone must be randomly writable */
                bzone = dmz_get(zmd, bzone_id);
                if (!dmz_is_rnd(bzone)) {
                        dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone %u",
                                    chunk, bzone_id);
                        return -EIO;
                }
                /* Cross-link the buffer zone and its data zone */
                set_bit(DMZ_DATA, &bzone->flags);
                set_bit(DMZ_BUF, &bzone->flags);
                bzone->chunk = chunk;
                bzone->bzone = dzone;
                dzone->bzone = bzone;
                dmz_get_zone_weight(zmd, bzone);
                list_add_tail(&bzone->link, &zmd->map_rnd_list);
        next:
                chunk++;
                e++;
                /* Move to the next mapping block when this one is exhausted */
                if (e >= DMZ_MAP_ENTRIES)
                        dmap_mblk = NULL;
        }
        /*
         * At this point, only meta zones and mapped data zones were
         * fully initialized. All remaining zones are unmapped data
         * zones. Finish initializing those here.
         */
        for (i = 0; i < dev->nr_zones; i++) {
                dzone = dmz_get(zmd, i);
                if (dmz_is_meta(dzone))
                        continue;
                if (dmz_is_rnd(dzone))
                        zmd->nr_rnd++;
                else
                        zmd->nr_seq++;
                if (dmz_is_data(dzone)) {
                        /* Already initialized */
                        continue;
                }
                /* Unmapped data zone */
                set_bit(DMZ_DATA, &dzone->flags);
                dzone->chunk = DMZ_MAP_UNMAPPED;
                if (dmz_is_rnd(dzone)) {
                        list_add_tail(&dzone->link, &zmd->unmap_rnd_list);
                        atomic_inc(&zmd->unmap_nr_rnd);
                } else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
                        /* Reserve the first few sequential zones for reclaim */
                        list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
                        atomic_inc(&zmd->nr_reserved_seq_zones);
                        zmd->nr_seq--;
                } else {
                        list_add_tail(&dzone->link, &zmd->unmap_seq_list);
                        atomic_inc(&zmd->unmap_nr_seq);
                }
        }
        return 0;
}
/*
 * Record the (data zone, buffer zone) pair mapping @chunk in the chunk
 * map and mark the map block dirty.
 */
static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk,
                                  unsigned int dzone_id, unsigned int bzone_id)
{
        struct dmz_mblock *mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
        struct dmz_map *map = (struct dmz_map *) mblk->data;
        struct dmz_map *entry = &map[chunk & DMZ_MAP_ENTRIES_MASK];
        entry->dzone_id = cpu_to_le32(dzone_id);
        entry->bzone_id = cpu_to_le32(bzone_id);
        dmz_dirty_mblock(zmd, mblk);
}
/*
 * The list of mapped zones is kept in LRU order: move @zone to the tail
 * of its map list (sequential or random), if it is on one.
 */
static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
        struct list_head *lru;
        if (list_empty(&zone->link))
                return;
        lru = dmz_is_seq(zone) ? &zmd->map_seq_list : &zmd->map_rnd_list;
        list_move_tail(&zone->link, lru);
}
/*
 * LRU-rotate a mapped zone and, if it has one, its buffer zone.
 */
static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
        __dmz_lru_zone(zmd, zone);
        if (zone->bzone)
                __dmz_lru_zone(zmd, zone->bzone);
}
/*
 * Wait for any zone to be freed. Called with both the metadata and map
 * locks held; both are dropped while sleeping and retaken afterwards,
 * so the caller must revalidate any state read before the call.
 */
static void dmz_wait_for_free_zones(struct dmz_metadata *zmd)
{
        DEFINE_WAIT(wait);
        prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE);
        /* Drop locks in reverse acquisition order before sleeping */
        dmz_unlock_map(zmd);
        dmz_unlock_metadata(zmd);
        /* Sleep until woken by dmz_free_zone(), at most one second */
        io_schedule_timeout(HZ);
        dmz_lock_metadata(zmd);
        dmz_lock_map(zmd);
        finish_wait(&zmd->free_wq, &wait);
}
/*
 * Try to lock a zone for reclaim by setting its RECLAIM bit.
 * Returns 0 if the zone is active or already locked for reclaim,
 * non-zero if this caller acquired the reclaim lock.
 */
int dmz_lock_zone_reclaim(struct dm_zone *zone)
{
        /* Active zones cannot be reclaimed */
        if (dmz_is_active(zone))
                return 0;
        return test_and_set_bit(DMZ_RECLAIM, &zone->flags) ? 0 : 1;
}
/*
 * Release a zone's reclaim lock and wake up anyone waiting on the
 * RECLAIM bit. The release/barrier/wake order is required so waiters
 * in dmz_wait_for_reclaim() observe the cleared bit.
 */
void dmz_unlock_zone_reclaim(struct dm_zone *zone)
{
        WARN_ON(dmz_is_active(zone));
        WARN_ON(!dmz_in_reclaim(zone));
        clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
        /* Pair the unlock-clear with the wake-up below */
        smp_mb__after_atomic();
        wake_up_bit(&zone->flags, DMZ_RECLAIM);
}
/*
 * Wait (up to one second) for a zone's reclaim to complete. Both the
 * metadata and map locks are dropped while waiting and retaken after,
 * so the caller must revalidate the chunk mapping (see the retry loop
 * in dmz_get_chunk_mapping()).
 */
static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
{
        dmz_unlock_map(zmd);
        dmz_unlock_metadata(zmd);
        wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
        dmz_lock_metadata(zmd);
        dmz_lock_map(zmd);
}
/*
 * Pick a random-write data zone to reclaim: walk the mapped random zone
 * list in LRU order and return the first data zone whose reclaim lock
 * can be taken (for a buffer zone, its data zone is the candidate).
 */
static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
{
        struct dm_zone *zone;
        if (list_empty(&zmd->map_rnd_list))
                return NULL;
        list_for_each_entry(zone, &zmd->map_rnd_list, link) {
                struct dm_zone *cand = dmz_is_buf(zone) ? zone->bzone : zone;
                if (dmz_lock_zone_reclaim(cand))
                        return cand;
        }
        return NULL;
}
/*
 * Pick a buffered sequential zone to reclaim: the least recently used
 * sequential zone that has a buffer zone and can be reclaim-locked.
 */
static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
{
        struct dm_zone *zone;
        if (list_empty(&zmd->map_seq_list))
                return NULL;
        list_for_each_entry(zone, &zmd->map_seq_list, link) {
                if (zone->bzone && dmz_lock_zone_reclaim(zone))
                        return zone;
        }
        return NULL;
}
/*
 * Select a zone for reclaim.
 *
 * Two cases are possible:
 * (1) No free sequential zone remains: a random data zone cannot be
 *     reclaimed, so pick a sequential zone first, which will free one
 *     up for a later random zone reclaim.
 * (2) At least one free sequential zone is available: pick the oldest
 *     random zone (data or buffer) that can be reclaim-locked.
 */
struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
{
        struct dm_zone *zone;
        dmz_lock_map(zmd);
        zone = list_empty(&zmd->reserved_seq_zones_list) ?
                dmz_get_seq_zone_for_reclaim(zmd) :
                dmz_get_rnd_zone_for_reclaim(zmd);
        dmz_unlock_map(zmd);
        return zone;
}
/*
 * Activate a zone: mark it active and take a reference, preventing
 * reclaim from touching it.
 */
void dmz_activate_zone(struct dm_zone *zone)
{
        set_bit(DMZ_ACTIVE, &zone->flags);
        atomic_inc(&zone->refcount);
}
/*
 * Deactivate a zone. This decrements the zone reference counter and
 * clears the active state of the zone once the count reaches 0,
 * indicating that all BIOs to the zone have completed.
 */
void dmz_deactivate_zone(struct dm_zone *zone)
{
        if (atomic_dec_and_test(&zone->refcount)) {
                WARN_ON(!test_bit(DMZ_ACTIVE, &zone->flags));
                clear_bit_unlock(DMZ_ACTIVE, &zone->flags);
                /* Make the cleared bit visible before any later wake-up */
                smp_mb__after_atomic();
        }
}
/*
 * Get the zone mapping a chunk, if the chunk is mapped already.
 * If no mapping exists and the operation is WRITE, a zone is
 * allocated and used to map the chunk.
 * The zone returned will be set to the active state.
 * Returns NULL for an unmapped chunk on read/discard, or an ERR_PTR
 * on metadata inconsistency / write-error repair failure.
 */
struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, int op)
{
        struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
        struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
        int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK;
        unsigned int dzone_id;
        struct dm_zone *dzone = NULL;
        int ret = 0;
        dmz_lock_map(zmd);
again:
        /* Get the chunk mapping */
        dzone_id = le32_to_cpu(dmap[dmap_idx].dzone_id);
        if (dzone_id == DMZ_MAP_UNMAPPED) {
                /*
                 * Read or discard in unmapped chunks are fine. But for
                 * writes, we need a mapping, so get one.
                 */
                if (op != REQ_OP_WRITE)
                        goto out;
                /* Allocate a random zone */
                dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
                if (!dzone) {
                        /* None free: sleep (dropping locks) and retry */
                        dmz_wait_for_free_zones(zmd);
                        goto again;
                }
                dmz_map_zone(zmd, dzone, chunk);
        } else {
                /* The chunk is already mapped: get the mapping zone */
                dzone = dmz_get(zmd, dzone_id);
                if (dzone->chunk != chunk) {
                        /* Map entry and zone descriptor disagree */
                        dzone = ERR_PTR(-EIO);
                        goto out;
                }
                /* Repair write pointer if the sequential dzone has error */
                if (dmz_seq_write_err(dzone)) {
                        ret = dmz_handle_seq_write_err(zmd, dzone);
                        if (ret) {
                                dzone = ERR_PTR(-EIO);
                                goto out;
                        }
                        clear_bit(DMZ_SEQ_WRITE_ERR, &dzone->flags);
                }
        }
        /*
         * If the zone is being reclaimed, the chunk mapping may change
         * to a different zone. So wait for reclaim and retry. Otherwise,
         * activate the zone (this will prevent reclaim from touching it).
         */
        if (dmz_in_reclaim(dzone)) {
                dmz_wait_for_reclaim(zmd, dzone);
                goto again;
        }
        dmz_activate_zone(dzone);
        dmz_lru_zone(zmd, dzone);
out:
        dmz_unlock_map(zmd);
        return dzone;
}
/*
 * Write and discard change the block validity of data zones and their buffer
 * zones. Check here that valid blocks are still present. If all blocks are
 * invalid, the zones can be unmapped on the fly without waiting for reclaim
 * to do it.
 */
void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone)
{
        struct dm_zone *bzone;
        dmz_lock_map(zmd);
        bzone = dzone->bzone;
        if (bzone) {
                if (dmz_weight(bzone))
                        dmz_lru_zone(zmd, bzone);
                else {
                        /* Empty buffer zone: reclaim it */
                        dmz_unmap_zone(zmd, bzone);
                        dmz_free_zone(zmd, bzone);
                        bzone = NULL;
                }
        }
        /* Deactivate the data zone (drop the reference from _get) */
        dmz_deactivate_zone(dzone);
        if (dmz_is_active(dzone) || bzone || dmz_weight(dzone))
                dmz_lru_zone(zmd, dzone);
        else {
                /* Unbuffered inactive empty data zone: reclaim it */
                dmz_unmap_zone(zmd, dzone);
                dmz_free_zone(zmd, dzone);
        }
        dmz_unlock_map(zmd);
}
/*
 * Allocate and map a random zone to buffer a chunk
 * already mapped to a sequential zone. Returns the (possibly
 * pre-existing) buffer zone; sleeps and retries if no random zone
 * is currently free.
 */
struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
                                     struct dm_zone *dzone)
{
        struct dm_zone *bzone;
        dmz_lock_map(zmd);
again:
        /* Re-check after a possible sleep: another path may have buffered it */
        bzone = dzone->bzone;
        if (bzone)
                goto out;
        /* Allocate a random zone */
        bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
        if (!bzone) {
                dmz_wait_for_free_zones(zmd);
                goto again;
        }
        /* Update the chunk mapping */
        dmz_set_chunk_mapping(zmd, dzone->chunk, dmz_id(zmd, dzone),
                              dmz_id(zmd, bzone));
        /* Cross-link buffer and data zones */
        set_bit(DMZ_BUF, &bzone->flags);
        bzone->chunk = dzone->chunk;
        bzone->bzone = dzone;
        dzone->bzone = bzone;
        list_add_tail(&bzone->link, &zmd->map_rnd_list);
out:
        dmz_unlock_map(zmd);
        return bzone;
}
/*
 * Get an unmapped (free) zone of the kind requested by @flags
 * (DMZ_ALLOC_RND for a random zone, otherwise sequential). Returns
 * NULL if none is available. Reclaim callers (DMZ_ALLOC_RECLAIM) may
 * fall back to the reserved sequential zones.
 * This must be called with the mapping lock held.
 */
struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags)
{
        struct list_head *list;
        struct dm_zone *zone;
        if (flags & DMZ_ALLOC_RND)
                list = &zmd->unmap_rnd_list;
        else
                list = &zmd->unmap_seq_list;
again:
        if (list_empty(list)) {
                /*
                 * No free zone: if this is for reclaim, allow using the
                 * reserved sequential zones.
                 */
                if (!(flags & DMZ_ALLOC_RECLAIM) ||
                    list_empty(&zmd->reserved_seq_zones_list))
                        return NULL;
                zone = list_first_entry(&zmd->reserved_seq_zones_list,
                                        struct dm_zone, link);
                list_del_init(&zone->link);
                atomic_dec(&zmd->nr_reserved_seq_zones);
                return zone;
        }
        zone = list_first_entry(list, struct dm_zone, link);
        list_del_init(&zone->link);
        if (dmz_is_rnd(zone))
                atomic_dec(&zmd->unmap_nr_rnd);
        else
                atomic_dec(&zmd->unmap_nr_seq);
        /* Skip zones that went offline; retry with the next free zone */
        if (dmz_is_offline(zone)) {
                dmz_dev_warn(zmd->dev, "Zone %u is offline", dmz_id(zmd, zone));
                zone = NULL;
                goto again;
        }
        return zone;
}
/*
* Free a zone.
* This must be called with the mapping lock held.
*/
void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
/* If this is a sequential zone, reset it */
if (dmz_is_seq(zone))
dmz_reset_zone(zmd, zone);
/* Return the zone to its type unmap list */
if (dmz_is_rnd(zone)) {
list_add_tail(&zone->link, &zmd->unmap_rnd_list);
atomic_inc(&zmd->unmap_nr_rnd);
} else if (atomic_read(&zmd->nr_reserved_seq_zones) <
zmd->nr_reserved_seq) {
list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
atomic_inc(&zmd->nr_reserved_seq_zones);
} else {
list_add_tail(&zone->link, &zmd->unmap_seq_list);
atomic_inc(&zmd->unmap_nr_seq);
}
wake_up_all(&zmd->free_wq);
}
/*
 * Map a chunk to a data zone and add the zone to its mapped LRU list.
 * This must be called with the mapping lock held.
 */
void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
                  unsigned int chunk)
{
        struct list_head *map_list;
        /* Record the mapping on disk (no buffer zone yet) */
        dmz_set_chunk_mapping(zmd, chunk, dmz_id(zmd, dzone),
                              DMZ_MAP_UNMAPPED);
        dzone->chunk = chunk;
        map_list = dmz_is_rnd(dzone) ? &zmd->map_rnd_list : &zmd->map_seq_list;
        list_add_tail(&dzone->link, map_list);
}
/*
 * Unmap a zone from its chunk. For a buffer zone, only the buffer
 * mapping is cleared (the data zone keeps the chunk); for a data zone,
 * the whole chunk entry is cleared.
 * This must be called with the mapping lock held.
 */
void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
        unsigned int chunk = zone->chunk;
        unsigned int dzone_id;
        if (chunk == DMZ_MAP_UNMAPPED) {
                /* Already unmapped */
                return;
        }
        if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
                /*
                 * Unmapping the chunk buffer zone: clear only
                 * the chunk buffer mapping
                 */
                /* zone->bzone points back to the data zone here */
                dzone_id = dmz_id(zmd, zone->bzone);
                zone->bzone->bzone = NULL;
                zone->bzone = NULL;
        } else {
                /*
                 * Unmapping the chunk data zone: the zone must
                 * not be buffered.
                 */
                if (WARN_ON(zone->bzone)) {
                        zone->bzone->bzone = NULL;
                        zone->bzone = NULL;
                }
                dzone_id = DMZ_MAP_UNMAPPED;
        }
        dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);
        zone->chunk = DMZ_MAP_UNMAPPED;
        list_del_init(&zone->link);
}
/*
 * Set @nr_bits bits in @bitmap starting from @bit.
 * Return the number of bits changed from 0 to 1.
 */
static unsigned int dmz_set_bits(unsigned long *bitmap,
                                 unsigned int bit, unsigned int nr_bits)
{
        unsigned int end = bit + nr_bits;
        unsigned int changed = 0;
        while (bit < end) {
                /* Word-aligned with a full word remaining: try the fast path */
                if (!(bit & (BITS_PER_LONG - 1)) &&
                    end - bit >= BITS_PER_LONG) {
                        unsigned long *word = bitmap + BIT_WORD(bit);
                        if (*word == 0) {
                                *word = ULONG_MAX;
                                changed += BITS_PER_LONG;
                                bit += BITS_PER_LONG;
                                continue;
                        }
                }
                /* Slow path: one bit at a time */
                if (!test_and_set_bit(bit, bitmap))
                        changed++;
                bit++;
        }
        return changed;
}
/*
 * Get the metadata block holding the validity bit of @chunk_block
 * in @zone. Bitmap blocks follow the super block and the chunk map.
 */
static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
                                         struct dm_zone *zone,
                                         sector_t chunk_block)
{
        sector_t blkno = 1 + zmd->nr_map_blocks
                + (sector_t)(dmz_id(zmd, zone) * zmd->zone_nr_bitmap_blocks)
                + (chunk_block >> DMZ_BLOCK_SHIFT_BITS);
        return dmz_get_mblock(zmd, blkno);
}
/*
 * Copy the valid blocks bitmap of @from_zone into the bitmap of
 * @to_zone, one bitmap block at a time, and copy the cached weight.
 */
int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
                          struct dm_zone *to_zone)
{
        sector_t block;
        for (block = 0; block < zmd->dev->zone_nr_blocks;
             block += DMZ_BLOCK_SIZE_BITS) {
                struct dmz_mblock *src, *dst;
                src = dmz_get_bitmap(zmd, from_zone, block);
                if (IS_ERR(src))
                        return PTR_ERR(src);
                dst = dmz_get_bitmap(zmd, to_zone, block);
                if (IS_ERR(dst)) {
                        dmz_release_mblock(zmd, src);
                        return PTR_ERR(dst);
                }
                memcpy(dst->data, src->data, DMZ_BLOCK_SIZE);
                dmz_dirty_mblock(zmd, dst);
                dmz_release_mblock(zmd, dst);
                dmz_release_mblock(zmd, src);
        }
        to_zone->weight = from_zone->weight;
        return 0;
}
/*
 * Merge the valid blocks bitmap of @from_zone into the bitmap of
 * @to_zone, starting from @chunk_block: each run of valid blocks in
 * the source is marked valid in the destination.
 */
int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
                           struct dm_zone *to_zone, sector_t chunk_block)
{
        unsigned int run_len;
        int ret;
        while (chunk_block < zmd->dev->zone_nr_blocks) {
                /* Locate the next valid run in the source zone */
                ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
                if (ret <= 0)
                        return ret;
                run_len = ret;
                /* Mark the same run valid in the destination zone */
                ret = dmz_validate_blocks(zmd, to_zone, chunk_block, run_len);
                if (ret)
                        return ret;
                chunk_block += run_len;
        }
        return 0;
}
/*
 * Validate all the blocks in the range [block..block+nr_blocks-1]:
 * set their bits in the zone bitmap and increase the zone weight by
 * the number of bits that actually changed.
 */
int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
                        sector_t chunk_block, unsigned int nr_blocks)
{
        unsigned int count, bit, nr_bits;
        unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
        struct dmz_mblock *mblk;
        unsigned int n = 0;
        dmz_dev_debug(zmd->dev, "=> VALIDATE zone %u, block %llu, %u blocks",
                      dmz_id(zmd, zone), (unsigned long long)chunk_block,
                      nr_blocks);
        WARN_ON(chunk_block + nr_blocks > zone_nr_blocks);
        while (nr_blocks) {
                /* Get bitmap block */
                mblk = dmz_get_bitmap(zmd, zone, chunk_block);
                if (IS_ERR(mblk))
                        return PTR_ERR(mblk);
                /* Set bits, capped at the end of this bitmap block */
                bit = chunk_block & DMZ_BLOCK_MASK_BITS;
                nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
                count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
                if (count) {
                        /* Only dirty the block if something changed */
                        dmz_dirty_mblock(zmd, mblk);
                        n += count;
                }
                dmz_release_mblock(zmd, mblk);
                nr_blocks -= nr_bits;
                chunk_block += nr_bits;
        }
        /* Weight can never exceed the zone size; clamp and warn otherwise */
        if (likely(zone->weight + n <= zone_nr_blocks))
                zone->weight += n;
        else {
                dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be <= %u",
                             dmz_id(zmd, zone), zone->weight,
                             zone_nr_blocks - n);
                zone->weight = zone_nr_blocks;
        }
        return 0;
}
/*
 * Clear @nr_bits bits in @bitmap starting from @bit.
 * Return the number of bits changed from 1 to 0.
 */
static int dmz_clear_bits(unsigned long *bitmap, int bit, int nr_bits)
{
        int end = bit + nr_bits;
        int changed = 0;
        while (bit < end) {
                /* Word-aligned with a full word remaining: try the fast path */
                if (!(bit & (BITS_PER_LONG - 1)) &&
                    end - bit >= BITS_PER_LONG) {
                        unsigned long *word = bitmap + BIT_WORD(bit);
                        if (*word == ULONG_MAX) {
                                *word = 0;
                                changed += BITS_PER_LONG;
                                bit += BITS_PER_LONG;
                                continue;
                        }
                }
                /* Slow path: one bit at a time */
                if (test_and_clear_bit(bit, bitmap))
                        changed++;
                bit++;
        }
        return changed;
}
/*
 * Invalidate all the blocks in the range [block..block+nr_blocks-1]:
 * clear their bits in the zone bitmap and decrease the zone weight by
 * the number of bits that actually changed.
 */
int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
                          sector_t chunk_block, unsigned int nr_blocks)
{
        unsigned int count, bit, nr_bits;
        struct dmz_mblock *mblk;
        unsigned int n = 0;
        dmz_dev_debug(zmd->dev, "=> INVALIDATE zone %u, block %llu, %u blocks",
                      dmz_id(zmd, zone), (u64)chunk_block, nr_blocks);
        WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);
        while (nr_blocks) {
                /* Get bitmap block */
                mblk = dmz_get_bitmap(zmd, zone, chunk_block);
                if (IS_ERR(mblk))
                        return PTR_ERR(mblk);
                /* Clear bits, capped at the end of this bitmap block */
                bit = chunk_block & DMZ_BLOCK_MASK_BITS;
                nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
                count = dmz_clear_bits((unsigned long *)mblk->data,
                                       bit, nr_bits);
                if (count) {
                        /* Only dirty the block if something changed */
                        dmz_dirty_mblock(zmd, mblk);
                        n += count;
                }
                dmz_release_mblock(zmd, mblk);
                nr_blocks -= nr_bits;
                chunk_block += nr_bits;
        }
        /* Weight can never go negative; clamp and warn otherwise */
        if (zone->weight >= n)
                zone->weight -= n;
        else {
                dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be >= %u",
                             dmz_id(zmd, zone), zone->weight, n);
                zone->weight = 0;
        }
        return 0;
}
/*
 * Test the validity bit of @chunk_block in @zone.
 * Returns 1 if set, 0 if clear, or a negative errno on metadata
 * block read failure.
 */
static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
                          sector_t chunk_block)
{
        struct dmz_mblock *mblk;
        int valid;
        WARN_ON(chunk_block >= zmd->dev->zone_nr_blocks);
        mblk = dmz_get_bitmap(zmd, zone, chunk_block);
        if (IS_ERR(mblk))
                return PTR_ERR(mblk);
        valid = test_bit(chunk_block & DMZ_BLOCK_MASK_BITS,
                         (unsigned long *) mblk->data) ? 1 : 0;
        dmz_release_mblock(zmd, mblk);
        return valid;
}
/*
 * Return the number of blocks from chunk_block to the first block with a bit
 * value specified by set. Search at most nr_blocks blocks from chunk_block.
 * (@set non-zero searches for the next set bit, zero for the next clear bit.)
 */
static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
                                 sector_t chunk_block, unsigned int nr_blocks,
                                 int set)
{
        struct dmz_mblock *mblk;
        unsigned int bit, set_bit, nr_bits;
        unsigned long *bitmap;
        int n = 0;
        WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);
        while (nr_blocks) {
                /* Get bitmap block */
                mblk = dmz_get_bitmap(zmd, zone, chunk_block);
                if (IS_ERR(mblk))
                        return PTR_ERR(mblk);
                /* Search within this bitmap block only */
                bitmap = (unsigned long *) mblk->data;
                bit = chunk_block & DMZ_BLOCK_MASK_BITS;
                nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
                if (set)
                        set_bit = find_next_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
                else
                        set_bit = find_next_zero_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
                dmz_release_mblock(zmd, mblk);
                n += set_bit - bit;
                /* Found within this block (find_next_* returns size if not) */
                if (set_bit < DMZ_BLOCK_SIZE_BITS)
                        break;
                nr_blocks -= nr_bits;
                chunk_block += nr_bits;
        }
        return n;
}
/*
 * Test if chunk_block is valid. If it is, return the number of
 * consecutive valid blocks starting at chunk_block; otherwise return
 * 0 (invalid) or a negative errno.
 */
int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
                    sector_t chunk_block)
{
        int ret = dmz_test_block(zmd, zone, chunk_block);
        if (ret <= 0)
                return ret;
        /* Valid: count blocks up to the next clear (invalid) bit */
        return dmz_to_next_set_block(zmd, zone, chunk_block,
                                     zmd->dev->zone_nr_blocks - chunk_block, 0);
}
/*
 * Find the first valid block from *@chunk_block in @zone. On success,
 * *@chunk_block is advanced to that block and the length of the valid
 * run starting there is returned (0 if no valid block remains).
 */
int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
                          sector_t *chunk_block)
{
        sector_t block = *chunk_block;
        int skip;
        /* Distance to the first set (valid) bit */
        skip = dmz_to_next_set_block(zmd, zone, block,
                                     zmd->dev->zone_nr_blocks - block, 1);
        if (skip < 0)
                return skip;
        block += skip;
        *chunk_block = block;
        /* Length of the valid run starting there */
        return dmz_to_next_set_block(zmd, zone, block,
                                     zmd->dev->zone_nr_blocks - block, 0);
}
/*
 * Count the number of bits set starting from bit up to bit + nr_bits - 1.
 */
static int dmz_count_bits(void *bitmap, int bit, int nr_bits)
{
        int end = bit + nr_bits;
        int n = 0;
        while (bit < end) {
                /* Word-aligned with a full word remaining: try the fast path */
                if (!(bit & (BITS_PER_LONG - 1)) &&
                    end - bit >= BITS_PER_LONG) {
                        unsigned long *word =
                                (unsigned long *)bitmap + BIT_WORD(bit);
                        if (*word == ULONG_MAX) {
                                n += BITS_PER_LONG;
                                bit += BITS_PER_LONG;
                                continue;
                        }
                }
                /* Slow path: one bit at a time */
                if (test_bit(bit, bitmap))
                        n++;
                bit++;
        }
        return n;
}
/*
 * Compute a zone weight (its number of valid blocks) from the on-disk
 * bitmap. On a metadata read error, the zone is reported empty.
 */
static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
{
        unsigned int remaining = zmd->dev->zone_nr_blocks;
        sector_t block = 0;
        int weight = 0;
        while (remaining) {
                struct dmz_mblock *mblk;
                unsigned int bit, nbits;
                mblk = dmz_get_bitmap(zmd, zone, block);
                if (IS_ERR(mblk)) {
                        /* Read failure: treat the zone as empty */
                        weight = 0;
                        break;
                }
                /* Count the set bits within this bitmap block */
                bit = block & DMZ_BLOCK_MASK_BITS;
                nbits = min(remaining, DMZ_BLOCK_SIZE_BITS - bit);
                weight += dmz_count_bits(mblk->data, bit, nbits);
                dmz_release_mblock(zmd, mblk);
                remaining -= nbits;
                block += nbits;
        }
        zone->weight = weight;
}
/*
 * Cleanup the zoned metadata resources: release chunk map blocks,
 * super blocks, cached metadata blocks (warning about any still dirty
 * or still referenced) and the zone descriptor array.
 */
static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
{
        struct rb_root *root;
        struct dmz_mblock *mblk, *next;
        int i;
        /* Release zone mapping resources */
        if (zmd->map_mblk) {
                for (i = 0; i < zmd->nr_map_blocks; i++)
                        dmz_release_mblock(zmd, zmd->map_mblk[i]);
                kfree(zmd->map_mblk);
                zmd->map_mblk = NULL;
        }
        /* Release super blocks */
        for (i = 0; i < 2; i++) {
                if (zmd->sb[i].mblk) {
                        dmz_free_mblock(zmd, zmd->sb[i].mblk);
                        zmd->sb[i].mblk = NULL;
                }
        }
        /* Free cached blocks; dirty ones at this point indicate lost writes */
        while (!list_empty(&zmd->mblk_dirty_list)) {
                mblk = list_first_entry(&zmd->mblk_dirty_list,
                                        struct dmz_mblock, link);
                dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
                             (u64)mblk->no, mblk->ref);
                list_del_init(&mblk->link);
                rb_erase(&mblk->node, &zmd->mblk_rbtree);
                dmz_free_mblock(zmd, mblk);
        }
        while (!list_empty(&zmd->mblk_lru_list)) {
                mblk = list_first_entry(&zmd->mblk_lru_list,
                                        struct dmz_mblock, link);
                list_del_init(&mblk->link);
                rb_erase(&mblk->node, &zmd->mblk_rbtree);
                dmz_free_mblock(zmd, mblk);
        }
        /* Sanity checks: the mblock rbtree should now be empty */
        root = &zmd->mblk_rbtree;
        rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
                dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
                             (u64)mblk->no, mblk->ref);
                /* Force the ref to 0 so the block can be freed */
                mblk->ref = 0;
                dmz_free_mblock(zmd, mblk);
        }
        /* Free the zone descriptors */
        dmz_drop_zones(zmd);
}
/*
 * Initialize the zoned metadata: allocate the context, discover zones,
 * load the super block and chunk mapping, size the metadata block cache
 * and register its shrinker. On success *metadata is set; on failure
 * everything is torn down, *metadata is NULL and a negative errno is
 * returned.
 */
int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
{
        struct dmz_metadata *zmd;
        unsigned int i, zid;
        struct dm_zone *zone;
        int ret;
        zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL);
        if (!zmd)
                return -ENOMEM;
        zmd->dev = dev;
        zmd->mblk_rbtree = RB_ROOT;
        init_rwsem(&zmd->mblk_sem);
        mutex_init(&zmd->mblk_flush_lock);
        spin_lock_init(&zmd->mblk_lock);
        INIT_LIST_HEAD(&zmd->mblk_lru_list);
        INIT_LIST_HEAD(&zmd->mblk_dirty_list);
        mutex_init(&zmd->map_lock);
        atomic_set(&zmd->unmap_nr_rnd, 0);
        INIT_LIST_HEAD(&zmd->unmap_rnd_list);
        INIT_LIST_HEAD(&zmd->map_rnd_list);
        atomic_set(&zmd->unmap_nr_seq, 0);
        INIT_LIST_HEAD(&zmd->unmap_seq_list);
        INIT_LIST_HEAD(&zmd->map_seq_list);
        atomic_set(&zmd->nr_reserved_seq_zones, 0);
        INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);
        init_waitqueue_head(&zmd->free_wq);
        /* Initialize zone descriptors */
        ret = dmz_init_zones(zmd);
        if (ret)
                goto err;
        /* Get super block */
        ret = dmz_load_sb(zmd);
        if (ret)
                goto err;
        /* Set metadata zones starting from sb_zone (both metadata sets) */
        zid = dmz_id(zmd, zmd->sb_zone);
        for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
                zone = dmz_get(zmd, zid + i);
                /* Metadata zones must be randomly writable */
                if (!dmz_is_rnd(zone))
                        goto err;
                set_bit(DMZ_META, &zone->flags);
        }
        /* Load mapping table */
        ret = dmz_load_mapping(zmd);
        if (ret)
                goto err;
        /*
         * Cache size boundaries: allow at least 2 super blocks, the chunk map
         * blocks and enough blocks to be able to cache the bitmap blocks of
         * up to 16 zones when idle (min_nr_mblks). Otherwise, if busy, allow
         * the cache to add 512 more metadata blocks.
         */
        zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
        zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
        zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
        zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
        zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
        /* Metadata cache shrinker */
        ret = register_shrinker(&zmd->mblk_shrinker);
        if (ret) {
                dmz_dev_err(dev, "Register metadata cache shrinker failed");
                goto err;
        }
        /* Log the device geometry and format summary */
        dmz_dev_info(dev, "Host-%s zoned block device",
                     bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
                     "aware" : "managed");
        dmz_dev_info(dev, "  %llu 512-byte logical sectors",
                     (u64)dev->capacity);
        dmz_dev_info(dev, "  %u zones of %llu 512-byte logical sectors",
                     dev->nr_zones, (u64)dev->zone_nr_sectors);
        dmz_dev_info(dev, "  %u metadata zones",
                     zmd->nr_meta_zones * 2);
        dmz_dev_info(dev, "  %u data zones for %u chunks",
                     zmd->nr_data_zones, zmd->nr_chunks);
        dmz_dev_info(dev, "  %u random zones (%u unmapped)",
                     zmd->nr_rnd, atomic_read(&zmd->unmap_nr_rnd));
        dmz_dev_info(dev, "  %u sequential zones (%u unmapped)",
                     zmd->nr_seq, atomic_read(&zmd->unmap_nr_seq));
        dmz_dev_info(dev, "  %u reserved sequential data zones",
                     zmd->nr_reserved_seq);
        dmz_dev_debug(dev, "Format:");
        dmz_dev_debug(dev, "%u metadata blocks per set (%u max cache)",
                      zmd->nr_meta_blocks, zmd->max_nr_mblks);
        dmz_dev_debug(dev, "  %u data zone mapping blocks",
                      zmd->nr_map_blocks);
        dmz_dev_debug(dev, "  %u bitmap blocks",
                      zmd->nr_bitmap_blocks);
        *metadata = zmd;
        return 0;
err:
        dmz_cleanup_metadata(zmd);
        kfree(zmd);
        *metadata = NULL;
        return ret;
}
/*
 * Destroy the zoned metadata: unregister the cache shrinker, release
 * all metadata resources and free the context.
 */
void dmz_dtr_metadata(struct dmz_metadata *zmd)
{
        unregister_shrinker(&zmd->mblk_shrinker);
        dmz_cleanup_metadata(zmd);
        kfree(zmd);
}
/*
 * Check zone information on resume: refresh every zone from the device
 * and, when a sequential zone's write pointer no longer matches the
 * cached value, invalidate the blocks from the cached write pointer to
 * the end of the zone.
 */
int dmz_resume_metadata(struct dmz_metadata *zmd)
{
        struct dmz_dev *dev = zmd->dev;
        struct dm_zone *zone;
        sector_t wp_block;
        unsigned int i;
        int ret;
        /* Check zones */
        for (i = 0; i < dev->nr_zones; i++) {
                zone = dmz_get(zmd, i);
                if (!zone) {
                        dmz_dev_err(dev, "Unable to get zone %u", i);
                        return -EIO;
                }
                /* Remember the cached write pointer before the refresh */
                wp_block = zone->wp_block;
                ret = dmz_update_zone(zmd, zone);
                if (ret) {
                        dmz_dev_err(dev, "Broken zone %u", i);
                        return ret;
                }
                if (dmz_is_offline(zone)) {
                        dmz_dev_warn(dev, "Zone %u is offline", i);
                        continue;
                }
                /* Check write pointer */
                if (!dmz_is_seq(zone))
                        zone->wp_block = 0;
                else if (zone->wp_block != wp_block) {
                        dmz_dev_err(dev, "Zone %u: Invalid wp (%llu / %llu)",
                                    i, (u64)zone->wp_block, (u64)wp_block);
                        /* Keep the cached wp and drop everything beyond it */
                        zone->wp_block = wp_block;
                        dmz_invalidate_blocks(zmd, zone, zone->wp_block,
                                              dev->zone_nr_blocks - zone->wp_block);
                }
        }
        return 0;
}
|
864011.c | /*
* tcpprobe - Observe the TCP flow with kprobes.
*
* The idea for this came from Werner Almesberger's umlsim
* Copyright (C) 2004, Stephen Hemminger <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/time.h>
#include <net/net_namespace.h>
#include <net/tcp.h>
MODULE_AUTHOR("Stephen Hemminger <[email protected]>");
MODULE_DESCRIPTION("TCP cwnd snooper");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.1");
static int port __read_mostly = 0;
MODULE_PARM_DESC(port, "Port to match (0=all)");
module_param(port, int, 0);
static unsigned int bufsize __read_mostly = 4096;
MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)");
module_param(bufsize, uint, 0);
static int full __read_mostly;
MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)");
module_param(full, int, 0);
static const char procname[] = "tcpprobe";
struct tcp_log {
ktime_t tstamp;
__be32 saddr, daddr;
__be16 sport, dport;
u16 length;
u32 snd_nxt;
u32 snd_una;
u32 snd_wnd;
u32 snd_cwnd;
u32 ssthresh;
u32 srtt;
};
static struct {
spinlock_t lock;
wait_queue_head_t wait;
ktime_t start;
u32 lastcwnd;
unsigned long head, tail;
struct tcp_log *log;
} tcp_probe;
/* Number of log records currently queued in the ring (bufsize is a power of two). */
static inline int tcp_probe_used(void)
{
 unsigned long occupancy = tcp_probe.head - tcp_probe.tail;

 return occupancy & (bufsize - 1);
}
/* Free slots left in the ring; one slot stays unused to distinguish full/empty. */
static inline int tcp_probe_avail(void)
{
 int free_slots = bufsize - tcp_probe_used();

 return free_slots - 1;
}
/*
* Hook inserted to be called before each receive packet.
* Note: arguments must match tcp_rcv_established()!
*/
static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
    struct tcphdr *th, unsigned len)
{
 const struct tcp_sock *tp = tcp_sk(sk);
 const struct inet_sock *inet = inet_sk(sk);
 /* Only update if port matches */
 if ((port == 0 || ntohs(inet->inet_dport) == port ||
      ntohs(inet->inet_sport) == port) &&
     (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
  spin_lock(&tcp_probe.lock);
  /* If log fills, just silently drop */
  if (tcp_probe_avail() > 1) {
   struct tcp_log *p = tcp_probe.log + tcp_probe.head;
   p->tstamp = ktime_get();
   p->saddr = inet->inet_saddr;
   p->sport = inet->inet_sport;
   p->daddr = inet->inet_daddr;
   p->dport = inet->inet_dport;
   p->length = skb->len;
   p->snd_nxt = tp->snd_nxt;
   p->snd_una = tp->snd_una;
   p->snd_cwnd = tp->snd_cwnd;
   p->snd_wnd = tp->snd_wnd;
   p->ssthresh = tcp_current_ssthresh(sk);
   /* srtt is kept in fixed point (<<3); shift back before logging */
   p->srtt = tp->srtt >> 3;
   /* advance head with a mask; bufsize is a power of two */
   tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
  }
  /* record last cwnd even when the entry was dropped, so "cwnd
     changed" detection stays accurate */
  tcp_probe.lastcwnd = tp->snd_cwnd;
  spin_unlock(&tcp_probe.lock);
  wake_up(&tcp_probe.wait);
 }
 /* jprobe handlers must exit via jprobe_return(); the return value is unused */
 jprobe_return();
 return 0;
}
/* Jprobe attached to tcp_rcv_established(); our entry runs before it with
   the same argument list. */
static struct jprobe tcp_jprobe = {
 .kp = {
  .symbol_name = "tcp_rcv_established",
 },
 .entry = jtcp_rcv_established,
};
/* Opening /proc/net/tcpprobe empties the ring and restarts the time base. */
static int tcpprobe_open(struct inode *inode, struct file *file)
{
 /* Reset (empty) log under the lock so writers cannot interleave */
 spin_lock_bh(&tcp_probe.lock);
 tcp_probe.head = 0;
 tcp_probe.tail = 0;
 tcp_probe.start = ktime_get();
 spin_unlock_bh(&tcp_probe.lock);
 return 0;
}
static int tcpprobe_sprint(char *tbuf, int n)
{
const struct tcp_log *p
= tcp_probe.log + tcp_probe.tail;
struct timespec tv
= ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
return scnprintf(tbuf, n,
"%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u\n",
(unsigned long) tv.tv_sec,
(unsigned long) tv.tv_nsec,
&p->saddr, ntohs(p->sport),
&p->daddr, ntohs(p->dport),
p->length, p->snd_nxt, p->snd_una,
p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt);
}
/*
 * Drain whole formatted records into the user buffer. Blocks until at least
 * one record is queued; returns bytes copied, 0 plus a pending record when
 * the next record would not fit, or a negative errno.
 */
static ssize_t tcpprobe_read(struct file *file, char __user *buf,
        size_t len, loff_t *ppos)
{
 int error = 0;
 size_t cnt = 0;
 if (!buf)
  return -EINVAL;
 while (cnt < len) {
  char tbuf[164];
  int width;
  /* Wait for data in buffer */
  error = wait_event_interruptible(tcp_probe.wait,
      tcp_probe_used() > 0);
  if (error)
   break;
  spin_lock_bh(&tcp_probe.lock);
  if (tcp_probe.head == tcp_probe.tail) {
   /* multiple readers race? */
   spin_unlock_bh(&tcp_probe.lock);
   continue;
  }
  width = tcpprobe_sprint(tbuf, sizeof(tbuf));
  /* consume the record only if the caller has room for all of it */
  if (cnt + width < len)
   tcp_probe.tail = (tcp_probe.tail + 1) & (bufsize - 1);
  spin_unlock_bh(&tcp_probe.lock);
  /* if record greater than space available
     return partial buffer (so far) */
  if (cnt + width >= len)
   break;
  /* NOTE(review): the record was already dequeued above, so a failed
     copy drops it — presumably acceptable for a debug tool; confirm */
  if (copy_to_user(buf + cnt, tbuf, width))
   return -EFAULT;
  cnt += width;
 }
 /* nothing copied: report the wait error (0 or -ERESTARTSYS) instead */
 return cnt == 0 ? error : cnt;
}
/* /proc/net/tcpprobe: open resets the log, read drains it; no seeking. */
static const struct file_operations tcpprobe_fops = {
 .owner = THIS_MODULE,
 .open = tcpprobe_open,
 .read = tcpprobe_read,
 .llseek = noop_llseek,
};
/*
 * Module init: allocate the log ring (rounded up to a power of two so the
 * head/tail indices can wrap with a mask), create /proc/net/tcpprobe, and
 * register the jprobe on tcp_rcv_established().
 */
static __init int tcpprobe_init(void)
{
 int ret = -ENOMEM;
 init_waitqueue_head(&tcp_probe.wait);
 spin_lock_init(&tcp_probe.lock);
 if (bufsize == 0)
  return -EINVAL;
 /* ring indexing relies on bufsize being a power of two */
 bufsize = roundup_pow_of_two(bufsize);
 tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL);
 if (!tcp_probe.log)
  goto err0;
 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &tcpprobe_fops))
  goto err0;
 ret = register_jprobe(&tcp_jprobe);
 if (ret)
  goto err1;
 pr_info("TCP probe registered (port=%d) bufsize=%u\n", port, bufsize);
 return 0;
err1:
 proc_net_remove(&init_net, procname);
err0:
 /* kfree(NULL) is a no-op, so reaching here with a failed kcalloc is safe */
 kfree(tcp_probe.log);
 return ret;
}
module_init(tcpprobe_init);
static __exit void tcpprobe_exit(void)
{
 /* remove the proc entry first so no new readers can appear */
 proc_net_remove(&init_net, procname);
 /* then detach the probe; after this nothing writes into the ring */
 unregister_jprobe(&tcp_jprobe);
 kfree(tcp_probe.log);
}
module_exit(tcpprobe_exit);
|
843757.c | #include <ansi.h>
#include <armor.h>
inherit CLOTH;
/* Constructor: set up a plain mandarin jacket ("magua") cloth armor.
   Shared properties go on the blueprint only; clones inherit via
   set_default_object(). */
void create()
{
 set_name("马褂", ({ "cloth" }) );
 set_weight(3000);
 if( clonep() )
  set_default_object(__FILE__);
 else {
  /* blueprint-only properties: unit, description, material, armor value */
  set("unit", "件");
  set("long",
   "一件普通的马褂。\n");
  set("material", "cloth");
  set("armor_prop/armor", 1);
 }
 setup();
}
|
1003894.c | #include <locale.h>
#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#else
#include <stdlib.h>
#endif
#include <fcntl.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <termios.h>
#include <ctype.h>
#include <dirent.h>
#include <fftw3.h>
#include <getopt.h>
#include <pthread.h>
#include <signal.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include "debug.h"
#include "util.h"
#ifdef NCURSES
#include "output/terminal_bcircle.h"
#include "output/terminal_ncurses.h"
#include <curses.h>
#endif
#include "output/raw.h"
#include "output/terminal_noncurses.h"
#include "input/alsa.h"
#include "input/common.h"
#include "input/fifo.h"
#include "input/portaudio.h"
#include "input/pulse.h"
#include "input/shmem.h"
#include "input/sndio.h"
#include "config.h"
#ifdef __GNUC__
// curses.h or other sources may already define
#undef GCC_UNUSED
#define GCC_UNUSED __attribute__((unused))
#else
#define GCC_UNUSED /* nothing */
#endif
#define LEFT_CHANNEL 1
#define RIGHT_CHANNEL 2
// struct termios oldtio, newtio;
// int M = 8 * 1024;
// used by sig handler
// needs to know output mode in order to clean up terminal
int output_mode;
// whether we should reload the config or not
int should_reload = 0;
// whether we should only reload colors or not
int reload_colors = 0;
// these variables are used only in main, but making them global
// will allow us to not free them on exit without ASan complaining
struct config_params p;
fftw_complex *out_bass_l, *out_bass_r;
fftw_plan p_bass_l, p_bass_r;
fftw_complex *out_mid_l, *out_mid_r;
fftw_plan p_mid_l, p_mid_r;
fftw_complex *out_treble_l, *out_treble_r;
fftw_plan p_treble_l, p_treble_r;
// general: cleanup
// general: restore the terminal to a usable state, depending on which
// output backend was active (raw output needs no cleanup)
void cleanup(void) {
    switch (output_mode) {
    case OUTPUT_NCURSES:
#ifdef NCURSES
        cleanup_terminal_ncurses();
#endif
        break;
    case OUTPUT_NONCURSES:
        cleanup_terminal_noncurses();
        break;
    default:
        break;
    }
}
// general: handle signals
// Handle signals. SIGUSR1/SIGUSR2 only set reload flags consumed by the
// main loop; anything else cleans up the terminal and re-raises with the
// default disposition so the process still dies with the right status.
//
// FIX: printf() is not async-signal-safe (CERT SIG30-C) — calling it from a
// handler can deadlock or corrupt stdio state. Emit the identical message
// with write(), which is async-signal-safe.
// NOTE(review): should_reload/reload_colors are plain ints; ideally they
// would be volatile sig_atomic_t — confirm before changing the declarations.
void sig_handler(int sig_no) {
    if (sig_no == SIGUSR1) {
        should_reload = 1;
        return;
    }
    if (sig_no == SIGUSR2) {
        reload_colors = 1;
        return;
    }
    cleanup();
    if (sig_no == SIGINT) {
        static const char msg[] = "CTRL-C pressed -- goodbye\n";
        // write() is on the async-signal-safe list; ignore short writes here
        ssize_t unused = write(STDOUT_FILENO, msg, sizeof(msg) - 1);
        (void)unused;
    }
    signal(sig_no, SIG_DFL);
    raise(sig_no);
}
#ifdef ALSA
/* True when the ALSA source name explicitly addresses the snd-aloop
   loopback device ("hw:Loopback,..."). */
static bool is_loop_device_for_sure(const char *text) {
    static const char loopback_prefix[] = "hw:Loopback,";
    return strncmp(text, loopback_prefix, sizeof(loopback_prefix) - 1) == 0;
}
/* Check whether `path` names a directory we can open. */
static bool directory_exists(const char *path) {
    DIR *handle = opendir(path);
    bool exists = (handle != NULL);

    if (exists)
        closedir(handle);
    return exists;
}
#endif
/*
 * Collapse FFT magnitudes into one value per bar: for each bar, average the
 * magnitude of the FFT bins between its lower and upper cut-off, then scale
 * by sensitivity and the per-bar eq. Bars up to bass_cut_off_bar read from
 * the bass FFT, bars up to treble_cut_off_bar from the mid FFT, the rest
 * from the treble FFT. Returns a pointer to a static per-channel array, so
 * the result stays valid after return (and is overwritten on the next call
 * for the same channel).
 */
int *separate_freq_bands(int FFTbassbufferSize, fftw_complex out_bass[FFTbassbufferSize / 2 + 1],
                         int FFTmidbufferSize, fftw_complex out_mid[FFTmidbufferSize / 2 + 1],
                         int FFTtreblebufferSize,
                         fftw_complex out_treble[FFTtreblebufferSize / 2 + 1], int bass_cut_off_bar,
                         int treble_cut_off_bar, int number_of_bars,
                         int FFTbuffer_lower_cut_off[256], int FFTbuffer_upper_cut_off[256],
                         double eq[256], int channel, double sens, double ignore) {
    int n, i;
    double peak[257];
    /* static: the caller keeps the returned pointer across calls */
    static int bars_left[256];
    static int bars_right[256];
    double y[FFTbassbufferSize / 2 + 1];
    double temp;
    // process: separate frequency bands
    for (n = 0; n < number_of_bars; n++) {
        peak[n] = 0;
        i = 0;
        // process: get peaks
        for (i = FFTbuffer_lower_cut_off[n]; i <= FFTbuffer_upper_cut_off[n]; i++) {
            /* magnitude of the complex bin from the FFT that covers this band */
            if (n <= bass_cut_off_bar) {
                y[i] = hypot(out_bass[i][0], out_bass[i][1]);
            } else if (n > bass_cut_off_bar && n <= treble_cut_off_bar) {
                y[i] = hypot(out_mid[i][0], out_mid[i][1]);
            } else if (n > treble_cut_off_bar) {
                y[i] = hypot(out_treble[i][0], out_treble[i][1]);
            }
            peak[n] += y[i]; // adding up the band
        }
        peak[n] = peak[n] /
                  (FFTbuffer_upper_cut_off[n] - FFTbuffer_lower_cut_off[n] + 1); // getting average
        temp = peak[n] * sens * eq[n]; // multiplying with k and sens
        // printf("%d peak o: %f * sens: %f * k: %f = f: %f\n", o, peak[o], sens, eq[o], temp);
        /* drop values below the noise floor (truncated to int on store) */
        if (temp <= ignore)
            temp = 0;
        if (channel == LEFT_CHANNEL)
            bars_left[n] = temp;
        else
            bars_right[n] = temp;
    }
    if (channel == LEFT_CHANNEL)
        return bars_left;
    else
        return bars_right;
}
// process [smoothing]: spread each bar's value into its neighbours so the
// spectrum falls off smoothly. "waves" mode dampens the bar and applies a
// quadratic falloff by distance; "monstercat" mode divides by an
// exponential of the distance. Modifies `bars` in place and returns it.
int *monstercat_filter(int *bars, int number_of_bars, int waves, double monstercat) {
    int bar, neighbour, dist;

    if (waves > 0) {
        for (bar = 0; bar < number_of_bars; bar++) { // waves
            bars[bar] = bars[bar] / 1.25;
            // if (bars[bar] < 1) bars[bar] = 1;
            for (neighbour = bar - 1; neighbour >= 0; neighbour--) {
                dist = bar - neighbour;
                bars[neighbour] = max(bars[bar] - pow(dist, 2), bars[neighbour]);
            }
            for (neighbour = bar + 1; neighbour < number_of_bars; neighbour++) {
                dist = neighbour - bar;
                bars[neighbour] = max(bars[bar] - pow(dist, 2), bars[neighbour]);
            }
        }
    } else if (monstercat > 0) {
        for (bar = 0; bar < number_of_bars; bar++) {
            // if (bars[bar] < 1) bars[bar] = 1;
            for (neighbour = bar - 1; neighbour >= 0; neighbour--) {
                dist = bar - neighbour;
                bars[neighbour] = max(bars[bar] / pow(monstercat, dist), bars[neighbour]);
            }
            for (neighbour = bar + 1; neighbour < number_of_bars; neighbour++) {
                dist = neighbour - bar;
                bars[neighbour] = max(bars[bar] / pow(monstercat, dist), bars[neighbour]);
            }
        }
    }
    return bars;
}
// general: entry point
// Entry point. Flow: parse CLI args, then loop forever: load config, spin up
// the audio-input thread, compute per-bar FFT cut-offs, and run the render
// loop until a resize or config reload; tear everything down and repeat.
int main(int argc, char **argv) {
    // general: define variables
    pthread_t p_thread;
    int thr_id GCC_UNUSED;
    float cut_off_frequency[256];
    float relative_cut_off[256];
    int bars[256], FFTbuffer_lower_cut_off[256], FFTbuffer_upper_cut_off[256];
    int *bars_left, *bars_right, *bars_mono;
    int bars_mem[256];
    int bars_last[256];
    int previous_frame[256];
    int sleep = 0;  // frames of consecutive silence seen so far
    int n, height, lines, width, c, rest, inAtty, fp, fptest, rc;
    bool silence;
    // int cont = 1;
    int fall[256];
    // float temp;
    float bars_peak[256];
    double eq[256];
    float g;  // gravity (falloff acceleration), scaled by height/framerate
    struct timespec req = {.tv_sec = 0, .tv_nsec = 0};
    struct timespec sleep_mode_timer = {.tv_sec = 0, .tv_nsec = 0};
    char configPath[PATH_MAX];
    char *usage = "\n\
Usage : " PACKAGE " [options]\n\
Visualize audio input in terminal. \n\
\n\
Options:\n\
	-p          path to config file\n\
	-v          print version\n\
\n\
Keys:\n\
        Up        Increase sensitivity\n\
        Down      Decrease sensitivity\n\
        Left      Decrease number of bars\n\
        Right     Increase number of bars\n\
        r         Reload config\n\
        c         Reload colors only\n\
        f         Cycle foreground color\n\
        b         Cycle background color\n\
        q         Quit\n\
\n\
as of 0.4.0 all options are specified in config file, see in '/home/username/.config/cava/' \n";
    char ch = '\0';
    int number_of_bars = 25;
    int sourceIsAuto = 1;
    double userEQ_keys_to_bars_ratio;
    struct audio_data audio;
    memset(&audio, 0, sizeof(audio));
#ifndef NDEBUG
    int maxvalue = 0;
    int minvalue = 0;
#endif
    // general: console title
    printf("%c]0;%s%c", '\033', PACKAGE, '\007');
    configPath[0] = '\0';
    // general: handle Ctrl+C
    struct sigaction action;
    memset(&action, 0, sizeof(action));
    action.sa_handler = &sig_handler;
    sigaction(SIGINT, &action, NULL);
    sigaction(SIGTERM, &action, NULL);
    sigaction(SIGUSR1, &action, NULL);
    sigaction(SIGUSR2, &action, NULL);
    // general: handle command-line arguments
    while ((c = getopt(argc, argv, "p:vh")) != -1) {
        switch (c) {
        case 'p': // argument: fifo path
            snprintf(configPath, sizeof(configPath), "%s", optarg);
            break;
        case 'h': // argument: print usage
            printf("%s", usage);
            return 1;
        case '?': // argument: print usage
            printf("%s", usage);
            return 1;
        case 'v': // argument: print version
            printf(PACKAGE " " VERSION "\n");
            return 0;
        default: // argument: no arguments; exit
            abort();
        }
        n = 0;
    }
    // general: main loop
    while (1) {
        debug("loading config\n");
        // config: load
        struct error_s error;
        error.length = 0;
        if (!load_config(configPath, &p, 0, &error)) {
            fprintf(stderr, "Error loading config. %s", error.message);
            exit(EXIT_FAILURE);
        }
        output_mode = p.om;
        if (output_mode != OUTPUT_RAW) {
            // Check if we're running in a tty
            inAtty = 0;
            if (strncmp(ttyname(0), "/dev/tty", 8) == 0 || strcmp(ttyname(0), "/dev/console") == 0)
                inAtty = 1;
            // in macOS virtual terminals are called ttys(xyz) and there are no ttys
            if (strncmp(ttyname(0), "/dev/ttys", 9) == 0)
                inAtty = 0;
            if (inAtty) {
                system("setfont cava.psf >/dev/null 2>&1");
                system("setterm -blank 0");
            }
            // We use unicode block characters to draw the bars and
            // the locale var LANG must be set to use unicode chars.
            // For some reason this var can't be retrieved with
            // setlocale(LANG, NULL), so we get it with getenv.
            // Also we can't set it with setlocale(LANG "") so we
            // must set LC_ALL instead.
            // Attempting to set to en_US if not set, if that lang
            // is not installed and LANG is not set there will be
            // no output, for more info see #109 #344
            if (!getenv("LANG"))
                setlocale(LC_ALL, "en_US.utf8");
            else
                setlocale(LC_ALL, "");
        }
        // input: init
        int bass_cut_off = 150;
        int treble_cut_off = 1500;
        audio.source = malloc(1 + strlen(p.audio_source));
        strcpy(audio.source, p.audio_source);
        audio.format = -1;
        audio.rate = 0;
        audio.FFTbassbufferSize = 4096;
        audio.FFTmidbufferSize = 1024;
        audio.FFTtreblebufferSize = 512;
        audio.terminate = 0;
        if (p.stereo)
            audio.channels = 2;
        if (!p.stereo)
            audio.channels = 1;
        audio.average = false;
        audio.left = false;
        audio.right = false;
        if (strcmp(p.mono_option, "average") == 0)
            audio.average = true;
        if (strcmp(p.mono_option, "left") == 0)
            audio.left = true;
        if (strcmp(p.mono_option, "right") == 0)
            audio.right = true;
        audio.bass_index = 0;
        audio.mid_index = 0;
        audio.treble_index = 0;
        // BASS: allocate in/out FFT buffers and plans for the bass band
        // audio.FFTbassbufferSize = audio.rate / 20; // audio.FFTbassbufferSize;
        audio.in_bass_r = fftw_alloc_real(2 * (audio.FFTbassbufferSize / 2 + 1));
        audio.in_bass_l = fftw_alloc_real(2 * (audio.FFTbassbufferSize / 2 + 1));
        memset(audio.in_bass_r, 0, 2 * (audio.FFTbassbufferSize / 2 + 1) * sizeof(double));
        memset(audio.in_bass_l, 0, 2 * (audio.FFTbassbufferSize / 2 + 1) * sizeof(double));
        out_bass_l = fftw_alloc_complex(2 * (audio.FFTbassbufferSize / 2 + 1));
        out_bass_r = fftw_alloc_complex(2 * (audio.FFTbassbufferSize / 2 + 1));
        memset(out_bass_l, 0, 2 * (audio.FFTbassbufferSize / 2 + 1) * sizeof(fftw_complex));
        memset(out_bass_r, 0, 2 * (audio.FFTbassbufferSize / 2 + 1) * sizeof(fftw_complex));
        p_bass_l = fftw_plan_dft_r2c_1d(audio.FFTbassbufferSize, audio.in_bass_l, out_bass_l,
                                        FFTW_MEASURE);
        p_bass_r = fftw_plan_dft_r2c_1d(audio.FFTbassbufferSize, audio.in_bass_r, out_bass_r,
                                        FFTW_MEASURE);
        // MID: same for the mid band (smaller buffer, faster update)
        // audio.FFTmidbufferSize = audio.rate / bass_cut_off; // audio.FFTbassbufferSize;
        audio.in_mid_r = fftw_alloc_real(2 * (audio.FFTmidbufferSize / 2 + 1));
        audio.in_mid_l = fftw_alloc_real(2 * (audio.FFTmidbufferSize / 2 + 1));
        memset(audio.in_mid_r, 0, 2 * (audio.FFTmidbufferSize / 2 + 1) * sizeof(double));
        memset(audio.in_mid_l, 0, 2 * (audio.FFTmidbufferSize / 2 + 1) * sizeof(double));
        out_mid_l = fftw_alloc_complex(2 * (audio.FFTmidbufferSize / 2 + 1));
        out_mid_r = fftw_alloc_complex(2 * (audio.FFTmidbufferSize / 2 + 1));
        memset(out_mid_l, 0, 2 * (audio.FFTmidbufferSize / 2 + 1) * sizeof(fftw_complex));
        memset(out_mid_r, 0, 2 * (audio.FFTmidbufferSize / 2 + 1) * sizeof(fftw_complex));
        p_mid_l =
            fftw_plan_dft_r2c_1d(audio.FFTmidbufferSize, audio.in_mid_l, out_mid_l, FFTW_MEASURE);
        p_mid_r =
            fftw_plan_dft_r2c_1d(audio.FFTmidbufferSize, audio.in_mid_r, out_mid_r, FFTW_MEASURE);
        // TREBLE: same for the treble band (smallest buffer)
        // audio.FFTtreblebufferSize = audio.rate / treble_cut_off; // audio.FFTbassbufferSize;
        audio.in_treble_r = fftw_alloc_real(2 * (audio.FFTtreblebufferSize / 2 + 1));
        audio.in_treble_l = fftw_alloc_real(2 * (audio.FFTtreblebufferSize / 2 + 1));
        memset(audio.in_treble_r, 0, 2 * (audio.FFTtreblebufferSize / 2 + 1) * sizeof(double));
        memset(audio.in_treble_l, 0, 2 * (audio.FFTtreblebufferSize / 2 + 1) * sizeof(double));
        out_treble_l = fftw_alloc_complex(2 * (audio.FFTtreblebufferSize / 2 + 1));
        out_treble_r = fftw_alloc_complex(2 * (audio.FFTtreblebufferSize / 2 + 1));
        memset(out_treble_l, 0, 2 * (audio.FFTtreblebufferSize / 2 + 1) * sizeof(fftw_complex));
        memset(out_treble_r, 0, 2 * (audio.FFTtreblebufferSize / 2 + 1) * sizeof(fftw_complex));
        p_treble_l = fftw_plan_dft_r2c_1d(audio.FFTtreblebufferSize, audio.in_treble_l,
                                          out_treble_l, FFTW_MEASURE);
        p_treble_r = fftw_plan_dft_r2c_1d(audio.FFTtreblebufferSize, audio.in_treble_r,
                                          out_treble_r, FFTW_MEASURE);
        debug("got buffer size: %d, %d, %d", audio.FFTbassbufferSize, audio.FFTmidbufferSize,
              audio.FFTtreblebufferSize);
        reset_output_buffers(&audio);
        debug("starting audio thread\n");
        // input: start the backend-specific capture thread
        switch (p.im) {
#ifdef ALSA
        case INPUT_ALSA:
            // input_alsa: wait for the input to be ready
            if (is_loop_device_for_sure(audio.source)) {
                if (directory_exists("/sys/")) {
                    if (!directory_exists("/sys/module/snd_aloop/")) {
                        cleanup();
                        fprintf(stderr,
                                "Linux kernel module \"snd_aloop\" does not seem to be loaded.\n"
                                "Maybe run \"sudo modprobe snd_aloop\".\n");
                        exit(EXIT_FAILURE);
                    }
                }
            }
            thr_id = pthread_create(&p_thread, NULL, input_alsa,
                                    (void *)&audio); // starting alsamusic listener
            n = 0;
            while (audio.format == -1 || audio.rate == 0) {
                req.tv_sec = 0;
                req.tv_nsec = 1000000;
                nanosleep(&req, NULL);
                n++;
                if (n > 2000) {
                    cleanup();
                    fprintf(stderr, "could not get rate and/or format, problems with audio thread? "
                                    "quiting...\n");
                    exit(EXIT_FAILURE);
                }
            }
            debug("got format: %d and rate %d\n", audio.format, audio.rate);
            break;
#endif
        case INPUT_FIFO:
            // starting fifomusic listener
            thr_id = pthread_create(&p_thread, NULL, input_fifo, (void *)&audio);
            audio.rate = p.fifoSample;
            audio.format = p.fifoSampleBits;
            break;
#ifdef PULSE
        case INPUT_PULSE:
            if (strcmp(audio.source, "auto") == 0) {
                getPulseDefaultSink((void *)&audio);
                sourceIsAuto = 1;
            } else
                sourceIsAuto = 0;
            // starting pulsemusic listener
            thr_id = pthread_create(&p_thread, NULL, input_pulse, (void *)&audio);
            audio.rate = 44100;
            break;
#endif
#ifdef SNDIO
        case INPUT_SNDIO:
            thr_id = pthread_create(&p_thread, NULL, input_sndio, (void *)&audio);
            audio.rate = 44100;
            break;
#endif
        case INPUT_SHMEM:
            thr_id = pthread_create(&p_thread, NULL, input_shmem, (void *)&audio);
            n = 0;
            while (audio.rate == 0) {
                req.tv_sec = 0;
                req.tv_nsec = 1000000;
                nanosleep(&req, NULL);
                n++;
                if (n > 2000) {
                    cleanup();
                    fprintf(stderr, "could not get rate and/or format, problems with audio thread? "
                                    "quiting...\n");
                    exit(EXIT_FAILURE);
                }
            }
            debug("got format: %d and rate %d\n", audio.format, audio.rate);
            // audio.rate = 44100;
            break;
#ifdef PORTAUDIO
        case INPUT_PORTAUDIO:
            thr_id = pthread_create(&p_thread, NULL, input_portaudio, (void *)&audio);
            audio.rate = 44100;
            break;
#endif
        default:
            exit(EXIT_FAILURE); // Can't happen.
        }
        // Nyquist: nothing above rate/2 exists in the FFT output
        if (p.upper_cut_off > audio.rate / 2) {
            cleanup();
            fprintf(stderr, "higher cuttoff frequency can't be higher then sample rate / 2");
            exit(EXIT_FAILURE);
        }
        bool reloadConf = false;
        while (!reloadConf) { // jumping back to this loop means that you resized the screen
            for (n = 0; n < 256; n++) {
                bars_last[n] = 0;
                previous_frame[n] = 0;
                fall[n] = 0;
                bars_peak[n] = 0;
                bars_mem[n] = 0;
                bars[n] = 0;
            }
            // output: set up the selected backend and get terminal dimensions
            switch (output_mode) {
#ifdef NCURSES
            // output: start ncurses mode
            case OUTPUT_NCURSES:
                init_terminal_ncurses(p.color, p.bcolor, p.col, p.bgcol, p.gradient,
                                      p.gradient_count, p.gradient_colors, &width, &lines);
                // we have 8 times as much height due to using 1/8 block characters
                height = lines * 8;
                break;
#endif
            case OUTPUT_NONCURSES:
                get_terminal_dim_noncurses(&width, &lines);
                init_terminal_noncurses(inAtty, p.col, p.bgcol, width, lines, p.bar_width);
                height = (lines - 1) * 8;
                break;
            case OUTPUT_RAW:
                if (strcmp(p.raw_target, "/dev/stdout") != 0) {
                    // checking if file exists
                    if (access(p.raw_target, F_OK) != -1) {
                        // testopening in case it's a fifo
                        fptest = open(p.raw_target, O_RDONLY | O_NONBLOCK, 0644);
                        if (fptest == -1) {
                            printf("could not open file %s for writing\n", p.raw_target);
                            exit(1);
                        }
                    } else {
                        printf("creating fifo %s\n", p.raw_target);
                        if (mkfifo(p.raw_target, 0664) == -1) {
                            printf("could not create fifo %s\n", p.raw_target);
                            exit(1);
                        }
                        // fifo needs to be open for reading in order to write to it
                        fptest = open(p.raw_target, O_RDONLY | O_NONBLOCK, 0644);
                    }
                }
                fp = open(p.raw_target, O_WRONLY | O_NONBLOCK | O_CREAT, 0644);
                if (fp == -1) {
                    printf("could not open file %s for writing\n", p.raw_target);
                    exit(1);
                }
                printf("open file %s for writing raw output\n", p.raw_target);
                // width must be hardcoded for raw output.
                width = 256;
                if (strcmp(p.data_format, "binary") == 0) {
                    height = pow(2, p.bit_format) - 1;
                } else {
                    height = p.ascii_range;
                }
                break;
            default:
                exit(EXIT_FAILURE); // Can't happen.
            }
            // handle for user setting too many bars
            if (p.fixedbars) {
                p.autobars = 0;
                if (p.fixedbars * p.bar_width + p.fixedbars * p.bar_spacing - p.bar_spacing > width)
                    p.autobars = 1;
            }
            // getting original number of bars in case of resize
            if (p.autobars == 1) {
                number_of_bars = (width + p.bar_spacing) / (p.bar_width + p.bar_spacing);
                // if (p.bar_spacing != 0) number_of_bars = (width - number_of_bars * p.bar_spacing
                // + p.bar_spacing) / bar_width;
            } else
                number_of_bars = p.fixedbars;
            if (number_of_bars < 1)
                number_of_bars = 1; // must have at least 1 bars
            if (number_of_bars > 256)
                number_of_bars = 256; // cant have more than 256 bars
            if (p.stereo) { // stereo must have even numbers of bars
                if (number_of_bars % 2 != 0)
                    number_of_bars--;
            }
            // checks if there is still extra room, will use this to center
            rest = (width - number_of_bars * p.bar_width - number_of_bars * p.bar_spacing +
                    p.bar_spacing) /
                   2;
            if (rest < 0)
                rest = 0;
            // process [smoothing]: calculate gravity
            g = p.gravity * ((float)height / 2160) * pow((60 / (float)p.framerate), 2.5);
            // calculate integral value, must be reduced with height
            double integral = p.integral;
            if (height > 320)
                integral = p.integral * 1 / sqrt((log10((float)height / 10)));
#ifndef NDEBUG
            debug("height: %d width: %d bars:%d bar width: %d rest: %d\n", height, width,
                  number_of_bars, p.bar_width, rest);
#endif
            if (p.stereo)
                number_of_bars =
                    number_of_bars / 2; // in stereo only half the number_of_bars per channel
            if (p.userEQ_enabled && (number_of_bars > 0)) {
                userEQ_keys_to_bars_ratio =
                    (double)(((double)p.userEQ_keys) / ((double)number_of_bars));
            }
            // calculate frequency constant (used to distribute bars across the frequency band)
            double frequency_constant = log10((float)p.lower_cut_off / (float)p.upper_cut_off) /
                                        (1 / ((float)number_of_bars + 1) - 1);
            // process: calculate cutoff frequencies and eq
            int bass_cut_off_bar = -1;
            int treble_cut_off_bar = -1;
            bool first_bar = false;
            int first_treble_bar = 0;
            for (n = 0; n < number_of_bars + 1; n++) {
                double bar_distribution_coefficient = frequency_constant * (-1);
                bar_distribution_coefficient +=
                    ((float)n + 1) / ((float)number_of_bars + 1) * frequency_constant;
                cut_off_frequency[n] = p.upper_cut_off * pow(10, bar_distribution_coefficient);
                relative_cut_off[n] = cut_off_frequency[n] / (audio.rate / 2);
                // remember nyquist!, per my calculations this should be rate/2
                // and nyquist freq in M/2 but testing shows it is not...
                // or maybe the nq freq is in M/4
                eq[n] = pow(cut_off_frequency[n], 1);
                eq[n] *= (float)height / pow(2, 28);
                if (p.userEQ_enabled)
                    eq[n] *= p.userEQ[(int)floor(((double)n) * userEQ_keys_to_bars_ratio)];
                eq[n] /= log2(audio.FFTbassbufferSize);
                if (cut_off_frequency[n] < bass_cut_off) {
                    // BASS
                    FFTbuffer_lower_cut_off[n] =
                        relative_cut_off[n] * (audio.FFTbassbufferSize / 2) + 1;
                    bass_cut_off_bar++;
                    treble_cut_off_bar++;
                    eq[n] *= log2(audio.FFTbassbufferSize);
                } else if (cut_off_frequency[n] > bass_cut_off &&
                           cut_off_frequency[n] < treble_cut_off) {
                    // MID
                    FFTbuffer_lower_cut_off[n] =
                        relative_cut_off[n] * (audio.FFTmidbufferSize / 2) + 1;
                    treble_cut_off_bar++;
                    // first mid bar: close off the previous (bass) bar's range
                    if ((treble_cut_off_bar - bass_cut_off_bar) == 1) {
                        first_bar = true;
                        FFTbuffer_upper_cut_off[n - 1] =
                            relative_cut_off[n] * (audio.FFTbassbufferSize / 2);
                        if (FFTbuffer_upper_cut_off[n - 1] < FFTbuffer_lower_cut_off[n - 1])
                            FFTbuffer_upper_cut_off[n - 1] = FFTbuffer_lower_cut_off[n - 1];
                    } else {
                        first_bar = false;
                    }
                    eq[n] *= log2(audio.FFTmidbufferSize);
                } else {
                    // TREBLE
                    FFTbuffer_lower_cut_off[n] =
                        relative_cut_off[n] * (audio.FFTtreblebufferSize / 2) + 1;
                    first_treble_bar++;
                    // first treble bar: close off the previous (mid) bar's range
                    if (first_treble_bar == 1) {
                        first_bar = true;
                        FFTbuffer_upper_cut_off[n - 1] =
                            relative_cut_off[n] * (audio.FFTmidbufferSize / 2);
                        if (FFTbuffer_upper_cut_off[n - 1] < FFTbuffer_lower_cut_off[n - 1])
                            FFTbuffer_upper_cut_off[n - 1] = FFTbuffer_lower_cut_off[n - 1];
                    } else {
                        first_bar = false;
                    }
                    eq[n] *= log2(audio.FFTtreblebufferSize);
                }
                if (n != 0 && !first_bar) {
                    FFTbuffer_upper_cut_off[n - 1] = FFTbuffer_lower_cut_off[n] - 1;
                    // pushing the spectrum up if the exponential function gets "clumped" in the
                    // bass
                    if (FFTbuffer_lower_cut_off[n] <= FFTbuffer_lower_cut_off[n - 1])
                        FFTbuffer_lower_cut_off[n] = FFTbuffer_lower_cut_off[n - 1] + 1;
                    FFTbuffer_upper_cut_off[n - 1] = FFTbuffer_lower_cut_off[n] - 1;
                }
#ifndef NDEBUG
                initscr();
                curs_set(0);
                timeout(0);
                if (n != 0) {
                    mvprintw(n, 0, "%d: %f -> %f (%d -> %d) bass: %d, treble:%d \n", n,
                             cut_off_frequency[n - 1], cut_off_frequency[n],
                             FFTbuffer_lower_cut_off[n - 1], FFTbuffer_upper_cut_off[n - 1],
                             bass_cut_off_bar, treble_cut_off_bar);
                }
#endif
            }
            if (p.stereo)
                number_of_bars = number_of_bars * 2;
            bool resizeTerminal = false;
            // non-blocking stdin so keyboard polling never stalls the render loop
            fcntl(0, F_SETFL, O_NONBLOCK);
            if (p.framerate <= 1) {
                req.tv_sec = 1 / (float)p.framerate;
            } else {
                req.tv_sec = 0;
                req.tv_nsec = (1 / (float)p.framerate) * 1e9;
            }
            while (!resizeTerminal) {
// general: keyboard controls
#ifdef NCURSES
                if (output_mode == OUTPUT_NCURSES)
                    ch = getch();
#endif
                if (output_mode == OUTPUT_NONCURSES)
                    ch = fgetc(stdin);
                switch (ch) {
                case 65: // key up
                    p.sens = p.sens * 1.05;
                    break;
                case 66: // key down
                    p.sens = p.sens * 0.95;
                    break;
                case 68: // key right
                    p.bar_width++;
                    resizeTerminal = true;
                    break;
                case 67: // key left
                    if (p.bar_width > 1)
                        p.bar_width--;
                    resizeTerminal = true;
                    break;
                case 'r': // reload config
                    should_reload = 1;
                    break;
                case 'c': // reload colors
                    reload_colors = 1;
                    break;
                case 'f': // change foreground color
                    if (p.col < 7)
                        p.col++;
                    else
                        p.col = 0;
                    resizeTerminal = true;
                    break;
                case 'b': // change background color
                    if (p.bgcol < 7)
                        p.bgcol++;
                    else
                        p.bgcol = 0;
                    resizeTerminal = true;
                    break;
                case 'q':
                    if (sourceIsAuto)
                        free(audio.source);
                    cleanup();
                    return EXIT_SUCCESS;
                }
                if (should_reload) {
                    reloadConf = true;
                    resizeTerminal = true;
                    should_reload = 0;
                }
                if (reload_colors) {
                    struct error_s error;
                    error.length = 0;
                    if (!load_config(configPath, (void *)&p, 1, &error)) {
                        cleanup();
                        fprintf(stderr, "Error loading config. %s", error.message);
                        exit(EXIT_FAILURE);
                    }
                    resizeTerminal = true;
                    reload_colors = 0;
                }
                // if (cont == 0) break;
#ifndef NDEBUG
                // clear();
                refresh();
#endif
                // process: check if input is present
                silence = true;
                for (n = 0; n < audio.FFTbassbufferSize; n++) {
                    if (audio.in_bass_l[n] || audio.in_bass_r[n]) {
                        silence = false;
                        break;
                    }
                }
                if (silence)
                    sleep++;
                else
                    sleep = 0;
                // process: if input was present for the last 5 seconds apply FFT to it
                if (sleep < p.framerate * 5) {
                    // process: execute FFT and sort frequency bands
                    if (p.stereo) {
                        fftw_execute(p_bass_l);
                        fftw_execute(p_bass_r);
                        fftw_execute(p_mid_l);
                        fftw_execute(p_mid_r);
                        fftw_execute(p_treble_l);
                        fftw_execute(p_treble_r);
                        bars_left = separate_freq_bands(
                            audio.FFTbassbufferSize, out_bass_l, audio.FFTmidbufferSize, out_mid_l,
                            audio.FFTtreblebufferSize, out_treble_l, bass_cut_off_bar,
                            treble_cut_off_bar, number_of_bars / 2, FFTbuffer_lower_cut_off,
                            FFTbuffer_upper_cut_off, eq, LEFT_CHANNEL, p.sens, p.ignore);
                        bars_right = separate_freq_bands(
                            audio.FFTbassbufferSize, out_bass_r, audio.FFTmidbufferSize, out_mid_r,
                            audio.FFTtreblebufferSize, out_treble_r, bass_cut_off_bar,
                            treble_cut_off_bar, number_of_bars / 2, FFTbuffer_lower_cut_off,
                            FFTbuffer_upper_cut_off, eq, RIGHT_CHANNEL, p.sens, p.ignore);
                    } else {
                        fftw_execute(p_bass_l);
                        fftw_execute(p_mid_l);
                        fftw_execute(p_treble_l);
                        bars_mono = separate_freq_bands(
                            audio.FFTbassbufferSize, out_bass_l, audio.FFTmidbufferSize, out_mid_l,
                            audio.FFTtreblebufferSize, out_treble_l, bass_cut_off_bar,
                            treble_cut_off_bar, number_of_bars, FFTbuffer_lower_cut_off,
                            FFTbuffer_upper_cut_off, eq, LEFT_CHANNEL, p.sens, p.ignore);
                    }
                } else { //**if in sleep mode wait and continue**//
#ifndef NDEBUG
                    printw("no sound detected for 5 sec, going to sleep mode\n");
#endif
                    // wait 0.1 sec, then check sound again.
                    sleep_mode_timer.tv_sec = 0;
                    sleep_mode_timer.tv_nsec = 100000000;
                    nanosleep(&sleep_mode_timer, NULL);
                    continue;
                }
                // process [filter]
                if (p.monstercat) {
                    if (p.stereo) {
                        bars_left =
                            monstercat_filter(bars_left, number_of_bars / 2, p.waves, p.monstercat);
                        bars_right = monstercat_filter(bars_right, number_of_bars / 2, p.waves,
                                                       p.monstercat);
                    } else {
                        bars_mono =
                            monstercat_filter(bars_mono, number_of_bars, p.waves, p.monstercat);
                    }
                }
                // processing signal
                bool senselow = true;
                for (n = 0; n < number_of_bars; n++) {
                    // mirroring stereo channels
                    if (p.stereo) {
                        if (n < number_of_bars / 2) {
                            bars[n] = bars_left[number_of_bars / 2 - n - 1];
                        } else {
                            bars[n] = bars_right[n - number_of_bars / 2];
                        }
                    } else {
                        bars[n] = bars_mono[n];
                    }
                    // process [smoothing]: falloff
                    if (g > 0) {
                        if (bars[n] < bars_last[n]) {
                            // bar is dropping: accelerate down from its last peak
                            bars[n] = bars_peak[n] - (g * fall[n] * fall[n]);
                            if (bars[n] < 0)
                                bars[n] = 0;
                            fall[n]++;
                        } else {
                            bars_peak[n] = bars[n];
                            fall[n] = 0;
                        }
                        bars_last[n] = bars[n];
                    }
                    // process [smoothing]: integral
                    if (p.integral > 0) {
                        bars[n] = bars_mem[n] * integral + bars[n];
                        bars_mem[n] = bars[n];
                        int diff = height - bars[n];
                        if (diff < 0)
                            diff = 0;
                        double div = 1 / (diff + 1);
                        // bars[n] = bars[n] - pow(div, 10) * (height + 1);
                        bars_mem[n] = bars_mem[n] * (1 - div / 20);
                    }
#ifndef NDEBUG
                    mvprintw(n, 0, "%d: f:%f->%f (%d->%d), eq:\
                    %15e, peak:%d \n",
                             n, cut_off_frequency[n], cut_off_frequency[n + 1],
                             FFTbuffer_lower_cut_off[n], FFTbuffer_upper_cut_off[n], eq[n],
                             bars[n]);
                    if (bars[n] < minvalue) {
                        minvalue = bars[n];
                        debug("min value: %d\n", minvalue); // checking maxvalue 10000
                    }
                    if (bars[n] > maxvalue) {
                        maxvalue = bars[n];
                    }
                    if (bars[n] < 0) {
                        debug("negative bar value!! %d\n", bars[n]);
                        // exit(EXIT_FAILURE); // Can't happen.
                    }
#endif
                    // zero values causes divided by zero segfault (if not raw)
                    if (output_mode != OUTPUT_RAW && bars[n] < 1)
                        bars[n] = 1;
                    // automatic sens adjustment
                    if (p.autosens) {
                        if (bars[n] > height && senselow) {
                            p.sens = p.sens * 0.98;
                            senselow = false;
                        }
                    }
                }
                if (p.autosens && !silence && senselow)
                    p.sens = p.sens * 1.001;
#ifndef NDEBUG
                mvprintw(n + 1, 0, "sensitivity %.10e", p.sens);
                mvprintw(n + 2, 0, "min value: %d\n", minvalue); // checking maxvalue 10000
                mvprintw(n + 3, 0, "max value: %d\n", maxvalue); // checking maxvalue 10000
#endif
// output: draw processed input
#ifdef NDEBUG
                switch (output_mode) {
                case OUTPUT_NCURSES:
#ifdef NCURSES
                    rc = draw_terminal_ncurses(inAtty, lines, width, number_of_bars, p.bar_width,
                                               p.bar_spacing, rest, bars, previous_frame,
                                               p.gradient);
                    break;
#endif
                case OUTPUT_NONCURSES:
                    rc = draw_terminal_noncurses(inAtty, lines, width, number_of_bars, p.bar_width,
                                                 p.bar_spacing, rest, bars, previous_frame);
                    break;
                case OUTPUT_RAW:
                    rc = print_raw_out(number_of_bars, fp, p.is_bin, p.bit_format, p.ascii_range,
                                       p.bar_delim, p.frame_delim, bars);
                    break;
                default:
                    exit(EXIT_FAILURE); // Can't happen.
                }
                // terminal has been resized breaking to recalibrating values
                if (rc == -1)
                    resizeTerminal = true;
#endif
                memcpy(previous_frame, bars, 256 * sizeof(int));
                // checking if audio thread has exited unexpectedly
                if (audio.terminate == 1) {
                    cleanup();
                    fprintf(stderr, "Audio thread exited unexpectedly. %s\n", audio.error_message);
                    exit(EXIT_FAILURE);
                }
                nanosleep(&req, NULL);
            } // resize terminal
        } // reloading config
        req.tv_sec = 0;
        req.tv_nsec = 100; // waiting some time to make sure audio is ready
        nanosleep(&req, NULL);
        //**telling audio thread to terminate**//
        audio.terminate = 1;
        pthread_join(p_thread, NULL);
        if (p.userEQ_enabled)
            free(p.userEQ);
        if (sourceIsAuto)
            free(audio.source);
        // free all FFT buffers and plans before the next config reload
        fftw_free(audio.in_bass_r);
        fftw_free(audio.in_bass_l);
        fftw_free(out_bass_r);
        fftw_free(out_bass_l);
        fftw_destroy_plan(p_bass_l);
        fftw_destroy_plan(p_bass_r);
        fftw_free(audio.in_mid_r);
        fftw_free(audio.in_mid_l);
        fftw_free(out_mid_r);
        fftw_free(out_mid_l);
        fftw_destroy_plan(p_mid_l);
        fftw_destroy_plan(p_mid_r);
        fftw_free(audio.in_treble_r);
        fftw_free(audio.in_treble_l);
        fftw_free(out_treble_r);
        fftw_free(out_treble_l);
        fftw_destroy_plan(p_treble_l);
        fftw_destroy_plan(p_treble_r);
        cleanup();
        // fclose(fp);
    }
}
|
659080.c | /** @file
This file contains the internal functions required to generate a Firmware Volume.
Copyright (c) 2004 - 2018, Intel Corporation. All rights reserved.<BR>
Portions Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
Portions Copyright (c) 2016 HP Development Company, L.P.<BR>
Portions Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
//
// Include files
//
#if defined(__FreeBSD__)
#include <uuid.h>
#elif defined(__GNUC__)
#include <uuid/uuid.h>
#endif
#ifdef __GNUC__
#include <sys/stat.h>
#endif
#include <string.h>
#ifndef __GNUC__
#include <io.h>
#endif
#include <assert.h>
#include <Guid/FfsSectionAlignmentPadding.h>
#include "WinNtInclude.h"
#include "GenFvInternalLib.h"
#include "FvLib.h"
#include "PeCoffLib.h"
//
// Branch-instruction encodings used when patching architecture-specific
// reset vectors into the firmware volume.
//
#define ARMT_UNCONDITIONAL_JUMP_INSTRUCTION 0xEB000000
#define ARM64_UNCONDITIONAL_JUMP_INSTRUCTION 0x14000000
// Target-architecture flags selected while processing the FV contents.
BOOLEAN mArm = FALSE;
BOOLEAN mRiscV = FALSE;
// Largest FFS alignment requirement seen so far (log2 form elsewhere in this file).
STATIC UINT32 MaxFfsAlignment = 0;
// Set when a Volume Top File (VTF) has been found in the FV.
BOOLEAN VtfFileFlag = FALSE;
EFI_GUID mEfiFirmwareVolumeTopFileGuid = EFI_FFS_VOLUME_TOP_FILE_GUID;
// One GUID per file added to the FV, used to detect duplicate file GUIDs.
EFI_GUID mFileGuidArray [MAX_NUMBER_OF_FILES_IN_FV];
EFI_GUID mZeroGuid = {0x0, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}};
EFI_GUID mDefaultCapsuleGuid = {0x3B6686BD, 0x0D76, 0x4030, { 0xB7, 0x0E, 0xB5, 0x51, 0x9E, 0x2F, 0xC5, 0xA0 }};
EFI_GUID mEfiFfsSectionAlignmentPaddingGuid = EFI_FFS_SECTION_ALIGNMENT_PADDING_GUID;
//
// INF token names for the FVB2 attribute bits.  Index i maps to attribute
// bit (1 << i) in ParseFvInf; the NULL slot is a reserved bit position.
//
CHAR8 *mFvbAttributeName[] = {
  EFI_FVB2_READ_DISABLED_CAP_STRING,
  EFI_FVB2_READ_ENABLED_CAP_STRING,
  EFI_FVB2_READ_STATUS_STRING,
  EFI_FVB2_WRITE_DISABLED_CAP_STRING,
  EFI_FVB2_WRITE_ENABLED_CAP_STRING,
  EFI_FVB2_WRITE_STATUS_STRING,
  EFI_FVB2_LOCK_CAP_STRING,
  EFI_FVB2_LOCK_STATUS_STRING,
  NULL,
  EFI_FVB2_STICKY_WRITE_STRING,
  EFI_FVB2_MEMORY_MAPPED_STRING,
  EFI_FVB2_ERASE_POLARITY_STRING,
  EFI_FVB2_READ_LOCK_CAP_STRING,
  EFI_FVB2_READ_LOCK_STATUS_STRING,
  EFI_FVB2_WRITE_LOCK_CAP_STRING,
  EFI_FVB2_WRITE_LOCK_STATUS_STRING
};
//
// INF token names for the FV alignment values; the matching index is
// stored into bits 16+ of FvAttributes by ParseFvInf (Index << 16).
//
CHAR8 *mFvbAlignmentName[] = {
  EFI_FVB2_ALIGNMENT_1_STRING,
  EFI_FVB2_ALIGNMENT_2_STRING,
  EFI_FVB2_ALIGNMENT_4_STRING,
  EFI_FVB2_ALIGNMENT_8_STRING,
  EFI_FVB2_ALIGNMENT_16_STRING,
  EFI_FVB2_ALIGNMENT_32_STRING,
  EFI_FVB2_ALIGNMENT_64_STRING,
  EFI_FVB2_ALIGNMENT_128_STRING,
  EFI_FVB2_ALIGNMENT_256_STRING,
  EFI_FVB2_ALIGNMENT_512_STRING,
  EFI_FVB2_ALIGNMENT_1K_STRING,
  EFI_FVB2_ALIGNMENT_2K_STRING,
  EFI_FVB2_ALIGNMENT_4K_STRING,
  EFI_FVB2_ALIGNMENT_8K_STRING,
  EFI_FVB2_ALIGNMENT_16K_STRING,
  EFI_FVB2_ALIGNMENT_32K_STRING,
  EFI_FVB2_ALIGNMENT_64K_STRING,
  EFI_FVB2_ALIGNMENT_128K_STRING,
  EFI_FVB2_ALIGNMENT_256K_STRING,
  EFI_FVB2_ALIGNMENT_512K_STRING,
  EFI_FVB2_ALIGNMENT_1M_STRING,
  EFI_FVB2_ALIGNMENT_2M_STRING,
  EFI_FVB2_ALIGNMENT_4M_STRING,
  EFI_FVB2_ALIGNMENT_8M_STRING,
  EFI_FVB2_ALIGNMENT_16M_STRING,
  EFI_FVB2_ALIGNMENT_32M_STRING,
  EFI_FVB2_ALIGNMENT_64M_STRING,
  EFI_FVB2_ALIGNMENT_128M_STRING,
  EFI_FVB2_ALIGNMENT_256M_STRING,
  EFI_FVB2_ALIGNMENT_512M_STRING,
  EFI_FVB2_ALIGNMENT_1G_STRING,
  EFI_FVB2_ALIGNMENT_2G_STRING
};
//
// This data array will be located at the base of the Firmware Volume Header (FVH)
// in the boot block. It must not exceed 14 bytes of code. The last 2 bytes
// will be used to keep the FVH checksum consistent.
// This code will be run in response to a startup IPI for HT-enabled systems.
//
#define SIZEOF_STARTUP_DATA_ARRAY 0x10
UINT8 m128kRecoveryStartupApDataArray[SIZEOF_STARTUP_DATA_ARRAY] = {
  //
  // EA D0 FF 00 F0 ; far jmp F000:FFD0
  // 0, 0, 0, 0, 0, 0, 0, 0, 0, ; Reserved bytes
  // 0, 0 ; Checksum Padding
  //
  0xEA,
  0xD0,
  0xFF,
  0x0,
  0xF0,
  0x00,
  0x00,
  0x00,
  0x00,
  0x00,
  0x00,
  0x00,
  0x00,
  0x00,
  0x00,
  0x00
};
UINT8 m64kRecoveryStartupApDataArray[SIZEOF_STARTUP_DATA_ARRAY] = {
  //
  // EB CE ; jmp short ($-0x30)
  // ; (from offset 0x0 to offset 0xFFD0)
  // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ; Reserved bytes
  // 0, 0 ; Checksum Padding
  //
  0xEB,
  0xCE,
  0x00,
  0x00,
  0x00,
  0x00,
  0x00,
  0x00,
  0x00,
  0x00,
  0x00,
  0x00,
  0x00,
  0x00,
  0x00,
  0x00
};
// Parsed information about the FV / capsule currently being generated.
FV_INFO mFvDataInfo;
CAP_INFO mCapDataInfo;
// TRUE once a file requires the large (EFI_FFS_FILE_HEADER2) format.
BOOLEAN mIsLargeFfs = FALSE;
// Rebase addresses collected for the generated FV (up to 16 entries).
EFI_PHYSICAL_ADDRESS mFvBaseAddress[0x10];
UINT32 mFvBaseAddressNumber = 0;
EFI_STATUS
ParseFvInf (
  IN MEMORY_FILE *InfFile,
  OUT FV_INFO *FvInfo
  )
/*++
Routine Description:
  This function parses a FV.INF file and copies info into a FV_INFO structure.
Arguments:
  InfFile Memory file image.
  FvInfo Information read from INF file.
Returns:
  EFI_SUCCESS INF file information successfully retrieved.
  EFI_ABORTED INF file has an invalid format.
  EFI_NOT_FOUND A required string was not found in the INF file.
--*/
{
  CHAR8 Value[MAX_LONG_FILE_PATH];
  UINT64 Value64;
  UINTN Index;
  UINTN Number;
  EFI_STATUS Status;
  EFI_GUID GuidValue;
  //
  // Read the FV base address
  //
  if (!mFvDataInfo.BaseAddressSet) {
    Status = FindToken (InfFile, OPTIONS_SECTION_STRING, EFI_FV_BASE_ADDRESS_STRING, 0, Value);
    if (Status == EFI_SUCCESS) {
      //
      // Get the base address
      //
      Status = AsciiStringToUint64 (Value, FALSE, &Value64);
      if (EFI_ERROR (Status)) {
        Error (NULL, 0, 2000, "Invalid parameter", "%s = %s", EFI_FV_BASE_ADDRESS_STRING, Value);
        return EFI_ABORTED;
      }
      DebugMsg (NULL, 0, 9, "rebase address", "%s = %s", EFI_FV_BASE_ADDRESS_STRING, Value);
      FvInfo->BaseAddress = Value64;
      FvInfo->BaseAddressSet = TRUE;
    }
  }
  //
  // Read the FV File System Guid
  //
  if (!FvInfo->FvFileSystemGuidSet) {
    Status = FindToken (InfFile, OPTIONS_SECTION_STRING, EFI_FV_FILESYSTEMGUID_STRING, 0, Value);
    if (Status == EFI_SUCCESS) {
      //
      // Get the guid value
      //
      Status = StringToGuid (Value, &GuidValue);
      if (EFI_ERROR (Status)) {
        Error (NULL, 0, 2000, "Invalid parameter", "%s = %s", EFI_FV_FILESYSTEMGUID_STRING, Value);
        return EFI_ABORTED;
      }
      memcpy (&FvInfo->FvFileSystemGuid, &GuidValue, sizeof (EFI_GUID));
      FvInfo->FvFileSystemGuidSet = TRUE;
    }
  }
  //
  // Read the FV Extension Header File Name
  //
  Status = FindToken (InfFile, ATTRIBUTES_SECTION_STRING, EFI_FV_EXT_HEADER_FILE_NAME, 0, Value);
  if (Status == EFI_SUCCESS) {
    //
    // Bounds-check the token before copying into the fixed-size
    // FvExtHeaderFile buffer (unbounded strcpy would overflow it).
    //
    if (strlen (Value) > MAX_LONG_FILE_PATH - 1) {
      Error (NULL, 0, 2000, "Invalid parameter", "The file name %s is too long.", Value);
      return EFI_ABORTED;
    }
    strcpy (FvInfo->FvExtHeaderFile, Value);
  }
  //
  // Read the FV file name
  //
  Status = FindToken (InfFile, OPTIONS_SECTION_STRING, EFI_FV_FILE_NAME_STRING, 0, Value);
  if (Status == EFI_SUCCESS) {
    //
    // Copy the file name after validating it fits in the FvName buffer.
    //
    if (strlen (Value) > MAX_LONG_FILE_PATH - 1) {
      Error (NULL, 0, 2000, "Invalid parameter", "The file name %s is too long.", Value);
      return EFI_ABORTED;
    }
    strcpy (FvInfo->FvName, Value);
  }
  //
  // Read Fv Attribute
  //
  for (Index = 0; Index < sizeof (mFvbAttributeName)/sizeof (CHAR8 *); Index ++) {
    if ((mFvbAttributeName [Index] != NULL) && \
        (FindToken (InfFile, ATTRIBUTES_SECTION_STRING, mFvbAttributeName [Index], 0, Value) == EFI_SUCCESS)) {
      if ((strcmp (Value, TRUE_STRING) == 0) || (strcmp (Value, ONE_STRING) == 0)) {
        //
        // Attribute bit position corresponds 1:1 with the table index.
        //
        FvInfo->FvAttributes |= 1 << Index;
      } else if ((strcmp (Value, FALSE_STRING) != 0) && (strcmp (Value, ZERO_STRING) != 0)) {
        Error (NULL, 0, 2000, "Invalid parameter", "%s expected %s | %s", mFvbAttributeName [Index], TRUE_STRING, FALSE_STRING);
        return EFI_ABORTED;
      }
    }
  }
  //
  // Read Fv Alignment
  //
  for (Index = 0; Index < sizeof (mFvbAlignmentName)/sizeof (CHAR8 *); Index ++) {
    if (FindToken (InfFile, ATTRIBUTES_SECTION_STRING, mFvbAlignmentName [Index], 0, Value) == EFI_SUCCESS) {
      if (strcmp (Value, TRUE_STRING) == 0) {
        //
        // Alignment is recorded as log2 value in bits 16+ of FvAttributes.
        //
        FvInfo->FvAttributes |= Index << 16;
        DebugMsg (NULL, 0, 9, "FV file alignment", "Align = %s", mFvbAlignmentName [Index]);
        break;
      }
    }
  }
  //
  // Read weak alignment flag
  //
  Status = FindToken (InfFile, ATTRIBUTES_SECTION_STRING, EFI_FV_WEAK_ALIGNMENT_STRING, 0, Value);
  if (Status == EFI_SUCCESS) {
    if ((strcmp (Value, TRUE_STRING) == 0) || (strcmp (Value, ONE_STRING) == 0)) {
      FvInfo->FvAttributes |= EFI_FVB2_WEAK_ALIGNMENT;
    } else if ((strcmp (Value, FALSE_STRING) != 0) && (strcmp (Value, ZERO_STRING) != 0)) {
      Error (NULL, 0, 2000, "Invalid parameter", "Weak alignment value expected one of TRUE, FALSE, 1 or 0.");
      return EFI_ABORTED;
    }
  }
  //
  // Read block maps
  //
  for (Index = 0; Index < MAX_NUMBER_OF_FV_BLOCKS; Index++) {
    if (FvInfo->FvBlocks[Index].Length == 0) {
      //
      // Read block size
      //
      Status = FindToken (InfFile, OPTIONS_SECTION_STRING, EFI_BLOCK_SIZE_STRING, Index, Value);
      if (Status == EFI_SUCCESS) {
        //
        // Update the size of block
        //
        Status = AsciiStringToUint64 (Value, FALSE, &Value64);
        if (EFI_ERROR (Status)) {
          Error (NULL, 0, 2000, "Invalid parameter", "%s = %s", EFI_BLOCK_SIZE_STRING, Value);
          return EFI_ABORTED;
        }
        FvInfo->FvBlocks[Index].Length = (UINT32) Value64;
        DebugMsg (NULL, 0, 9, "FV Block Size", "%s = %s", EFI_BLOCK_SIZE_STRING, Value);
      } else {
        //
        // If there is no blocks size, but there is the number of block, then we have a mismatched pair
        // and should return an error.
        //
        Status = FindToken (InfFile, OPTIONS_SECTION_STRING, EFI_NUM_BLOCKS_STRING, Index, Value);
        if (!EFI_ERROR (Status)) {
          Error (NULL, 0, 2000, "Invalid parameter", "both %s and %s must be specified.", EFI_NUM_BLOCKS_STRING, EFI_BLOCK_SIZE_STRING);
          return EFI_ABORTED;
        } else {
          //
          // We are done
          //
          break;
        }
      }
      //
      // Read blocks number
      //
      Status = FindToken (InfFile, OPTIONS_SECTION_STRING, EFI_NUM_BLOCKS_STRING, Index, Value);
      if (Status == EFI_SUCCESS) {
        //
        // Update the number of blocks
        //
        Status = AsciiStringToUint64 (Value, FALSE, &Value64);
        if (EFI_ERROR (Status)) {
          Error (NULL, 0, 2000, "Invalid parameter", "%s = %s", EFI_NUM_BLOCKS_STRING, Value);
          return EFI_ABORTED;
        }
        FvInfo->FvBlocks[Index].NumBlocks = (UINT32) Value64;
        DebugMsg (NULL, 0, 9, "FV Block Number", "%s = %s", EFI_NUM_BLOCKS_STRING, Value);
      }
    }
  }
  //
  // At least one block map entry is mandatory.
  //
  if (Index == 0) {
    Error (NULL, 0, 2001, "Missing required argument", "block size.");
    return EFI_ABORTED;
  }
  //
  // Read files: find the first free slot, then append the FFS file list.
  //
  Number = 0;
  for (Number = 0; Number < MAX_NUMBER_OF_FILES_IN_FV; Number ++) {
    if (FvInfo->FvFiles[Number][0] == '\0') {
      break;
    }
  }
  for (Index = 0; Number + Index < MAX_NUMBER_OF_FILES_IN_FV; Index++) {
    //
    // Read the FFS file list
    //
    Status = FindToken (InfFile, FILES_SECTION_STRING, EFI_FILE_NAME_STRING, Index, Value);
    if (Status == EFI_SUCCESS) {
      //
      // Add the file after validating it fits in the FvFiles entry.
      //
      if (strlen (Value) > MAX_LONG_FILE_PATH - 1) {
        Error (NULL, 0, 2000, "Invalid parameter", "The file name %s is too long.", Value);
        return EFI_ABORTED;
      }
      strcpy (FvInfo->FvFiles[Number + Index], Value);
      DebugMsg (NULL, 0, 9, "FV component file", "the %uth name is %s", (unsigned) Index, Value);
    } else {
      break;
    }
  }
  if ((Index + Number) == 0) {
    Warning (NULL, 0, 0, "FV components are not specified.", NULL);
  }
  return EFI_SUCCESS;
}
VOID
UpdateFfsFileState (
  IN EFI_FFS_FILE_HEADER *FfsFile,
  IN EFI_FIRMWARE_VOLUME_HEADER *FvHeader
  )
/*++
Routine Description:
  Adjusts an FFS file's State byte to match the erase polarity of the FV
  that contains it: on erase-polarity-1 volumes the state bits are stored
  inverted, so the whole byte is complemented.
Arguments:
  FfsFile  File header to update.
  FvHeader Header of the containing firmware volume.
Returns:
  None
--*/
{
  BOOLEAN ErasePolarityIsOne;

  ErasePolarityIsOne = (BOOLEAN) ((FvHeader->Attributes & EFI_FVB2_ERASE_POLARITY) != 0);
  if (ErasePolarityIsOne) {
    //
    // Invert every state bit so the reserved bits read as erased.
    //
    FfsFile->State = (UINT8) ~FfsFile->State;
  }
}
EFI_STATUS
ReadFfsAlignment (
  IN EFI_FFS_FILE_HEADER *FfsFile,
  IN OUT UINT32 *Alignment
  )
/*++
Routine Description:
  Determines the required alignment of an FFS file from its attribute bits.
  Bits 3-5 of Attributes select one of eight base alignments; if the
  FFS_ATTRIB_DATA_ALIGNMENT2 bit is also set, an extended alignment from
  the 128K-16M range is selected instead.
Arguments:
  FfsFile   FFS file to parse.
  Alignment Receives the alignment as a log2 byte count.
Returns:
  EFI_SUCCESS           The function completed successfully.
  EFI_INVALID_PARAMETER One of the input parameters was invalid.
  EFI_ABORTED           An error occurred.
--*/
{
  //
  // Log2 alignments keyed by the 3-bit attribute field:
  //   without DATA_ALIGNMENT2: 1, 16, 128, 512, 1K, 4K, 32K, 64K bytes
  //   with    DATA_ALIGNMENT2: 128K, 256K, 512K, 1M, 2M, 4M, 8M, 16M bytes
  //
  static const UINT32 BaseAlignLog2[8]      = { 0, 4, 7, 9, 10, 12, 15, 16 };
  static const UINT32 ExtendedAlignLog2[8]  = { 17, 18, 19, 20, 21, 22, 23, 24 };
  UINT32 AlignmentIndex;

  //
  // Verify input parameters.
  //
  if (FfsFile == NULL || Alignment == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  AlignmentIndex = (FfsFile->Attributes >> 3) & 0x07;
  if (FfsFile->Attributes & FFS_ATTRIB_DATA_ALIGNMENT2) {
    *Alignment = ExtendedAlignLog2[AlignmentIndex];
  } else {
    *Alignment = BaseAlignLog2[AlignmentIndex];
  }
  return EFI_SUCCESS;
}
EFI_STATUS
AddPadFile (
  IN OUT MEMORY_FILE *FvImage,
  IN UINT32 DataAlignment,
  IN VOID *FvEnd,
  IN EFI_FIRMWARE_VOLUME_EXT_HEADER *ExtHeader,
  IN UINT32 NextFfsSize
  )
/*++
Routine Description:
  This function adds a pad file to the FV image if it required to align the
  data of the next file.
Arguments:
  FvImage The memory image of the FV to add it to.
  The current offset must be valid.
  DataAlignment The data alignment of the next FFS file.
  FvEnd End of the empty data in FvImage.
  ExtHeader PI FvExtHeader Optional
  NextFfsSize Size of the next FFS file; used only to decide whether that
  file needs the large (EFI_FFS_FILE_HEADER2) header format.
Returns:
  EFI_SUCCESS The function completed successfully.
  EFI_INVALID_PARAMETER One of the input parameters was invalid.
  EFI_OUT_OF_RESOURCES Insufficient resources exist in the FV to complete
  the pad file add.
--*/
{
  EFI_FFS_FILE_HEADER *PadFile;
  UINTN PadFileSize;
  UINT32 NextFfsHeaderSize;
  UINT32 CurFfsHeaderSize;
  UINT32 Index;
  Index = 0;
  CurFfsHeaderSize = sizeof (EFI_FFS_FILE_HEADER);
  //
  // Verify input parameters.
  //
  if (FvImage == NULL) {
    return EFI_INVALID_PARAMETER;
  }
  //
  // Calculate the pad file size
  //
  //
  // Append extension header size
  //
  if (ExtHeader != NULL) {
    //
    // The pad file carries the FV extension header as its payload; if the
    // combined size crosses MAX_FFS_SIZE the pad file itself must use the
    // large header format.
    //
    PadFileSize = ExtHeader->ExtHeaderSize;
    if (PadFileSize + sizeof (EFI_FFS_FILE_HEADER) >= MAX_FFS_SIZE) {
      CurFfsHeaderSize = sizeof (EFI_FFS_FILE_HEADER2);
    }
    PadFileSize += CurFfsHeaderSize;
  } else {
    NextFfsHeaderSize = sizeof (EFI_FFS_FILE_HEADER);
    if (NextFfsSize >= MAX_FFS_SIZE) {
      NextFfsHeaderSize = sizeof (EFI_FFS_FILE_HEADER2);
    }
    //
    // Check if a pad file is necessary
    //
    if (((UINTN) FvImage->CurrentFilePointer - (UINTN) FvImage->FileImage + NextFfsHeaderSize) % DataAlignment == 0) {
      return EFI_SUCCESS;
    }
    //
    // Start from the current offset plus both headers (the pad file's own
    // header and the next file's header), then round up to DataAlignment.
    //
    PadFileSize = (UINTN) FvImage->CurrentFilePointer - (UINTN) FvImage->FileImage + sizeof (EFI_FFS_FILE_HEADER) + NextFfsHeaderSize;
    //
    // Add whatever it takes to get to the next aligned address
    //
    while ((PadFileSize % DataAlignment) != 0) {
      PadFileSize++;
    }
    //
    // Subtract the next file header size
    //
    PadFileSize -= NextFfsHeaderSize;
    //
    // Subtract the starting offset to get size
    //
    PadFileSize -= (UINTN) FvImage->CurrentFilePointer - (UINTN) FvImage->FileImage;
  }
  //
  // Verify that we have enough space for the file header
  //
  if (((UINTN) FvImage->CurrentFilePointer + PadFileSize) > (UINTN) FvEnd) {
    return EFI_OUT_OF_RESOURCES;
  }
  //
  // Write pad file header
  //
  PadFile = (EFI_FFS_FILE_HEADER *) FvImage->CurrentFilePointer;
  //
  // Write PadFile FFS header with PadType, don't need to set PAD file guid in its header.
  //
  PadFile->Type = EFI_FV_FILETYPE_FFS_PAD;
  PadFile->Attributes = 0;
  //
  // Write pad file size (calculated size minus next file header size)
  //
  if (PadFileSize >= MAX_FFS_SIZE) {
    //
    // Large file: 3-byte Size field is zeroed and the 64-bit ExtendedSize
    // of the FFS2 header is used instead.
    //
    memset(PadFile->Size, 0, sizeof(UINT8) * 3);
    ((EFI_FFS_FILE_HEADER2 *)PadFile)->ExtendedSize = PadFileSize;
    PadFile->Attributes |= FFS_ATTRIB_LARGE_FILE;
  } else {
    //
    // Standard file: size is stored little-endian in the 3-byte Size field.
    //
    PadFile->Size[0] = (UINT8) (PadFileSize & 0xFF);
    PadFile->Size[1] = (UINT8) ((PadFileSize >> 8) & 0xFF);
    PadFile->Size[2] = (UINT8) ((PadFileSize >> 16) & 0xFF);
  }
  //
  // Fill in checksums and state, they must be 0 for checksumming.
  //
  PadFile->IntegrityCheck.Checksum.Header = 0;
  PadFile->IntegrityCheck.Checksum.File = 0;
  PadFile->State = 0;
  PadFile->IntegrityCheck.Checksum.Header = CalculateChecksum8 ((UINT8 *) PadFile, CurFfsHeaderSize);
  PadFile->IntegrityCheck.Checksum.File = FFS_FIXED_CHECKSUM;
  PadFile->State = EFI_FILE_HEADER_CONSTRUCTION | EFI_FILE_HEADER_VALID | EFI_FILE_DATA_VALID;
  UpdateFfsFileState (
    (EFI_FFS_FILE_HEADER *) PadFile,
    (EFI_FIRMWARE_VOLUME_HEADER *) FvImage->FileImage
    );
  //
  // Update the current FV pointer
  //
  FvImage->CurrentFilePointer += PadFileSize;
  if (ExtHeader != NULL) {
    //
    // Copy Fv Extension Header and Set Fv Extension header offset
    //
    if (ExtHeader->ExtHeaderSize > sizeof (EFI_FIRMWARE_VOLUME_EXT_HEADER)) {
      //
      // Walk the extension entries; if a USED_SIZE entry is present, patch
      // its UsedSize with the total (VTF present) or taken size of the FV.
      //
      for (Index = sizeof (EFI_FIRMWARE_VOLUME_EXT_HEADER); Index < ExtHeader->ExtHeaderSize;) {
        if (((EFI_FIRMWARE_VOLUME_EXT_ENTRY *)((UINT8 *)ExtHeader + Index))-> ExtEntryType == EFI_FV_EXT_TYPE_USED_SIZE_TYPE) {
          if (VtfFileFlag) {
            ((EFI_FIRMWARE_VOLUME_EXT_ENTRY_USED_SIZE_TYPE *)((UINT8 *)ExtHeader + Index))->UsedSize = mFvTotalSize;
          } else {
            ((EFI_FIRMWARE_VOLUME_EXT_ENTRY_USED_SIZE_TYPE *)((UINT8 *)ExtHeader + Index))->UsedSize = mFvTakenSize;
          }
          break;
        }
        Index += ((EFI_FIRMWARE_VOLUME_EXT_ENTRY *)((UINT8 *)ExtHeader + Index))-> ExtEntrySize;
      }
    }
    memcpy ((UINT8 *)PadFile + CurFfsHeaderSize, ExtHeader, ExtHeader->ExtHeaderSize);
    ((EFI_FIRMWARE_VOLUME_HEADER *) FvImage->FileImage)->ExtHeaderOffset = (UINT16) ((UINTN) ((UINT8 *)PadFile + CurFfsHeaderSize) - (UINTN) FvImage->FileImage);
    //
    // Make next file start at QWord Boundary
    //
    while (((UINTN) FvImage->CurrentFilePointer & (EFI_FFS_FILE_HEADER_ALIGNMENT - 1)) != 0) {
      FvImage->CurrentFilePointer++;
    }
  }
  return EFI_SUCCESS;
}
BOOLEAN
IsVtfFile (
  IN EFI_FFS_FILE_HEADER *FileBuffer
  )
/*++
Routine Description:
  Checks whether a file is the Volume Top File by comparing its name GUID
  against the well-known firmware-volume-top-file GUID.
Arguments:
  FileBuffer Buffer in which content of a file has been read.
Returns:
  TRUE If this is a VTF file
  FALSE If this is not a VTF file
--*/
{
  //
  // A VTF file is identified purely by its name GUID.
  //
  return (BOOLEAN) (memcmp (&FileBuffer->Name, &mEfiFirmwareVolumeTopFileGuid, sizeof (EFI_GUID)) == 0);
}
EFI_STATUS
WriteMapFile (
  IN OUT FILE *FvMapFile,
  IN CHAR8 *FileName,
  IN EFI_FFS_FILE_HEADER *FfsFile,
  IN EFI_PHYSICAL_ADDRESS ImageBaseAddress,
  IN PE_COFF_LOADER_IMAGE_CONTEXT *pImageContext
  )
/*++
Routine Description:
  This function gets the basic debug information (entrypoint, baseaddress, .text, .data section base address)
  from PE/COFF image and abstracts Pe Map file information and add them into FvMap file for Debug.
Arguments:
  FvMapFile A pointer to FvMap File
  FileName Ffs File PathName
  FfsFile A pointer to Ffs file image.
  ImageBaseAddress PeImage Base Address.
  pImageContext Image Context Information.
Returns:
  EFI_SUCCESS Added required map information.
--*/
{
  CHAR8 PeMapFileName [MAX_LONG_FILE_PATH];
  CHAR8 *Cptr, *Cptr2;
  CHAR8 FileGuidName [MAX_LINE_LEN];
  FILE *PeMapFile;
  CHAR8 Line [MAX_LINE_LEN];
  CHAR8 KeyWord [MAX_LINE_LEN];
  CHAR8 KeyWord2 [MAX_LINE_LEN];
  CHAR8 FunctionName [MAX_LINE_LEN];
  EFI_PHYSICAL_ADDRESS FunctionAddress;
  UINT32 FunctionType;
  CHAR8 FunctionTypeName [MAX_LINE_LEN];
  UINT32 Index;
  UINT32 AddressOfEntryPoint;
  UINT32 Offset;
  EFI_IMAGE_OPTIONAL_HEADER_UNION *ImgHdr;
  EFI_TE_IMAGE_HEADER *TEImageHeader;
  EFI_IMAGE_SECTION_HEADER *SectionHeader;
  long long TempLongAddress;
  UINT32 TextVirtualAddress;
  UINT32 DataVirtualAddress;
  EFI_PHYSICAL_ADDRESS LinkTimeBaseAddress;
  BOOLEAN IsUseClang;
  //
  // Init local variable
  //
  FunctionType = 0;
  //
  // Print FileGuid to string buffer.
  //
  PrintGuidToBuffer (&FfsFile->Name, (UINT8 *)FileGuidName, MAX_LINE_LEN, TRUE);
  //
  // Construct Map file Name
  //
  if (strlen (FileName) >= MAX_LONG_FILE_PATH) {
    return EFI_ABORTED;
  }
  strncpy (PeMapFileName, FileName, MAX_LONG_FILE_PATH - 1);
  PeMapFileName[MAX_LONG_FILE_PATH - 1] = 0;
  //
  // Change '\\' to '/', unified path format.
  //
  Cptr = PeMapFileName;
  while (*Cptr != '\0') {
    if (*Cptr == '\\') {
      *Cptr = FILE_SEP_CHAR;
    }
    Cptr ++;
  }
  //
  // Get Map file: scan backwards for the extension dot.  The bounds check
  // must come BEFORE the dereference; the previous order read one byte
  // before the start of PeMapFileName when the name contained no '.'.
  //
  Cptr = PeMapFileName + strlen (PeMapFileName);
  while ((Cptr >= PeMapFileName) && (*Cptr != '.')) {
    Cptr --;
  }
  if (Cptr < PeMapFileName) {
    return EFI_NOT_FOUND;
  } else {
    *(Cptr + 1) = 'm';
    *(Cptr + 2) = 'a';
    *(Cptr + 3) = 'p';
    *(Cptr + 4) = '\0';
  }
  //
  // Get module Name: scan backwards for the last path separator.  Same
  // bounds-before-dereference ordering as above.
  //
  Cptr2 = Cptr;
  while ((Cptr >= PeMapFileName) && (*Cptr != FILE_SEP_CHAR)) {
    Cptr --;
  }
  *Cptr2 = '\0';
  if (strlen (Cptr + 1) >= MAX_LINE_LEN) {
    return EFI_ABORTED;
  }
  strncpy (KeyWord, Cptr + 1, MAX_LINE_LEN - 1);
  KeyWord[MAX_LINE_LEN - 1] = 0;
  *Cptr2 = '.';
  //
  // AddressOfEntryPoint and Offset in Image
  //
  if (!pImageContext->IsTeImage) {
    ImgHdr = (EFI_IMAGE_OPTIONAL_HEADER_UNION *) ((UINT8 *) pImageContext->Handle + pImageContext->PeCoffHeaderOffset);
    AddressOfEntryPoint = ImgHdr->Pe32.OptionalHeader.AddressOfEntryPoint;
    Offset = 0;
    SectionHeader = (EFI_IMAGE_SECTION_HEADER *) (
                      (UINT8 *) ImgHdr +
                      sizeof (UINT32) +
                      sizeof (EFI_IMAGE_FILE_HEADER) +
                      ImgHdr->Pe32.FileHeader.SizeOfOptionalHeader
                      );
    Index = ImgHdr->Pe32.FileHeader.NumberOfSections;
  } else {
    //
    // TE images have the first StrippedSize bytes removed; account for it.
    //
    TEImageHeader = (EFI_TE_IMAGE_HEADER *) pImageContext->Handle;
    AddressOfEntryPoint = TEImageHeader->AddressOfEntryPoint;
    Offset = TEImageHeader->StrippedSize - sizeof (EFI_TE_IMAGE_HEADER);
    SectionHeader = (EFI_IMAGE_SECTION_HEADER *) (TEImageHeader + 1);
    Index = TEImageHeader->NumberOfSections;
  }
  //
  // module information output
  //
  if (ImageBaseAddress == 0) {
    fprintf (FvMapFile, "%s (dummy) (", KeyWord);
    fprintf (FvMapFile, "BaseAddress=%010llx, ", (unsigned long long) ImageBaseAddress);
  } else {
    fprintf (FvMapFile, "%s (Fixed Flash Address, ", KeyWord);
    fprintf (FvMapFile, "BaseAddress=0x%010llx, ", (unsigned long long) (ImageBaseAddress + Offset));
  }
  fprintf (FvMapFile, "EntryPoint=0x%010llx, ", (unsigned long long) (ImageBaseAddress + AddressOfEntryPoint));
  if (!pImageContext->IsTeImage) {
    fprintf (FvMapFile, "Type=PE");
  } else {
    fprintf (FvMapFile, "Type=TE");
  }
  fprintf (FvMapFile, ")\n");
  fprintf (FvMapFile, "(GUID=%s", FileGuidName);
  TextVirtualAddress = 0;
  DataVirtualAddress = 0;
  for (; Index > 0; Index --, SectionHeader ++) {
    if (stricmp ((CHAR8 *)SectionHeader->Name, ".text") == 0) {
      TextVirtualAddress = SectionHeader->VirtualAddress;
    } else if (stricmp ((CHAR8 *)SectionHeader->Name, ".data") == 0) {
      DataVirtualAddress = SectionHeader->VirtualAddress;
    } else if (stricmp ((CHAR8 *)SectionHeader->Name, ".sdata") == 0) {
      DataVirtualAddress = SectionHeader->VirtualAddress;
    }
  }
  fprintf (FvMapFile, " .textbaseaddress=0x%010llx", (unsigned long long) (ImageBaseAddress + TextVirtualAddress));
  fprintf (FvMapFile, " .databaseaddress=0x%010llx", (unsigned long long) (ImageBaseAddress + DataVirtualAddress));
  fprintf (FvMapFile, ")\n\n");
  //
  // Open PeMapFile
  //
  PeMapFile = fopen (LongFilePath (PeMapFileName), "r");
  if (PeMapFile == NULL) {
    // fprintf (stdout, "can't open %s file to reading\n", PeMapFileName);
    return EFI_ABORTED;
  }
  VerboseMsg ("The map file is %s", PeMapFileName);
  //
  // Output Functions information into Fv Map file
  //
  LinkTimeBaseAddress = 0;
  IsUseClang = FALSE;
  while (fgets (Line, MAX_LINE_LEN, PeMapFile) != NULL) {
    //
    // Skip blank line
    //
    if (Line[0] == 0x0a) {
      FunctionType = 0;
      continue;
    }
    //
    // By Address and Static keyword
    //
    if (FunctionType == 0) {
      sscanf (Line, "%s", KeyWord);
      if (stricmp (KeyWord, "Address") == 0) {
        sscanf (Line, "%s %s", KeyWord, KeyWord2);
        if (stricmp (KeyWord2, "Size") == 0) {
          IsUseClang = TRUE;
          FunctionType = 1;
          continue;
        }
        //
        // function list: skip the header underline; stop if the map file
        // ends here (the return value was previously ignored).
        //
        FunctionType = 1;
        if (fgets (Line, MAX_LINE_LEN, PeMapFile) == NULL) {
          break;
        }
      } else if (stricmp (KeyWord, "Static") == 0) {
        //
        // static function list
        //
        FunctionType = 2;
        if (fgets (Line, MAX_LINE_LEN, PeMapFile) == NULL) {
          break;
        }
      } else if (stricmp (KeyWord, "Preferred") ==0) {
        sscanf (Line + strlen (" Preferred load address is"), "%llx", &TempLongAddress);
        LinkTimeBaseAddress = (UINT64) TempLongAddress;
      }
      continue;
    }
    //
    // Printf Function Information
    //
    if (FunctionType == 1) {
      if (IsUseClang) {
        sscanf (Line, "%llx %s %s %s", &TempLongAddress, KeyWord, KeyWord2, FunctionTypeName);
        FunctionAddress = (UINT64) TempLongAddress;
        if (FunctionTypeName [0] == '_' ) {
          fprintf (FvMapFile, "  0x%010llx                ", (unsigned long long) (ImageBaseAddress + FunctionAddress - LinkTimeBaseAddress));
          fprintf (FvMapFile, "%s\n", FunctionTypeName);
        }
      } else {
        sscanf (Line, "%s %s %llx %s", KeyWord, FunctionName, &TempLongAddress, FunctionTypeName);
        FunctionAddress = (UINT64) TempLongAddress;
        if (FunctionTypeName [1] == '\0' && (FunctionTypeName [0] == 'f' || FunctionTypeName [0] == 'F')) {
          fprintf (FvMapFile, "  0x%010llx                ", (unsigned long long) (ImageBaseAddress + FunctionAddress - LinkTimeBaseAddress));
          fprintf (FvMapFile, "%s\n", FunctionName);
        }
      }
    } else if (FunctionType == 2) {
      sscanf (Line, "%s %s %llx %s", KeyWord, FunctionName, &TempLongAddress, FunctionTypeName);
      FunctionAddress = (UINT64) TempLongAddress;
      if (FunctionTypeName [1] == '\0' && (FunctionTypeName [0] == 'f' || FunctionTypeName [0] == 'F')) {
        fprintf (FvMapFile, "  0x%010llx                ", (unsigned long long) (ImageBaseAddress + FunctionAddress - LinkTimeBaseAddress));
        fprintf (FvMapFile, "%s\n", FunctionName);
      }
    }
  }
  //
  // Close PeMap file
  //
  fprintf (FvMapFile, "\n\n");
  fclose (PeMapFile);
  return EFI_SUCCESS;
}
STATIC
BOOLEAN
AdjustInternalFfsPadding (
  IN OUT EFI_FFS_FILE_HEADER *FfsFile,
  IN OUT MEMORY_FILE *FvImage,
  IN UINTN Alignment,
  IN OUT UINTN *FileSize
  )
/*++
Routine Description:
  This function looks for a dedicated alignment padding section in the FFS, and
  shrinks it to the size required to line up subsequent sections correctly.
Arguments:
  FfsFile A pointer to Ffs file image.
  FvImage The memory image of the FV to adjust it to.
  Alignment Current file alignment
  FileSize Reference to a variable holding the size of the FFS file
Returns:
  TRUE Padding section was found and updated successfully
  FALSE Otherwise
--*/
{
  EFI_FILE_SECTION_POINTER PadSection;
  UINT8 *Remainder;
  EFI_STATUS Status;
  UINT32 FfsHeaderLength;
  UINT32 FfsFileLength;
  UINT32 PadSize;
  UINTN Misalignment;
  EFI_FFS_INTEGRITY_CHECK *IntegrityCheck;
  //
  // Figure out the misalignment: all FFS sections are aligned relative to the
  // start of the FFS payload, so use that as the base of the misalignment
  // computation.
  //
  FfsHeaderLength = GetFfsHeaderLength(FfsFile);
  Misalignment = (UINTN) FvImage->CurrentFilePointer -
                 (UINTN) FvImage->FileImage + FfsHeaderLength;
  // Alignment is assumed to be a power of two here, so masking with
  // (Alignment - 1) yields the offset modulo Alignment.
  Misalignment &= Alignment - 1;
  if (Misalignment == 0) {
    // Nothing to do, return success
    return TRUE;
  }
  //
  // We only apply this optimization to FFS files with the FIXED attribute set,
  // since the FFS will not be loadable at arbitrary offsets anymore after
  // we adjust the size of the padding section.
  //
  if ((FfsFile->Attributes & FFS_ATTRIB_FIXED) == 0) {
    return FALSE;
  }
  //
  // Look for a dedicated padding section that we can adjust to compensate
  // for the misalignment. If such a padding section exists, it precedes all
  // sections with alignment requirements, and so the adjustment will correct
  // all of them.
  //
  Status = GetSectionByType (FfsFile, EFI_SECTION_FREEFORM_SUBTYPE_GUID, 1,
             &PadSection);
  if (EFI_ERROR (Status) ||
      CompareGuid (&PadSection.FreeformSubtypeSection->SubTypeGuid,
        &mEfiFfsSectionAlignmentPaddingGuid) != 0) {
    return FALSE;
  }
  //
  // Find out if the size of the padding section is sufficient to compensate
  // for the misalignment.
  //
  PadSize = GetSectionFileLength (PadSection.CommonHeader);
  if (Misalignment > PadSize - sizeof (EFI_FREEFORM_SUBTYPE_GUID_SECTION)) {
    return FALSE;
  }
  //
  // Move the remainder of the FFS file towards the front, and adjust the
  // file size output parameter.
  //
  Remainder = (UINT8 *) PadSection.CommonHeader + PadSize;
  // memmove is required: source and destination regions overlap.
  memmove (Remainder - Misalignment, Remainder,
           *FileSize - (UINTN) (Remainder - (UINTN) FfsFile));
  *FileSize -= Misalignment;
  //
  // Update the padding section's length with the new values. Note that the
  // padding is always < 64 KB, so we can ignore EFI_COMMON_SECTION_HEADER2
  // ExtendedSize.
  //
  PadSize -= Misalignment;
  PadSection.CommonHeader->Size[0] = (UINT8) (PadSize & 0xff);
  PadSection.CommonHeader->Size[1] = (UINT8) ((PadSize & 0xff00) >> 8);
  PadSection.CommonHeader->Size[2] = (UINT8) ((PadSize & 0xff0000) >> 16);
  //
  // Update the FFS header with the new overall length
  //
  FfsFileLength = GetFfsFileLength (FfsFile) - Misalignment;
  if (FfsHeaderLength > sizeof(EFI_FFS_FILE_HEADER)) {
    ((EFI_FFS_FILE_HEADER2 *)FfsFile)->ExtendedSize = FfsFileLength;
  } else {
    FfsFile->Size[0] = (UINT8) (FfsFileLength & 0x000000FF);
    FfsFile->Size[1] = (UINT8) ((FfsFileLength & 0x0000FF00) >> 8);
    FfsFile->Size[2] = (UINT8) ((FfsFileLength & 0x00FF0000) >> 16);
  }
  //
  // Clear the alignment bits: these have become meaningless now that we have
  // adjusted the padding section.
  //
  FfsFile->Attributes &= ~(FFS_ATTRIB_DATA_ALIGNMENT | FFS_ATTRIB_DATA_ALIGNMENT2);
  //
  // Recalculate the FFS header checksum. Instead of setting Header and State
  // both to zero, set Header to (UINT8)(-State) so State preserves its original
  // value
  //
  IntegrityCheck = &FfsFile->IntegrityCheck;
  IntegrityCheck->Checksum.Header = (UINT8) (0x100 - FfsFile->State);
  IntegrityCheck->Checksum.File = 0;
  IntegrityCheck->Checksum.Header = CalculateChecksum8 (
                                      (UINT8 *) FfsFile, FfsHeaderLength);
  if (FfsFile->Attributes & FFS_ATTRIB_CHECKSUM) {
    //
    // Ffs header checksum = zero, so only need to calculate ffs body.
    //
    IntegrityCheck->Checksum.File = CalculateChecksum8 (
                                      (UINT8 *) FfsFile + FfsHeaderLength,
                                      FfsFileLength - FfsHeaderLength);
  } else {
    IntegrityCheck->Checksum.File = FFS_FIXED_CHECKSUM;
  }
  return TRUE;
}
EFI_STATUS
AddFile (
  IN OUT MEMORY_FILE          *FvImage,
  IN FV_INFO                  *FvInfo,
  IN UINTN                    Index,
  IN OUT EFI_FFS_FILE_HEADER  **VtfFileImage,
  IN FILE                     *FvMapFile,
  IN FILE                     *FvReportFile
  )
/*++
Routine Description:
  This function adds a file to the FV image. The file will pad to the
  appropriate alignment if required.
Arguments:
  FvImage       The memory image of the FV to add it to. The current offset
                must be valid.
  FvInfo        Pointer to information about the FV.
  Index         The file in the FvInfo file list to add.
  VtfFileImage  A pointer to the VTF file within the FvImage. If this is equal
                to the end of the FvImage then no VTF previously found.
  FvMapFile     Pointer to FvMap File
  FvReportFile  Pointer to FvReport File
Returns:
  EFI_SUCCESS              The function completed successfully.
  EFI_INVALID_PARAMETER    One of the input parameters was invalid.
  EFI_ABORTED              An error occurred.
  EFI_OUT_OF_RESOURCES     Insufficient resources exist to complete the add.
--*/
{
  FILE                  *NewFile;
  UINTN                 FileSize;
  UINT8                 *FileBuffer;
  UINTN                 NumBytesRead;
  UINT32                CurrentFileAlignment;
  EFI_STATUS            Status;
  UINTN                 Index1;
  UINT8                 FileGuidString[PRINTED_GUID_BUFFER_SIZE];

  Index1 = 0;
  //
  // Verify input parameters.
  //
  if (FvImage == NULL || FvInfo == NULL || FvInfo->FvFiles[Index][0] == 0 || VtfFileImage == NULL) {
    return EFI_INVALID_PARAMETER;
  }
  //
  // Read the file to add
  //
  NewFile = fopen (LongFilePath (FvInfo->FvFiles[Index]), "rb");
  if (NewFile == NULL) {
    Error (NULL, 0, 0001, "Error opening file", FvInfo->FvFiles[Index]);
    return EFI_ABORTED;
  }
  //
  // Get the file size
  //
  FileSize = _filelength (fileno (NewFile));
  //
  // Read the file into a buffer
  //
  FileBuffer = malloc (FileSize);
  if (FileBuffer == NULL) {
    fclose (NewFile);
    Error (NULL, 0, 4001, "Resource", "memory cannot be allocated!");
    return EFI_OUT_OF_RESOURCES;
  }
  NumBytesRead = fread (FileBuffer, sizeof (UINT8), FileSize, NewFile);
  //
  // Done with the file, from this point on we will just use the buffer read.
  //
  fclose (NewFile);
  //
  // Verify read successful
  //
  if (NumBytesRead != sizeof (UINT8) * FileSize) {
    free (FileBuffer);
    Error (NULL, 0, 0004, "Error reading file", FvInfo->FvFiles[Index]);
    return EFI_ABORTED;
  }
  //
  // For None PI Ffs file, directly add them into FvImage.
  //
  if (!FvInfo->IsPiFvImage) {
    memcpy (FvImage->CurrentFilePointer, FileBuffer, FileSize);
    if (FvInfo->SizeofFvFiles[Index] > FileSize) {
      FvImage->CurrentFilePointer += FvInfo->SizeofFvFiles[Index];
    } else {
      FvImage->CurrentFilePointer += FileSize;
    }
    goto Done;
  }
  //
  // Verify Ffs file
  //
  Status = VerifyFfsFile ((EFI_FFS_FILE_HEADER *)FileBuffer);
  if (EFI_ERROR (Status)) {
    free (FileBuffer);
    Error (NULL, 0, 3000, "Invalid", "%s is not a valid FFS file.", FvInfo->FvFiles[Index]);
    return EFI_INVALID_PARAMETER;
  }
  //
  // Verify space exists to add the file
  //
  if (FileSize > (UINTN) ((UINTN) *VtfFileImage - (UINTN) FvImage->CurrentFilePointer)) {
    free (FileBuffer);
    Error (NULL, 0, 4002, "Resource", "FV space is full, not enough room to add file %s.", FvInfo->FvFiles[Index]);
    return EFI_OUT_OF_RESOURCES;
  }
  //
  // Verify the input file is the duplicated file in this Fv image
  //
  for (Index1 = 0; Index1 < Index; Index1 ++) {
    if (CompareGuid ((EFI_GUID *) FileBuffer, &mFileGuidArray [Index1]) == 0) {
      Error (NULL, 0, 2000, "Invalid parameter", "the %dth file and %uth file have the same file GUID.", (unsigned) Index1 + 1, (unsigned) Index + 1);
      PrintGuid ((EFI_GUID *) FileBuffer);
      free (FileBuffer);
      return EFI_INVALID_PARAMETER;
    }
  }
  CopyMem (&mFileGuidArray [Index], FileBuffer, sizeof (EFI_GUID));
  //
  // Update the file state based on polarity of the FV.
  //
  UpdateFfsFileState (
    (EFI_FFS_FILE_HEADER *) FileBuffer,
    (EFI_FIRMWARE_VOLUME_HEADER *) FvImage->FileImage
    );
  //
  // Check if alignment is required
  //
  ReadFfsAlignment ((EFI_FFS_FILE_HEADER *) FileBuffer, &CurrentFileAlignment);
  //
  // Find the largest alignment of all the FFS files in the FV
  //
  if (CurrentFileAlignment > MaxFfsAlignment) {
    MaxFfsAlignment = CurrentFileAlignment;
  }
  //
  // If we have a VTF file, add it at the top.
  //
  if (IsVtfFile ((EFI_FFS_FILE_HEADER *) FileBuffer)) {
    if ((UINTN) *VtfFileImage == (UINTN) FvImage->Eof) {
      //
      // No previous VTF, add this one.
      //
      *VtfFileImage = (EFI_FFS_FILE_HEADER *) (UINTN) ((UINTN) FvImage->FileImage + FvInfo->Size - FileSize);
      //
      // Sanity check. The file MUST align appropriately
      //
      if (((UINTN) *VtfFileImage + GetFfsHeaderLength((EFI_FFS_FILE_HEADER *)FileBuffer) - (UINTN) FvImage->FileImage) % (1 << CurrentFileAlignment)) {
        Error (NULL, 0, 3000, "Invalid", "VTF file cannot be aligned on a %u-byte boundary.", (unsigned) (1 << CurrentFileAlignment));
        free (FileBuffer);
        return EFI_ABORTED;
      }
      //
      // Rebase the PE or TE image in FileBuffer of FFS file for XIP
      // Rebase for the debug genfvmap tool
      //
      Status = FfsRebase (FvInfo, FvInfo->FvFiles[Index], (EFI_FFS_FILE_HEADER *) FileBuffer, (UINTN) *VtfFileImage - (UINTN) FvImage->FileImage, FvMapFile);
      if (EFI_ERROR (Status)) {
        Error (NULL, 0, 3000, "Invalid", "Could not rebase %s.", FvInfo->FvFiles[Index]);
        //
        // Fix: release the file buffer on this error path; it was previously leaked.
        //
        free (FileBuffer);
        return Status;
      }
      //
      // copy VTF File
      //
      memcpy (*VtfFileImage, FileBuffer, FileSize);
      PrintGuidToBuffer ((EFI_GUID *) FileBuffer, FileGuidString, sizeof (FileGuidString), TRUE);
      fprintf (FvReportFile, "0x%08X %s\n", (unsigned)(UINTN) (((UINT8 *)*VtfFileImage) - (UINTN)FvImage->FileImage), FileGuidString);
      free (FileBuffer);
      DebugMsg (NULL, 0, 9, "Add VTF FFS file in FV image", NULL);
      return EFI_SUCCESS;
    } else {
      //
      // Already found a VTF file.
      //
      Error (NULL, 0, 3000, "Invalid", "multiple VTF files are not permitted within a single FV.");
      free (FileBuffer);
      return EFI_ABORTED;
    }
  }
  //
  // Add pad file if necessary
  //
  if (!AdjustInternalFfsPadding ((EFI_FFS_FILE_HEADER *) FileBuffer, FvImage,
         1 << CurrentFileAlignment, &FileSize)) {
    Status = AddPadFile (FvImage, 1 << CurrentFileAlignment, *VtfFileImage, NULL, FileSize);
    if (EFI_ERROR (Status)) {
      Error (NULL, 0, 4002, "Resource", "FV space is full, could not add pad file for data alignment property.");
      free (FileBuffer);
      return EFI_ABORTED;
    }
  }
  //
  // Add file
  //
  if ((UINTN) (FvImage->CurrentFilePointer + FileSize) <= (UINTN) (*VtfFileImage)) {
    //
    // Rebase the PE or TE image in FileBuffer of FFS file for XIP.
    // Rebase Bs and Rt drivers for the debug genfvmap tool.
    //
    Status = FfsRebase (FvInfo, FvInfo->FvFiles[Index], (EFI_FFS_FILE_HEADER *) FileBuffer, (UINTN) FvImage->CurrentFilePointer - (UINTN) FvImage->FileImage, FvMapFile);
    if (EFI_ERROR (Status)) {
      Error (NULL, 0, 3000, "Invalid", "Could not rebase %s.", FvInfo->FvFiles[Index]);
      //
      // Fix: release the file buffer on this error path; it was previously leaked.
      //
      free (FileBuffer);
      return Status;
    }
    //
    // Copy the file
    //
    memcpy (FvImage->CurrentFilePointer, FileBuffer, FileSize);
    PrintGuidToBuffer ((EFI_GUID *) FileBuffer, FileGuidString, sizeof (FileGuidString), TRUE);
    fprintf (FvReportFile, "0x%08X %s\n", (unsigned) (FvImage->CurrentFilePointer - FvImage->FileImage), FileGuidString);
    FvImage->CurrentFilePointer += FileSize;
  } else {
    Error (NULL, 0, 4002, "Resource", "FV space is full, cannot add file %s.", FvInfo->FvFiles[Index]);
    free (FileBuffer);
    return EFI_ABORTED;
  }
  //
  // Make next file start at QWord Boundary
  //
  while (((UINTN) FvImage->CurrentFilePointer & (EFI_FFS_FILE_HEADER_ALIGNMENT - 1)) != 0) {
    FvImage->CurrentFilePointer++;
  }
Done:
  //
  // Free allocated memory.
  //
  free (FileBuffer);
  return EFI_SUCCESS;
}
EFI_STATUS
PadFvImage (
  IN MEMORY_FILE          *FvImage,
  IN EFI_FFS_FILE_HEADER  *VtfFileImage
  )
/*++
Routine Description:
  This function places a pad file between the last file in the FV and the VTF
  file if the VTF file exists.
Arguments:
  FvImage       Memory file for the FV memory image
  VtfFileImage  The address of the VTF file. If this is the end of the FV
                image, no VTF exists and no pad file is needed.
Returns:
  EFI_SUCCESS             Completed successfully.
  EFI_INVALID_PARAMETER   One of the input parameters was NULL.
--*/
{
  EFI_FFS_FILE_HEADER *PadFile;
  UINTN               FileSize;
  UINT32              FfsHeaderSize;
  //
  // If there is no VTF or the VTF naturally follows the previous file without a
  // pad file, then there's nothing to do
  //
  if ((UINTN) VtfFileImage == (UINTN) FvImage->Eof || \
      ((UINTN) VtfFileImage == (UINTN) FvImage->CurrentFilePointer)) {
    return EFI_SUCCESS;
  }
  //
  // A VTF that starts before the current write position would mean files have
  // already overwritten it; treat that as a caller error.
  //
  if ((UINTN) VtfFileImage < (UINTN) FvImage->CurrentFilePointer) {
    return EFI_INVALID_PARAMETER;
  }
  //
  // Pad file starts at beginning of free space
  //
  PadFile = (EFI_FFS_FILE_HEADER *) FvImage->CurrentFilePointer;
  //
  // write PadFile FFS header with PadType, don't need to set PAD file guid in its header.
  //
  PadFile->Type = EFI_FV_FILETYPE_FFS_PAD;
  PadFile->Attributes = 0;
  //
  // FileSize includes the EFI_FFS_FILE_HEADER
  //
  FileSize = (UINTN) VtfFileImage - (UINTN) FvImage->CurrentFilePointer;
  if (FileSize >= MAX_FFS_SIZE) {
    //
    // Size too big for the 3-byte Size field: use the large-file (FFS2+)
    // header with a 64-bit ExtendedSize, and zero the legacy Size bytes.
    //
    PadFile->Attributes |= FFS_ATTRIB_LARGE_FILE;
    memset(PadFile->Size, 0, sizeof(UINT8) * 3);
    ((EFI_FFS_FILE_HEADER2 *)PadFile)->ExtendedSize = FileSize;
    FfsHeaderSize = sizeof(EFI_FFS_FILE_HEADER2);
    mIsLargeFfs = TRUE;
  } else {
    //
    // Standard FFS header: total file size stored as a 24-bit little-endian value.
    //
    PadFile->Size[0] = (UINT8) (FileSize & 0x000000FF);
    PadFile->Size[1] = (UINT8) ((FileSize & 0x0000FF00) >> 8);
    PadFile->Size[2] = (UINT8) ((FileSize & 0x00FF0000) >> 16);
    FfsHeaderSize = sizeof(EFI_FFS_FILE_HEADER);
  }
  //
  // Fill in checksums and state, must be zero during checksum calculation.
  // Order matters: the checksum and state bytes participate in the header
  // checksum, so they are zeroed first, then the checksum is computed, then
  // the final values are written.
  //
  PadFile->IntegrityCheck.Checksum.Header = 0;
  PadFile->IntegrityCheck.Checksum.File = 0;
  PadFile->State = 0;
  PadFile->IntegrityCheck.Checksum.Header = CalculateChecksum8 ((UINT8 *) PadFile, FfsHeaderSize);
  PadFile->IntegrityCheck.Checksum.File = FFS_FIXED_CHECKSUM;
  PadFile->State = EFI_FILE_HEADER_CONSTRUCTION | EFI_FILE_HEADER_VALID | EFI_FILE_DATA_VALID;
  //
  // Adjust the state bits for the FV's erase polarity.
  //
  UpdateFfsFileState (
    (EFI_FFS_FILE_HEADER *) PadFile,
    (EFI_FIRMWARE_VOLUME_HEADER *) FvImage->FileImage
    );
  //
  // Update the current FV pointer
  //
  FvImage->CurrentFilePointer = FvImage->Eof;
  return EFI_SUCCESS;
}
EFI_STATUS
UpdateResetVector (
  IN MEMORY_FILE            *FvImage,
  IN FV_INFO                *FvInfo,
  IN EFI_FFS_FILE_HEADER    *VtfFile
  )
/*++
Routine Description:
  This parses the FV looking for the PEI core and then plugs the address into
  the SALE_ENTRY point of the BSF/VTF for IPF and does BUGBUG TBD action to
  complete an IA32 Bootstrap FV.
Arguments:
  FvImage   Memory file for the FV memory image
  FvInfo    Information read from INF file.
  VtfFile   Pointer to the VTF file in the FV image.
Returns:
  EFI_SUCCESS             Function Completed successfully.
  EFI_ABORTED             Error encountered.
  EFI_INVALID_PARAMETER   A required parameter was NULL.
  EFI_NOT_FOUND           PEI Core file not found.
--*/
{
  EFI_FFS_FILE_HEADER       *PeiCoreFile;
  EFI_FFS_FILE_HEADER       *SecCoreFile;
  EFI_STATUS                Status;
  EFI_FILE_SECTION_POINTER  Pe32Section;
  UINT32                    EntryPoint;
  UINT32                    BaseOfCode;
  UINT16                    MachineType;
  EFI_PHYSICAL_ADDRESS      PeiCorePhysicalAddress;
  EFI_PHYSICAL_ADDRESS      SecCorePhysicalAddress;
  INT32                     Ia32SecEntryOffset;
  UINT32                    *Ia32ResetAddressPtr;
  UINT8                     *BytePointer;
  UINT8                     *BytePointer2;
  UINT16                    *WordPointer;
  UINT16                    CheckSum;
  UINT32                    IpiVector;
  UINTN                     Index;
  EFI_FFS_FILE_STATE        SavedState;
  BOOLEAN                   Vtf0Detected;
  UINT32                    FfsHeaderSize;
  UINT32                    SecHeaderSize;
  //
  // Verify input parameters
  //
  if (FvImage == NULL || FvInfo == NULL || VtfFile == NULL) {
    return EFI_INVALID_PARAMETER;
  }
  //
  // Initialize FV library
  //
  InitializeFvLib (FvImage->FileImage, FvInfo->Size);
  //
  // Verify VTF file
  //
  Status = VerifyFfsFile (VtfFile);
  if (EFI_ERROR (Status)) {
    return EFI_INVALID_PARAMETER;
  }
  //
  // Detect an IA32/X64 VTF-0 image by its signature near the end of the FV.
  // A VTF-0 is fully self-describing and needs no patching from this tool.
  //
  if (
      (((UINTN)FvImage->Eof - (UINTN)FvImage->FileImage) >=
        IA32_X64_VTF_SIGNATURE_OFFSET) &&
      (*(UINT32 *)(VOID*)((UINTN) FvImage->Eof -
                                  IA32_X64_VTF_SIGNATURE_OFFSET) ==
        IA32_X64_VTF0_SIGNATURE)
     ) {
    Vtf0Detected = TRUE;
  } else {
    Vtf0Detected = FALSE;
  }
  //
  // Find the Sec Core
  //
  Status = GetFileByType (EFI_FV_FILETYPE_SECURITY_CORE, 1, &SecCoreFile);
  if (EFI_ERROR (Status) || SecCoreFile == NULL) {
    if (Vtf0Detected) {
      //
      // If the SEC core file is not found, but the VTF-0 signature
      // is found, we'll treat it as a VTF-0 'Volume Top File'.
      // This means no modifications are required to the VTF.
      //
      return EFI_SUCCESS;
    }
    Error (NULL, 0, 3000, "Invalid", "could not find the SEC core file in the FV.");
    return EFI_ABORTED;
  }
  //
  // Sec Core found, now find PE32 section
  //
  Status = GetSectionByType (SecCoreFile, EFI_SECTION_PE32, 1, &Pe32Section);
  if (Status == EFI_NOT_FOUND) {
    Status = GetSectionByType (SecCoreFile, EFI_SECTION_TE, 1, &Pe32Section);
  }
  if (EFI_ERROR (Status)) {
    Error (NULL, 0, 3000, "Invalid", "could not find a PE32 section in the SEC core file.");
    return EFI_ABORTED;
  }
  SecHeaderSize = GetSectionHeaderLength(Pe32Section.CommonHeader);
  Status = GetPe32Info (
            (VOID *) ((UINTN) Pe32Section.Pe32Section + SecHeaderSize),
            &EntryPoint,
            &BaseOfCode,
            &MachineType
            );
  if (EFI_ERROR (Status)) {
    Error (NULL, 0, 3000, "Invalid", "could not get the PE32 entry point for the SEC core.");
    return EFI_ABORTED;
  }
  if (
       Vtf0Detected &&
       (MachineType == EFI_IMAGE_MACHINE_IA32 ||
        MachineType == EFI_IMAGE_MACHINE_X64)
     ) {
    //
    // If the SEC core code is IA32 or X64 and the VTF-0 signature
    // is found, we'll treat it as a VTF-0 'Volume Top File'.
    // This means no modifications are required to the VTF.
    //
    return EFI_SUCCESS;
  }
  //
  // Physical address is FV base + offset of PE32 + offset of the entry point
  //
  SecCorePhysicalAddress = FvInfo->BaseAddress;
  SecCorePhysicalAddress += (UINTN) Pe32Section.Pe32Section + SecHeaderSize - (UINTN) FvImage->FileImage;
  SecCorePhysicalAddress += EntryPoint;
  DebugMsg (NULL, 0, 9, "SecCore physical entry point address", "Address = 0x%llX", (unsigned long long) SecCorePhysicalAddress);
  //
  // Find the PEI Core
  //
  PeiCorePhysicalAddress = 0;
  Status = GetFileByType (EFI_FV_FILETYPE_PEI_CORE, 1, &PeiCoreFile);
  if (!EFI_ERROR (Status) && (PeiCoreFile != NULL)) {
    //
    // PEI Core found, now find PE32 or TE section
    //
    Status = GetSectionByType (PeiCoreFile, EFI_SECTION_PE32, 1, &Pe32Section);
    if (Status == EFI_NOT_FOUND) {
      Status = GetSectionByType (PeiCoreFile, EFI_SECTION_TE, 1, &Pe32Section);
    }
    if (EFI_ERROR (Status)) {
      Error (NULL, 0, 3000, "Invalid", "could not find either a PE32 or a TE section in PEI core file.");
      return EFI_ABORTED;
    }
    SecHeaderSize = GetSectionHeaderLength(Pe32Section.CommonHeader);
    Status = GetPe32Info (
              (VOID *) ((UINTN) Pe32Section.Pe32Section + SecHeaderSize),
              &EntryPoint,
              &BaseOfCode,
              &MachineType
              );
    if (EFI_ERROR (Status)) {
      Error (NULL, 0, 3000, "Invalid", "could not get the PE32 entry point for the PEI core.");
      return EFI_ABORTED;
    }
    //
    // Physical address is FV base + offset of PE32 + offset of the entry point
    //
    PeiCorePhysicalAddress = FvInfo->BaseAddress;
    PeiCorePhysicalAddress += (UINTN) Pe32Section.Pe32Section + SecHeaderSize - (UINTN) FvImage->FileImage;
    PeiCorePhysicalAddress += EntryPoint;
    DebugMsg (NULL, 0, 9, "PeiCore physical entry point address", "Address = 0x%llX", (unsigned long long) PeiCorePhysicalAddress);
  }
  if (MachineType == EFI_IMAGE_MACHINE_IA32 || MachineType == EFI_IMAGE_MACHINE_X64) {
    if (PeiCorePhysicalAddress != 0) {
      //
      // Get the location to update
      //
      Ia32ResetAddressPtr  = (UINT32 *) ((UINTN) FvImage->Eof - IA32_PEI_CORE_ENTRY_OFFSET);
      //
      // Write lower 32 bits of physical address for Pei Core entry
      //
      *Ia32ResetAddressPtr = (UINT32) PeiCorePhysicalAddress;
    }
    //
    // Write SecCore Entry point relative address into the jmp instruction in reset vector.
    //
    Ia32ResetAddressPtr  = (UINT32 *) ((UINTN) FvImage->Eof - IA32_SEC_CORE_ENTRY_OFFSET);
    Ia32SecEntryOffset   = (INT32) (SecCorePhysicalAddress - (FV_IMAGES_TOP_ADDRESS - IA32_SEC_CORE_ENTRY_OFFSET + 2));
    if (Ia32SecEntryOffset <= -65536) {
      Error (NULL, 0, 3000, "Invalid", "The SEC EXE file size is too large, it must be less than 64K.");
      //
      // NOTE(review): this returns STATUS_ERROR while every other failure in
      // this EFI_STATUS-returning function returns an EFI_* code — confirm
      // callers treat STATUS_ERROR as a failure before changing it.
      //
      return STATUS_ERROR;
    }
    //
    // Only the low 16 bits (the rel16 of the short jmp) are patched here.
    //
    *(UINT16 *) Ia32ResetAddressPtr = (UINT16) Ia32SecEntryOffset;
    //
    // Update the BFV base address
    //
    Ia32ResetAddressPtr = (UINT32 *) ((UINTN) FvImage->Eof - 4);
    *Ia32ResetAddressPtr = (UINT32) (FvInfo->BaseAddress);
    DebugMsg (NULL, 0, 9, "update BFV base address in the top FV image", "BFV base address = 0x%llX.", (unsigned long long) FvInfo->BaseAddress);
    //
    // Update the Startup AP in the FVH header block ZeroVector region.
    //
    BytePointer   = (UINT8 *) ((UINTN) FvImage->FileImage);
    if (FvInfo->Size <= 0x10000) {
      BytePointer2 = m64kRecoveryStartupApDataArray;
    } else if (FvInfo->Size <= 0x20000) {
      BytePointer2 = m128kRecoveryStartupApDataArray;
    } else {
      BytePointer2 = m128kRecoveryStartupApDataArray;
      //
      // Find the position to place Ap reset vector, the offset
      // between the position and the end of Fvrecovery.fv file
      // should not exceed 128kB to prevent Ap reset vector from
      // outside legacy E and F segment
      //
      Status = FindApResetVectorPosition (FvImage, &BytePointer);
      if (EFI_ERROR (Status)) {
        Error (NULL, 0, 3000, "Invalid", "FV image does not have enough space to place AP reset vector. The FV image needs to reserve at least 4KB of unused space.");
        return EFI_ABORTED;
      }
    }
    //
    // Copy the canned startup-AP data into the chosen location.
    //
    for (Index = 0; Index < SIZEOF_STARTUP_DATA_ARRAY; Index++) {
      BytePointer[Index] = BytePointer2[Index];
    }
    //
    // Calculate the checksum
    //
    CheckSum              = 0x0000;
    WordPointer = (UINT16 *) (BytePointer);
    for (Index = 0; Index < SIZEOF_STARTUP_DATA_ARRAY / 2; Index++) {
      CheckSum = (UINT16) (CheckSum + ((UINT16) *WordPointer));
      WordPointer++;
    }
    //
    // Update the checksum field (two's-complement so the words sum to zero).
    //
    WordPointer   = (UINT16 *) (BytePointer + SIZEOF_STARTUP_DATA_ARRAY - 2);
    *WordPointer  = (UINT16) (0x10000 - (UINT32) CheckSum);
    //
    // IpiVector at the 4k aligned address in the top 2 blocks in the PEI FV.
    //
    IpiVector  = (UINT32) (FV_IMAGES_TOP_ADDRESS - ((UINTN) FvImage->Eof - (UINTN) BytePointer));
    DebugMsg (NULL, 0, 9, "Startup AP Vector address", "IpiVector at 0x%X", (unsigned) IpiVector);
    if ((IpiVector & 0xFFF) != 0) {
      Error (NULL, 0, 3000, "Invalid", "Startup AP Vector address are not 4K aligned, because the FV size is not 4K aligned");
      return EFI_ABORTED;
    }
    //
    // Reduce the vector to the 8-bit SIPI vector (page number within 1MB).
    //
    IpiVector  = IpiVector >> 12;
    IpiVector  = IpiVector & 0xFF;
    //
    // Write IPI Vector at Offset FvrecoveryFileSize - 8
    //
    Ia32ResetAddressPtr   = (UINT32 *) ((UINTN) FvImage->Eof - 8);
    *Ia32ResetAddressPtr  = IpiVector;
  } else if (MachineType == EFI_IMAGE_MACHINE_ARMT) {
    //
    // Since the ARM reset vector is in the FV Header you really don't need a
    // Volume Top File, but if you have one for some reason don't crash...
    //
  } else if (MachineType == EFI_IMAGE_MACHINE_AARCH64) {
    //
    // Since the AArch64 reset vector is in the FV Header you really don't need a
    // Volume Top File, but if you have one for some reason don't crash...
    //
  } else {
    Error (NULL, 0, 3000, "Invalid", "machine type=0x%X in PEI core.", MachineType);
    return EFI_ABORTED;
  }
  //
  // Now update file checksum.  State and file checksum must be zero while the
  // checksum is recomputed, then the saved state is restored.
  //
  SavedState  = VtfFile->State;
  VtfFile->IntegrityCheck.Checksum.File = 0;
  VtfFile->State                        = 0;
  if (VtfFile->Attributes & FFS_ATTRIB_CHECKSUM) {
    FfsHeaderSize = GetFfsHeaderLength(VtfFile);
    VtfFile->IntegrityCheck.Checksum.File = CalculateChecksum8 (
                                              (UINT8 *) ((UINT8 *)VtfFile + FfsHeaderSize),
                                              GetFfsFileLength (VtfFile) - FfsHeaderSize
                                              );
  } else {
    VtfFile->IntegrityCheck.Checksum.File = FFS_FIXED_CHECKSUM;
  }
  VtfFile->State = SavedState;
  return EFI_SUCCESS;
}
EFI_STATUS
FindCorePeSection(
  IN VOID                       *FvImageBuffer,
  IN UINT64                     FvSize,
  IN EFI_FV_FILETYPE            FileType,
  OUT EFI_FILE_SECTION_POINTER  *Pe32Section
  )
/*++
Routine Description:
  Recursively searches the FV for the FFS file of specified type (typically
  SEC or PEI core) and extracts the PE32 section for further processing.
Arguments:
  FvImageBuffer Buffer containing FV data
  FvSize        Size of the FV
  FileType      Type of FFS file to search for
  Pe32Section   PE32 section pointer when FFS file is found.
Returns:
  EFI_SUCCESS             Function Completed successfully.
  EFI_ABORTED             Error encountered.
  EFI_INVALID_PARAMETER   A required parameter was NULL.
  EFI_NOT_FOUND           Core file not found.
--*/
{
  EFI_STATUS                  Status;
  EFI_FIRMWARE_VOLUME_HEADER  *OrigFvHeader;
  UINT32                      OrigFvLength;
  EFI_FFS_FILE_HEADER         *CoreFfsFile;
  UINTN                       FvImageFileCount;
  EFI_FFS_FILE_HEADER         *FvImageFile;
  UINTN                       EncapFvSectionCount;
  EFI_FILE_SECTION_POINTER    EncapFvSection;
  EFI_FIRMWARE_VOLUME_HEADER  *EncapsulatedFvHeader;
  if (Pe32Section == NULL) {
    return EFI_INVALID_PARAMETER;
  }
  //
  // Initialize FV library, saving previous values.  The FV library keeps
  // global state, so the original header/length must be restored on exit
  // (see EarlyExit) for the caller's searches to keep working.
  //
  OrigFvHeader = (EFI_FIRMWARE_VOLUME_HEADER *)NULL;
  GetFvHeader (&OrigFvHeader, &OrigFvLength);
  InitializeFvLib(FvImageBuffer, (UINT32)FvSize);
  //
  // First see if we can obtain the file directly in outer FV
  //
  Status = GetFileByType(FileType, 1, &CoreFfsFile);
  if (!EFI_ERROR(Status) && (CoreFfsFile != NULL) ) {
    //
    // Core found, now find PE32 or TE section
    //
    Status = GetSectionByType(CoreFfsFile, EFI_SECTION_PE32, 1, Pe32Section);
    if (EFI_ERROR(Status)) {
      Status = GetSectionByType(CoreFfsFile, EFI_SECTION_TE, 1, Pe32Section);
    }
    if (EFI_ERROR(Status)) {
      Error(NULL, 0, 3000, "Invalid", "could not find a PE32 section in the core file.");
      return EFI_ABORTED;
    }
    //
    // Core PE/TE section, found, return
    //
    Status = EFI_SUCCESS;
    goto EarlyExit;
  }
  //
  // File was not found, look for FV Image file
  //
  // iterate through all FV image files in outer FV
  for (FvImageFileCount = 1;; FvImageFileCount++) {
    Status = GetFileByType(EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE, FvImageFileCount, &FvImageFile);
    if (EFI_ERROR(Status) || (FvImageFile == NULL) ) {
      // exit FV image file loop, no more found
      break;
    }
    // Found an fv image file, look for an FV image section.  The PI spec does not
    // preclude multiple FV image sections so we loop accordingly.
    for (EncapFvSectionCount = 1;; EncapFvSectionCount++) {
      // Look for the next FV image section.  The section search code will
      // iterate into encapsulation sections.  For example, it will iterate
      // into an EFI_SECTION_GUID_DEFINED encapsulation section to find the
      // EFI_SECTION_FIRMWARE_VOLUME_IMAGE sections contained therein.
      Status = GetSectionByType(FvImageFile, EFI_SECTION_FIRMWARE_VOLUME_IMAGE, EncapFvSectionCount, &EncapFvSection);
      if (EFI_ERROR(Status)) {
        // exit section inner loop, no more found
        break;
      }
      // The encapsulated FV starts right after the section header.
      EncapsulatedFvHeader = (EFI_FIRMWARE_VOLUME_HEADER *)((UINT8 *)EncapFvSection.FVImageSection + GetSectionHeaderLength(EncapFvSection.FVImageSection));
      // recurse to search the encapsulated FV for this core file type
      Status = FindCorePeSection(EncapsulatedFvHeader, EncapsulatedFvHeader->FvLength, FileType, Pe32Section);
      if (!EFI_ERROR(Status)) {
        // we found the core in the capsulated image, success
        goto EarlyExit;
      }
    } // end encapsulated fv image section loop
  } // end fv image file loop
  // core was not found
  Status = EFI_NOT_FOUND;
EarlyExit:
  // restore FV lib values (only if a previous FV had been initialized)
  if(OrigFvHeader != NULL) {
    InitializeFvLib(OrigFvHeader, OrigFvLength);
  }
  return Status;
}
EFI_STATUS
GetCoreMachineType(
  IN  EFI_FILE_SECTION_POINTER     Pe32Section,
  OUT UINT16                      *CoreMachineType
  )
/*++
Routine Description:
  Returns the machine type of a P32 image, typically SEC or PEI core.
Arguments:
  Pe32Section      PE32 section data
  CoreMachineType  The extracted machine type
Returns:
  EFI_SUCCESS             Function Completed successfully.
  EFI_ABORTED             Error encountered.
  EFI_INVALID_PARAMETER   A required parameter was NULL.
--*/
{
  EFI_STATUS  Status;
  VOID        *ImageStart;
  UINT32      UnusedEntryPoint;
  UINT32      UnusedBaseOfCode;

  if (CoreMachineType == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // The PE32/TE image begins immediately after the section header.
  //
  ImageStart = (VOID *)((UINTN)Pe32Section.Pe32Section + GetSectionHeaderLength(Pe32Section.CommonHeader));

  //
  // Only the machine type is of interest here; entry point and base of code
  // are extracted into throwaway locals.
  //
  Status = GetPe32Info(ImageStart, &UnusedEntryPoint, &UnusedBaseOfCode, CoreMachineType);
  if (EFI_ERROR(Status)) {
    Error(NULL, 0, 3000, "Invalid", "could not get the PE32 machine type for the core.");
    return EFI_ABORTED;
  }

  return EFI_SUCCESS;
}
EFI_STATUS
GetCoreEntryPointAddress(
  IN VOID                         *FvImageBuffer,
  IN FV_INFO                      *FvInfo,
  IN EFI_FILE_SECTION_POINTER     Pe32Section,
  OUT EFI_PHYSICAL_ADDRESS        *CoreEntryAddress
  )
/*++
Routine Description:
  Returns the physical address of the core (SEC or PEI) entry point.
Arguments:
  FvImageBuffer    Pointer to buffer containing FV data
  FvInfo           Info for the parent FV
  Pe32Section      PE32 section data
  CoreEntryAddress The extracted core entry physical address
Returns:
  EFI_SUCCESS             Function Completed successfully.
  EFI_ABORTED             Error encountered.
  EFI_INVALID_PARAMETER   A required parameter was NULL.
--*/
{
  EFI_STATUS  Status;
  UINTN       ImageStart;
  UINT32      EntryPointOffset;
  UINT32      UnusedBaseOfCode;
  UINT16      UnusedMachineType;

  if (CoreEntryAddress == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Locate the start of the PE32/TE image (just past the section header) and
  // pull its entry-point offset; base-of-code and machine type are discarded.
  //
  ImageStart = (UINTN)Pe32Section.Pe32Section + GetSectionHeaderLength(Pe32Section.CommonHeader);
  Status = GetPe32Info((VOID *)ImageStart, &EntryPointOffset, &UnusedBaseOfCode, &UnusedMachineType);
  if (EFI_ERROR(Status)) {
    Error(NULL, 0, 3000, "Invalid", "could not get the PE32 entry point for the core.");
    return EFI_ABORTED;
  }

  //
  // Physical address = FV base + offset of the PE32 within the FV buffer
  //                  + offset of the entry point within the PE32.
  //
  *CoreEntryAddress = FvInfo->BaseAddress
                      + (ImageStart - (UINTN)FvImageBuffer)
                      + EntryPointOffset;

  return EFI_SUCCESS;
}
EFI_STATUS
UpdateArmResetVectorIfNeeded (
  IN MEMORY_FILE  *FvImage,
  IN FV_INFO      *FvInfo
  )
/*++
Routine Description:
  This parses the FV looking for SEC and patches that address into the
  beginning of the FV header.
  For ARM32 the reset vector is at 0x00000000 or 0xFFFF0000.
  For AArch64 the reset vector is at 0x00000000.
  This would commonly map to the first entry in the ROM.
  ARM32 Exceptions:
  Reset            +0
  Undefined        +4
  SWI              +8
  Prefetch Abort   +12
  Data Abort       +16
  IRQ              +20
  FIQ              +24
  We support two schemes on ARM.
  1) Beginning of the FV is the reset vector
  2) Reset vector is data bytes FDF file and that code branches to reset vector
    in the beginning of the FV (fixed size offset).
  Need to have the jump for the reset vector at location zero.
  We also need to store the address or PEI (if it exists).
  We stub out a return from interrupt in case the debugger
  is using SWI (not done for AArch64, not enough space in struct).
  The optional entry to the common exception handler is
  to support full featured exception handling from ROM and is currently
  not support by this tool.
Arguments:
  FvImage     Memory file for the FV memory image
  FvInfo      Information read from INF file.
Returns:
  EFI_SUCCESS             Function Completed successfully.
  EFI_ABORTED             Error encountered.
  EFI_INVALID_PARAMETER   A required parameter was NULL.
  EFI_NOT_FOUND           PEI Core file not found.
--*/
{
  EFI_STATUS                  Status;
  EFI_FILE_SECTION_POINTER    SecPe32;
  EFI_FILE_SECTION_POINTER    PeiPe32;
  BOOLEAN                     UpdateVectorSec = FALSE;
  BOOLEAN                     UpdateVectorPei = FALSE;
  UINT16                      MachineType = 0;
  EFI_PHYSICAL_ADDRESS        SecCoreEntryAddress = 0;
  UINT16                      PeiMachineType = 0;
  EFI_PHYSICAL_ADDRESS        PeiCoreEntryAddress = 0;
  //
  // Verify input parameters
  //
  if (FvImage == NULL || FvInfo == NULL) {
    return EFI_INVALID_PARAMETER;
  }
  //
  // Locate an SEC Core instance and if found extract the machine type and entry point address
  //
  Status = FindCorePeSection(FvImage->FileImage, FvInfo->Size, EFI_FV_FILETYPE_SECURITY_CORE, &SecPe32);
  if (!EFI_ERROR(Status)) {
    Status = GetCoreMachineType(SecPe32, &MachineType);
    if (EFI_ERROR(Status)) {
      Error(NULL, 0, 3000, "Invalid", "Could not get the PE32 machine type for SEC Core.");
      return EFI_ABORTED;
    }
    Status = GetCoreEntryPointAddress(FvImage->FileImage, FvInfo, SecPe32, &SecCoreEntryAddress);
    if (EFI_ERROR(Status)) {
      Error(NULL, 0, 3000, "Invalid", "Could not get the PE32 entry point address for SEC Core.");
      return EFI_ABORTED;
    }
    VerboseMsg("UpdateArmResetVectorIfNeeded found SEC core entry at 0x%llx", (unsigned long long)SecCoreEntryAddress);
    UpdateVectorSec = TRUE;
  }
  //
  // Locate a PEI Core instance and if found extract the machine type and entry point address
  //
  Status = FindCorePeSection(FvImage->FileImage, FvInfo->Size, EFI_FV_FILETYPE_PEI_CORE, &PeiPe32);
  if (!EFI_ERROR(Status)) {
    Status = GetCoreMachineType(PeiPe32, &PeiMachineType);
    if (EFI_ERROR(Status)) {
      Error(NULL, 0, 3000, "Invalid", "Could not get the PE32 machine type for PEI Core.");
      return EFI_ABORTED;
    }
    Status = GetCoreEntryPointAddress(FvImage->FileImage, FvInfo, PeiPe32, &PeiCoreEntryAddress);
    if (EFI_ERROR(Status)) {
      Error(NULL, 0, 3000, "Invalid", "Could not get the PE32 entry point address for PEI Core.");
      return EFI_ABORTED;
    }
    VerboseMsg("UpdateArmResetVectorIfNeeded found PEI core entry at 0x%llx", (unsigned long long)PeiCoreEntryAddress);
    // if we previously found an SEC Core make sure machine types match
    if (UpdateVectorSec && (MachineType != PeiMachineType)) {
      Error(NULL, 0, 3000, "Invalid", "SEC and PEI machine types do not match, can't update reset vector");
      return EFI_ABORTED;
    }
    else {
      MachineType = PeiMachineType;
    }
    UpdateVectorPei = TRUE;
  }
  //
  // Nothing to patch if neither core was found.
  //
  if (!UpdateVectorSec && !UpdateVectorPei) {
    return EFI_SUCCESS;
  }
  if (MachineType == EFI_IMAGE_MACHINE_ARMT) {
    // ARM: Array of 4 UINT32s:
    // 0 - is branch relative to SEC entry point
    // 1 - PEI Entry Point
    // 2 - movs pc,lr for a SWI handler
    // 3 - Place holder for Common Exception Handler
    UINT32                      ResetVector[4];
    memset(ResetVector, 0, sizeof (ResetVector));
    // if we found an SEC core entry point then generate a branch instruction
    // to it and populate a debugger SWI entry as well
    if (UpdateVectorSec) {
      VerboseMsg("UpdateArmResetVectorIfNeeded updating ARM SEC vector");
      // B SecEntryPoint - signed_immed_24 part +/-32MB offset
      // on ARM, the PC is always 8 ahead, so we're not really jumping from the base address, but from base address + 8
      // NOTE(review): the INT32 result is stored into a UINT32 before the
      // range check, so a negative (backward) offset will fail the check
      // below — presumably SEC is always above the FV base; confirm.
      ResetVector[0] = (INT32)(SecCoreEntryAddress - FvInfo->BaseAddress - 8) >> 2;
      if (ResetVector[0] > 0x00FFFFFF) {
        Error(NULL, 0, 3000, "Invalid", "SEC Entry point must be within 32MB of the start of the FV");
        return EFI_ABORTED;
      }
      // Add opcode for an unconditional branch with no link. i.e.: " B SecEntryPoint"
      ResetVector[0] |= ARMT_UNCONDITIONAL_JUMP_INSTRUCTION;
      // SWI handler movs pc,lr. Just in case a debugger uses SWI
      ResetVector[2] = 0xE1B0F07E;
      // Place holder to support a common interrupt handler from ROM.
      // Currently not supported. For this to be used the reset vector would not be in this FV
      // and the exception vectors would be hard coded in the ROM and just through this address
      // to find a common handler in the a module in the FV.
      ResetVector[3] = 0;
    }
    // if a PEI core entry was found place its address in the vector area
    if (UpdateVectorPei) {
      VerboseMsg("UpdateArmResetVectorIfNeeded updating ARM PEI address");
      // Address of PEI Core, if we have one
      ResetVector[1] = (UINT32)PeiCoreEntryAddress;
    }
    //
    // Copy to the beginning of the FV
    //
    memcpy(FvImage->FileImage, ResetVector, sizeof (ResetVector));
  } else if (MachineType == EFI_IMAGE_MACHINE_AARCH64) {
    // AArch64: Used as UINT64 ResetVector[2]
    // 0 - is branch relative to SEC entry point
    // 1 - PEI Entry Point
    UINT64                      ResetVector[2];
    memset(ResetVector, 0, sizeof (ResetVector));
    /* NOTE:
      ARMT above has an entry in ResetVector[2] for SWI. The way we are using the ResetVector
      array at the moment, for AArch64, does not allow us space for this as the header only
      allows for a fixed amount of bytes at the start. If we are sure that UEFI will live
      within the first 4GB of addressable RAM we could potentially adopt the same ResetVector
      layout as above. But for the moment we replace the four 32bit vectors with two 64bit
      vectors in the same area of the Image heasder. This allows UEFI to start from a 64bit
      base.
    */
    // if we found an SEC core entry point then generate a branch instruction to it
    if (UpdateVectorSec) {
      VerboseMsg("UpdateArmResetVectorIfNeeded updating AArch64 SEC vector");
      ResetVector[0] = (UINT64)(SecCoreEntryAddress - FvInfo->BaseAddress) >> 2;
      // B SecEntryPoint - signed_immed_26 part +/-128MB offset
      if (ResetVector[0] > 0x03FFFFFF) {
        Error(NULL, 0, 3000, "Invalid", "SEC Entry point must be within 128MB of the start of the FV");
        return EFI_ABORTED;
      }
      // Add opcode for an unconditional branch with no link. i.e.: " B SecEntryPoint"
      ResetVector[0] |= ARM64_UNCONDITIONAL_JUMP_INSTRUCTION;
    }
    // if a PEI core entry was found place its address in the vector area
    if (UpdateVectorPei) {
      VerboseMsg("UpdateArmResetVectorIfNeeded updating AArch64 PEI address");
      // Address of PEI Core, if we have one
      ResetVector[1] = (UINT64)PeiCoreEntryAddress;
    }
    //
    // Copy to the beginning of the FV
    //
    memcpy(FvImage->FileImage, ResetVector, sizeof (ResetVector));
  } else {
    Error(NULL, 0, 3000, "Invalid", "Unknown machine type");
    return EFI_ABORTED;
  }
  return EFI_SUCCESS;
}
EFI_STATUS
UpdateRiscvResetVectorIfNeeded (
  MEMORY_FILE   *FvImage,
  FV_INFO       *FvInfo
  )
/*++
Routine Description:
  This parses the FV looking for SEC and patches that address into the
  beginning of the FV header.
  For RISC-V ISA, the reset vector is at 0xfff~ff00h or 200h
Arguments:
  FvImage      Memory file for the FV memory image/
  FvInfo       Information read from INF file.
Returns:
  EFI_SUCCESS             Function Completed successfully.
  EFI_ABORTED             Error encountered.
  EFI_INVALID_PARAMETER   A required parameter was NULL.
  EFI_NOT_FOUND           PEI Core file not found.
--*/
{
  EFI_STATUS                  Status;
  UINT16                      MachineType;
  EFI_FILE_SECTION_POINTER    SecPe32;
  EFI_PHYSICAL_ADDRESS        SecCoreEntryAddress;
  UINT32                      bSecCore;
  UINT32                      tmp;

  //
  // Verify input parameters
  //
  if (FvImage == NULL || FvInfo == NULL) {
    return EFI_INVALID_PARAMETER;
  }
  //
  // Initialize FV library
  //
  InitializeFvLib (FvImage->FileImage, FvInfo->Size);
  //
  // Find the Sec Core; an FV without one needs no reset-vector patching.
  //
  Status = FindCorePeSection(FvImage->FileImage, FvInfo->Size, EFI_FV_FILETYPE_SECURITY_CORE, &SecPe32);
  if(EFI_ERROR(Status)) {
    printf("skip because Security Core not found\n");
    return EFI_SUCCESS;
  }
  DebugMsg (NULL, 0, 9, "Update SEC core in FV Header", NULL);
  Status = GetCoreMachineType(SecPe32, &MachineType);
  if(EFI_ERROR(Status)) {
    Error(NULL, 0, 3000, "Invalid", "Could not get the PE32 machine type for SEC core.");
    return EFI_ABORTED;
  }
  if (MachineType != EFI_IMAGE_MACHINE_RISCV64) {
    Error(NULL, 0, 3000, "Invalid", "Could not update SEC core because Machine type is not RiscV.");
    return EFI_ABORTED;
  }
  Status = GetCoreEntryPointAddress(FvImage->FileImage, FvInfo, SecPe32, &SecCoreEntryAddress);
  if(EFI_ERROR(Status)) {
    Error(NULL, 0, 3000, "Invalid", "Could not get the PE32 entry point address for SEC Core.");
    return EFI_ABORTED;
  }
  VerboseMsg("SecCore entry point Address = 0x%llX", (unsigned long long) SecCoreEntryAddress);
  VerboseMsg("BaseAddress = 0x%llX", (unsigned long long) FvInfo->BaseAddress);
  bSecCore = (UINT32)(SecCoreEntryAddress - FvInfo->BaseAddress);
  //
  // Fix: bSecCore is UINT32 — passing it to a %llX conversion is undefined
  // behavior in a varargs call, so widen it explicitly.
  //
  VerboseMsg("offset = 0x%llX", (unsigned long long) bSecCore);
  //
  // The JAL J-type immediate is a 21-bit signed offset; forward branches are
  // limited to 1MB - 1.
  //
  if(bSecCore > 0x0fffff) {
    Error(NULL, 0, 3000, "Invalid", "SEC Entry point must be within 1MB of start of the FV");
    return EFI_ABORTED;
  }
  //
  // Encode "JAL x0, offset" (J-type): scatter the offset bits into the
  // instruction's immediate fields and OR in the JAL opcode.
  //
  tmp = bSecCore;
  bSecCore  = (tmp & 0x100000) << 11;   // imm[20]    -> bit[31]
  bSecCore |= (tmp & 0x0007FE) << 20;   // imm[10:1]  -> bit[30:21]
  bSecCore |= (tmp & 0x000800) << 9;    // imm[11]    -> bit[20]
  bSecCore |= (tmp & 0x0FF000);         // imm[19:12] -> bit[19:12]
  bSecCore |= 0x6F;                     // JAL opcode (rd = x0)
  //
  // Place the jump instruction at the very start of the FV (the reset vector).
  //
  memcpy(FvImage->FileImage, &bSecCore, sizeof(bSecCore));
  return EFI_SUCCESS;
}
EFI_STATUS
GetPe32Info (
  IN UINT8                  *Pe32,
  OUT UINT32                *EntryPoint,
  OUT UINT32                *BaseOfCode,
  OUT UINT16                *MachineType
  )
/*++
Routine Description:
  Extracts the entry point offset, base of code, and machine type from a
  PE32 image or a TE image.  The entry point offset returned is relative to
  the beginning of the buffer passed in.  See EfiImage.h for machine types.
Arguments:
  Pe32         Beginning of the PE32/TE image buffer.
  EntryPoint   Offset from the beginning of the buffer to the image entry point.
  BaseOfCode   Base address of code.
  MachineType  Magic number for the machine type.
Returns:
  EFI_SUCCESS            Function completed successfully.
  EFI_ABORTED            Error encountered.
  EFI_INVALID_PARAMETER  A required parameter was NULL.
  EFI_UNSUPPORTED        The operation is unsupported.
--*/
{
  EFI_TE_IMAGE_HEADER             *Te;
  EFI_IMAGE_DOS_HEADER            *Dos;
  EFI_IMAGE_OPTIONAL_HEADER_UNION *NtHdr;

  //
  // Verify input parameters
  //
  if (Pe32 == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // A TE image is recognized by its signature; its header replaces the
  // stripped portion of the original PE header, so offsets are adjusted
  // by (sizeof header - StrippedSize).
  //
  Te = (EFI_TE_IMAGE_HEADER *) Pe32;
  if (Te->Signature == EFI_TE_IMAGE_HEADER_SIGNATURE) {
    *EntryPoint  = Te->AddressOfEntryPoint + sizeof (EFI_TE_IMAGE_HEADER) - Te->StrippedSize;
    *BaseOfCode  = Te->BaseOfCode + sizeof (EFI_TE_IMAGE_HEADER) - Te->StrippedSize;
    *MachineType = Te->Machine;
  } else {
    //
    // Otherwise treat the buffer as PE32: validate the DOS header, then
    // follow e_lfanew to the NT header and validate its signature.
    //
    Dos = (EFI_IMAGE_DOS_HEADER *) Pe32;
    if (Dos->e_magic != EFI_IMAGE_DOS_SIGNATURE) {
      Error (NULL, 0, 3000, "Invalid", "Unknown magic number in the DOS header, 0x%04X.", Dos->e_magic);
      return EFI_UNSUPPORTED;
    }

    NtHdr = (EFI_IMAGE_OPTIONAL_HEADER_UNION *) ((UINTN) Pe32 + Dos->e_lfanew);
    if (NtHdr->Pe32.Signature != EFI_IMAGE_NT_SIGNATURE) {
      Error (NULL, 0, 3000, "Invalid", "Unrecognized image signature 0x%08X.", (unsigned) NtHdr->Pe32.Signature);
      return EFI_UNSUPPORTED;
    }

    *EntryPoint  = NtHdr->Pe32.OptionalHeader.AddressOfEntryPoint;
    *BaseOfCode  = NtHdr->Pe32.OptionalHeader.BaseOfCode;
    *MachineType = NtHdr->Pe32.FileHeader.Machine;
  }

  //
  // Only the machine types GenFv knows how to process are accepted.
  //
  switch (*MachineType) {
  case EFI_IMAGE_MACHINE_IA32:
  case EFI_IMAGE_MACHINE_X64:
  case EFI_IMAGE_MACHINE_EBC:
  case EFI_IMAGE_MACHINE_ARMT:
  case EFI_IMAGE_MACHINE_AARCH64:
  case EFI_IMAGE_MACHINE_RISCV64:
    break;
  default:
    Error (NULL, 0, 3000, "Invalid", "Unrecognized machine type in the PE32 file.");
    return EFI_UNSUPPORTED;
  }

  return EFI_SUCCESS;
}
EFI_STATUS
GenerateFvImage (
  IN CHAR8                *InfFileImage,
  IN UINTN                InfFileSize,
  IN CHAR8                *FvFileName,
  IN CHAR8                *MapFileName
  )
/*++
Routine Description:
  This is the main function which will be called from application.
Arguments:
  InfFileImage   Buffer containing the INF file contents.
  InfFileSize    Size of the contents of the InfFileImage buffer.
  FvFileName     Requested name for the FV file.
  MapFileName    Fv map file to log fv driver information.
Returns:
  EFI_SUCCESS             Function completed successfully.
  EFI_OUT_OF_RESOURCES    Could not allocate required resources.
  EFI_ABORTED             Error encountered.
  EFI_INVALID_PARAMETER   A required parameter was NULL.
--*/
{
  EFI_STATUS                      Status;
  MEMORY_FILE                     InfMemoryFile;
  MEMORY_FILE                     FvImageMemoryFile;
  UINTN                           Index;
  EFI_FIRMWARE_VOLUME_HEADER      *FvHeader;
  EFI_FFS_FILE_HEADER             *VtfFileImage;
  UINT8                           *FvBufferHeader; // to make sure fvimage header 8 type alignment.
  UINT8                           *FvImage;
  UINTN                           FvImageSize;
  FILE                            *FvFile;
  CHAR8                           *FvMapName;
  FILE                            *FvMapFile;
  EFI_FIRMWARE_VOLUME_EXT_HEADER  *FvExtHeader;
  FILE                            *FvExtHeaderFile;
  UINTN                           FileSize;
  CHAR8                           *FvReportName;
  FILE                            *FvReportFile;

  FvBufferHeader = NULL;
  FvFile         = NULL;
  FvMapName      = NULL;
  FvMapFile      = NULL;
  FvReportName   = NULL;
  FvReportFile   = NULL;

  if (InfFileImage != NULL) {
    //
    // Initialize file structures
    //
    InfMemoryFile.FileImage           = InfFileImage;
    InfMemoryFile.CurrentFilePointer  = InfFileImage;
    InfMemoryFile.Eof                 = InfFileImage + InfFileSize;

    //
    // Parse the FV inf file for header information
    //
    Status = ParseFvInf (&InfMemoryFile, &mFvDataInfo);
    if (EFI_ERROR (Status)) {
      Error (NULL, 0, 0003, "Error parsing file", "the input FV INF file.");
      return Status;
    }
  }

  //
  // Update the file name return values
  //
  if (FvFileName == NULL && mFvDataInfo.FvName[0] != '\0') {
    FvFileName = mFvDataInfo.FvName;
  }

  if (FvFileName == NULL) {
    Error (NULL, 0, 1001, "Missing option", "Output file name");
    return EFI_ABORTED;
  }

  if (mFvDataInfo.FvBlocks[0].Length == 0) {
    Error (NULL, 0, 1001, "Missing required argument", "Block Size");
    return EFI_ABORTED;
  }

  //
  // Debug message Fv File System Guid
  //
  if (mFvDataInfo.FvFileSystemGuidSet) {
    DebugMsg (NULL, 0, 9, "FV File System Guid", "%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X",
                  (unsigned) mFvDataInfo.FvFileSystemGuid.Data1,
                  mFvDataInfo.FvFileSystemGuid.Data2,
                  mFvDataInfo.FvFileSystemGuid.Data3,
                  mFvDataInfo.FvFileSystemGuid.Data4[0],
                  mFvDataInfo.FvFileSystemGuid.Data4[1],
                  mFvDataInfo.FvFileSystemGuid.Data4[2],
                  mFvDataInfo.FvFileSystemGuid.Data4[3],
                  mFvDataInfo.FvFileSystemGuid.Data4[4],
                  mFvDataInfo.FvFileSystemGuid.Data4[5],
                  mFvDataInfo.FvFileSystemGuid.Data4[6],
                  mFvDataInfo.FvFileSystemGuid.Data4[7]);
  }

  //
  // Add PI FV extension header
  //
  FvExtHeader = NULL;
  FvExtHeaderFile = NULL;
  if (mFvDataInfo.FvExtHeaderFile[0] != 0) {
    //
    // Open the FV Extension Header file
    //
    FvExtHeaderFile = fopen (LongFilePath (mFvDataInfo.FvExtHeaderFile), "rb");
    if (FvExtHeaderFile == NULL) {
      Error (NULL, 0, 0001, "Error opening file", mFvDataInfo.FvExtHeaderFile);
      return EFI_ABORTED;
    }

    //
    // Get the file size
    //
    FileSize = _filelength (fileno (FvExtHeaderFile));

    //
    // Allocate a buffer for the FV Extension Header
    //
    FvExtHeader = malloc(FileSize);
    if (FvExtHeader == NULL) {
      fclose (FvExtHeaderFile);
      return EFI_OUT_OF_RESOURCES;
    }

    //
    // Read the FV Extension Header
    // NOTE(review): the fread return value is not checked — a short read
    // would leave the buffer partially uninitialized; confirm the input
    // file is trusted / validated upstream.
    //
    fread (FvExtHeader, sizeof (UINT8), FileSize, FvExtHeaderFile);
    fclose (FvExtHeaderFile);

    //
    // See if there is an override for the FV Name GUID
    //
    if (mFvDataInfo.FvNameGuidSet) {
      memcpy (&FvExtHeader->FvName, &mFvDataInfo.FvNameGuid, sizeof (EFI_GUID));
    }
    memcpy (&mFvDataInfo.FvNameGuid, &FvExtHeader->FvName, sizeof (EFI_GUID));
    mFvDataInfo.FvNameGuidSet = TRUE;
  } else if (mFvDataInfo.FvNameGuidSet) {
    //
    // Allocate a buffer for the FV Extension Header
    //
    FvExtHeader = malloc(sizeof (EFI_FIRMWARE_VOLUME_EXT_HEADER));
    if (FvExtHeader == NULL) {
      return EFI_OUT_OF_RESOURCES;
    }
    memcpy (&FvExtHeader->FvName, &mFvDataInfo.FvNameGuid, sizeof (EFI_GUID));
    FvExtHeader->ExtHeaderSize = sizeof (EFI_FIRMWARE_VOLUME_EXT_HEADER);
  }

  //
  // Debug message Fv Name Guid
  //
  if (mFvDataInfo.FvNameGuidSet) {
      DebugMsg (NULL, 0, 9, "FV Name Guid", "%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X",
                  (unsigned) mFvDataInfo.FvNameGuid.Data1,
                  mFvDataInfo.FvNameGuid.Data2,
                  mFvDataInfo.FvNameGuid.Data3,
                  mFvDataInfo.FvNameGuid.Data4[0],
                  mFvDataInfo.FvNameGuid.Data4[1],
                  mFvDataInfo.FvNameGuid.Data4[2],
                  mFvDataInfo.FvNameGuid.Data4[3],
                  mFvDataInfo.FvNameGuid.Data4[4],
                  mFvDataInfo.FvNameGuid.Data4[5],
                  mFvDataInfo.FvNameGuid.Data4[6],
                  mFvDataInfo.FvNameGuid.Data4[7]);
  }

  //
  // Mark the image as a PI FV when the file system GUID is FFS2 or FFS3.
  //
  if (CompareGuid (&mFvDataInfo.FvFileSystemGuid, &mEfiFirmwareFileSystem2Guid) == 0 ||
    CompareGuid (&mFvDataInfo.FvFileSystemGuid, &mEfiFirmwareFileSystem3Guid) == 0) {
    mFvDataInfo.IsPiFvImage = TRUE;
  }

  //
  // FvMap file to log the function address of all modules in one Fvimage
  //
  if (MapFileName != NULL) {
    if (strlen (MapFileName) > MAX_LONG_FILE_PATH - 1) {
      Error (NULL, 0, 1003, "Invalid option value", "MapFileName %s is too long!", MapFileName);
      Status = EFI_ABORTED;
      goto Finish;
    }

    FvMapName = malloc (strlen (MapFileName) + 1);
    if (FvMapName == NULL) {
      Error (NULL, 0, 4001, "Resource", "memory cannot be allocated!");
      Status = EFI_OUT_OF_RESOURCES;
      goto Finish;
    }

    strcpy (FvMapName, MapFileName);
  } else {
    if (strlen (FvFileName) + strlen (".map") > MAX_LONG_FILE_PATH - 1) {
      Error (NULL, 0, 1003, "Invalid option value", "FvFileName %s is too long!", FvFileName);
      Status = EFI_ABORTED;
      goto Finish;
    }

    FvMapName = malloc (strlen (FvFileName) + strlen (".map") + 1);
    if (FvMapName == NULL) {
      Error (NULL, 0, 4001, "Resource", "memory cannot be allocated!");
      Status = EFI_OUT_OF_RESOURCES;
      goto Finish;
    }

    strcpy (FvMapName, FvFileName);
    strcat (FvMapName, ".map");
  }
  VerboseMsg ("FV Map file name is %s", FvMapName);

  //
  // FvReport file to log the FV information in one Fvimage
  //
  if (strlen (FvFileName) + strlen (".txt") > MAX_LONG_FILE_PATH - 1) {
    Error (NULL, 0, 1003, "Invalid option value", "FvFileName %s is too long!", FvFileName);
    Status = EFI_ABORTED;
    goto Finish;
  }

  FvReportName = malloc (strlen (FvFileName) + strlen (".txt") + 1);
  if (FvReportName == NULL) {
    Error (NULL, 0, 4001, "Resource", "memory cannot be allocated!");
    Status = EFI_OUT_OF_RESOURCES;
    goto Finish;
  }

  strcpy (FvReportName, FvFileName);
  strcat (FvReportName, ".txt");

  //
  // Calculate the FV size and Update Fv Size based on the actual FFS files.
  // And Update mFvDataInfo data.
  //
  Status = CalculateFvSize (&mFvDataInfo);
  if (EFI_ERROR (Status)) {
    goto Finish;
  }
  VerboseMsg ("the generated FV image size is %u bytes", (unsigned) mFvDataInfo.Size);

  //
  // support fv image and empty fv image
  //
  FvImageSize = mFvDataInfo.Size;

  //
  // Allocate the FV, assure FvImage Header 8 byte alignment
  //
  FvBufferHeader = malloc (FvImageSize + sizeof (UINT64));
  if (FvBufferHeader == NULL) {
    Status = EFI_OUT_OF_RESOURCES;
    goto Finish;
  }
  // Round the FV image start up to the next 8-byte boundary within the
  // over-allocated buffer; FvBufferHeader keeps the raw pointer for free().
  FvImage = (UINT8 *) (((UINTN) FvBufferHeader + 7) & ~7);

  //
  // Initialize the FV to the erase polarity
  //
  if (mFvDataInfo.FvAttributes == 0) {
    //
    // Set Default Fv Attribute
    //
    mFvDataInfo.FvAttributes = FV_DEFAULT_ATTRIBUTE;
  }
  // Fill with 0xFF when the erase-polarity attribute bit is set, else 0x00.
  if (mFvDataInfo.FvAttributes & EFI_FVB2_ERASE_POLARITY) {
    memset (FvImage, -1, FvImageSize);
  } else {
    memset (FvImage, 0, FvImageSize);
  }

  //
  // Initialize FV header
  //
  FvHeader = (EFI_FIRMWARE_VOLUME_HEADER *) FvImage;

  //
  // Initialize the zero vector to all zeros.
  //
  memset (FvHeader->ZeroVector, 0, 16);

  //
  // Copy the Fv file system GUID
  //
  memcpy (&FvHeader->FileSystemGuid, &mFvDataInfo.FvFileSystemGuid, sizeof (EFI_GUID));

  FvHeader->FvLength        = FvImageSize;
  FvHeader->Signature       = EFI_FVH_SIGNATURE;
  FvHeader->Attributes      = mFvDataInfo.FvAttributes;
  FvHeader->Revision        = EFI_FVH_REVISION;
  FvHeader->ExtHeaderOffset = 0;
  FvHeader->Reserved[0]     = 0;

  //
  // Copy firmware block map
  //
  for (Index = 0; mFvDataInfo.FvBlocks[Index].Length != 0; Index++) {
    FvHeader->BlockMap[Index].NumBlocks   = mFvDataInfo.FvBlocks[Index].NumBlocks;
    FvHeader->BlockMap[Index].Length      = mFvDataInfo.FvBlocks[Index].Length;
  }

  //
  // Add block map terminator
  //
  FvHeader->BlockMap[Index].NumBlocks   = 0;
  FvHeader->BlockMap[Index].Length      = 0;

  //
  // Complete the header
  //
  FvHeader->HeaderLength  = (UINT16) (((UINTN) &(FvHeader->BlockMap[Index + 1])) - (UINTN) FvImage);
  FvHeader->Checksum      = 0;
  FvHeader->Checksum      = CalculateChecksum16 ((UINT16 *) FvHeader, FvHeader->HeaderLength / sizeof (UINT16));

  //
  // If there is no FFS file, generate one empty FV
  //
  if (mFvDataInfo.FvFiles[0][0] == 0 && !mFvDataInfo.FvNameGuidSet) {
    goto WriteFile;
  }

  //
  // Initialize our "file" view of the buffer
  //
  FvImageMemoryFile.FileImage           = (CHAR8 *)FvImage;
  FvImageMemoryFile.CurrentFilePointer  = (CHAR8 *)FvImage + FvHeader->HeaderLength;
  FvImageMemoryFile.Eof                 = (CHAR8 *)FvImage + FvImageSize;

  //
  // Initialize the FV library.
  //
  InitializeFvLib (FvImageMemoryFile.FileImage, FvImageSize);

  //
  // Initialize the VTF file address.
  //
  VtfFileImage = (EFI_FFS_FILE_HEADER *) FvImageMemoryFile.Eof;

  //
  // Open FvMap file
  //
  FvMapFile = fopen (LongFilePath (FvMapName), "w");
  if (FvMapFile == NULL) {
    Error (NULL, 0, 0001, "Error opening file", FvMapName);
    Status = EFI_ABORTED;
    goto Finish;
  }

  //
  // Open FvReport file
  //
  FvReportFile = fopen (LongFilePath (FvReportName), "w");
  if (FvReportFile == NULL) {
    Error (NULL, 0, 0001, "Error opening file", FvReportName);
    Status = EFI_ABORTED;
    goto Finish;
  }

  //
  // record FV size information into FvMap file.
  //
  if (mFvTotalSize != 0) {
    fprintf (FvMapFile, EFI_FV_TOTAL_SIZE_STRING);
    fprintf (FvMapFile, " = 0x%x\n", (unsigned) mFvTotalSize);
  }
  if (mFvTakenSize != 0) {
    fprintf (FvMapFile, EFI_FV_TAKEN_SIZE_STRING);
    fprintf (FvMapFile, " = 0x%x\n", (unsigned) mFvTakenSize);
  }
  if (mFvTotalSize != 0 && mFvTakenSize != 0) {
    fprintf (FvMapFile, EFI_FV_SPACE_SIZE_STRING);
    fprintf (FvMapFile, " = 0x%x\n\n", (unsigned) (mFvTotalSize - mFvTakenSize));
  }

  //
  // record FV size information to FvReportFile.
  //
  fprintf (FvReportFile, "%s = 0x%x\n", EFI_FV_TOTAL_SIZE_STRING, (unsigned) mFvTotalSize);
  fprintf (FvReportFile, "%s = 0x%x\n", EFI_FV_TAKEN_SIZE_STRING, (unsigned) mFvTakenSize);

  //
  // Add PI FV extension header
  //
  if (FvExtHeader != NULL) {
    //
    // Add FV Extended Header contents to the FV as a PAD file
    //
    AddPadFile (&FvImageMemoryFile, 4, VtfFileImage, FvExtHeader, 0);

    //
    // Fv Extension header change update Fv Header Check sum
    //
    FvHeader->Checksum      = 0;
    FvHeader->Checksum      = CalculateChecksum16 ((UINT16 *) FvHeader, FvHeader->HeaderLength / sizeof (UINT16));
  }

  //
  // Add files to FV
  //
  for (Index = 0; mFvDataInfo.FvFiles[Index][0] != 0; Index++) {
    //
    // Add the file
    //
    Status = AddFile (&FvImageMemoryFile, &mFvDataInfo, Index, &VtfFileImage, FvMapFile, FvReportFile);

    //
    // Exit if error detected while adding the file
    //
    if (EFI_ERROR (Status)) {
      goto Finish;
    }
  }

  //
  // If there is a VTF file, some special actions need to occur.
  // (VtfFileImage moved below Eof means AddFile placed a VTF at the top.)
  //
  if ((UINTN) VtfFileImage != (UINTN) FvImageMemoryFile.Eof) {
    //
    // Pad from the end of the last file to the beginning of the VTF file.
    // If the left space is less than sizeof (EFI_FFS_FILE_HEADER)?
    //
    Status = PadFvImage (&FvImageMemoryFile, VtfFileImage);
    if (EFI_ERROR (Status)) {
      Error (NULL, 0, 4002, "Resource", "FV space is full, cannot add pad file between the last file and the VTF file.");
      goto Finish;
    }

    if (!mArm && !mRiscV) {
      //
      // Update reset vector (SALE_ENTRY for IPF)
      // Now for IA32 and IA64 platform, the fv which has bsf file must have the
      // EndAddress of 0xFFFFFFFF (unless the section was rebased).
      // Thus, only this type fv needs to update the  reset vector.
      // If the PEI Core is found, the VTF file will probably get
      // corrupted by updating the entry point.
      //
      if (mFvDataInfo.ForceRebase == 1 ||
        (mFvDataInfo.BaseAddress + mFvDataInfo.Size) == FV_IMAGES_TOP_ADDRESS) {
        Status = UpdateResetVector (&FvImageMemoryFile, &mFvDataInfo, VtfFileImage);
        if (EFI_ERROR(Status)) {
          Error (NULL, 0, 3000, "Invalid", "Could not update the reset vector.");
          goto Finish;
        }
        DebugMsg (NULL, 0, 9, "Update Reset vector in VTF file", NULL);
      }
    }
  }

  if (mArm) {
    Status = UpdateArmResetVectorIfNeeded (&FvImageMemoryFile, &mFvDataInfo);
    if (EFI_ERROR (Status)) {
      Error (NULL, 0, 3000, "Invalid", "Could not update the reset vector.");
      goto Finish;
    }

    //
    // Update Checksum for FvHeader
    //
    FvHeader->Checksum = 0;
    FvHeader->Checksum = CalculateChecksum16 ((UINT16 *) FvHeader, FvHeader->HeaderLength / sizeof (UINT16));
  }

  if (mRiscV) {
     //
     // Update RISCV reset vector.
     //
     Status = UpdateRiscvResetVectorIfNeeded (&FvImageMemoryFile, &mFvDataInfo);
     if (EFI_ERROR (Status)) {
       Error (NULL, 0, 3000, "Invalid", "Could not update the reset vector for RISC-V.");
       goto Finish;
    }
    //
    // Update Checksum for FvHeader
    //
    FvHeader->Checksum = 0;
    FvHeader->Checksum = CalculateChecksum16 ((UINT16 *) FvHeader, FvHeader->HeaderLength / sizeof (UINT16));
  }

  //
  // Update FV Alignment attribute to the largest alignment of all the FFS files in the FV
  //
  if (((FvHeader->Attributes & EFI_FVB2_WEAK_ALIGNMENT) != EFI_FVB2_WEAK_ALIGNMENT) &&
      (((FvHeader->Attributes & EFI_FVB2_ALIGNMENT) >> 16)) < MaxFfsAlignment) {
    FvHeader->Attributes = ((MaxFfsAlignment << 16) | (FvHeader->Attributes & 0xFFFF));
    //
    // Update Checksum for FvHeader
    //
    FvHeader->Checksum      = 0;
    FvHeader->Checksum      = CalculateChecksum16 ((UINT16 *) FvHeader, FvHeader->HeaderLength / sizeof (UINT16));
  }

  //
  // If there are large FFS in FV, the file system GUID should set to system 3 GUID.
  //
  if (mIsLargeFfs && CompareGuid (&FvHeader->FileSystemGuid, &mEfiFirmwareFileSystem2Guid) == 0) {
    memcpy (&FvHeader->FileSystemGuid, &mEfiFirmwareFileSystem3Guid, sizeof (EFI_GUID));
    FvHeader->Checksum      = 0;
    FvHeader->Checksum      = CalculateChecksum16 ((UINT16 *) FvHeader, FvHeader->HeaderLength / sizeof (UINT16));
  }

WriteFile:
  //
  // Write fv file (common exit path for both populated and empty FV images)
  //
  FvFile = fopen (LongFilePath (FvFileName), "wb");
  if (FvFile == NULL) {
    Error (NULL, 0, 0001, "Error opening file", FvFileName);
    Status = EFI_ABORTED;
    goto Finish;
  }

  if (fwrite (FvImage, 1, FvImageSize, FvFile) != FvImageSize) {
    Error (NULL, 0, 0002, "Error writing file", FvFileName);
    Status = EFI_ABORTED;
    goto Finish;
  }

Finish:
  //
  // Cleanup: release buffers and close any files opened above.
  //
  if (FvBufferHeader != NULL) {
    free (FvBufferHeader);
  }

  if (FvExtHeader != NULL) {
    free (FvExtHeader);
  }

  if (FvMapName != NULL) {
    free (FvMapName);
  }

  if (FvReportName != NULL) {
    free (FvReportName);
  }

  if (FvFile != NULL) {
    fflush (FvFile);
    fclose (FvFile);
  }

  if (FvMapFile != NULL) {
    fflush (FvMapFile);
    fclose (FvMapFile);
  }

  if (FvReportFile != NULL) {
    fflush (FvReportFile);
    fclose (FvReportFile);
  }
  return Status;
}
EFI_STATUS
UpdatePeiCoreEntryInFit (
  IN FIT_TABLE     *FitTablePtr,
  IN UINT64        PeiCorePhysicalAddress
  )
/*++
Routine Description:
  Updates the PEI Core address recorded in the FIT so that the SEC core can
  hand control over to the PEI Core.
Arguments:
  FitTablePtr             - The pointer of FIT_TABLE.
  PeiCorePhysicalAddress  - The address of Pei Core entry.
Returns:
  EFI_SUCCESS   - The PEI_CORE FIT entry was updated successfully.
  EFI_NOT_FOUND - Not found the PEI_CORE FIT entry.
--*/
{
  FIT_TABLE  *Entry;
  UINTN      Idx;
  UINTN      Count;

  //
  // The first entry's CompSize gives the number of FIT components to scan.
  //
  Entry = FitTablePtr;
  Count = Entry->CompSize;

  for (Idx = 0; Idx < Count; Idx++, Entry++) {
    //
    // Patch the first entry whose type field marks it as the PEI core.
    //
    if ((Entry->CvAndType & FIT_TYPE_MASK) == COMP_TYPE_FIT_PEICORE) {
      Entry->CompAddress = PeiCorePhysicalAddress;
      return EFI_SUCCESS;
    }
  }

  return EFI_NOT_FOUND;
}
VOID
UpdateFitCheckSum (
  IN FIT_TABLE   *FitTablePtr
  )
/*++
Routine Description:
  Recomputes the FIT checksum when the table's checksum-valid bit is set.
Arguments:
  FitTablePtr - The pointer of FIT_TABLE.
Returns:
  None.
--*/
{
  UINT8  ChecksumValid;

  //
  // Bit 7 of CvAndType indicates whether the checksum field is in use.
  //
  ChecksumValid = (UINT8) ((FitTablePtr->CvAndType & CHECKSUM_BIT_MASK) >> 7);
  if (ChecksumValid) {
    //
    // Zero the field first so it does not perturb the new checksum value.
    //
    FitTablePtr->CheckSum = 0;
    FitTablePtr->CheckSum = CalculateChecksum8 ((UINT8 *) FitTablePtr, FitTablePtr->CompSize * 16);
  }
}
EFI_STATUS
CalculateFvSize (
  FV_INFO *FvInfoPtr
  )
/*++
Routine Description:
  Calculate the FV size and Update Fv Size based on the actual FFS files.
  And Update FvInfo data.
Arguments:
  FvInfoPtr     - The pointer to FV_INFO structure.
Returns:
  EFI_ABORTED  - Ffs Image Error
  EFI_SUCCESS  - Successfully update FvSize
--*/
{
  UINTN               CurrentOffset;
  UINTN               OrigOffset;
  UINTN               Index;
  FILE                *fpin;
  UINTN               FfsFileSize;
  UINTN               FvExtendHeaderSize;
  UINT32              FfsAlignment;
  UINT32              FfsHeaderSize;
  EFI_FFS_FILE_HEADER FfsHeader;
  UINTN               VtfFileSize;
  UINTN               MaxPadFileSize;

  FvExtendHeaderSize = 0;
  MaxPadFileSize = 0;
  VtfFileSize = 0;
  fpin  = NULL;
  Index = 0;

  //
  // Compute size for easy access later
  //
  FvInfoPtr->Size = 0;
  for (Index = 0; FvInfoPtr->FvBlocks[Index].NumBlocks > 0 && FvInfoPtr->FvBlocks[Index].Length > 0; Index++) {
    FvInfoPtr->Size += FvInfoPtr->FvBlocks[Index].NumBlocks * FvInfoPtr->FvBlocks[Index].Length;
  }

  //
  // Calculate the required sizes for all FFS files.
  //
  CurrentOffset = sizeof (EFI_FIRMWARE_VOLUME_HEADER);

  //
  // Account for every block-map entry, including the terminating zero entry
  // (the loop body runs once before the terminator check).
  //
  for (Index = 1;; Index ++) {
    CurrentOffset += sizeof (EFI_FV_BLOCK_MAP_ENTRY);
    if (FvInfoPtr->FvBlocks[Index].NumBlocks == 0 || FvInfoPtr->FvBlocks[Index].Length == 0) {
      break;
    }
  }

  //
  // Calculate PI extension header
  //
  if (mFvDataInfo.FvExtHeaderFile[0] != '\0') {
    fpin = fopen (LongFilePath (mFvDataInfo.FvExtHeaderFile), "rb");
    if (fpin == NULL) {
      Error (NULL, 0, 0001, "Error opening file", mFvDataInfo.FvExtHeaderFile);
      return EFI_ABORTED;
    }
    FvExtendHeaderSize = _filelength (fileno (fpin));
    fclose (fpin);
    if (sizeof (EFI_FFS_FILE_HEADER) + FvExtendHeaderSize >= MAX_FFS_SIZE) {
      CurrentOffset += sizeof (EFI_FFS_FILE_HEADER2) + FvExtendHeaderSize;
      mIsLargeFfs = TRUE;
    } else {
      CurrentOffset += sizeof (EFI_FFS_FILE_HEADER) + FvExtendHeaderSize;
    }
    // Keep the next FFS file 8-byte aligned after the extension-header pad file.
    CurrentOffset = (CurrentOffset + 7) & (~7);
  } else if (mFvDataInfo.FvNameGuidSet) {
    CurrentOffset += sizeof (EFI_FFS_FILE_HEADER) + sizeof (EFI_FIRMWARE_VOLUME_EXT_HEADER);
    CurrentOffset = (CurrentOffset + 7) & (~7);
  }

  //
  // Accumulate every FFS file size.
  //
  for (Index = 0; FvInfoPtr->FvFiles[Index][0] != 0; Index++) {
    //
    // Open FFS file
    //
    fpin = NULL;
    fpin = fopen (LongFilePath (FvInfoPtr->FvFiles[Index]), "rb");
    if (fpin == NULL) {
      Error (NULL, 0, 0001, "Error opening file", FvInfoPtr->FvFiles[Index]);
      return EFI_ABORTED;
    }
    //
    // Get the file size
    //
    FfsFileSize = _filelength (fileno (fpin));
    if (FfsFileSize >= MAX_FFS_SIZE) {
      FfsHeaderSize = sizeof(EFI_FFS_FILE_HEADER2);
      mIsLargeFfs = TRUE;
    } else {
      FfsHeaderSize = sizeof(EFI_FFS_FILE_HEADER);
    }
    //
    // Read Ffs File header
    // NOTE(review): only sizeof (EFI_FFS_FILE_HEADER) bytes are read even
    // for large (FFS2) files, and the fread return value is unchecked —
    // presumably IsVtfFile/ReadFfsAlignment only touch the common header
    // prefix; verify against their implementations.
    //
    fread (&FfsHeader, sizeof (UINT8), sizeof (EFI_FFS_FILE_HEADER), fpin);
    //
    // close file
    //
    fclose (fpin);

    if (FvInfoPtr->IsPiFvImage) {
        //
        // Check whether this ffs file is vtf file
        //
        if (IsVtfFile (&FfsHeader)) {
          if (VtfFileFlag) {
            //
            // One Fv image can't have two vtf files.
            //
            Error (NULL, 0, 3000,"Invalid", "One Fv image can't have two vtf files.");
            return EFI_ABORTED;
          }
          VtfFileFlag = TRUE;
          // The VTF size is added at the very end, after all other files.
          VtfFileSize = FfsFileSize;
          continue;
        }

        //
        // Get the alignment of FFS file
        //
        ReadFfsAlignment (&FfsHeader, &FfsAlignment);
        // Convert the encoded alignment exponent into a byte alignment.
        FfsAlignment = 1 << FfsAlignment;
        //
        // Add Pad file
        //
        if (((CurrentOffset + FfsHeaderSize) % FfsAlignment) != 0) {
          //
          // Only EFI_FFS_FILE_HEADER is needed for a pad section.
          //
          OrigOffset = CurrentOffset;
          CurrentOffset = (CurrentOffset + FfsHeaderSize + sizeof(EFI_FFS_FILE_HEADER) + FfsAlignment - 1) & ~(FfsAlignment - 1);
          CurrentOffset -= FfsHeaderSize;
          if ((CurrentOffset - OrigOffset) > MaxPadFileSize) {
            // Track the largest pad file; it may be reclaimed below when
            // the FV is exactly full.
            MaxPadFileSize = CurrentOffset - OrigOffset;
          }
        }
    }

    //
    // Add ffs file size
    //
    if (FvInfoPtr->SizeofFvFiles[Index] > FfsFileSize) {
      CurrentOffset += FvInfoPtr->SizeofFvFiles[Index];
    } else {
      CurrentOffset += FfsFileSize;
    }

    //
    // Make next ffs file start at QWord Boundary
    //
    if (FvInfoPtr->IsPiFvImage) {
      CurrentOffset = (CurrentOffset + EFI_FFS_FILE_HEADER_ALIGNMENT - 1) & ~(EFI_FFS_FILE_HEADER_ALIGNMENT - 1);
    }
  }
  CurrentOffset += VtfFileSize;
  DebugMsg (NULL, 0, 9, "FvImage size", "The calculated fv image size is 0x%x and the current set fv image size is 0x%x", (unsigned) CurrentOffset, (unsigned) FvInfoPtr->Size);

  if (FvInfoPtr->Size == 0) {
    //
    // Update FvInfo data
    //
    FvInfoPtr->FvBlocks[0].NumBlocks = CurrentOffset / FvInfoPtr->FvBlocks[0].Length + ((CurrentOffset % FvInfoPtr->FvBlocks[0].Length)?1:0);
    FvInfoPtr->Size = FvInfoPtr->FvBlocks[0].NumBlocks * FvInfoPtr->FvBlocks[0].Length;
    FvInfoPtr->FvBlocks[1].NumBlocks = 0;
    FvInfoPtr->FvBlocks[1].Length = 0;
  } else if (FvInfoPtr->Size < CurrentOffset) {
    //
    // Not invalid
    //
    Error (NULL, 0, 3000, "Invalid", "the required fv image size 0x%x exceeds the set fv image size 0x%x", (unsigned) CurrentOffset, (unsigned) FvInfoPtr->Size);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Set Fv Size Information
  //
  mFvTotalSize = FvInfoPtr->Size;
  mFvTakenSize = CurrentOffset;
  if ((mFvTakenSize == mFvTotalSize) && (MaxPadFileSize > 0)) {
    //
    // This FV means TOP FFS has been taken. Then, check whether there is padding data for use.
    //
    mFvTakenSize = mFvTakenSize - MaxPadFileSize;
  }

  return EFI_SUCCESS;
}
EFI_STATUS
FfsRebaseImageRead (
  IN VOID    *FileHandle,
  IN UINTN   FileOffset,
  IN OUT UINT32   *ReadSize,
  OUT VOID   *Buffer
  )
/*++
Routine Description:
  Support routine for the PE/COFF Loader that reads a buffer from a PE/COFF file.
  The "file" is an in-memory image, so the read is a plain byte copy.
Arguments:
  FileHandle - The handle to the PE/COFF file
  FileOffset - The offset, in bytes, into the file to read
  ReadSize   - The number of bytes to read from the file starting at FileOffset
  Buffer     - A pointer to the buffer to read the data into.
Returns:
  EFI_SUCCESS - ReadSize bytes of data were read into Buffer from the PE/COFF file starting at FileOffset
--*/
{
  CHAR8   *Dst;
  CHAR8   *Src;
  UINT32  Idx;

  //
  // Copy *ReadSize bytes from the in-memory image at FileOffset into Buffer.
  //
  Dst = (CHAR8 *) Buffer;
  Src = (CHAR8 *) ((UINTN) FileHandle + FileOffset);
  for (Idx = 0; Idx < *ReadSize; Idx++) {
    Dst[Idx] = Src[Idx];
  }

  return EFI_SUCCESS;
}
EFI_STATUS
GetChildFvFromFfs (
  IN FV_INFO            *FvInfo,
  IN EFI_FFS_FILE_HEADER   *FfsFile,
  IN UINTN               XipOffset
  )
/*++
Routine Description:
  Walks all child FvImages contained in the input FfsFile and records each
  one's flash base address into the parent image's base-address table.
Arguments:
  FvInfo     A pointer to FV_INFO structure.
  FfsFile    A pointer to Ffs file image that may contain FvImage.
  XipOffset  The offset address to the parent FvImage base.
Returns:
  EFI_SUCCESS  Base address of child Fv image is recorded.
--*/
{
  EFI_STATUS                  Status;
  UINTN                       SectionIndex;
  EFI_FILE_SECTION_POINTER    FvSection;
  EFI_FIRMWARE_VOLUME_HEADER  *ChildFvHeader;
  EFI_PHYSICAL_ADDRESS        ChildFvBase;
  EFI_FILE_SECTION_POINTER    CorePe32Section;
  UINT16                      CoreMachineType;

  for (SectionIndex = 1; ; SectionIndex++) {
    //
    // Locate the next firmware-volume-image section; stop when none remain.
    //
    Status = GetSectionByType (FfsFile, EFI_SECTION_FIRMWARE_VOLUME_IMAGE, SectionIndex, &FvSection);
    if (EFI_ERROR (Status)) {
      break;
    }

    ChildFvHeader = (EFI_FIRMWARE_VOLUME_HEADER *) ((UINT8 *) FvSection.FVImageSection + GetSectionHeaderLength (FvSection.FVImageSection));

    //
    // Prefer an SEC core in the child FV; fall back to a PEI core.
    //
    Status = FindCorePeSection (ChildFvHeader, ChildFvHeader->FvLength, EFI_FV_FILETYPE_SECURITY_CORE, &CorePe32Section);
    if (EFI_ERROR (Status)) {
      Status = FindCorePeSection (ChildFvHeader, ChildFvHeader->FvLength, EFI_FV_FILETYPE_PEI_CORE, &CorePe32Section);
    }

    if (!EFI_ERROR (Status)) {
      Status = GetCoreMachineType (CorePe32Section, &CoreMachineType);
      if (EFI_ERROR (Status)) {
        Error (NULL, 0, 3000, "Invalid", "Could not get the PE32 machine type for SEC/PEI Core.");
        return EFI_ABORTED;
      }
      //
      // An ARM/AArch64 core means ARM reset-vector processing must run later.
      //
      if ((CoreMachineType == EFI_IMAGE_MACHINE_ARMT) || (CoreMachineType == EFI_IMAGE_MACHINE_AARCH64)) {
        VerboseMsg ("Located ARM/AArch64 SEC/PEI core in child FV");
        mArm = TRUE;
      }
    }

    //
    // Record the child FV's flash base address relative to the parent image.
    //
    ChildFvBase = FvInfo->BaseAddress + (UINTN) ChildFvHeader - (UINTN) FfsFile + XipOffset;
    mFvBaseAddress[mFvBaseAddressNumber++] = ChildFvBase;
  }

  return EFI_SUCCESS;
}
EFI_STATUS
FfsRebase (
IN OUT FV_INFO *FvInfo,
IN CHAR8 *FileName,
IN OUT EFI_FFS_FILE_HEADER *FfsFile,
IN UINTN XipOffset,
IN FILE *FvMapFile
)
/*++
Routine Description:
This function determines if a file is XIP and should be rebased. It will
rebase any PE32 sections found in the file using the base address.
Arguments:
FvInfo A pointer to FV_INFO structure.
FileName Ffs File PathName
FfsFile A pointer to Ffs file image.
XipOffset The offset address to use for rebasing the XIP file image.
FvMapFile FvMapFile to record the function address in one Fvimage
Returns:
EFI_SUCCESS The image was properly rebased.
EFI_INVALID_PARAMETER An input parameter is invalid.
EFI_ABORTED An error occurred while rebasing the input file image.
EFI_OUT_OF_RESOURCES Could not allocate a required resource.
EFI_NOT_FOUND No compressed sections could be found.
--*/
{
EFI_STATUS Status;
PE_COFF_LOADER_IMAGE_CONTEXT ImageContext;
PE_COFF_LOADER_IMAGE_CONTEXT OrigImageContext;
EFI_PHYSICAL_ADDRESS XipBase;
EFI_PHYSICAL_ADDRESS NewPe32BaseAddress;
UINTN Index;
EFI_FILE_SECTION_POINTER CurrentPe32Section;
EFI_FFS_FILE_STATE SavedState;
EFI_IMAGE_OPTIONAL_HEADER_UNION *ImgHdr;
EFI_TE_IMAGE_HEADER *TEImageHeader;
UINT8 *MemoryImagePointer;
EFI_IMAGE_SECTION_HEADER *SectionHeader;
CHAR8 PeFileName [MAX_LONG_FILE_PATH];
CHAR8 *Cptr;
FILE *PeFile;
UINT8 *PeFileBuffer;
UINT32 PeFileSize;
CHAR8 *PdbPointer;
UINT32 FfsHeaderSize;
UINT32 CurSecHdrSize;
Index = 0;
MemoryImagePointer = NULL;
TEImageHeader = NULL;
ImgHdr = NULL;
SectionHeader = NULL;
Cptr = NULL;
PeFile = NULL;
PeFileBuffer = NULL;
//
// Don't need to relocate image when BaseAddress is zero and no ForceRebase Flag specified.
//
if ((FvInfo->BaseAddress == 0) && (FvInfo->ForceRebase == -1)) {
return EFI_SUCCESS;
}
//
// If ForceRebase Flag specified to FALSE, will always not take rebase action.
//
if (FvInfo->ForceRebase == 0) {
return EFI_SUCCESS;
}
XipBase = FvInfo->BaseAddress + XipOffset;
//
// We only process files potentially containing PE32 sections.
//
switch (FfsFile->Type) {
case EFI_FV_FILETYPE_SECURITY_CORE:
case EFI_FV_FILETYPE_PEI_CORE:
case EFI_FV_FILETYPE_PEIM:
case EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER:
case EFI_FV_FILETYPE_DRIVER:
case EFI_FV_FILETYPE_DXE_CORE:
break;
case EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE:
//
// Rebase the inside FvImage.
//
GetChildFvFromFfs (FvInfo, FfsFile, XipOffset);
//
// Search PE/TE section in FV sectin.
//
break;
default:
return EFI_SUCCESS;
}
FfsHeaderSize = GetFfsHeaderLength(FfsFile);
//
// Rebase each PE32 section
//
Status = EFI_SUCCESS;
for (Index = 1;; Index++) {
//
// Init Value
//
NewPe32BaseAddress = 0;
//
// Find Pe Image
//
Status = GetSectionByType (FfsFile, EFI_SECTION_PE32, Index, &CurrentPe32Section);
if (EFI_ERROR (Status)) {
break;
}
CurSecHdrSize = GetSectionHeaderLength(CurrentPe32Section.CommonHeader);
//
// Initialize context
//
memset (&ImageContext, 0, sizeof (ImageContext));
ImageContext.Handle = (VOID *) ((UINTN) CurrentPe32Section.Pe32Section + CurSecHdrSize);
ImageContext.ImageRead = (PE_COFF_LOADER_READ_FILE) FfsRebaseImageRead;
Status = PeCoffLoaderGetImageInfo (&ImageContext);
if (EFI_ERROR (Status)) {
Error (NULL, 0, 3000, "Invalid PeImage", "The input file is %s and the return status is %x", FileName, (int) Status);
return Status;
}
if ( (ImageContext.Machine == EFI_IMAGE_MACHINE_ARMT) ||
(ImageContext.Machine == EFI_IMAGE_MACHINE_AARCH64) ) {
mArm = TRUE;
}
if (ImageContext.Machine == EFI_IMAGE_MACHINE_RISCV64) {
mRiscV = TRUE;
}
//
// Keep Image Context for PE image in FV
//
memcpy (&OrigImageContext, &ImageContext, sizeof (ImageContext));
//
// Get File PdbPointer
//
PdbPointer = PeCoffLoaderGetPdbPointer (ImageContext.Handle);
//
// Get PeHeader pointer
//
ImgHdr = (EFI_IMAGE_OPTIONAL_HEADER_UNION *)((UINTN) CurrentPe32Section.Pe32Section + CurSecHdrSize + ImageContext.PeCoffHeaderOffset);
//
// Calculate the PE32 base address, based on file type
//
switch (FfsFile->Type) {
case EFI_FV_FILETYPE_SECURITY_CORE:
case EFI_FV_FILETYPE_PEI_CORE:
case EFI_FV_FILETYPE_PEIM:
case EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER:
//
// Check if section-alignment and file-alignment match or not
//
if ((ImgHdr->Pe32.OptionalHeader.SectionAlignment != ImgHdr->Pe32.OptionalHeader.FileAlignment)) {
//
// Xip module has the same section alignment and file alignment.
//
Error (NULL, 0, 3000, "Invalid", "PE image Section-Alignment and File-Alignment do not match : %s.", FileName);
return EFI_ABORTED;
}
//
// PeImage has no reloc section. It will try to get reloc data from the original EFI image.
//
if (ImageContext.RelocationsStripped) {
//
// Construct the original efi file Name
//
if (strlen (FileName) >= MAX_LONG_FILE_PATH) {
Error (NULL, 0, 2000, "Invalid", "The file name %s is too long.", FileName);
return EFI_ABORTED;
}
strncpy (PeFileName, FileName, MAX_LONG_FILE_PATH - 1);
PeFileName[MAX_LONG_FILE_PATH - 1] = 0;
Cptr = PeFileName + strlen (PeFileName);
while (*Cptr != '.') {
Cptr --;
}
if (*Cptr != '.') {
Error (NULL, 0, 3000, "Invalid", "The file %s has no .reloc section.", FileName);
return EFI_ABORTED;
} else {
*(Cptr + 1) = 'e';
*(Cptr + 2) = 'f';
*(Cptr + 3) = 'i';
*(Cptr + 4) = '\0';
}
PeFile = fopen (LongFilePath (PeFileName), "rb");
if (PeFile == NULL) {
Warning (NULL, 0, 0, "Invalid", "The file %s has no .reloc section.", FileName);
//Error (NULL, 0, 3000, "Invalid", "The file %s has no .reloc section.", FileName);
//return EFI_ABORTED;
break;
}
//
// Get the file size
//
PeFileSize = _filelength (fileno (PeFile));
PeFileBuffer = (UINT8 *) malloc (PeFileSize);
if (PeFileBuffer == NULL) {
fclose (PeFile);
Error (NULL, 0, 4001, "Resource", "memory cannot be allocated on rebase of %s", FileName);
return EFI_OUT_OF_RESOURCES;
}
//
// Read Pe File
//
fread (PeFileBuffer, sizeof (UINT8), PeFileSize, PeFile);
//
// close file
//
fclose (PeFile);
//
// Handle pointer to the original efi image.
//
ImageContext.Handle = PeFileBuffer;
Status = PeCoffLoaderGetImageInfo (&ImageContext);
if (EFI_ERROR (Status)) {
Error (NULL, 0, 3000, "Invalid PeImage", "The input file is %s and the return status is %x", FileName, (int) Status);
return Status;
}
ImageContext.RelocationsStripped = FALSE;
}
NewPe32BaseAddress = XipBase + (UINTN) CurrentPe32Section.Pe32Section + CurSecHdrSize - (UINTN)FfsFile;
break;
case EFI_FV_FILETYPE_DRIVER:
case EFI_FV_FILETYPE_DXE_CORE:
//
// Check if section-alignment and file-alignment match or not
//
if ((ImgHdr->Pe32.OptionalHeader.SectionAlignment != ImgHdr->Pe32.OptionalHeader.FileAlignment)) {
//
// Xip module has the same section alignment and file alignment.
//
Error (NULL, 0, 3000, "Invalid", "PE image Section-Alignment and File-Alignment do not match : %s.", FileName);
return EFI_ABORTED;
}
NewPe32BaseAddress = XipBase + (UINTN) CurrentPe32Section.Pe32Section + CurSecHdrSize - (UINTN)FfsFile;
break;
default:
//
// Not supported file type
//
return EFI_SUCCESS;
}
//
// Relocation doesn't exist
//
if (ImageContext.RelocationsStripped) {
Warning (NULL, 0, 0, "Invalid", "The file %s has no .reloc section.", FileName);
continue;
}
//
// Relocation exist and rebase
//
//
// Load and Relocate Image Data
//
MemoryImagePointer = (UINT8 *) malloc ((UINTN) ImageContext.ImageSize + ImageContext.SectionAlignment);
if (MemoryImagePointer == NULL) {
Error (NULL, 0, 4001, "Resource", "memory cannot be allocated on rebase of %s", FileName);
return EFI_OUT_OF_RESOURCES;
}
memset ((VOID *) MemoryImagePointer, 0, (UINTN) ImageContext.ImageSize + ImageContext.SectionAlignment);
ImageContext.ImageAddress = ((UINTN) MemoryImagePointer + ImageContext.SectionAlignment - 1) & (~((UINTN) ImageContext.SectionAlignment - 1));
Status = PeCoffLoaderLoadImage (&ImageContext);
if (EFI_ERROR (Status)) {
Error (NULL, 0, 3000, "Invalid", "LocateImage() call failed on rebase of %s", FileName);
free ((VOID *) MemoryImagePointer);
return Status;
}
ImageContext.DestinationAddress = NewPe32BaseAddress;
Status = PeCoffLoaderRelocateImage (&ImageContext);
if (EFI_ERROR (Status)) {
Error (NULL, 0, 3000, "Invalid", "RelocateImage() call failed on rebase of %s Status=%d", FileName, Status);
free ((VOID *) MemoryImagePointer);
return Status;
}
//
// Copy Relocated data to raw image file.
//
SectionHeader = (EFI_IMAGE_SECTION_HEADER *) (
(UINTN) ImgHdr +
sizeof (UINT32) +
sizeof (EFI_IMAGE_FILE_HEADER) +
ImgHdr->Pe32.FileHeader.SizeOfOptionalHeader
);
for (Index = 0; Index < ImgHdr->Pe32.FileHeader.NumberOfSections; Index ++, SectionHeader ++) {
CopyMem (
(UINT8 *) CurrentPe32Section.Pe32Section + CurSecHdrSize + SectionHeader->PointerToRawData,
(VOID*) (UINTN) (ImageContext.ImageAddress + SectionHeader->VirtualAddress),
SectionHeader->SizeOfRawData
);
}
free ((VOID *) MemoryImagePointer);
MemoryImagePointer = NULL;
if (PeFileBuffer != NULL) {
free (PeFileBuffer);
PeFileBuffer = NULL;
}
//
// Update Image Base Address
//
if (ImgHdr->Pe32.OptionalHeader.Magic == EFI_IMAGE_NT_OPTIONAL_HDR32_MAGIC) {
ImgHdr->Pe32.OptionalHeader.ImageBase = (UINT32) NewPe32BaseAddress;
} else if (ImgHdr->Pe32Plus.OptionalHeader.Magic == EFI_IMAGE_NT_OPTIONAL_HDR64_MAGIC) {
ImgHdr->Pe32Plus.OptionalHeader.ImageBase = NewPe32BaseAddress;
} else {
Error (NULL, 0, 3000, "Invalid", "unknown PE magic signature %X in PE32 image %s",
ImgHdr->Pe32.OptionalHeader.Magic,
FileName
);
return EFI_ABORTED;
}
//
// Now update file checksum
//
if (FfsFile->Attributes & FFS_ATTRIB_CHECKSUM) {
SavedState = FfsFile->State;
FfsFile->IntegrityCheck.Checksum.File = 0;
FfsFile->State = 0;
FfsFile->IntegrityCheck.Checksum.File = CalculateChecksum8 (
(UINT8 *) ((UINT8 *)FfsFile + FfsHeaderSize),
GetFfsFileLength (FfsFile) - FfsHeaderSize
);
FfsFile->State = SavedState;
}
//
// Get this module function address from ModulePeMapFile and add them into FvMap file
//
//
// Default use FileName as map file path
//
if (PdbPointer == NULL) {
PdbPointer = FileName;
}
WriteMapFile (FvMapFile, PdbPointer, FfsFile, NewPe32BaseAddress, &OrigImageContext);
}
if (FfsFile->Type != EFI_FV_FILETYPE_SECURITY_CORE &&
FfsFile->Type != EFI_FV_FILETYPE_PEI_CORE &&
FfsFile->Type != EFI_FV_FILETYPE_PEIM &&
FfsFile->Type != EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER &&
FfsFile->Type != EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE
) {
//
// Only Peim code may have a TE section
//
return EFI_SUCCESS;
}
//
// Now process TE sections
//
for (Index = 1;; Index++) {
NewPe32BaseAddress = 0;
//
// Find Te Image
//
Status = GetSectionByType (FfsFile, EFI_SECTION_TE, Index, &CurrentPe32Section);
if (EFI_ERROR (Status)) {
break;
}
CurSecHdrSize = GetSectionHeaderLength(CurrentPe32Section.CommonHeader);
//
// Calculate the TE base address, the FFS file base plus the offset of the TE section less the size stripped off
// by GenTEImage
//
TEImageHeader = (EFI_TE_IMAGE_HEADER *) ((UINT8 *) CurrentPe32Section.Pe32Section + CurSecHdrSize);
//
// Initialize context, load image info.
//
memset (&ImageContext, 0, sizeof (ImageContext));
ImageContext.Handle = (VOID *) TEImageHeader;
ImageContext.ImageRead = (PE_COFF_LOADER_READ_FILE) FfsRebaseImageRead;
Status = PeCoffLoaderGetImageInfo (&ImageContext);
if (EFI_ERROR (Status)) {
Error (NULL, 0, 3000, "Invalid TeImage", "The input file is %s and the return status is %x", FileName, (int) Status);
return Status;
}
if ( (ImageContext.Machine == EFI_IMAGE_MACHINE_ARMT) ||
(ImageContext.Machine == EFI_IMAGE_MACHINE_AARCH64) ) {
mArm = TRUE;
}
//
// Keep Image Context for TE image in FV
//
memcpy (&OrigImageContext, &ImageContext, sizeof (ImageContext));
//
// Get File PdbPointer
//
PdbPointer = PeCoffLoaderGetPdbPointer (ImageContext.Handle);
//
// Set new rebased address.
//
NewPe32BaseAddress = XipBase + (UINTN) TEImageHeader + sizeof (EFI_TE_IMAGE_HEADER) \
- TEImageHeader->StrippedSize - (UINTN) FfsFile;
//
// if reloc is stripped, try to get the original efi image to get reloc info.
//
if (ImageContext.RelocationsStripped) {
//
// Construct the original efi file name
//
if (strlen (FileName) >= MAX_LONG_FILE_PATH) {
Error (NULL, 0, 2000, "Invalid", "The file name %s is too long.", FileName);
return EFI_ABORTED;
}
strncpy (PeFileName, FileName, MAX_LONG_FILE_PATH - 1);
PeFileName[MAX_LONG_FILE_PATH - 1] = 0;
Cptr = PeFileName + strlen (PeFileName);
while (*Cptr != '.') {
Cptr --;
}
if (*Cptr != '.') {
Error (NULL, 0, 3000, "Invalid", "The file %s has no .reloc section.", FileName);
return EFI_ABORTED;
} else {
*(Cptr + 1) = 'e';
*(Cptr + 2) = 'f';
*(Cptr + 3) = 'i';
*(Cptr + 4) = '\0';
}
PeFile = fopen (LongFilePath (PeFileName), "rb");
if (PeFile == NULL) {
Warning (NULL, 0, 0, "Invalid", "The file %s has no .reloc section.", FileName);
//Error (NULL, 0, 3000, "Invalid", "The file %s has no .reloc section.", FileName);
//return EFI_ABORTED;
} else {
//
// Get the file size
//
PeFileSize = _filelength (fileno (PeFile));
PeFileBuffer = (UINT8 *) malloc (PeFileSize);
if (PeFileBuffer == NULL) {
fclose (PeFile);
Error (NULL, 0, 4001, "Resource", "memory cannot be allocated on rebase of %s", FileName);
return EFI_OUT_OF_RESOURCES;
}
//
// Read Pe File
//
fread (PeFileBuffer, sizeof (UINT8), PeFileSize, PeFile);
//
// close file
//
fclose (PeFile);
//
// Append reloc section into TeImage
//
ImageContext.Handle = PeFileBuffer;
Status = PeCoffLoaderGetImageInfo (&ImageContext);
if (EFI_ERROR (Status)) {
Error (NULL, 0, 3000, "Invalid TeImage", "The input file is %s and the return status is %x", FileName, (int) Status);
return Status;
}
ImageContext.RelocationsStripped = FALSE;
}
}
//
// Relocation doesn't exist
//
if (ImageContext.RelocationsStripped) {
Warning (NULL, 0, 0, "Invalid", "The file %s has no .reloc section.", FileName);
continue;
}
//
// Relocation exist and rebase
//
//
// Load and Relocate Image Data
//
MemoryImagePointer = (UINT8 *) malloc ((UINTN) ImageContext.ImageSize + ImageContext.SectionAlignment);
if (MemoryImagePointer == NULL) {
Error (NULL, 0, 4001, "Resource", "memory cannot be allocated on rebase of %s", FileName);
return EFI_OUT_OF_RESOURCES;
}
memset ((VOID *) MemoryImagePointer, 0, (UINTN) ImageContext.ImageSize + ImageContext.SectionAlignment);
ImageContext.ImageAddress = ((UINTN) MemoryImagePointer + ImageContext.SectionAlignment - 1) & (~((UINTN) ImageContext.SectionAlignment - 1));
Status = PeCoffLoaderLoadImage (&ImageContext);
if (EFI_ERROR (Status)) {
Error (NULL, 0, 3000, "Invalid", "LocateImage() call failed on rebase of %s", FileName);
free ((VOID *) MemoryImagePointer);
return Status;
}
//
// Reloacate TeImage
//
ImageContext.DestinationAddress = NewPe32BaseAddress;
Status = PeCoffLoaderRelocateImage (&ImageContext);
if (EFI_ERROR (Status)) {
Error (NULL, 0, 3000, "Invalid", "RelocateImage() call failed on rebase of TE image %s", FileName);
free ((VOID *) MemoryImagePointer);
return Status;
}
//
// Copy the relocated image into raw image file.
//
SectionHeader = (EFI_IMAGE_SECTION_HEADER *) (TEImageHeader + 1);
for (Index = 0; Index < TEImageHeader->NumberOfSections; Index ++, SectionHeader ++) {
if (!ImageContext.IsTeImage) {
CopyMem (
(UINT8 *) TEImageHeader + sizeof (EFI_TE_IMAGE_HEADER) - TEImageHeader->StrippedSize + SectionHeader->PointerToRawData,
(VOID*) (UINTN) (ImageContext.ImageAddress + SectionHeader->VirtualAddress),
SectionHeader->SizeOfRawData
);
} else {
CopyMem (
(UINT8 *) TEImageHeader + sizeof (EFI_TE_IMAGE_HEADER) - TEImageHeader->StrippedSize + SectionHeader->PointerToRawData,
(VOID*) (UINTN) (ImageContext.ImageAddress + sizeof (EFI_TE_IMAGE_HEADER) - TEImageHeader->StrippedSize + SectionHeader->VirtualAddress),
SectionHeader->SizeOfRawData
);
}
}
//
// Free the allocated memory resource
//
free ((VOID *) MemoryImagePointer);
MemoryImagePointer = NULL;
if (PeFileBuffer != NULL) {
free (PeFileBuffer);
PeFileBuffer = NULL;
}
//
// Update Image Base Address
//
TEImageHeader->ImageBase = NewPe32BaseAddress;
//
// Now update file checksum
//
if (FfsFile->Attributes & FFS_ATTRIB_CHECKSUM) {
SavedState = FfsFile->State;
FfsFile->IntegrityCheck.Checksum.File = 0;
FfsFile->State = 0;
FfsFile->IntegrityCheck.Checksum.File = CalculateChecksum8 (
(UINT8 *)((UINT8 *)FfsFile + FfsHeaderSize),
GetFfsFileLength (FfsFile) - FfsHeaderSize
);
FfsFile->State = SavedState;
}
//
// Get this module function address from ModulePeMapFile and add them into FvMap file
//
//
// Default use FileName as map file path
//
if (PdbPointer == NULL) {
PdbPointer = FileName;
}
WriteMapFile (
FvMapFile,
PdbPointer,
FfsFile,
NewPe32BaseAddress,
&OrigImageContext
);
}
return EFI_SUCCESS;
}
EFI_STATUS
FindApResetVectorPosition (
  IN MEMORY_FILE  *FvImage,
  OUT UINT8       **Pointer
  )
/*++
Routine Description:
  Find the position in this FvImage to place Ap reset vector.
  Scans every FFS pad file for a 4 KB-aligned slot (relative to the FV
  image base) large enough to hold the AP startup data, preferring the
  highest such slot, and accepts it only if it lies within the top
  128 KB (0x20000) of the FV image.
Arguments:
  FvImage   Memory file for the FV memory image.
  Pointer   Pointer to pointer to position.
Returns:
  EFI_NOT_FOUND   - No satisfied position is found.
  EFI_SUCCESS     - The suitable position is return.
--*/
{
  EFI_FFS_FILE_HEADER  *PadFile;
  UINT32               Index;
  EFI_STATUS           Status;
  UINT8                *FixPoint;
  UINT32               FileLength;

  for (Index = 1; ;Index ++) {
    //
    // Find Pad File to add ApResetVector info
    // NOTE(review): GetFileByType appears to walk module-level FV state,
    // not FvImage itself -- FvImage is only used for base/Eof math below.
    //
    Status = GetFileByType (EFI_FV_FILETYPE_FFS_PAD, Index, &PadFile);
    if (EFI_ERROR (Status) || (PadFile == NULL)) {
      //
      // No Pad file to be found.
      //
      break;
    }
    //
    // Get Pad file size, rounded up to the FFS file header alignment.
    //
    FileLength = GetFfsFileLength(PadFile);
    FileLength = (FileLength + EFI_FFS_FILE_HEADER_ALIGNMENT - 1) & ~(EFI_FFS_FILE_HEADER_ALIGNMENT - 1);
    //
    // FixPoint must be align on 0x1000 relative to FvImage Header:
    // start just past the pad file's header, then advance to the next
    // 4 KB boundary measured from the start of the FV image.
    //
    FixPoint = (UINT8*) PadFile + GetFfsHeaderLength(PadFile);
    FixPoint = FixPoint + 0x1000 - (((UINTN) FixPoint - (UINTN) FvImage->FileImage) & 0xFFF);
    //
    // FixPoint be larger at the last place of one fv image: walk forward
    // in 4 KB steps while the startup data still fits inside the pad
    // file, then step back to the last position that fit.
    //
    while (((UINTN) FixPoint + SIZEOF_STARTUP_DATA_ARRAY - (UINTN) PadFile) <= FileLength) {
      FixPoint += 0x1000;
    }
    FixPoint -= 0x1000;
    if ((UINTN) FixPoint < ((UINTN) PadFile + GetFfsHeaderLength(PadFile))) {
      //
      // No alignment FixPoint in this Pad File.
      //
      continue;
    }
    if ((UINTN) FvImage->Eof - (UINTN)FixPoint <= 0x20000) {
      //
      // Find the position to place ApResetVector: it must sit within
      // the last 128 KB of the FV image so the reset vector can reach it.
      //
      *Pointer = FixPoint;
      return EFI_SUCCESS;
    }
  }
  return EFI_NOT_FOUND;
}
EFI_STATUS
ParseCapInf (
  IN MEMORY_FILE  *InfFile,
  OUT CAP_INFO    *CapInfo
  )
/*++
Routine Description:
  This function parses a Cap.INF file and copies info into a CAP_INFO structure.
Arguments:
  InfFile        Memory file image.
  CapInfo        Information read from INF file.
Returns:
  EFI_SUCCESS       INF file information successfully retrieved.
  EFI_ABORTED       INF file has an invalid format.
  EFI_NOT_FOUND     A required string was not found in the INF file.
--*/
{
  CHAR8       Value[MAX_LONG_FILE_PATH];
  UINT64      Value64;
  UINTN       Index, Number;
  EFI_STATUS  Status;

  //
  // Initialize Cap info
  //
  // memset (CapInfo, 0, sizeof (CAP_INFO));
  //

  //
  // Read the Capsule Guid
  //
  Status = FindToken (InfFile, OPTIONS_SECTION_STRING, EFI_CAPSULE_GUID_STRING, 0, Value);
  if (Status == EFI_SUCCESS) {
    //
    // Get the Capsule Guid
    //
    Status = StringToGuid (Value, &CapInfo->CapGuid);
    if (EFI_ERROR (Status)) {
      Error (NULL, 0, 2000, "Invalid parameter", "%s = %s", EFI_CAPSULE_GUID_STRING, Value);
      return EFI_ABORTED;
    }
    DebugMsg (NULL, 0, 9, "Capsule Guid", "%s = %s", EFI_CAPSULE_GUID_STRING, Value);
  }

  //
  // Read the Capsule Header Size
  //
  Status = FindToken (InfFile, OPTIONS_SECTION_STRING, EFI_CAPSULE_HEADER_SIZE_STRING, 0, Value);
  if (Status == EFI_SUCCESS) {
    Status = AsciiStringToUint64 (Value, FALSE, &Value64);
    if (EFI_ERROR (Status)) {
      Error (NULL, 0, 2000, "Invalid parameter", "%s = %s", EFI_CAPSULE_HEADER_SIZE_STRING, Value);
      return EFI_ABORTED;
    }
    CapInfo->HeaderSize = (UINT32) Value64;
    DebugMsg (NULL, 0, 9, "Capsule Header size", "%s = %s", EFI_CAPSULE_HEADER_SIZE_STRING, Value);
  }

  //
  // Read the Capsule Flag
  //
  Status = FindToken (InfFile, OPTIONS_SECTION_STRING, EFI_CAPSULE_FLAGS_STRING, 0, Value);
  if (Status == EFI_SUCCESS) {
    if (strstr (Value, "PopulateSystemTable") != NULL) {
      CapInfo->Flags |= CAPSULE_FLAGS_PERSIST_ACROSS_RESET | CAPSULE_FLAGS_POPULATE_SYSTEM_TABLE;
      if (strstr (Value, "InitiateReset") != NULL) {
        CapInfo->Flags |= CAPSULE_FLAGS_INITIATE_RESET;
      }
    } else if (strstr (Value, "PersistAcrossReset") != NULL) {
      CapInfo->Flags |= CAPSULE_FLAGS_PERSIST_ACROSS_RESET;
      if (strstr (Value, "InitiateReset") != NULL) {
        CapInfo->Flags |= CAPSULE_FLAGS_INITIATE_RESET;
      }
    } else {
      Error (NULL, 0, 2000, "Invalid parameter", "invalid Flag setting for %s.", EFI_CAPSULE_FLAGS_STRING);
      return EFI_ABORTED;
    }
    //
    // Pass Value through "%s" instead of using it as the format string:
    // INF-supplied text may contain '%' conversion characters.
    //
    DebugMsg (NULL, 0, 9, "Capsule Flag", "%s", Value);
  }

  //
  // Read the optional OEM capsule flags (low 16 bits of Flags).
  //
  Status = FindToken (InfFile, OPTIONS_SECTION_STRING, EFI_OEM_CAPSULE_FLAGS_STRING, 0, Value);
  if (Status == EFI_SUCCESS) {
    Status = AsciiStringToUint64 (Value, FALSE, &Value64);
    if (EFI_ERROR (Status) || Value64 > 0xffff) {
      Error (NULL, 0, 2000, "Invalid parameter",
        "invalid Flag setting for %s. Must be integer value between 0x0000 and 0xffff.",
        EFI_OEM_CAPSULE_FLAGS_STRING);
      return EFI_ABORTED;
    }
    CapInfo->Flags |= Value64;
    //
    // Same format-string hygiene as above.
    //
    DebugMsg (NULL, 0, 9, "Capsule Extend Flag", "%s", Value);
  }

  //
  // Read Capsule File name
  //
  Status = FindToken (InfFile, OPTIONS_SECTION_STRING, EFI_FILE_NAME_STRING, 0, Value);
  if (Status == EFI_SUCCESS) {
    //
    // Get output file name (Value and CapName are both
    // MAX_LONG_FILE_PATH bytes, so the copy cannot overflow).
    //
    strcpy (CapInfo->CapName, Value);
  }

  //
  // Read the Capsule FileImage list; slots already filled by the caller
  // (e.g. from the command line) are skipped.
  //
  Number = 0;
  for (Index = 0; Index < MAX_NUMBER_OF_FILES_IN_CAP; Index++) {
    if (CapInfo->CapFiles[Index][0] != '\0') {
      continue;
    }
    //
    // Read the capsule file name
    //
    Status = FindToken (InfFile, FILES_SECTION_STRING, EFI_FILE_NAME_STRING, Number++, Value);
    if (Status == EFI_SUCCESS) {
      //
      // Add the file
      //
      strcpy (CapInfo->CapFiles[Index], Value);
      DebugMsg (NULL, 0, 9, "Capsule component file", "the %uth file name is %s", (unsigned) Index, CapInfo->CapFiles[Index]);
    } else {
      break;
    }
  }

  if (Index == 0) {
    Warning (NULL, 0, 0, "Capsule components are not specified.", NULL);
  }

  return EFI_SUCCESS;
}
EFI_STATUS
GenerateCapImage (
  IN CHAR8    *InfFileImage,
  IN UINTN    InfFileSize,
  IN CHAR8    *CapFileName
  )
/*++
Routine Description:
  This is the main function which will be called from application to create UEFI Capsule image.
Arguments:
  InfFileImage   Buffer containing the INF file contents.
  InfFileSize    Size of the contents of the InfFileImage buffer.
  CapFileName    Requested name for the Cap file.
Returns:
  EFI_SUCCESS             Function completed successfully.
  EFI_OUT_OF_RESOURCES    Could not allocate required resources.
  EFI_ABORTED             Error encountered.
  EFI_INVALID_PARAMETER   A required parameter was NULL.
--*/
{
  UINT32                CapSize;
  UINT32                AllocatedCapSize;
  UINT8                 *CapBuffer;
  EFI_CAPSULE_HEADER    *CapsuleHeader;
  MEMORY_FILE           InfMemoryFile;
  UINT32                FileSize;
  UINT32                Index;
  FILE                  *fpin, *fpout;
  EFI_STATUS            Status;

  if (InfFileImage != NULL) {
    //
    // Initialize file structures
    //
    InfMemoryFile.FileImage           = InfFileImage;
    InfMemoryFile.CurrentFilePointer  = InfFileImage;
    InfMemoryFile.Eof                 = InfFileImage + InfFileSize;

    //
    // Parse the Cap inf file for header information
    //
    Status = ParseCapInf (&InfMemoryFile, &mCapDataInfo);
    if (Status != EFI_SUCCESS) {
      return Status;
    }
  }

  if (mCapDataInfo.HeaderSize == 0) {
    //
    // make header size align 16 bytes.
    //
    mCapDataInfo.HeaderSize = sizeof (EFI_CAPSULE_HEADER);
    mCapDataInfo.HeaderSize = (mCapDataInfo.HeaderSize + 0xF) & ~0xF;
  }

  if (mCapDataInfo.HeaderSize < sizeof (EFI_CAPSULE_HEADER)) {
    Error (NULL, 0, 2000, "Invalid parameter", "The specified HeaderSize cannot be less than the size of EFI_CAPSULE_HEADER.");
    return EFI_INVALID_PARAMETER;
  }

  if (CapFileName == NULL && mCapDataInfo.CapName[0] != '\0') {
    CapFileName = mCapDataInfo.CapName;
  }

  if (CapFileName == NULL) {
    Error (NULL, 0, 2001, "Missing required argument", "Output Capsule file name");
    return EFI_INVALID_PARAMETER;
  }

  //
  // Set Default Capsule Guid value
  //
  if (CompareGuid (&mCapDataInfo.CapGuid, &mZeroGuid) == 0) {
    memcpy (&mCapDataInfo.CapGuid, &mDefaultCapsuleGuid, sizeof (EFI_GUID));
  }

  //
  // Calculate the size of capsule image: header plus every component
  // file, with an explicit UINT32 overflow guard on the accumulation.
  //
  Index    = 0;
  FileSize = 0;
  CapSize  = mCapDataInfo.HeaderSize;
  while (mCapDataInfo.CapFiles [Index][0] != '\0') {
    fpin = fopen (LongFilePath (mCapDataInfo.CapFiles[Index]), "rb");
    if (fpin == NULL) {
      //
      // "%s" keeps a '%' in the file name from being treated as a
      // format conversion by Error().
      //
      Error (NULL, 0, 0001, "Error opening file", "%s", mCapDataInfo.CapFiles[Index]);
      return EFI_ABORTED;
    }
    FileSize = _filelength (fileno (fpin));
    fclose (fpin);
    if (FileSize > 0xFFFFFFFF - CapSize) {
      Error (NULL, 0, 3000, "Invalid", "capsule size overflows 32 bits.");
      return EFI_ABORTED;
    }
    CapSize += FileSize;
    Index ++;
  }

  //
  // Allocate buffer for capsule image.
  //
  CapBuffer = (UINT8 *) malloc (CapSize);
  if (CapBuffer == NULL) {
    Error (NULL, 0, 4001, "Resource", "memory cannot be allocated for creating the capsule.");
    return EFI_OUT_OF_RESOURCES;
  }
  //
  // Remember how much was allocated so the copy pass below cannot
  // overrun the buffer if a component file grows between the two passes.
  //
  AllocatedCapSize = CapSize;

  //
  // Initialize the capsule header to zero
  //
  memset (CapBuffer, 0, mCapDataInfo.HeaderSize);

  //
  // create capsule header and get capsule body
  //
  CapsuleHeader = (EFI_CAPSULE_HEADER *) CapBuffer;
  memcpy (&CapsuleHeader->CapsuleGuid, &mCapDataInfo.CapGuid, sizeof (EFI_GUID));
  CapsuleHeader->HeaderSize       = mCapDataInfo.HeaderSize;
  CapsuleHeader->Flags            = mCapDataInfo.Flags;
  CapsuleHeader->CapsuleImageSize = CapSize;

  Index    = 0;
  FileSize = 0;
  CapSize  = CapsuleHeader->HeaderSize;
  while (mCapDataInfo.CapFiles [Index][0] != '\0') {
    fpin = fopen (LongFilePath (mCapDataInfo.CapFiles[Index]), "rb");
    if (fpin == NULL) {
      Error (NULL, 0, 0001, "Error opening file", "%s", mCapDataInfo.CapFiles[Index]);
      free (CapBuffer);
      return EFI_ABORTED;
    }
    FileSize = _filelength (fileno (fpin));
    if (FileSize > AllocatedCapSize - CapSize) {
      //
      // The file changed size since the sizing pass; copying it now
      // would write past the end of CapBuffer.
      //
      Error (NULL, 0, 3000, "Invalid", "the size of file %s changed during capsule generation.", mCapDataInfo.CapFiles[Index]);
      fclose (fpin);
      free (CapBuffer);
      return EFI_ABORTED;
    }
    if (fread (CapBuffer + CapSize, 1, (size_t) FileSize, fpin) != (size_t) FileSize) {
      Error (NULL, 0, 0004, "Error reading file", "%s", mCapDataInfo.CapFiles[Index]);
      fclose (fpin);
      free (CapBuffer);
      return EFI_ABORTED;
    }
    fclose (fpin);
    Index ++;
    CapSize += FileSize;
  }

  //
  // write capsule data into the output file
  //
  fpout = fopen (LongFilePath (CapFileName), "wb");
  if (fpout == NULL) {
    Error (NULL, 0, 0001, "Error opening file", "%s", CapFileName);
    free (CapBuffer);
    return EFI_ABORTED;
  }
  if (fwrite (CapBuffer, 1, (size_t) CapSize, fpout) != (size_t) CapSize) {
    Error (NULL, 0, 0002, "Error writing file", "%s", CapFileName);
    fclose (fpout);
    free (CapBuffer);
    return EFI_ABORTED;
  }
  fclose (fpout);
  free (CapBuffer);

  VerboseMsg ("The size of the generated capsule image is %u bytes", (unsigned) CapSize);
  return EFI_SUCCESS;
}
|
190603.c | /*
* $Id: stdio_init_exit.c,v 1.33 2006-01-08 12:04:24 clib2devs Exp $
*/
#ifndef _STDIO_HEADERS_H
#include "stdio_headers.h"
#endif /* _STDIO_HEADERS_H */
#ifndef _UNISTD_HEADERS_H
#include "unistd_headers.h"
#endif /* _UNISTD_HEADERS_H */
/****************************************************************************/
#ifndef _STDLIB_MEMORY_H
#include "stdlib_memory.h"
#endif /* _STDLIB_MEMORY_H */
/****************************************************************************/
#ifndef _STDLIB_CONSTRUCTOR_H
#include "stdlib_constructor.h"
#endif /* _STDLIB_CONSTRUCTOR_H */
/****************************************************************************/
/* The file handle table. */
struct iob **NOCOMMON __iob;
int NOCOMMON __num_iob;
/****************************************************************************/
/* The file descriptor table. */
struct fd **NOCOMMON __fd;
int NOCOMMON __num_fd;
/****************************************************************************/
void __close_all_files(void)
{
int i;
ENTER();
__stdio_lock();
if (__num_iob > 0)
{
for (i = 0; i < __num_iob; i++)
{
if (FLAG_IS_SET(__iob[i]->iob_Flags, IOBF_IN_USE))
fclose((FILE *)__iob[i]);
}
__num_iob = 0;
}
if (__num_fd > 0)
{
for (i = 0; i < __num_fd; i++)
{
if (FLAG_IS_SET(__fd[i]->fd_Flags, FDF_IN_USE))
close(i);
}
__num_fd = 0;
}
__stdio_unlock();
LEAVE();
}
/****************************************************************************/
/* Library shutdown hook: flush and close all open streams and file
   descriptors via __close_all_files() before the runtime tears down. */
STDIO_DESTRUCTOR(stdio_exit)
{
	ENTER();

	__close_all_files();

	LEAVE();
}
/****************************************************************************/
/* Library startup hook: initialize the stdio lock and pre-grow the
   iob and fd tables to hold the three standard streams
   (stdin/stdout/stderr).  Fails the constructor if any step fails. */
STDIO_CONSTRUCTOR(stdio_init)
{
	const int num_standard_files = (STDERR_FILENO - STDIN_FILENO + 1);
	BOOL success = FALSE;

	ENTER();

	/* Each step is attempted only if the previous one succeeded;
	   short-circuiting replaces the original goto-based bailout. */
	if (__stdio_lock_init() >= 0 &&
	    __grow_iob_table(num_standard_files) >= 0 &&
	    __grow_fd_table(num_standard_files) >= 0)
	{
		success = TRUE;
	}

	SHOWVALUE(success);
	LEAVE();

	if (success)
		CONSTRUCTOR_SUCCEED();
	else
		CONSTRUCTOR_FAIL();
}
|
118907.c | // Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <lib/fdio/spawn.h>
#include <stdio.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>
#include <zircon/syscalls/object.h>
#include <zircon/types.h>
// Minimal `time`-like utility: spawn the given binary, wait for it to
// terminate, print the elapsed wall-clock time, and exit with the
// child's return code.
int main(int argc, char** argv) {
  if (argc < 2) {
    fprintf(stderr, "usage: %s /path/to/binary [args...]\n", argv[0]);
    return 1;
  }
  zx_time_t start = zx_clock_get_monotonic();
  zx_handle_t proc = ZX_HANDLE_INVALID;
  zx_status_t status = fdio_spawn(ZX_HANDLE_INVALID, FDIO_SPAWN_CLONE_ALL, argv[1],
                                  (const char* const*)argv + 1, &proc);
  if (status != ZX_OK) {
    fprintf(stderr, "error: Failed to spawn '%s': %d (%s)\n", argv[1], status,
            zx_status_get_string(status));
    return 1;
  }
  status = zx_object_wait_one(proc, ZX_PROCESS_TERMINATED, ZX_TIME_INFINITE, NULL);
  zx_time_t stop = zx_clock_get_monotonic();
  if (status != ZX_OK) {
    fprintf(stderr, "error: Failed to wait for process termination: %d (%s)\n", status,
            zx_status_get_string(status));
    return 1;
  }
  zx_info_process_v2_t proc_info;
  status = zx_object_get_info(proc, ZX_INFO_PROCESS_V2, &proc_info, sizeof(proc_info), NULL, NULL);
  zx_handle_close(proc);
  if (status != ZX_OK) {
    fprintf(stderr, "error: Failed to get return code: %d (%s)\n", status,
            zx_status_get_string(status));
    return 1;
  }
  if (proc_info.return_code != 0) {
    fprintf(stderr, "error: %s exited with nonzero return code: %d\n", argv[1],
            (int)proc_info.return_code);
  }
  zx_duration_t delta = stop - start;
  uint64_t secs = delta / ZX_SEC(1);
  uint64_t usecs = (delta - secs * ZX_SEC(1)) / ZX_USEC(1);
  // secs/usecs are uint64_t; the original "%ld" specifier mismatched
  // that type (UB on 32-bit targets).  Cast to unsigned long long and
  // use %llu so the format is correct on every platform.
  printf("real\t%llu.%06llus\n", (unsigned long long)secs, (unsigned long long)usecs);
  return (int)proc_info.return_code;
}
|
865399.c | /*
* srtp.c
*
* the secure real-time transport protocol
*
* David A. McGrew
* Cisco Systems, Inc.
*/
/*
*
* Copyright (c) 2001-2017, Cisco Systems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* Neither the name of the Cisco Systems, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
// Leave this as the top level import. Ensures the existence of defines
#include "config.h"
#include "srtp_priv.h"
#include "crypto_types.h"
#include "err.h"
#include "ekt.h" /* for SRTP Encrypted Key Transport */
#include "alloc.h" /* for srtp_crypto_alloc() */
#ifdef GCM
#include "aes_gcm.h" /* for AES GCM mode */
#endif
#ifdef OPENSSL_KDF
#include <openssl/kdf.h>
#include "aes_icm_ext.h"
#endif
#include <limits.h>
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#elif defined(HAVE_WINSOCK2_H)
#include <winsock2.h>
#endif
/* the debug module for srtp */
srtp_debug_module_t mod_srtp = {
0, /* debugging is off by default */
"srtp" /* printable name for module */
};
#define octets_in_rtp_header 12
#define uint32s_in_rtp_header 3
#define octets_in_rtcp_header 8
#define uint32s_in_rtcp_header 2
#define octets_in_rtp_extn_hdr 4
static srtp_err_status_t srtp_validate_rtp_header(void *rtp_hdr,
int *pkt_octet_len)
{
srtp_hdr_t *hdr = (srtp_hdr_t *)rtp_hdr;
int rtp_header_len;
if (*pkt_octet_len < octets_in_rtp_header)
return srtp_err_status_bad_param;
/* Check RTP header length */
rtp_header_len = octets_in_rtp_header + 4 * hdr->cc;
if (hdr->x == 1)
rtp_header_len += octets_in_rtp_extn_hdr;
if (*pkt_octet_len < rtp_header_len)
return srtp_err_status_bad_param;
/* Verifing profile length. */
if (hdr->x == 1) {
srtp_hdr_xtnd_t *xtn_hdr =
(srtp_hdr_xtnd_t *)((uint32_t *)hdr + uint32s_in_rtp_header +
hdr->cc);
int profile_len = ntohs(xtn_hdr->length);
rtp_header_len += profile_len * 4;
/* profile length counts the number of 32-bit words */
if (*pkt_octet_len < rtp_header_len)
return srtp_err_status_bad_param;
}
return srtp_err_status_ok;
}
/*
 * Return the library version string exactly as generated by the
 * autotools build (the SRTP_VER_STRING macro).
 */
const char *srtp_get_version_string()
{
    return SRTP_VER_STRING;
}
/*
 * Return the library version packed into a single word:
 * bits 31-24 = major, bits 23-16 = minor, bits 15-0 = micro.
 * Returns 0 when SRTP_VERSION does not parse as "major.minor.micro"
 * (i.e. this is not an official release).
 */
unsigned int srtp_get_version()
{
    unsigned int major = 0, minor = 0, micro = 0;
    unsigned int rv = 0;
    int parse_rv;

    /*
     * Parse the autotools generated version
     */
    parse_rv = sscanf(SRTP_VERSION, "%u.%u.%u", &major, &minor, &micro);
    if (parse_rv != 3) {
        /*
         * We're expected to parse all 3 version levels.
         * If not, then this must not be an official release.
         * Return all zeros on the version
         */
        return (0);
    }

    /*
     * We allow 8 bits for the major and minor, while
     * allowing 16 bits for the micro.  16 bits for the micro
     * may be beneficial for a continuous delivery model
     * in the future.
     */
    rv |= (major & 0xFF) << 24;
    rv |= (minor & 0xFF) << 16;
    /* Mask must be 0xFFFF to actually grant micro its 16 bits; the
       previous 0xFF mask silently truncated micro to 8 bits. */
    rv |= micro & 0xFFFF;
    return rv;
}
/*
 * srtp_stream_dealloc() releases every resource owned by |stream|.
 * When |stream| was cloned from |stream_template|, members that are
 * shared with the template (same pointer value) are NOT freed here --
 * the template retains ownership; only members that differ from the
 * template's are deallocated.  Passing stream_template == NULL frees
 * everything the stream holds.
 */
srtp_err_status_t srtp_stream_dealloc(srtp_stream_ctx_t *stream,
                                      const srtp_stream_ctx_t *stream_template)
{
    srtp_err_status_t status;
    unsigned int i = 0;
    srtp_session_keys_t *session_keys = NULL;
    srtp_session_keys_t *template_session_keys = NULL;

    /*
     * we use a conservative deallocation strategy - if any deallocation
     * fails, then we report that fact without trying to deallocate
     * anything else
     */
    if (stream->session_keys) {
        for (i = 0; i < stream->num_master_keys; i++) {
            session_keys = &stream->session_keys[i];

            /* Per-index sharing with the template is only meaningful
               when both arrays have the same length. */
            if (stream_template &&
                stream->num_master_keys == stream_template->num_master_keys) {
                template_session_keys = &stream_template->session_keys[i];
            } else {
                template_session_keys = NULL;
            }

            /*
             * deallocate cipher, if it is not the same as that in template
             */
            if (template_session_keys &&
                session_keys->rtp_cipher == template_session_keys->rtp_cipher) {
                /* do nothing */
            } else if (session_keys->rtp_cipher) {
                status = srtp_cipher_dealloc(session_keys->rtp_cipher);
                if (status)
                    return status;
            }

            /*
             * deallocate auth function, if it is not the same as that in
             * template
             */
            if (template_session_keys &&
                session_keys->rtp_auth == template_session_keys->rtp_auth) {
                /* do nothing */
            } else if (session_keys->rtp_auth) {
                status = srtp_auth_dealloc(session_keys->rtp_auth);
                if (status)
                    return status;
            }

            /* deallocate header-extension cipher unless shared with the
               template */
            if (template_session_keys &&
                session_keys->rtp_xtn_hdr_cipher ==
                    template_session_keys->rtp_xtn_hdr_cipher) {
                /* do nothing */
            } else if (session_keys->rtp_xtn_hdr_cipher) {
                status = srtp_cipher_dealloc(session_keys->rtp_xtn_hdr_cipher);
                if (status)
                    return status;
            }

            /*
             * deallocate rtcp cipher, if it is not the same as that in
             * template
             */
            if (template_session_keys &&
                session_keys->rtcp_cipher ==
                    template_session_keys->rtcp_cipher) {
                /* do nothing */
            } else if (session_keys->rtcp_cipher) {
                status = srtp_cipher_dealloc(session_keys->rtcp_cipher);
                if (status)
                    return status;
            }

            /*
             * deallocate rtcp auth function, if it is not the same as that in
             * template
             */
            if (template_session_keys &&
                session_keys->rtcp_auth == template_session_keys->rtcp_auth) {
                /* do nothing */
            } else if (session_keys->rtcp_auth) {
                status = srtp_auth_dealloc(session_keys->rtcp_auth);
                if (status)
                    return status;
            }

            /*
             * zeroize the salt value (key material must not linger in
             * freed memory)
             */
            octet_string_set_to_zero(session_keys->salt, SRTP_AEAD_SALT_LEN);
            octet_string_set_to_zero(session_keys->c_salt, SRTP_AEAD_SALT_LEN);

            /* MKI ids are always owned by the stream: zeroize and free. */
            if (session_keys->mki_id) {
                octet_string_set_to_zero(session_keys->mki_id,
                                         session_keys->mki_size);
                srtp_crypto_free(session_keys->mki_id);
                session_keys->mki_id = NULL;
            }

            /*
             * deallocate key usage limit, if it is not the same as that in
             * template
             */
            if (template_session_keys &&
                session_keys->limit == template_session_keys->limit) {
                /* do nothing */
            } else if (session_keys->limit) {
                srtp_crypto_free(session_keys->limit);
            }
        }
        srtp_crypto_free(stream->session_keys);
    }

    /* The replay database is always per-stream. */
    status = srtp_rdbx_dealloc(&stream->rtp_rdbx);
    if (status)
        return status;

    /* DAM - need to deallocate EKT here */

    /* Encrypted-header-extension id list: free only when not shared
       with the template. */
    if (stream_template &&
        stream->enc_xtn_hdr == stream_template->enc_xtn_hdr) {
        /* do nothing */
    } else if (stream->enc_xtn_hdr) {
        srtp_crypto_free(stream->enc_xtn_hdr);
    }

    /* deallocate srtp stream context */
    srtp_crypto_free(stream);

    return srtp_err_status_ok;
}
/*
 * srtp_stream_alloc(str_ptr, p) allocates a stream context and, for each
 * master key described by policy p, its RTP/RTCP cipher, auth function and
 * key-usage limit, plus the stream's EKT data and (when the policy requests
 * RFC 6904 header encryption) a per-key header-extension cipher.
 *
 * On success *str_ptr points at the new stream and srtp_err_status_ok is
 * returned; on any failure everything allocated so far is released via
 * srtp_stream_dealloc(str, NULL) and an error status is returned.
 */
srtp_err_status_t srtp_stream_alloc(srtp_stream_ctx_t **str_ptr,
                                    const srtp_policy_t *p)
{
    srtp_stream_ctx_t *str;
    srtp_err_status_t stat;
    unsigned int i = 0;
    srtp_session_keys_t *session_keys = NULL;

    /*
     * This function allocates the stream context, rtp and rtcp ciphers
     * and auth functions, and key limit structure. If there is a
     * failure during allocation, we free all previously allocated
     * memory and return a failure code. The code could probably
     * be improved, but it works and should be clear.
     */

    /* allocate srtp stream and set str_ptr */
    str = (srtp_stream_ctx_t *)srtp_crypto_alloc(sizeof(srtp_stream_ctx_t));
    if (str == NULL)
        return srtp_err_status_alloc_fail;

    /* note: *str_ptr is set even though the error paths below free str;
     * callers must treat *str_ptr as invalid when an error is returned */
    *str_ptr = str;

    /*
     * To keep backwards API compatible: if someone is using multiple master
     * keys then key should be set to NULL
     */
    if (p->key != NULL) {
        str->num_master_keys = 1;
    } else {
        str->num_master_keys = p->num_master_keys;
    }

    /* one srtp_session_keys_t slot per master key */
    str->session_keys = (srtp_session_keys_t *)srtp_crypto_alloc(
        sizeof(srtp_session_keys_t) * str->num_master_keys);

    if (str->session_keys == NULL) {
        srtp_stream_dealloc(str, NULL);
        return srtp_err_status_alloc_fail;
    }

    for (i = 0; i < str->num_master_keys; i++) {
        session_keys = &str->session_keys[i];

        /* allocate cipher */
        stat = srtp_crypto_kernel_alloc_cipher(
            p->rtp.cipher_type, &session_keys->rtp_cipher,
            p->rtp.cipher_key_len, p->rtp.auth_tag_len);
        if (stat) {
            srtp_stream_dealloc(str, NULL);
            return stat;
        }

        /* allocate auth function */
        stat = srtp_crypto_kernel_alloc_auth(
            p->rtp.auth_type, &session_keys->rtp_auth, p->rtp.auth_key_len,
            p->rtp.auth_tag_len);
        if (stat) {
            srtp_stream_dealloc(str, NULL);
            return stat;
        }

        /*
         * ...and now the RTCP-specific initialization - first, allocate
         * the cipher
         */
        stat = srtp_crypto_kernel_alloc_cipher(
            p->rtcp.cipher_type, &session_keys->rtcp_cipher,
            p->rtcp.cipher_key_len, p->rtcp.auth_tag_len);
        if (stat) {
            srtp_stream_dealloc(str, NULL);
            return stat;
        }

        /* allocate auth function */
        stat = srtp_crypto_kernel_alloc_auth(
            p->rtcp.auth_type, &session_keys->rtcp_auth, p->rtcp.auth_key_len,
            p->rtcp.auth_tag_len);
        if (stat) {
            srtp_stream_dealloc(str, NULL);
            return stat;
        }

        /* the MKI id is filled in later, when the keys are initialized */
        session_keys->mki_id = NULL;

        /* allocate key limit structure */
        session_keys->limit = (srtp_key_limit_ctx_t *)srtp_crypto_alloc(
            sizeof(srtp_key_limit_ctx_t));
        if (session_keys->limit == NULL) {
            srtp_stream_dealloc(str, NULL);
            return srtp_err_status_alloc_fail;
        }
    }

    /* allocate ekt data associated with stream */
    stat = srtp_ekt_alloc(&str->ekt, p->ekt);
    if (stat) {
        srtp_stream_dealloc(str, NULL);
        return stat;
    }

    if (p->enc_xtn_hdr && p->enc_xtn_hdr_count > 0) {
        srtp_cipher_type_id_t enc_xtn_hdr_cipher_type;
        int enc_xtn_hdr_cipher_key_len;

        /* deep-copy the list of header-extension ids to be encrypted */
        str->enc_xtn_hdr = (int *)srtp_crypto_alloc(p->enc_xtn_hdr_count *
                                                    sizeof(p->enc_xtn_hdr[0]));
        if (!str->enc_xtn_hdr) {
            srtp_stream_dealloc(str, NULL);
            return srtp_err_status_alloc_fail;
        }
        memcpy(str->enc_xtn_hdr, p->enc_xtn_hdr,
               p->enc_xtn_hdr_count * sizeof(p->enc_xtn_hdr[0]));
        str->enc_xtn_hdr_count = p->enc_xtn_hdr_count;

        /*
         * For GCM ciphers, the corresponding ICM cipher is used for header
         * extensions encryption.
         */
        switch (p->rtp.cipher_type) {
        case SRTP_AES_GCM_128:
            enc_xtn_hdr_cipher_type = SRTP_AES_ICM_128;
            enc_xtn_hdr_cipher_key_len = SRTP_AES_ICM_128_KEY_LEN_WSALT;
            break;
        case SRTP_AES_GCM_256:
            enc_xtn_hdr_cipher_type = SRTP_AES_ICM_256;
            enc_xtn_hdr_cipher_key_len = SRTP_AES_ICM_256_KEY_LEN_WSALT;
            break;
        default:
            /* non-GCM ciphers reuse the RTP cipher configuration as-is */
            enc_xtn_hdr_cipher_type = p->rtp.cipher_type;
            enc_xtn_hdr_cipher_key_len = p->rtp.cipher_key_len;
            break;
        }

        for (i = 0; i < str->num_master_keys; i++) {
            session_keys = &str->session_keys[i];

            /* allocate cipher for extensions header encryption */
            stat = srtp_crypto_kernel_alloc_cipher(
                enc_xtn_hdr_cipher_type, &session_keys->rtp_xtn_hdr_cipher,
                enc_xtn_hdr_cipher_key_len, 0);
            if (stat) {
                srtp_stream_dealloc(str, NULL);
                return stat;
            }
        }
    } else {
        /* no header-extension encryption configured for this stream */
        for (i = 0; i < str->num_master_keys; i++) {
            session_keys = &str->session_keys[i];
            session_keys->rtp_xtn_hdr_cipher = NULL;
        }
        str->enc_xtn_hdr = NULL;
        str->enc_xtn_hdr_count = 0;
    }

    return srtp_err_status_ok;
}
/*
 * srtp_stream_clone(stream_template, new) allocates a new stream and
 * initializes it using the cipher and auth of the stream_template
 *
 * the only unique data in a cloned stream is the replay database and
 * the SSRC; ciphers, auth functions, EKT data and the extension-header
 * id list are shared (shallow-copied) with the template, while the MKI
 * ids, salts and key limits are per-clone copies
 */
srtp_err_status_t srtp_stream_clone(const srtp_stream_ctx_t *stream_template,
                                    uint32_t ssrc,
                                    srtp_stream_ctx_t **str_ptr)
{
    srtp_err_status_t status;
    srtp_stream_ctx_t *str;
    unsigned int i = 0;
    srtp_session_keys_t *session_keys = NULL;
    const srtp_session_keys_t *template_session_keys = NULL;

    debug_print(mod_srtp, "cloning stream (SSRC: 0x%08x)", ntohl(ssrc));

    /* allocate srtp stream and set str_ptr */
    str = (srtp_stream_ctx_t *)srtp_crypto_alloc(sizeof(srtp_stream_ctx_t));
    if (str == NULL)
        return srtp_err_status_alloc_fail;
    *str_ptr = str;

    /* the clone carries one session-key slot per template master key */
    str->num_master_keys = stream_template->num_master_keys;
    str->session_keys = (srtp_session_keys_t *)srtp_crypto_alloc(
        sizeof(srtp_session_keys_t) * str->num_master_keys);
    if (str->session_keys == NULL) {
        /* passing the template lets dealloc skip the members shared with
         * it, so only clone-owned memory is freed on this error path */
        srtp_stream_dealloc(*str_ptr, stream_template);
        *str_ptr = NULL;
        return srtp_err_status_alloc_fail;
    }

    for (i = 0; i < stream_template->num_master_keys; i++) {
        session_keys = &str->session_keys[i];
        template_session_keys = &stream_template->session_keys[i];

        /* set cipher and auth pointers to those of the template */
        session_keys->rtp_cipher = template_session_keys->rtp_cipher;
        session_keys->rtp_auth = template_session_keys->rtp_auth;
        session_keys->rtp_xtn_hdr_cipher =
            template_session_keys->rtp_xtn_hdr_cipher;
        session_keys->rtcp_cipher = template_session_keys->rtcp_cipher;
        session_keys->rtcp_auth = template_session_keys->rtcp_auth;
        session_keys->mki_size = template_session_keys->mki_size;

        /* unlike the ciphers above, the MKI id is deep-copied */
        if (template_session_keys->mki_size == 0) {
            session_keys->mki_id = NULL;
        } else {
            session_keys->mki_id =
                srtp_crypto_alloc(template_session_keys->mki_size);

            if (session_keys->mki_id == NULL) {
                srtp_stream_dealloc(*str_ptr, stream_template);
                *str_ptr = NULL;
                return srtp_err_status_init_fail;
            }
            memcpy(session_keys->mki_id, template_session_keys->mki_id,
                   session_keys->mki_size);
        }
        /* Copy the salt values */
        memcpy(session_keys->salt, template_session_keys->salt,
               SRTP_AEAD_SALT_LEN);
        memcpy(session_keys->c_salt, template_session_keys->c_salt,
               SRTP_AEAD_SALT_LEN);

        /* set key limit to point to that of the template */
        status = srtp_key_limit_clone(template_session_keys->limit,
                                      &session_keys->limit);
        if (status) {
            srtp_stream_dealloc(*str_ptr, stream_template);
            *str_ptr = NULL;
            return status;
        }
    }

    /* initialize replay databases; window size is taken from the template */
    status = srtp_rdbx_init(
        &str->rtp_rdbx, srtp_rdbx_get_window_size(&stream_template->rtp_rdbx));
    if (status) {
        srtp_stream_dealloc(*str_ptr, stream_template);
        *str_ptr = NULL;
        return status;
    }
    srtp_rdb_init(&str->rtcp_rdb);
    str->allow_repeat_tx = stream_template->allow_repeat_tx;

    /* set ssrc to that provided */
    str->ssrc = ssrc;

    /* reset pending ROC */
    str->pending_roc = 0;

    /* set direction and security services */
    str->direction = stream_template->direction;
    str->rtp_services = stream_template->rtp_services;
    str->rtcp_services = stream_template->rtcp_services;

    /* set pointer to EKT data associated with stream */
    str->ekt = stream_template->ekt;

    /* copy information about extensions header encryption */
    str->enc_xtn_hdr = stream_template->enc_xtn_hdr;
    str->enc_xtn_hdr_count = stream_template->enc_xtn_hdr_count;

    /* defensive coding */
    str->next = NULL;
    return srtp_err_status_ok;
}
/*
* key derivation functions, internal to libSRTP
*
* srtp_kdf_t is a key derivation context
*
* srtp_kdf_init(&kdf, cipher_id, k, keylen) initializes kdf to use cipher
* described by cipher_id, with the master key k with length in octets keylen.
*
* srtp_kdf_generate(&kdf, l, kl, keylen) derives the key
* corresponding to label l and puts it into kl; the length
* of the key in octets is provided as keylen. this function
* should be called once for each subkey that is derived.
*
* srtp_kdf_clear(&kdf) zeroizes and deallocates the kdf state
*/
/*
 * input labels for the SRTP key derivation function; one subkey is
 * derived per label (see srtp_stream_init_keys()).  Labels 0x06/0x07
 * feed the RFC 6904 header-extension cipher.
 */
typedef enum {
    label_rtp_encryption = 0x00,        /* SRTP encryption key */
    label_rtp_msg_auth = 0x01,          /* SRTP authentication key */
    label_rtp_salt = 0x02,              /* SRTP salt */
    label_rtcp_encryption = 0x03,       /* SRTCP encryption key */
    label_rtcp_msg_auth = 0x04,         /* SRTCP authentication key */
    label_rtcp_salt = 0x05,             /* SRTCP salt */
    label_rtp_header_encryption = 0x06, /* header-extension cipher key */
    label_rtp_header_salt = 0x07        /* header-extension cipher salt */
} srtp_prf_label;
#define MAX_SRTP_KEY_LEN 256
#if defined(OPENSSL) && defined(OPENSSL_KDF)
#define MAX_SRTP_AESKEY_LEN 32
#define MAX_SRTP_SALT_LEN 14
/*
* srtp_kdf_t represents a key derivation function. The SRTP
* default KDF is the only one implemented at present.
*/
typedef struct {
    uint8_t master_key[MAX_SRTP_AESKEY_LEN]; /* cached master key (no salt) */
    uint8_t master_salt[MAX_SRTP_SALT_LEN];  /* cached master salt */
    const EVP_CIPHER *evp; /* AES-CTR variant matching the key size;
                            * NULL means the NULL cipher (no derivation) */
} srtp_kdf_t;
/*
 * Initialize the OpenSSL-backed KDF: cache the master key and salt and
 * select the AES-CTR variant that matches the master key size.  A zero
 * key length (NULL cipher) succeeds with no cipher selected.
 */
static srtp_err_status_t srtp_kdf_init(srtp_kdf_t *kdf,
                                       const uint8_t *key,
                                       int key_len,
                                       int salt_len)
{
    /* start from a clean state so no stale key material survives */
    memset(kdf, 0x0, sizeof(srtp_kdf_t));

    /* The NULL cipher has zero key length */
    if (key_len == 0)
        return srtp_err_status_ok;

    if ((key_len > MAX_SRTP_AESKEY_LEN) || (salt_len > MAX_SRTP_SALT_LEN)) {
        return srtp_err_status_bad_param;
    }

    /* pick the AES-CTR flavor that matches the master key size */
    if (key_len == SRTP_AES_256_KEYSIZE) {
        kdf->evp = EVP_aes_256_ctr();
    } else if (key_len == SRTP_AES_192_KEYSIZE) {
        kdf->evp = EVP_aes_192_ctr();
    } else if (key_len == SRTP_AES_128_KEYSIZE) {
        kdf->evp = EVP_aes_128_ctr();
    } else {
        return srtp_err_status_bad_param;
    }

    /* the salt is stored immediately after the key in the input buffer */
    memcpy(kdf->master_key, key, key_len);
    memcpy(kdf->master_salt, key + key_len, salt_len);

    return srtp_err_status_ok;
}
/*
 * Derive the subkey for the given label into 'key' (length octets),
 * delegating the actual derivation to OpenSSL's SRTP KDF.  This keeps
 * the derivation inside OpenSSL, which matters when FIPS compliance
 * is required.
 */
static srtp_err_status_t srtp_kdf_generate(srtp_kdf_t *kdf,
                                           srtp_prf_label label,
                                           uint8_t *key,
                                           unsigned int length)
{
    int rc;

    /* The NULL cipher will not have an EVP */
    if (!kdf->evp)
        return srtp_err_status_ok;

    /* hand OpenSSL a zeroed output buffer */
    octet_string_set_to_zero(key, length);

    rc = kdf_srtp(kdf->evp, (char *)&kdf->master_key,
                  (char *)&kdf->master_salt, NULL, NULL, label, (char *)key);

    return (rc == -1) ? srtp_err_status_algo_fail : srtp_err_status_ok;
}
/* zeroize the cached master key/salt and drop the cipher handle */
static srtp_err_status_t srtp_kdf_clear(srtp_kdf_t *kdf)
{
    octet_string_set_to_zero(kdf->master_key, MAX_SRTP_AESKEY_LEN);
    octet_string_set_to_zero(kdf->master_salt, MAX_SRTP_SALT_LEN);
    kdf->evp = NULL;

    return srtp_err_status_ok;
}
#else /* if OPENSSL_KDF */
/*
* srtp_kdf_t represents a key derivation function. The SRTP
* default KDF is the only one implemented at present.
*/
/* default (non-OpenSSL) KDF state: a single keyed ICM cipher acts as PRF */
typedef struct {
    srtp_cipher_t *cipher; /* cipher used for key derivation */
} srtp_kdf_t;
/*
 * Initialize the default KDF: allocate and key an AES-ICM cipher whose
 * variant is selected by the combined key+salt length.  Returns
 * srtp_err_status_bad_param for unsupported lengths; on a keying
 * failure the freshly allocated cipher is released before returning.
 */
static srtp_err_status_t srtp_kdf_init(srtp_kdf_t *kdf,
                                       const uint8_t *key,
                                       int key_len)
{
    srtp_cipher_type_id_t id;
    srtp_err_status_t rc;

    /* the combined key+salt length selects the ICM variant used as PRF */
    if (key_len == SRTP_AES_ICM_256_KEY_LEN_WSALT) {
        id = SRTP_AES_ICM_256;
    } else if (key_len == SRTP_AES_ICM_192_KEY_LEN_WSALT) {
        id = SRTP_AES_ICM_192;
    } else if (key_len == SRTP_AES_ICM_128_KEY_LEN_WSALT) {
        id = SRTP_AES_ICM_128;
    } else {
        return srtp_err_status_bad_param;
    }

    rc = srtp_crypto_kernel_alloc_cipher(id, &kdf->cipher, key_len, 0);
    if (rc)
        return rc;

    rc = srtp_cipher_init(kdf->cipher, key);
    if (rc) {
        /* don't leak the cipher we just allocated */
        srtp_cipher_dealloc(kdf->cipher);
        return rc;
    }

    return srtp_err_status_ok;
}
/*
 * Derive the subkey for the given label into 'key' (length octets).
 * The derived key is the keystream the KDF cipher produces over a
 * zeroed buffer, using a nonce that is all zero except for its eighth
 * octet, which carries the label.
 */
static srtp_err_status_t srtp_kdf_generate(srtp_kdf_t *kdf,
                                           srtp_prf_label label,
                                           uint8_t *key,
                                           unsigned int length)
{
    srtp_err_status_t rc;
    v128_t nonce;

    /* build the label-bearing nonce and load it as the cipher IV */
    v128_set_to_zero(&nonce);
    nonce.v8[7] = label;

    rc = srtp_cipher_set_iv(kdf->cipher, (uint8_t *)&nonce,
                            srtp_direction_encrypt);
    if (rc)
        return rc;

    /* encrypting zeros yields the raw keystream, i.e. the subkey */
    octet_string_set_to_zero(key, length);
    rc = srtp_cipher_encrypt(kdf->cipher, key, &length);
    if (rc)
        return rc;

    return srtp_err_status_ok;
}
/* deallocate the KDF cipher and clear the now-dangling pointer */
static srtp_err_status_t srtp_kdf_clear(srtp_kdf_t *kdf)
{
    srtp_err_status_t status;
    status = srtp_cipher_dealloc(kdf->cipher);
    if (status)
        return status;
    kdf->cipher = NULL;

    return srtp_err_status_ok;
}
#endif /* else OPENSSL_KDF */
/*
* end of key derivation functions
*/
/* Get the base key length corresponding to a given combined key+salt
 * length for the given cipher.
 * TODO: key and salt lengths should be separate fields in the policy. */
static inline int base_key_length(const srtp_cipher_type_t *cipher,
                                  int key_length)
{
    switch (cipher->id) {
    case SRTP_AES_ICM_128:
    case SRTP_AES_ICM_192:
    case SRTP_AES_ICM_256:
        /* The legacy ICM modes are derived from the configured key
         * length on the policy, which includes the 112-bit ICM salt */
        return key_length - SRTP_SALT_LEN;
    case SRTP_AES_GCM_128:
    case SRTP_AES_GCM_256:
        /* both GCM modes carry the shorter (96-bit) AEAD salt */
        return key_length - SRTP_AEAD_SALT_LEN;
    default:
        /* all other ciphers use the configured length unmodified */
        return key_length;
    }
}
/*
 * Validate the master-key configuration of a policy.  Returns 1 when
 * the policy either carries a single key via policy->key, or carries a
 * non-empty, bounded list of keys each of which has key material and a
 * legal MKI size; returns 0 otherwise.
 */
unsigned int srtp_validate_policy_master_keys(const srtp_policy_t *policy)
{
    unsigned long idx;

    /* a single master key set via policy->key needs no further checks */
    if (policy->key != NULL)
        return 1;

    /* multiple keys: count must be in (0, SRTP_MAX_NUM_MASTER_KEYS] */
    if (policy->num_master_keys <= 0 ||
        policy->num_master_keys > SRTP_MAX_NUM_MASTER_KEYS)
        return 0;

    /* every entry needs key material and an in-range MKI size */
    for (idx = 0; idx < policy->num_master_keys; idx++) {
        if (policy->keys[idx]->key == NULL)
            return 0;
        if (policy->keys[idx]->mki_size > SRTP_MAX_MKI_LEN)
            return 0;
    }

    return 1;
}
/*
 * Return the session-key slot selected by mki_index when MKI is in
 * use, or the first (and only) slot otherwise.  Returns NULL when the
 * index does not name a configured master key.
 */
srtp_session_keys_t *srtp_get_session_keys_with_mki_index(
    srtp_stream_ctx_t *stream,
    unsigned int use_mki,
    unsigned int mki_index)
{
    /* without MKI, the first key set is always in effect */
    if (!use_mki) {
        return &stream->session_keys[0];
    }

    /* with MKI, the index must be within the configured key count */
    return (mki_index < stream->num_master_keys)
               ? &stream->session_keys[mki_index]
               : NULL;
}
/*
 * Copy the session's MKI id into the packet at mki_tag_location when
 * MKI is in use.  Returns the number of octets written (0 when MKI is
 * disabled or this key set has no MKI configured).
 */
unsigned int srtp_inject_mki(uint8_t *mki_tag_location,
                             srtp_session_keys_t *session_keys,
                             unsigned int use_mki)
{
    unsigned int written = 0;

    /* nothing to write unless MKI is enabled and this key set has one */
    if (use_mki && session_keys->mki_size != 0) {
        written = session_keys->mki_size;
        memcpy(mki_tag_location, session_keys->mki_id, written);
    }

    return written;
}
/*
 * Initialize every master key of the stream.  A non-NULL 'key' selects
 * the legacy single-key API (one key, no MKI); otherwise each entry of
 * 'keys' is initialized in turn, stopping at the first failure.  The
 * loop never runs past SRTP_MAX_NUM_MASTER_KEYS.
 */
srtp_err_status_t srtp_stream_init_all_master_keys(
    srtp_stream_ctx_t *srtp,
    unsigned char *key,
    srtp_master_key_t **keys,
    const unsigned int max_master_keys)
{
    srtp_err_status_t rc = srtp_err_status_ok;

    if (key != NULL) {
        /* legacy API: wrap the raw key in a master-key record with no MKI */
        srtp_master_key_t legacy_key;
        legacy_key.key = key;
        legacy_key.mki_id = NULL;
        legacy_key.mki_size = 0;

        srtp->num_master_keys = 1;
        rc = srtp_stream_init_keys(srtp, &legacy_key, 0);
    } else {
        unsigned int idx;

        srtp->num_master_keys = max_master_keys;
        for (idx = 0;
             idx < srtp->num_master_keys && idx < SRTP_MAX_NUM_MASTER_KEYS;
             idx++) {
            rc = srtp_stream_init_keys(srtp, keys[idx], idx);
            if (rc) {
                return rc;
            }
        }
    }

    return rc;
}
/*
 * srtp_stream_init_keys(srtp, master_key, current_mki_index) derives all
 * session keys for one master key -- RTP/RTCP cipher keys, auth keys and
 * salts, plus the optional RFC 6904 header-extension key -- and uses them
 * to key the ciphers and auth functions of the session-key slot named by
 * current_mki_index.  The temporary key buffer is zeroized before every
 * return, on success and error paths alike.
 */
srtp_err_status_t srtp_stream_init_keys(srtp_stream_ctx_t *srtp,
                                        srtp_master_key_t *master_key,
                                        const unsigned int current_mki_index)
{
    srtp_err_status_t stat;
    srtp_kdf_t kdf;
    uint8_t tmp_key[MAX_SRTP_KEY_LEN];
    int kdf_keylen = 30, rtp_keylen, rtcp_keylen;
    int rtp_base_key_len, rtp_salt_len;
    int rtcp_base_key_len, rtcp_salt_len;
    srtp_session_keys_t *session_keys = NULL;
    unsigned char *key = master_key->key;

    /* If RTP or RTCP have a key length > AES-128, assume matching kdf. */
    /* TODO: kdf algorithm, master key length, and master salt length should
     * be part of srtp_policy_t.
     */
    session_keys = &srtp->session_keys[current_mki_index];

/* initialize key limit to maximum value */
#ifdef NO_64BIT_MATH
    {
        uint64_t temp;
        temp = make64(UINT_MAX, UINT_MAX);
        srtp_key_limit_set(session_keys->limit, temp);
    }
#else
    srtp_key_limit_set(session_keys->limit, 0xffffffffffffLL);
#endif

    /* deep-copy the MKI id for this key slot, if one was supplied */
    if (master_key->mki_size != 0) {
        session_keys->mki_id = srtp_crypto_alloc(master_key->mki_size);

        if (session_keys->mki_id == NULL) {
            return srtp_err_status_init_fail;
        }
        memcpy(session_keys->mki_id, master_key->mki_id, master_key->mki_size);
    } else {
        session_keys->mki_id = NULL;
    }
    session_keys->mki_size = master_key->mki_size;

    /* combined key+salt lengths as configured on the session ciphers */
    rtp_keylen = srtp_cipher_get_key_length(session_keys->rtp_cipher);
    rtcp_keylen = srtp_cipher_get_key_length(session_keys->rtcp_cipher);
    rtp_base_key_len =
        base_key_length(session_keys->rtp_cipher->type, rtp_keylen);
    rtp_salt_len = rtp_keylen - rtp_base_key_len;

    if (rtp_keylen > kdf_keylen) {
        kdf_keylen = 46; /* AES-CTR mode is always used for KDF */
    }

    if (rtcp_keylen > kdf_keylen) {
        kdf_keylen = 46; /* AES-CTR mode is always used for KDF */
    }

    debug_print(mod_srtp, "srtp key len: %d", rtp_keylen);
    debug_print(mod_srtp, "srtcp key len: %d", rtcp_keylen);
    debug_print(mod_srtp, "base key len: %d", rtp_base_key_len);
    debug_print(mod_srtp, "kdf key len: %d", kdf_keylen);
    debug_print(mod_srtp, "rtp salt len: %d", rtp_salt_len);

    /*
     * Make sure the key given to us is 'zero' appended. GCM
     * mode uses a shorter master SALT (96 bits), but still relies on
     * the legacy CTR mode KDF, which uses a 112 bit master SALT.
     */
    memset(tmp_key, 0x0, MAX_SRTP_KEY_LEN);
    memcpy(tmp_key, key, (rtp_base_key_len + rtp_salt_len));

/* initialize KDF state */
#if defined(OPENSSL) && defined(OPENSSL_KDF)
    stat = srtp_kdf_init(&kdf, (const uint8_t *)tmp_key, rtp_base_key_len,
                         rtp_salt_len);
#else
    stat = srtp_kdf_init(&kdf, (const uint8_t *)tmp_key, kdf_keylen);
#endif
    if (stat) {
        /* zeroize temp buffer */
        octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
        return srtp_err_status_init_fail;
    }

    /* generate encryption key */
    stat = srtp_kdf_generate(&kdf, label_rtp_encryption, tmp_key,
                             rtp_base_key_len);
    if (stat) {
        /* zeroize temp buffer */
        octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
        return srtp_err_status_init_fail;
    }
    debug_print(mod_srtp, "cipher key: %s",
                srtp_octet_string_hex_string(tmp_key, rtp_base_key_len));

    /*
     * if the cipher in the srtp context uses a salt, then we need
     * to generate the salt value
     */
    if (rtp_salt_len > 0) {
        debug_print0(mod_srtp, "found rtp_salt_len > 0, generating salt");

        /* generate encryption salt, put after encryption key */
        stat = srtp_kdf_generate(&kdf, label_rtp_salt,
                                 tmp_key + rtp_base_key_len, rtp_salt_len);
        if (stat) {
            /* zeroize temp buffer */
            octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
            return srtp_err_status_init_fail;
        }
        /* keep the (possibly truncated) salt for AEAD IV formation */
        memcpy(session_keys->salt, tmp_key + rtp_base_key_len,
               SRTP_AEAD_SALT_LEN);
    }
    if (rtp_salt_len > 0) {
        debug_print(mod_srtp, "cipher salt: %s",
                    srtp_octet_string_hex_string(tmp_key + rtp_base_key_len,
                                                 rtp_salt_len));
    }

    /* initialize cipher */
    stat = srtp_cipher_init(session_keys->rtp_cipher, tmp_key);
    if (stat) {
        /* zeroize temp buffer */
        octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
        return srtp_err_status_init_fail;
    }

    if (session_keys->rtp_xtn_hdr_cipher) {
        /* generate extensions header encryption key */
        int rtp_xtn_hdr_keylen;
        int rtp_xtn_hdr_base_key_len;
        int rtp_xtn_hdr_salt_len;
        srtp_kdf_t tmp_kdf;
        srtp_kdf_t *xtn_hdr_kdf;

        if (session_keys->rtp_xtn_hdr_cipher->type !=
            session_keys->rtp_cipher->type) {
            /*
             * With GCM ciphers, the header extensions are still encrypted using
             * the corresponding ICM cipher.
             * See https://tools.ietf.org/html/rfc7714#section-8.3
             */
            uint8_t tmp_xtn_hdr_key[MAX_SRTP_KEY_LEN];
            rtp_xtn_hdr_keylen =
                srtp_cipher_get_key_length(session_keys->rtp_xtn_hdr_cipher);
            rtp_xtn_hdr_base_key_len = base_key_length(
                session_keys->rtp_xtn_hdr_cipher->type, rtp_xtn_hdr_keylen);
            rtp_xtn_hdr_salt_len =
                rtp_xtn_hdr_keylen - rtp_xtn_hdr_base_key_len;
            if (rtp_xtn_hdr_salt_len > rtp_salt_len) {
                switch (session_keys->rtp_cipher->type->id) {
                case SRTP_AES_GCM_128:
                case SRTP_AES_GCM_256:
                    /*
                     * The shorter GCM salt is padded to the required ICM salt
                     * length.
                     */
                    rtp_xtn_hdr_salt_len = rtp_salt_len;
                    break;
                default:
                    /* zeroize temp buffer */
                    octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
                    return srtp_err_status_bad_param;
                }
            }
            memset(tmp_xtn_hdr_key, 0x0, MAX_SRTP_KEY_LEN);
            memcpy(tmp_xtn_hdr_key, key,
                   (rtp_xtn_hdr_base_key_len + rtp_xtn_hdr_salt_len));
            xtn_hdr_kdf = &tmp_kdf;

/* initialize KDF state */
#if defined(OPENSSL) && defined(OPENSSL_KDF)
            stat =
                srtp_kdf_init(xtn_hdr_kdf, (const uint8_t *)tmp_xtn_hdr_key,
                              rtp_xtn_hdr_base_key_len, rtp_xtn_hdr_salt_len);
#else
            stat = srtp_kdf_init(xtn_hdr_kdf, (const uint8_t *)tmp_xtn_hdr_key,
                                 kdf_keylen);
#endif
            /* the local key copy is no longer needed; scrub it */
            octet_string_set_to_zero(tmp_xtn_hdr_key, MAX_SRTP_KEY_LEN);
            if (stat) {
                /* zeroize temp buffer */
                octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
                return srtp_err_status_init_fail;
            }
        } else {
            /* Reuse main KDF. */
            rtp_xtn_hdr_keylen = rtp_keylen;
            rtp_xtn_hdr_base_key_len = rtp_base_key_len;
            rtp_xtn_hdr_salt_len = rtp_salt_len;
            xtn_hdr_kdf = &kdf;
        }

        stat = srtp_kdf_generate(xtn_hdr_kdf, label_rtp_header_encryption,
                                 tmp_key, rtp_xtn_hdr_base_key_len);
        if (stat) {
            /* zeroize temp buffer */
            octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
            return srtp_err_status_init_fail;
        }
        debug_print(
            mod_srtp, "extensions cipher key: %s",
            srtp_octet_string_hex_string(tmp_key, rtp_xtn_hdr_base_key_len));

        /*
         * if the cipher in the srtp context uses a salt, then we need
         * to generate the salt value
         */
        if (rtp_xtn_hdr_salt_len > 0) {
            debug_print0(mod_srtp,
                         "found rtp_xtn_hdr_salt_len > 0, generating salt");

            /* generate encryption salt, put after encryption key */
            stat = srtp_kdf_generate(xtn_hdr_kdf, label_rtp_header_salt,
                                     tmp_key + rtp_xtn_hdr_base_key_len,
                                     rtp_xtn_hdr_salt_len);
            if (stat) {
                /* zeroize temp buffer */
                octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
                return srtp_err_status_init_fail;
            }
        }
        if (rtp_xtn_hdr_salt_len > 0) {
            debug_print(
                mod_srtp, "extensions cipher salt: %s",
                srtp_octet_string_hex_string(tmp_key + rtp_xtn_hdr_base_key_len,
                                             rtp_xtn_hdr_salt_len));
        }

        /* initialize extensions header cipher */
        stat = srtp_cipher_init(session_keys->rtp_xtn_hdr_cipher, tmp_key);
        if (stat) {
            /* zeroize temp buffer */
            octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
            return srtp_err_status_init_fail;
        }

        if (xtn_hdr_kdf != &kdf) {
            /* release memory for custom header extension encryption kdf */
            stat = srtp_kdf_clear(xtn_hdr_kdf);
            if (stat) {
                /* zeroize temp buffer */
                octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
                return srtp_err_status_init_fail;
            }
        }
    }

    /* generate authentication key */
    stat = srtp_kdf_generate(&kdf, label_rtp_msg_auth, tmp_key,
                             srtp_auth_get_key_length(session_keys->rtp_auth));
    if (stat) {
        /* zeroize temp buffer */
        octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
        return srtp_err_status_init_fail;
    }
    debug_print(mod_srtp, "auth key: %s",
                srtp_octet_string_hex_string(
                    tmp_key, srtp_auth_get_key_length(session_keys->rtp_auth)));

    /* initialize auth function */
    stat = srtp_auth_init(session_keys->rtp_auth, tmp_key);
    if (stat) {
        /* zeroize temp buffer */
        octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
        return srtp_err_status_init_fail;
    }

    /*
     * ...now initialize SRTCP keys
     */

    rtcp_base_key_len =
        base_key_length(session_keys->rtcp_cipher->type, rtcp_keylen);
    rtcp_salt_len = rtcp_keylen - rtcp_base_key_len;
    debug_print(mod_srtp, "rtcp salt len: %d", rtcp_salt_len);

    /* generate encryption key */
    stat = srtp_kdf_generate(&kdf, label_rtcp_encryption, tmp_key,
                             rtcp_base_key_len);
    if (stat) {
        /* zeroize temp buffer */
        octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
        return srtp_err_status_init_fail;
    }

    /*
     * if the cipher in the srtp context uses a salt, then we need
     * to generate the salt value
     */
    if (rtcp_salt_len > 0) {
        debug_print0(mod_srtp, "found rtcp_salt_len > 0, generating rtcp salt");

        /* generate encryption salt, put after encryption key */
        stat = srtp_kdf_generate(&kdf, label_rtcp_salt,
                                 tmp_key + rtcp_base_key_len, rtcp_salt_len);
        if (stat) {
            /* zeroize temp buffer */
            octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
            return srtp_err_status_init_fail;
        }
        /* keep the RTCP salt for AEAD IV formation */
        memcpy(session_keys->c_salt, tmp_key + rtcp_base_key_len,
               SRTP_AEAD_SALT_LEN);
    }
    debug_print(mod_srtp, "rtcp cipher key: %s",
                srtp_octet_string_hex_string(tmp_key, rtcp_base_key_len));
    if (rtcp_salt_len > 0) {
        debug_print(mod_srtp, "rtcp cipher salt: %s",
                    srtp_octet_string_hex_string(tmp_key + rtcp_base_key_len,
                                                 rtcp_salt_len));
    }

    /* initialize cipher */
    stat = srtp_cipher_init(session_keys->rtcp_cipher, tmp_key);
    if (stat) {
        /* zeroize temp buffer */
        octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
        return srtp_err_status_init_fail;
    }

    /* generate authentication key */
    stat = srtp_kdf_generate(&kdf, label_rtcp_msg_auth, tmp_key,
                             srtp_auth_get_key_length(session_keys->rtcp_auth));
    if (stat) {
        /* zeroize temp buffer */
        octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
        return srtp_err_status_init_fail;
    }

    debug_print(
        mod_srtp, "rtcp auth key: %s",
        srtp_octet_string_hex_string(
            tmp_key, srtp_auth_get_key_length(session_keys->rtcp_auth)));

    /* initialize auth function */
    stat = srtp_auth_init(session_keys->rtcp_auth, tmp_key);
    if (stat) {
        /* zeroize temp buffer */
        octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
        return srtp_err_status_init_fail;
    }

    /* clear memory then return */
    stat = srtp_kdf_clear(&kdf);
    octet_string_set_to_zero(tmp_key, MAX_SRTP_KEY_LEN);
    if (stat)
        return srtp_err_status_init_fail;

    return srtp_err_status_ok;
}
/*
 * srtp_stream_init(srtp, p) initializes an already-allocated stream
 * context from policy p: RTP/RTCP replay databases, SSRC, security
 * services, session keys (via srtp_stream_init_all_master_keys) and
 * EKT state.  On any failure after the RTP replay database has been
 * set up, that database is deallocated before the error is returned.
 */
srtp_err_status_t srtp_stream_init(srtp_stream_ctx_t *srtp,
                                   const srtp_policy_t *p)
{
    srtp_err_status_t err;

    debug_print(mod_srtp, "initializing stream (SSRC: 0x%08x)", p->ssrc.value);

    /* initialize replay database */
    /*
     * window size MUST be at least 64. MAY be larger. Values more than
     * 2^15 aren't meaningful due to how extended sequence numbers are
     * calculated.
     * Let a window size of 0 imply the default value.
     */
    if (p->window_size != 0 &&
        (p->window_size < 64 || p->window_size >= 0x8000))
        return srtp_err_status_bad_param;

    if (p->window_size != 0)
        err = srtp_rdbx_init(&srtp->rtp_rdbx, p->window_size);
    else
        err = srtp_rdbx_init(&srtp->rtp_rdbx, 128); /* default window size */
    if (err)
        return err;

    /* set the SSRC value */
    srtp->ssrc = htonl(p->ssrc.value);

    /* reset pending ROC */
    srtp->pending_roc = 0;

    /* set the security service flags */
    srtp->rtp_services = p->rtp.sec_serv;
    srtp->rtcp_services = p->rtcp.sec_serv;

    /*
     * set direction to unknown - this flag gets checked in srtp_protect(),
     * srtp_unprotect(), srtp_protect_rtcp(), and srtp_unprotect_rtcp(), and
     * gets set appropriately if it is set to unknown.
     */
    srtp->direction = dir_unknown;

    /* initialize SRTCP replay database */
    srtp_rdb_init(&srtp->rtcp_rdb);

    /* initialize allow_repeat_tx */
    /* guard against uninitialized memory: allow only 0 or 1 here */
    if (p->allow_repeat_tx != 0 && p->allow_repeat_tx != 1) {
        srtp_rdbx_dealloc(&srtp->rtp_rdbx);
        return srtp_err_status_bad_param;
    }
    srtp->allow_repeat_tx = p->allow_repeat_tx;

    /* DAM - no RTCP key limit at present */

    /* initialize keys */
    err = srtp_stream_init_all_master_keys(srtp, p->key, p->keys,
                                           p->num_master_keys);
    if (err) {
        srtp_rdbx_dealloc(&srtp->rtp_rdbx);
        return err;
    }

    /*
     * if EKT is in use, then initialize the EKT data associated with
     * the stream
     */
    err = srtp_ekt_stream_init_from_policy(srtp->ekt, p->ekt);
    if (err) {
        srtp_rdbx_dealloc(&srtp->rtp_rdbx);
        return err;
    }

    return srtp_err_status_ok;
}
/*
 * srtp_event_reporter is an event handler function that merely
 * reports the events that are reported by the callbacks
 */
void srtp_event_reporter(srtp_event_data_t *data)
{
    const char *detail;

    /* pick the message text that matches the reported event */
    switch (data->event) {
    case event_ssrc_collision:
        detail = "\tSSRC collision\n";
        break;
    case event_key_soft_limit:
        detail = "\tkey usage soft limit reached\n";
        break;
    case event_key_hard_limit:
        detail = "\tkey usage hard limit reached\n";
        break;
    case event_packet_index_limit:
        detail = "\tpacket index limit reached\n";
        break;
    default:
        detail = "\tunknown event reported to handler\n";
        break;
    }

    srtp_err_report(srtp_err_level_warning, "srtp: in stream 0x%x: ",
                    data->ssrc);
    srtp_err_report(srtp_err_level_warning, "%s", detail);
}
/*
* srtp_event_handler is a global variable holding a pointer to the
* event handler function; this function is called for any unexpected
* event that needs to be handled out of the SRTP data path. see
* srtp_event_t in srtp.h for more info
*
* it is okay to set srtp_event_handler to NULL, but we set
* it to the srtp_event_reporter.
*/
static srtp_event_handler_func_t *srtp_event_handler = srtp_event_reporter;
/* install 'func' as the global SRTP event handler; always succeeds */
srtp_err_status_t srtp_install_event_handler(srtp_event_handler_func_t func)
{
    /*
     * note that we accept NULL arguments intentionally - calling this
     * function with a NULL argument removes an event handler that's
     * been previously installed
     */

    /* set global event handling function */
    srtp_event_handler = func;
    return srtp_err_status_ok;
}
/*
 * Check if the given extension header id is / should be encrypted.
 * Returns 1 if yes, otherwise 0.
 */
static int srtp_protect_extension_header(srtp_stream_ctx_t *stream, int id)
{
    const int *ids = stream->enc_xtn_hdr;
    int n = stream->enc_xtn_hdr_count;
    int k;

    /* an absent or empty id list means nothing is encrypted */
    if (ids == NULL || n <= 0) {
        return 0;
    }

    for (k = 0; k < n; k++) {
        if (ids[k] == id) {
            return 1;
        }
    }

    return 0;
}
/*
 * extensions header encryption RFC 6904
 *
 * Walks the RTP extension header (one-byte 0xbede or two-byte 0x100x
 * element format, RFC 5285) and XORs the data of each element whose id
 * is configured for encryption with keystream from rtp_xtn_hdr_cipher.
 * Keystream is generated for every element -- header bytes included --
 * so the cipher position tracks the packet layout whether or not a
 * given element is actually encrypted.
 */
static srtp_err_status_t srtp_process_header_encryption(
    srtp_stream_ctx_t *stream,
    srtp_hdr_xtnd_t *xtn_hdr,
    srtp_session_keys_t *session_keys)
{
    srtp_err_status_t status;
    uint8_t keystream[257]; /* Maximum 2 bytes header + 255 bytes data. */
    int keystream_pos;
    uint8_t *xtn_hdr_data = ((uint8_t *)xtn_hdr) + octets_in_rtp_extn_hdr;
    /* xtn_hdr->length counts 32-bit words of extension payload */
    uint8_t *xtn_hdr_end =
        xtn_hdr_data + (ntohs(xtn_hdr->length) * sizeof(uint32_t));

    if (ntohs(xtn_hdr->profile_specific) == 0xbede) {
        /* RFC 5285, section 4.2. One-Byte Header */
        while (xtn_hdr_data < xtn_hdr_end) {
            /* element byte: id in the high nibble, (len - 1) in the low */
            uint8_t xid = (*xtn_hdr_data & 0xf0) >> 4;
            unsigned int xlen = (*xtn_hdr_data & 0x0f) + 1;
            uint32_t xlen_with_header = 1 + xlen;
            xtn_hdr_data++;

            if (xtn_hdr_data + xlen > xtn_hdr_end)
                return srtp_err_status_parse_err;

            if (xid == 15) {
                /* found header 15, stop further processing. */
                break;
            }

            /* keystream covers the full element (header + data) */
            status = srtp_cipher_output(session_keys->rtp_xtn_hdr_cipher,
                                        keystream, &xlen_with_header);
            if (status)
                return srtp_err_status_cipher_fail;

            if (srtp_protect_extension_header(stream, xid)) {
                /* skip the keystream byte mapped to the element header */
                keystream_pos = 1;
                while (xlen > 0) {
                    *xtn_hdr_data ^= keystream[keystream_pos++];
                    xtn_hdr_data++;
                    xlen--;
                }
            } else {
                xtn_hdr_data += xlen;
            }

            /* skip padding bytes. */
            while (xtn_hdr_data < xtn_hdr_end && *xtn_hdr_data == 0) {
                xtn_hdr_data++;
            }
        }
    } else if ((ntohs(xtn_hdr->profile_specific) & 0xfff0) == 0x1000) {
        /* RFC 5285, section 4.3. Two-Byte Header */
        while (xtn_hdr_data + 1 < xtn_hdr_end) {
            /* first byte is the id, second is the data length */
            uint8_t xid = *xtn_hdr_data;
            unsigned int xlen = *(xtn_hdr_data + 1);
            uint32_t xlen_with_header = 2 + xlen;
            xtn_hdr_data += 2;

            if (xtn_hdr_data + xlen > xtn_hdr_end)
                return srtp_err_status_parse_err;

            status = srtp_cipher_output(session_keys->rtp_xtn_hdr_cipher,
                                        keystream, &xlen_with_header);
            if (status)
                return srtp_err_status_cipher_fail;

            if (xlen > 0 && srtp_protect_extension_header(stream, xid)) {
                /* skip the two keystream bytes covering the header */
                keystream_pos = 2;
                while (xlen > 0) {
                    *xtn_hdr_data ^= keystream[keystream_pos++];
                    xtn_hdr_data++;
                    xlen--;
                }
            } else {
                xtn_hdr_data += xlen;
            }

            /* skip padding bytes. */
            while (xtn_hdr_data < xtn_hdr_end && *xtn_hdr_data == 0) {
                xtn_hdr_data++;
            }
        }
    } else {
        /* unsupported extension header format. */
        return srtp_err_status_parse_err;
    }

    return srtp_err_status_ok;
}
/*
* AEAD uses a new IV formation method. This function implements
* section 8.1. (SRTP IV Formation for AES-GCM) of RFC7714.
* The calculation is defined as, where (+) is the xor operation:
*
*
* 0 0 0 0 0 0 0 0 0 0 1 1
* 0 1 2 3 4 5 6 7 8 9 0 1
* +--+--+--+--+--+--+--+--+--+--+--+--+
* |00|00| SSRC | ROC | SEQ |---+
* +--+--+--+--+--+--+--+--+--+--+--+--+ |
* |
* +--+--+--+--+--+--+--+--+--+--+--+--+ |
* | Encryption Salt |->(+)
* +--+--+--+--+--+--+--+--+--+--+--+--+ |
* |
* +--+--+--+--+--+--+--+--+--+--+--+--+ |
* | Initialization Vector |<--+
* +--+--+--+--+--+--+--+--+--+--+--+--+*
*
* Input: *session_keys - pointer to SRTP stream context session keys,
* used to retrieve the SALT
* *iv - Pointer to receive the calculated IV
* *seq - The ROC and SEQ value to use for the
* IV calculation.
* *hdr - The RTP header, used to get the SSRC value
*
*/
static void srtp_calc_aead_iv(srtp_session_keys_t *session_keys,
                              v128_t *iv,
                              srtp_xtd_seq_num_t *seq,
                              srtp_hdr_t *hdr)
{
    v128_t in;   /* IV input block: 00 00 | SSRC | ROC | SEQ */
    v128_t salt; /* encryption salt, zero-padded to 128 bits */

/* split the extended sequence number into its ROC and SEQ parts */
#ifdef NO_64BIT_MATH
    uint32_t local_roc = ((high32(*seq) << 16) | (low32(*seq) >> 16));
    uint16_t local_seq = (uint16_t)(low32(*seq));
#else
    uint32_t local_roc = (uint32_t)(*seq >> 16);
    uint16_t local_seq = (uint16_t)*seq;
#endif

    memset(&in, 0, sizeof(v128_t));
    memset(&salt, 0, sizeof(v128_t));

    /* SEQ occupies octets 10-11, ROC (network order) octets 6-9 */
    in.v16[5] = htons(local_seq);
    local_roc = htonl(local_roc);
    memcpy(&in.v16[3], &local_roc, sizeof(local_roc));

    /*
     * Copy in the RTP SSRC value (already in network order in the header)
     */
    memcpy(&in.v8[2], &hdr->ssrc, 4);
    debug_print(mod_srtp, "Pre-salted RTP IV = %s\n", v128_hex_string(&in));

    /*
     * Get the SALT value from the context
     */
    memcpy(salt.v8, session_keys->salt, SRTP_AEAD_SALT_LEN);
    debug_print(mod_srtp, "RTP SALT = %s\n", v128_hex_string(&salt));

    /*
     * Finally, apply the SALT to the input
     */
    v128_xor(iv, &in, &salt);
}
/*
 * srtp_get_session_keys() locates the session keys whose MKI identifier
 * matches the MKI carried at the end of the packet (just before the auth
 * tag, if any).  On success *mki_size is set to the matched MKI length and
 * the matching session keys are returned; otherwise *mki_size is zeroed
 * and NULL is returned.
 */
srtp_session_keys_t *srtp_get_session_keys(srtp_stream_ctx_t *stream,
                                           uint8_t *hdr,
                                           const unsigned int *pkt_octet_len,
                                           unsigned int *mki_size)
{
    unsigned int tag_len;
    unsigned int mki_end = *pkt_octet_len; /* one past the end of the MKI */
    unsigned int idx;

    /* Determine the authentication tag size: GCM modes append their tag
       via the cipher, so no separate auth tag precedes the packet end */
    switch (stream->session_keys[0].rtp_cipher->algorithm) {
    case SRTP_AES_GCM_128:
    case SRTP_AES_GCM_256:
        tag_len = 0;
        break;
    default:
        tag_len = srtp_auth_get_tag_length(stream->session_keys[0].rtp_auth);
        break;
    }

    if (tag_len > mki_end) {
        *mki_size = 0;
        return NULL;
    }
    mki_end -= tag_len;

    /* try each master key's MKI identifier against the packet bytes */
    for (idx = 0; idx < stream->num_master_keys; idx++) {
        srtp_session_keys_t *keys = &stream->session_keys[idx];
        if (keys->mki_size == 0 || keys->mki_size > mki_end) {
            continue;
        }
        if (memcmp(hdr + (mki_end - keys->mki_size), keys->mki_id,
                   keys->mki_size) == 0) {
            *mki_size = keys->mki_size;
            return keys;
        }
    }

    /* no MKI matched */
    *mki_size = 0;
    return NULL;
}
/*
 * srtp_estimate_index() computes the estimated extended packet index
 * (*est = roc << 16 | seq) from an externally supplied ROC and the
 * 16-bit sequence number, along with the signed distance *delta from
 * the local replay index.  If the external index is more than
 * seq_num_median ahead of the local index, *delta is zeroed and
 * srtp_err_status_pkt_idx_adv is returned; more than seq_num_median
 * behind yields srtp_err_status_pkt_idx_old.  Otherwise returns
 * srtp_err_status_ok.
 */
static srtp_err_status_t srtp_estimate_index(srtp_rdbx_t *rdbx,
                                             uint32_t roc,
                                             srtp_xtd_seq_num_t *est,
                                             srtp_sequence_number_t seq,
                                             int *delta)
{
#ifdef NO_64BIT_MATH
    uint32_t internal_pkt_idx_reduced;
    uint32_t external_pkt_idx_reduced;
    uint32_t internal_roc;
    uint32_t roc_difference;
#endif

#ifdef NO_64BIT_MATH
    *est = (srtp_xtd_seq_num_t)make64(roc >> 16, (roc << 16) | seq);
    /* fix: was low32(est) — low32 must be applied to the value, not the
       pointer; est is srtp_xtd_seq_num_t * here */
    *delta = low32(*est) - rdbx->index;
#else
    *est = (srtp_xtd_seq_num_t)(((uint64_t)roc) << 16) | seq;
    *delta = (int)(*est - rdbx->index);
#endif

    if (*est > rdbx->index) {
#ifdef NO_64BIT_MATH
        internal_roc = (uint32_t)(rdbx->index >> 16);
        roc_difference = roc - internal_roc;
        /* external ROC more than one ahead: too far advanced */
        if (roc_difference > 1) {
            *delta = 0;
            return srtp_err_status_pkt_idx_adv;
        }

        internal_pkt_idx_reduced = (uint32_t)(rdbx->index & 0xFFFF);
        external_pkt_idx_reduced = (uint32_t)((roc_difference << 16) | seq);

        if (external_pkt_idx_reduced - internal_pkt_idx_reduced >
            seq_num_median) {
            *delta = 0;
            return srtp_err_status_pkt_idx_adv;
        }
#else
        if (*est - rdbx->index > seq_num_median) {
            *delta = 0;
            return srtp_err_status_pkt_idx_adv;
        }
#endif
    } else if (*est < rdbx->index) {
#ifdef NO_64BIT_MATH
        internal_roc = (uint32_t)(rdbx->index >> 16);
        roc_difference = internal_roc - roc;
        /* external ROC more than one behind: too old */
        if (roc_difference > 1) {
            *delta = 0;
            return srtp_err_status_pkt_idx_adv;
        }

        /* explicit parens added for clarity; & already binds tighter than | */
        internal_pkt_idx_reduced =
            (uint32_t)((roc_difference << 16) | (rdbx->index & 0xFFFF));
        external_pkt_idx_reduced = (uint32_t)(seq);

        if (internal_pkt_idx_reduced - external_pkt_idx_reduced >
            seq_num_median) {
            *delta = 0;
            return srtp_err_status_pkt_idx_old;
        }
#else
        if (rdbx->index - *est > seq_num_median) {
            *delta = 0;
            return srtp_err_status_pkt_idx_old;
        }
#endif
    }

    return srtp_err_status_ok;
}
/*
 * srtp_get_est_pkt_index() estimates the extended packet index for an
 * incoming/outgoing packet.  When a ROC update is pending on the stream
 * the pending ROC is used (which may report pkt_idx_adv/pkt_idx_old);
 * otherwise the replay-window based estimator is used.
 */
static srtp_err_status_t srtp_get_est_pkt_index(srtp_hdr_t *hdr,
                                                srtp_stream_ctx_t *stream,
                                                srtp_xtd_seq_num_t *est,
                                                int *delta)
{
    srtp_err_status_t status = srtp_err_status_ok;
    srtp_sequence_number_t seq = ntohs(hdr->seq);

    if (stream->pending_roc == 0) {
        /* estimate packet index from seq. num. in header */
        *delta = srtp_rdbx_estimate_index(&stream->rtp_rdbx, est, seq);
    } else {
        /* a ROC update is pending; estimate against the pending ROC */
        status = srtp_estimate_index(&stream->rtp_rdbx, stream->pending_roc,
                                     est, seq, delta);
    }

#ifdef NO_64BIT_MATH
    debug_print2(mod_srtp, "estimated u_packet index: %08x%08x", high32(*est),
                 low32(*est));
#else
    debug_print(mod_srtp, "estimated u_packet index: %016" PRIx64, *est);
#endif
    return status;
}
/*
* This function handles outgoing SRTP packets while in AEAD mode,
* which currently supports AES-GCM encryption. All packets are
* encrypted and authenticated.
*/
/*
 * ctx           - SRTP session context
 * stream        - stream context for the packet's SSRC
 * rtp_hdr       - in/out packet buffer (must have room for tag + MKI)
 * pkt_octet_len - in: RTP packet length; out: SRTP packet length
 * session_keys  - keys selected for this packet
 * use_mki       - nonzero to append the MKI after the tag
 */
static srtp_err_status_t srtp_protect_aead(srtp_ctx_t *ctx,
                                           srtp_stream_ctx_t *stream,
                                           void *rtp_hdr,
                                           unsigned int *pkt_octet_len,
                                           srtp_session_keys_t *session_keys,
                                           unsigned int use_mki)
{
    srtp_hdr_t *hdr = (srtp_hdr_t *)rtp_hdr;
    uint32_t *enc_start;    /* pointer to start of encrypted portion */
    int enc_octet_len = 0;  /* number of octets in encrypted portion */
    srtp_xtd_seq_num_t est; /* estimated xtd_seq_num_t of *hdr */
    int delta;              /* delta of local pkt idx and that in hdr */
    srtp_err_status_t status;
    uint32_t tag_len;
    v128_t iv;
    unsigned int aad_len;
    srtp_hdr_xtnd_t *xtn_hdr = NULL;
    unsigned int mki_size = 0;
    uint8_t *mki_location = NULL;

    debug_print0(mod_srtp, "function srtp_protect_aead");

    /*
     * update the key usage limit, and check it to make sure that we
     * didn't just hit either the soft limit or the hard limit, and call
     * the event handler if we hit either.
     */
    switch (srtp_key_limit_update(session_keys->limit)) {
    case srtp_key_event_normal:
        break;
    case srtp_key_event_hard_limit:
        /* hard limit reached: refuse to protect with an expired key */
        srtp_handle_event(ctx, stream, event_key_hard_limit);
        return srtp_err_status_key_expired;
    case srtp_key_event_soft_limit:
    default:
        /* soft limit: warn the application but keep going */
        srtp_handle_event(ctx, stream, event_key_soft_limit);
        break;
    }

    /* get tag length from stream */
    tag_len = srtp_auth_get_tag_length(session_keys->rtp_auth);

    /*
     * find starting point for encryption and length of data to be
     * encrypted - the encrypted portion starts after the rtp header
     * extension, if present; otherwise, it starts after the last csrc,
     * if any are present
     */
    enc_start = (uint32_t *)hdr + uint32s_in_rtp_header + hdr->cc;
    if (hdr->x == 1) {
        xtn_hdr = (srtp_hdr_xtnd_t *)enc_start;
        /* extension length field counts 32-bit words, excluding its header */
        enc_start += (ntohs(xtn_hdr->length) + 1);
    }
    /* note: the passed size is without the auth tag */
    if (!((uint8_t *)enc_start <= (uint8_t *)hdr + *pkt_octet_len))
        return srtp_err_status_parse_err;
    enc_octet_len =
        (int)(*pkt_octet_len - ((uint8_t *)enc_start - (uint8_t *)hdr));
    if (enc_octet_len < 0)
        return srtp_err_status_parse_err;

    /*
     * estimate the packet index using the start of the replay window
     * and the sequence number from the header
     */
    delta = srtp_rdbx_estimate_index(&stream->rtp_rdbx, &est, ntohs(hdr->seq));
    status = srtp_rdbx_check(&stream->rtp_rdbx, delta);
    if (status) {
        /* replay failure is tolerated only when the stream explicitly
           allows retransmission of a previously used index */
        if (status != srtp_err_status_replay_fail || !stream->allow_repeat_tx) {
            return status; /* we've been asked to reuse an index */
        }
    } else {
        srtp_rdbx_add_index(&stream->rtp_rdbx, delta);
    }

#ifdef NO_64BIT_MATH
    debug_print2(mod_srtp, "estimated packet index: %08x%08x", high32(est),
                 low32(est));
#else
    debug_print(mod_srtp, "estimated packet index: %016" PRIx64, est);
#endif

    /*
     * AEAD uses a new IV formation method (RFC 7714; computed from the
     * un-shifted est, so this must precede the shift below)
     */
    srtp_calc_aead_iv(session_keys, &iv, &est, hdr);
/* shift est, put into network byte order */
#ifdef NO_64BIT_MATH
    est = be64_to_cpu(
        make64((high32(est) << 16) | (low32(est) >> 16), low32(est) << 16));
#else
    est = be64_to_cpu(est << 16);
#endif

    status = srtp_cipher_set_iv(session_keys->rtp_cipher, (uint8_t *)&iv,
                                srtp_direction_encrypt);
    if (!status && session_keys->rtp_xtn_hdr_cipher) {
        /* the extension-header cipher uses the ICM-style IV layout:
           0 || SSRC || (est << 16) */
        iv.v32[0] = 0;
        iv.v32[1] = hdr->ssrc;
        iv.v64[1] = est;
        status = srtp_cipher_set_iv(session_keys->rtp_xtn_hdr_cipher,
                                    (uint8_t *)&iv, srtp_direction_encrypt);
    }
    if (status) {
        return srtp_err_status_cipher_fail;
    }

    if (xtn_hdr && session_keys->rtp_xtn_hdr_cipher) {
        /*
         * extensions header encryption RFC 6904
         */
        status = srtp_process_header_encryption(stream, xtn_hdr, session_keys);
        if (status) {
            return status;
        }
    }

    /*
     * Set the AAD over the RTP header (everything before the payload is
     * authenticated but not encrypted)
     */
    aad_len = (uint8_t *)enc_start - (uint8_t *)hdr;
    status =
        srtp_cipher_set_aad(session_keys->rtp_cipher, (uint8_t *)hdr, aad_len);
    if (status) {
        return (srtp_err_status_cipher_fail);
    }

    /* Encrypt the payload */
    status = srtp_cipher_encrypt(session_keys->rtp_cipher, (uint8_t *)enc_start,
                                 (unsigned int *)&enc_octet_len);
    if (status) {
        return srtp_err_status_cipher_fail;
    }
    /*
     * If we're doing GCM, we need to get the tag
     * and append that to the output
     */
    status =
        srtp_cipher_get_tag(session_keys->rtp_cipher,
                            (uint8_t *)enc_start + enc_octet_len, &tag_len);
    if (status) {
        return (srtp_err_status_cipher_fail);
    }

    /* the MKI (if enabled) goes after the GCM tag */
    mki_location = (uint8_t *)hdr + *pkt_octet_len + tag_len;
    mki_size = srtp_inject_mki(mki_location, session_keys, use_mki);

    /* increase the packet length by the length of the auth tag */
    *pkt_octet_len += tag_len;

    /* increase the packet length by the length of the mki_size */
    *pkt_octet_len += mki_size;

    return srtp_err_status_ok;
}
/*
* This function handles incoming SRTP packets while in AEAD mode,
* which currently supports AES-GCM encryption. All packets are
* encrypted and authenticated. Note, the auth tag is at the end
* of the packet stream and is automatically checked by GCM
* when decrypting the payload.
*/
/*
 * ctx           - SRTP session context
 * stream        - stream context (may be the provisional template)
 * delta         - replay-window delta, precomputed by the caller
 * est           - estimated extended packet index, precomputed by caller
 * srtp_hdr      - in/out packet buffer
 * pkt_octet_len - in: SRTP packet length; out: RTP packet length
 * session_keys  - keys selected (via MKI or default) for this packet
 * mki_size      - length of the MKI field present in the packet (0 if none)
 */
static srtp_err_status_t srtp_unprotect_aead(srtp_ctx_t *ctx,
                                             srtp_stream_ctx_t *stream,
                                             int delta,
                                             srtp_xtd_seq_num_t est,
                                             void *srtp_hdr,
                                             unsigned int *pkt_octet_len,
                                             srtp_session_keys_t *session_keys,
                                             unsigned int mki_size)
{
    srtp_hdr_t *hdr = (srtp_hdr_t *)srtp_hdr;
    uint32_t *enc_start;            /* pointer to start of encrypted portion */
    unsigned int enc_octet_len = 0; /* number of octets in encrypted portion */
    v128_t iv;
    srtp_err_status_t status;
    int tag_len;
    unsigned int aad_len;
    srtp_hdr_xtnd_t *xtn_hdr = NULL;

    debug_print0(mod_srtp, "function srtp_unprotect_aead");

#ifdef NO_64BIT_MATH
    debug_print2(mod_srtp, "estimated u_packet index: %08x%08x", high32(est),
                 low32(est));
#else
    debug_print(mod_srtp, "estimated u_packet index: %016" PRIx64, est);
#endif

    /* get tag length from stream */
    tag_len = srtp_auth_get_tag_length(session_keys->rtp_auth);

    /*
     * AEAD uses a new IV formation method (RFC 7714)
     */
    srtp_calc_aead_iv(session_keys, &iv, &est, hdr);
    status = srtp_cipher_set_iv(session_keys->rtp_cipher, (uint8_t *)&iv,
                                srtp_direction_decrypt);
    if (!status && session_keys->rtp_xtn_hdr_cipher) {
        /* extension-header cipher IV: 0 || SSRC || (est << 16) */
        iv.v32[0] = 0;
        iv.v32[1] = hdr->ssrc;
#ifdef NO_64BIT_MATH
        iv.v64[1] = be64_to_cpu(
            make64((high32(est) << 16) | (low32(est) >> 16), low32(est) << 16));
#else
        iv.v64[1] = be64_to_cpu(est << 16);
#endif
        /* NOTE(review): encrypt direction appears intentional here — RFC 6904
           header protection is a symmetric keystream XOR — but confirm against
           upstream before changing */
        status = srtp_cipher_set_iv(session_keys->rtp_xtn_hdr_cipher,
                                    (uint8_t *)&iv, srtp_direction_encrypt);
    }
    if (status) {
        return srtp_err_status_cipher_fail;
    }

    /*
     * find starting point for decryption and length of data to be
     * decrypted - the encrypted portion starts after the rtp header
     * extension, if present; otherwise, it starts after the last csrc,
     * if any are present
     */
    enc_start = (uint32_t *)hdr + uint32s_in_rtp_header + hdr->cc;
    if (hdr->x == 1) {
        xtn_hdr = (srtp_hdr_xtnd_t *)enc_start;
        /* extension length field counts 32-bit words, excluding its header */
        enc_start += (ntohs(xtn_hdr->length) + 1);
    }
    /* reject packets whose payload start would fall past the ciphertext */
    if (!((uint8_t *)enc_start <=
          (uint8_t *)hdr + (*pkt_octet_len - tag_len - mki_size)))
        return srtp_err_status_parse_err;
    /*
     * We pass the tag down to the cipher when doing GCM mode
     */
    enc_octet_len = (unsigned int)(*pkt_octet_len - mki_size -
                                   ((uint8_t *)enc_start - (uint8_t *)hdr));

    /*
     * Sanity check the encrypted payload length against
     * the tag size. It must always be at least as large
     * as the tag length.
     */
    if (enc_octet_len < (unsigned int)tag_len) {
        return srtp_err_status_cipher_fail;
    }

    /*
     * update the key usage limit, and check it to make sure that we
     * didn't just hit either the soft limit or the hard limit, and call
     * the event handler if we hit either.
     */
    switch (srtp_key_limit_update(session_keys->limit)) {
    case srtp_key_event_normal:
        break;
    case srtp_key_event_soft_limit:
        srtp_handle_event(ctx, stream, event_key_soft_limit);
        break;
    case srtp_key_event_hard_limit:
        srtp_handle_event(ctx, stream, event_key_hard_limit);
        return srtp_err_status_key_expired;
    default:
        break;
    }

    /*
     * Set the AAD for AES-GCM, which is the RTP header
     */
    aad_len = (uint8_t *)enc_start - (uint8_t *)hdr;
    status =
        srtp_cipher_set_aad(session_keys->rtp_cipher, (uint8_t *)hdr, aad_len);
    if (status) {
        return (srtp_err_status_cipher_fail);
    }

    /* Decrypt the ciphertext. This also checks the auth tag based
     * on the AAD we just specified above */
    status = srtp_cipher_decrypt(session_keys->rtp_cipher, (uint8_t *)enc_start,
                                 &enc_octet_len);
    if (status) {
        return status;
    }

    if (xtn_hdr && session_keys->rtp_xtn_hdr_cipher) {
        /*
         * extensions header encryption RFC 6904
         */
        status = srtp_process_header_encryption(stream, xtn_hdr, session_keys);
        if (status) {
            return status;
        }
    }

    /*
     * verify that stream is for received traffic - this check will
     * detect SSRC collisions, since a stream that appears in both
     * srtp_protect() and srtp_unprotect() will fail this test in one of
     * those functions.
     *
     * we do this check *after* the authentication check, so that the
     * latter check will catch any attempts to fool us into thinking
     * that we've got a collision
     */
    if (stream->direction != dir_srtp_receiver) {
        if (stream->direction == dir_unknown) {
            stream->direction = dir_srtp_receiver;
        } else {
            srtp_handle_event(ctx, stream, event_ssrc_collision);
        }
    }

    /*
     * if the stream is a 'provisional' one, in which the template context
     * is used, then we need to allocate a new stream at this point, since
     * the authentication passed
     */
    if (stream == ctx->stream_template) {
        srtp_stream_ctx_t *new_stream;

        /*
         * allocate and initialize a new stream
         *
         * note that we indicate failure if we can't allocate the new
         * stream, and some implementations will want to not return
         * failure here
         */
        status =
            srtp_stream_clone(ctx->stream_template, hdr->ssrc, &new_stream);
        if (status) {
            return status;
        }

        /* add new stream to the head of the stream_list */
        new_stream->next = ctx->stream_list;
        ctx->stream_list = new_stream;

        /* set stream (the pointer used in this function) */
        stream = new_stream;
    }

    /*
     * the message authentication function passed, so add the packet
     * index into the replay database
     */
    srtp_rdbx_add_index(&stream->rtp_rdbx, delta);

    /* decrease the packet length by the length of the auth tag */
    *pkt_octet_len -= tag_len;

    /* decrease the packet length by the length of the mki_size */
    *pkt_octet_len -= mki_size;

    return srtp_err_status_ok;
}
/*
 * srtp_protect() applies SRTP protection to the packet in place;
 * equivalent to srtp_protect_mki() with MKI disabled.
 */
srtp_err_status_t srtp_protect(srtp_ctx_t *ctx,
                               void *rtp_hdr,
                               int *pkt_octet_len)
{
    /* delegate to the MKI-aware variant with use_mki=0, mki_index=0 */
    return srtp_protect_mki(ctx, rtp_hdr, pkt_octet_len, 0, 0);
}
/*
 * srtp_protect_mki() applies SRTP protection to the RTP packet in place,
 * optionally appending the MKI identified by mki_index.  GCM streams are
 * dispatched to srtp_protect_aead().  On return *pkt_octet_len holds the
 * SRTP packet length (original length + auth tag + MKI, as applicable).
 */
srtp_err_status_t srtp_protect_mki(srtp_ctx_t *ctx,
                                   void *rtp_hdr,
                                   int *pkt_octet_len,
                                   unsigned int use_mki,
                                   unsigned int mki_index)
{
    srtp_hdr_t *hdr = (srtp_hdr_t *)rtp_hdr;
    uint32_t *enc_start;      /* pointer to start of encrypted portion */
    uint32_t *auth_start;     /* pointer to start of auth. portion */
    int enc_octet_len = 0;    /* number of octets in encrypted portion */
    srtp_xtd_seq_num_t est;   /* estimated xtd_seq_num_t of *hdr */
    int delta;                /* delta of local pkt idx and that in hdr */
    uint8_t *auth_tag = NULL; /* location of auth_tag within packet */
    srtp_err_status_t status;
    int tag_len;
    srtp_stream_ctx_t *stream;
    uint32_t prefix_len;
    srtp_hdr_xtnd_t *xtn_hdr = NULL;
    unsigned int mki_size = 0;
    srtp_session_keys_t *session_keys = NULL;
    uint8_t *mki_location = NULL;
    int advance_packet_index = 0;

    debug_print0(mod_srtp, "function srtp_protect");

    /* we assume the hdr is 32-bit aligned to start */

    /* Verify RTP header */
    status = srtp_validate_rtp_header(rtp_hdr, pkt_octet_len);
    if (status)
        return status;

    /* check the packet length - it must at least contain a full header */
    if (*pkt_octet_len < octets_in_rtp_header)
        return srtp_err_status_bad_param;

    /*
     * look up ssrc in srtp_stream list, and process the packet with
     * the appropriate stream. if we haven't seen this stream before,
     * there's a template key for this srtp_session, and the cipher
     * supports key-sharing, then we assume that a new stream using
     * that key has just started up
     */
    stream = srtp_get_stream(ctx, hdr->ssrc);
    if (stream == NULL) {
        if (ctx->stream_template != NULL) {
            srtp_stream_ctx_t *new_stream;

            /* allocate and initialize a new stream */
            status =
                srtp_stream_clone(ctx->stream_template, hdr->ssrc, &new_stream);
            if (status)
                return status;

            /* add new stream to the head of the stream_list */
            new_stream->next = ctx->stream_list;
            ctx->stream_list = new_stream;

            /* set direction to outbound */
            new_stream->direction = dir_srtp_sender;

            /* set stream (the pointer used in this function) */
            stream = new_stream;
        } else {
            /* no template stream, so we return an error */
            return srtp_err_status_no_ctx;
        }
    }

    /*
     * verify that stream is for sending traffic - this check will
     * detect SSRC collisions, since a stream that appears in both
     * srtp_protect() and srtp_unprotect() will fail this test in one of
     * those functions.
     */
    if (stream->direction != dir_srtp_sender) {
        if (stream->direction == dir_unknown) {
            stream->direction = dir_srtp_sender;
        } else {
            srtp_handle_event(ctx, stream, event_ssrc_collision);
        }
    }

    /* select session keys by MKI index (or the default key set) */
    session_keys =
        srtp_get_session_keys_with_mki_index(stream, use_mki, mki_index);
    if (session_keys == NULL)
        return srtp_err_status_bad_mki;

    /*
     * Check if this is an AEAD stream (GCM mode). If so, then dispatch
     * the request to our AEAD handler.
     */
    if (session_keys->rtp_cipher->algorithm == SRTP_AES_GCM_128 ||
        session_keys->rtp_cipher->algorithm == SRTP_AES_GCM_256) {
        return srtp_protect_aead(ctx, stream, rtp_hdr,
                                 (unsigned int *)pkt_octet_len, session_keys,
                                 use_mki);
    }

    /*
     * update the key usage limit, and check it to make sure that we
     * didn't just hit either the soft limit or the hard limit, and call
     * the event handler if we hit either.
     */
    switch (srtp_key_limit_update(session_keys->limit)) {
    case srtp_key_event_normal:
        break;
    case srtp_key_event_soft_limit:
        srtp_handle_event(ctx, stream, event_key_soft_limit);
        break;
    case srtp_key_event_hard_limit:
        srtp_handle_event(ctx, stream, event_key_hard_limit);
        return srtp_err_status_key_expired;
    default:
        break;
    }

    /* get tag length from stream */
    tag_len = srtp_auth_get_tag_length(session_keys->rtp_auth);

    /*
     * find starting point for encryption and length of data to be
     * encrypted - the encrypted portion starts after the rtp header
     * extension, if present; otherwise, it starts after the last csrc,
     * if any are present
     *
     * if we're not providing confidentiality, set enc_start to NULL
     */
    if (stream->rtp_services & sec_serv_conf) {
        enc_start = (uint32_t *)hdr + uint32s_in_rtp_header + hdr->cc;
        if (hdr->x == 1) {
            xtn_hdr = (srtp_hdr_xtnd_t *)enc_start;
            enc_start += (ntohs(xtn_hdr->length) + 1);
        }
        /* note: the passed size is without the auth tag */
        if (!((uint8_t *)enc_start <= (uint8_t *)hdr + *pkt_octet_len))
            return srtp_err_status_parse_err;
        enc_octet_len =
            (int)(*pkt_octet_len - ((uint8_t *)enc_start - (uint8_t *)hdr));
        if (enc_octet_len < 0)
            return srtp_err_status_parse_err;
    } else {
        enc_start = NULL;
    }

    /* MKI (if any) is written directly after the payload */
    mki_location = (uint8_t *)hdr + *pkt_octet_len;
    mki_size = srtp_inject_mki(mki_location, session_keys, use_mki);

    /*
     * if we're providing authentication, set the auth_start and auth_tag
     * pointers to the proper locations; otherwise, set auth_start to NULL
     * to indicate that no authentication is needed
     */
    if (stream->rtp_services & sec_serv_auth) {
        auth_start = (uint32_t *)hdr;
        auth_tag = (uint8_t *)hdr + *pkt_octet_len + mki_size;
    } else {
        auth_start = NULL;
        auth_tag = NULL;
    }

    /*
     * estimate the packet index using the start of the replay window
     * and the sequence number from the header
     */
    status = srtp_get_est_pkt_index(hdr, stream, &est, &delta);

    if (status && (status != srtp_err_status_pkt_idx_adv))
        return status;

    if (status == srtp_err_status_pkt_idx_adv)
        advance_packet_index = 1;

    if (advance_packet_index) {
        /* a pending ROC update advanced the index; resynchronize the
           replay database to the new ROC/SEQ */
        srtp_rdbx_set_roc_seq(&stream->rtp_rdbx, (uint32_t)(est >> 16),
                              (uint16_t)(est & 0xFFFF));
        stream->pending_roc = 0;
        srtp_rdbx_add_index(&stream->rtp_rdbx, 0);
    } else {
        status = srtp_rdbx_check(&stream->rtp_rdbx, delta);
        if (status) {
            if (status != srtp_err_status_replay_fail ||
                !stream->allow_repeat_tx)
                return status; /* we've been asked to reuse an index */
        }
        srtp_rdbx_add_index(&stream->rtp_rdbx, delta);
    }

#ifdef NO_64BIT_MATH
    debug_print2(mod_srtp, "estimated packet index: %08x%08x", high32(est),
                 low32(est));
#else
    debug_print(mod_srtp, "estimated packet index: %016" PRIx64, est);
#endif

    /*
     * if we're using Rijndael (AES) counter mode, set nonce and seq
     */
    if (session_keys->rtp_cipher->type->id == SRTP_AES_ICM_128 ||
        session_keys->rtp_cipher->type->id == SRTP_AES_ICM_192 ||
        session_keys->rtp_cipher->type->id == SRTP_AES_ICM_256) {
        v128_t iv;

        /* ICM IV layout: 0 || SSRC || (est << 16) */
        iv.v32[0] = 0;
        iv.v32[1] = hdr->ssrc;
#ifdef NO_64BIT_MATH
        iv.v64[1] = be64_to_cpu(
            make64((high32(est) << 16) | (low32(est) >> 16), low32(est) << 16));
#else
        iv.v64[1] = be64_to_cpu(est << 16);
#endif
        status = srtp_cipher_set_iv(session_keys->rtp_cipher, (uint8_t *)&iv,
                                    srtp_direction_encrypt);
        if (!status && session_keys->rtp_xtn_hdr_cipher) {
            status = srtp_cipher_set_iv(session_keys->rtp_xtn_hdr_cipher,
                                        (uint8_t *)&iv, srtp_direction_encrypt);
        }
    } else {
        v128_t iv;

/* otherwise, set the index to est */
#ifdef NO_64BIT_MATH
        iv.v32[0] = 0;
        iv.v32[1] = 0;
#else
        iv.v64[0] = 0;
#endif
        iv.v64[1] = be64_to_cpu(est);
        status = srtp_cipher_set_iv(session_keys->rtp_cipher, (uint8_t *)&iv,
                                    srtp_direction_encrypt);
        if (!status && session_keys->rtp_xtn_hdr_cipher) {
            status = srtp_cipher_set_iv(session_keys->rtp_xtn_hdr_cipher,
                                        (uint8_t *)&iv, srtp_direction_encrypt);
        }
    }
    if (status)
        return srtp_err_status_cipher_fail;

/* shift est, put into network byte order */
#ifdef NO_64BIT_MATH
    est = be64_to_cpu(
        make64((high32(est) << 16) | (low32(est) >> 16), low32(est) << 16));
#else
    est = be64_to_cpu(est << 16);
#endif

    /*
     * if we're authenticating using a universal hash, put the keystream
     * prefix into the authentication tag
     */
    if (auth_start) {
        prefix_len = srtp_auth_get_prefix_length(session_keys->rtp_auth);
        if (prefix_len) {
            status = srtp_cipher_output(session_keys->rtp_cipher, auth_tag,
                                        &prefix_len);
            if (status)
                return srtp_err_status_cipher_fail;
            debug_print(mod_srtp, "keystream prefix: %s",
                        srtp_octet_string_hex_string(auth_tag, prefix_len));
        }
    }

    if (xtn_hdr && session_keys->rtp_xtn_hdr_cipher) {
        /*
         * extensions header encryption RFC 6904
         */
        status = srtp_process_header_encryption(stream, xtn_hdr, session_keys);
        if (status) {
            return status;
        }
    }

    /* if we're encrypting, exor keystream into the message */
    if (enc_start) {
        status =
            srtp_cipher_encrypt(session_keys->rtp_cipher, (uint8_t *)enc_start,
                                (unsigned int *)&enc_octet_len);
        if (status)
            return srtp_err_status_cipher_fail;
    }

    /*
     * if we're authenticating, run authentication function and put result
     * into the auth_tag
     */
    if (auth_start) {
        /* initialize auth func context */
        status = srtp_auth_start(session_keys->rtp_auth);
        if (status)
            return status;

        /* run auth func over packet */
        status = srtp_auth_update(session_keys->rtp_auth, (uint8_t *)auth_start,
                                  *pkt_octet_len);
        if (status)
            return status;

        /* run auth func over ROC, put result into auth_tag
           (est is byte-swapped above, so its first 4 bytes hold the ROC) */
        debug_print(mod_srtp, "estimated packet index: %016" PRIx64, est);
        status = srtp_auth_compute(session_keys->rtp_auth, (uint8_t *)&est, 4,
                                   auth_tag);
        debug_print(mod_srtp, "srtp auth tag: %s",
                    srtp_octet_string_hex_string(auth_tag, tag_len));
        if (status)
            return srtp_err_status_auth_fail;
    }

    if (auth_tag) {
        /* increase the packet length by the length of the auth tag */
        *pkt_octet_len += tag_len;
    }

    if (use_mki) {
        /* increase the packet length by the mki size */
        *pkt_octet_len += mki_size;
    }

    return srtp_err_status_ok;
}
/*
 * srtp_unprotect() verifies and removes SRTP protection in place;
 * equivalent to srtp_unprotect_mki() with MKI disabled.
 */
srtp_err_status_t srtp_unprotect(srtp_ctx_t *ctx,
                                 void *srtp_hdr,
                                 int *pkt_octet_len)
{
    /* delegate to the MKI-aware variant with use_mki=0 */
    return srtp_unprotect_mki(ctx, srtp_hdr, pkt_octet_len, 0);
}
/*
 * srtp_unprotect_mki() authenticates and decrypts an SRTP packet in
 * place, optionally locating session keys via the packet's MKI field.
 * GCM streams are dispatched to srtp_unprotect_aead().  On success
 * *pkt_octet_len holds the plain RTP packet length (tag and MKI removed).
 */
srtp_err_status_t srtp_unprotect_mki(srtp_ctx_t *ctx,
                                     void *srtp_hdr,
                                     int *pkt_octet_len,
                                     unsigned int use_mki)
{
    srtp_hdr_t *hdr = (srtp_hdr_t *)srtp_hdr;
    uint32_t *enc_start;            /* pointer to start of encrypted portion */
    uint32_t *auth_start;           /* pointer to start of auth. portion */
    unsigned int enc_octet_len = 0; /* number of octets in encrypted portion */
    uint8_t *auth_tag = NULL;       /* location of auth_tag within packet */
    srtp_xtd_seq_num_t est;         /* estimated xtd_seq_num_t of *hdr */
    int delta;                      /* delta of local pkt idx and that in hdr */
    v128_t iv;
    srtp_err_status_t status;
    srtp_stream_ctx_t *stream;
    uint8_t tmp_tag[SRTP_MAX_TAG_LEN];
    uint32_t tag_len, prefix_len;
    srtp_hdr_xtnd_t *xtn_hdr = NULL;
    unsigned int mki_size = 0;
    srtp_session_keys_t *session_keys = NULL;
    int advance_packet_index = 0;
    uint32_t roc_to_set = 0;
    uint16_t seq_to_set = 0;

    debug_print0(mod_srtp, "function srtp_unprotect");

    /* we assume the hdr is 32-bit aligned to start */

    /* Verify RTP header */
    status = srtp_validate_rtp_header(srtp_hdr, pkt_octet_len);
    if (status)
        return status;

    /* check the packet length - it must at least contain a full header */
    if (*pkt_octet_len < octets_in_rtp_header)
        return srtp_err_status_bad_param;

    /*
     * look up ssrc in srtp_stream list, and process the packet with
     * the appropriate stream. if we haven't seen this stream before,
     * there's only one key for this srtp_session, and the cipher
     * supports key-sharing, then we assume that a new stream using
     * that key has just started up
     */
    stream = srtp_get_stream(ctx, hdr->ssrc);
    if (stream == NULL) {
        if (ctx->stream_template != NULL) {
            stream = ctx->stream_template;
            debug_print(mod_srtp, "using provisional stream (SSRC: 0x%08x)",
                        ntohl(hdr->ssrc));

/*
 * set estimated packet index to sequence number from header,
 * and set delta equal to the same value
 */
#ifdef NO_64BIT_MATH
            est = (srtp_xtd_seq_num_t)make64(0, ntohs(hdr->seq));
            delta = low32(est);
#else
            est = (srtp_xtd_seq_num_t)ntohs(hdr->seq);
            delta = (int)est;
#endif
        } else {
            /*
             * no stream corresponding to SSRC found, and we don't do
             * key-sharing, so return an error
             */
            return srtp_err_status_no_ctx;
        }
    } else {
        status = srtp_get_est_pkt_index(hdr, stream, &est, &delta);

        if (status && (status != srtp_err_status_pkt_idx_adv))
            return status;

        if (status == srtp_err_status_pkt_idx_adv) {
            /* a pending ROC update jumped the index; remember the ROC/SEQ
               to commit only after authentication passes */
            advance_packet_index = 1;
            roc_to_set = (uint32_t)(est >> 16);
            seq_to_set = (uint16_t)(est & 0xFFFF);
        }

        /* check replay database */
        if (!advance_packet_index) {
            status = srtp_rdbx_check(&stream->rtp_rdbx, delta);
            if (status)
                return status;
        }
    }

#ifdef NO_64BIT_MATH
    debug_print2(mod_srtp, "estimated u_packet index: %08x%08x", high32(est),
                 low32(est));
#else
    debug_print(mod_srtp, "estimated u_packet index: %016" PRIx64, est);
#endif

    /* Determine if MKI is being used and what session keys should be used */
    if (use_mki) {
        session_keys = srtp_get_session_keys(
            stream, (uint8_t *)hdr, (const unsigned int *)pkt_octet_len,
            &mki_size);

        if (session_keys == NULL)
            return srtp_err_status_bad_mki;
    } else {
        session_keys = &stream->session_keys[0];
    }

    /*
     * Check if this is an AEAD stream (GCM mode). If so, then dispatch
     * the request to our AEAD handler.
     */
    if (session_keys->rtp_cipher->algorithm == SRTP_AES_GCM_128 ||
        session_keys->rtp_cipher->algorithm == SRTP_AES_GCM_256) {
        return srtp_unprotect_aead(ctx, stream, delta, est, srtp_hdr,
                                   (unsigned int *)pkt_octet_len, session_keys,
                                   mki_size);
    }

    /* get tag length from stream */
    tag_len = srtp_auth_get_tag_length(session_keys->rtp_auth);

    /*
     * set the cipher's IV properly, depending on whatever cipher we
     * happen to be using
     */
    if (session_keys->rtp_cipher->type->id == SRTP_AES_ICM_128 ||
        session_keys->rtp_cipher->type->id == SRTP_AES_ICM_192 ||
        session_keys->rtp_cipher->type->id == SRTP_AES_ICM_256) {
        /* aes counter mode: IV = 0 || SSRC || (est << 16) */
        iv.v32[0] = 0;
        iv.v32[1] = hdr->ssrc; /* still in network order */
#ifdef NO_64BIT_MATH
        iv.v64[1] = be64_to_cpu(
            make64((high32(est) << 16) | (low32(est) >> 16), low32(est) << 16));
#else
        iv.v64[1] = be64_to_cpu(est << 16);
#endif
        status = srtp_cipher_set_iv(session_keys->rtp_cipher, (uint8_t *)&iv,
                                    srtp_direction_decrypt);
        if (!status && session_keys->rtp_xtn_hdr_cipher) {
            status = srtp_cipher_set_iv(session_keys->rtp_xtn_hdr_cipher,
                                        (uint8_t *)&iv, srtp_direction_decrypt);
        }
    } else {
/* no particular format - set the iv to the packet index */
#ifdef NO_64BIT_MATH
        iv.v32[0] = 0;
        iv.v32[1] = 0;
#else
        iv.v64[0] = 0;
#endif
        iv.v64[1] = be64_to_cpu(est);
        status = srtp_cipher_set_iv(session_keys->rtp_cipher, (uint8_t *)&iv,
                                    srtp_direction_decrypt);
        if (!status && session_keys->rtp_xtn_hdr_cipher) {
            status = srtp_cipher_set_iv(session_keys->rtp_xtn_hdr_cipher,
                                        (uint8_t *)&iv, srtp_direction_decrypt);
        }
    }
    if (status)
        return srtp_err_status_cipher_fail;

/* shift est, put into network byte order */
#ifdef NO_64BIT_MATH
    est = be64_to_cpu(
        make64((high32(est) << 16) | (low32(est) >> 16), low32(est) << 16));
#else
    est = be64_to_cpu(est << 16);
#endif

    /*
     * find starting point for decryption and length of data to be
     * decrypted - the encrypted portion starts after the rtp header
     * extension, if present; otherwise, it starts after the last csrc,
     * if any are present
     *
     * if we're not providing confidentiality, set enc_start to NULL
     */
    if (stream->rtp_services & sec_serv_conf) {
        enc_start = (uint32_t *)hdr + uint32s_in_rtp_header + hdr->cc;
        if (hdr->x == 1) {
            xtn_hdr = (srtp_hdr_xtnd_t *)enc_start;
            enc_start += (ntohs(xtn_hdr->length) + 1);
        }
        /* reject packets whose payload start falls past the ciphertext */
        if (!((uint8_t *)enc_start <=
              (uint8_t *)hdr + (*pkt_octet_len - tag_len - mki_size)))
            return srtp_err_status_parse_err;
        enc_octet_len = (uint32_t)(*pkt_octet_len - tag_len - mki_size -
                                   ((uint8_t *)enc_start - (uint8_t *)hdr));
    } else {
        enc_start = NULL;
    }

    /*
     * if we're providing authentication, set the auth_start and auth_tag
     * pointers to the proper locations; otherwise, set auth_start to NULL
     * to indicate that no authentication is needed
     */
    if (stream->rtp_services & sec_serv_auth) {
        auth_start = (uint32_t *)hdr;
        auth_tag = (uint8_t *)hdr + *pkt_octet_len - tag_len;
    } else {
        auth_start = NULL;
        auth_tag = NULL;
    }

    /*
     * if we expect message authentication, run the authentication
     * function and compare the result with the value of the auth_tag
     */
    if (auth_start) {
        /*
         * if we're using a universal hash, then we need to compute the
         * keystream prefix for encrypting the universal hash output
         *
         * if the keystream prefix length is zero, then we know that
         * the authenticator isn't using a universal hash function
         */
        if (session_keys->rtp_auth->prefix_len != 0) {
            prefix_len = srtp_auth_get_prefix_length(session_keys->rtp_auth);
            status = srtp_cipher_output(session_keys->rtp_cipher, tmp_tag,
                                        &prefix_len);
            debug_print(mod_srtp, "keystream prefix: %s",
                        srtp_octet_string_hex_string(tmp_tag, prefix_len));
            if (status)
                return srtp_err_status_cipher_fail;
        }

        /* initialize auth func context */
        status = srtp_auth_start(session_keys->rtp_auth);
        if (status)
            return status;

        /* now compute auth function over packet */
        status = srtp_auth_update(session_keys->rtp_auth, (uint8_t *)auth_start,
                                  *pkt_octet_len - tag_len - mki_size);
        if (status)
            return status;

        /* run auth func over ROC, then write tmp tag
           (est is byte-swapped above, so its first 4 bytes hold the ROC) */
        status = srtp_auth_compute(session_keys->rtp_auth, (uint8_t *)&est, 4,
                                   tmp_tag);

        debug_print(mod_srtp, "computed auth tag: %s",
                    srtp_octet_string_hex_string(tmp_tag, tag_len));
        debug_print(mod_srtp, "packet auth tag: %s",
                    srtp_octet_string_hex_string(auth_tag, tag_len));
        if (status)
            return srtp_err_status_auth_fail;

        /* srtp_octet_string_is_eq returns nonzero when the tags DIFFER */
        if (srtp_octet_string_is_eq(tmp_tag, auth_tag, tag_len))
            return srtp_err_status_auth_fail;
    }

    /*
     * update the key usage limit, and check it to make sure that we
     * didn't just hit either the soft limit or the hard limit, and call
     * the event handler if we hit either.
     */
    switch (srtp_key_limit_update(session_keys->limit)) {
    case srtp_key_event_normal:
        break;
    case srtp_key_event_soft_limit:
        srtp_handle_event(ctx, stream, event_key_soft_limit);
        break;
    case srtp_key_event_hard_limit:
        srtp_handle_event(ctx, stream, event_key_hard_limit);
        return srtp_err_status_key_expired;
    default:
        break;
    }

    if (xtn_hdr && session_keys->rtp_xtn_hdr_cipher) {
        /* extensions header encryption RFC 6904 */
        status = srtp_process_header_encryption(stream, xtn_hdr, session_keys);
        if (status) {
            return status;
        }
    }

    /* if we're decrypting, add keystream into ciphertext */
    if (enc_start) {
        status = srtp_cipher_decrypt(session_keys->rtp_cipher,
                                     (uint8_t *)enc_start, &enc_octet_len);
        if (status)
            return srtp_err_status_cipher_fail;
    }

    /*
     * verify that stream is for received traffic - this check will
     * detect SSRC collisions, since a stream that appears in both
     * srtp_protect() and srtp_unprotect() will fail this test in one of
     * those functions.
     *
     * we do this check *after* the authentication check, so that the
     * latter check will catch any attempts to fool us into thinking
     * that we've got a collision
     */
    if (stream->direction != dir_srtp_receiver) {
        if (stream->direction == dir_unknown) {
            stream->direction = dir_srtp_receiver;
        } else {
            srtp_handle_event(ctx, stream, event_ssrc_collision);
        }
    }

    /*
     * if the stream is a 'provisional' one, in which the template context
     * is used, then we need to allocate a new stream at this point, since
     * the authentication passed
     */
    if (stream == ctx->stream_template) {
        srtp_stream_ctx_t *new_stream;

        /*
         * allocate and initialize a new stream
         *
         * note that we indicate failure if we can't allocate the new
         * stream, and some implementations will want to not return
         * failure here
         */
        status =
            srtp_stream_clone(ctx->stream_template, hdr->ssrc, &new_stream);
        if (status)
            return status;

        /* add new stream to the head of the stream_list */
        new_stream->next = ctx->stream_list;
        ctx->stream_list = new_stream;

        /* set stream (the pointer used in this function) */
        stream = new_stream;
    }

    /*
     * the message authentication function passed, so add the packet
     * index into the replay database
     */
    if (advance_packet_index) {
        /* commit the ROC/SEQ recorded before authentication */
        srtp_rdbx_set_roc_seq(&stream->rtp_rdbx, roc_to_set, seq_to_set);
        stream->pending_roc = 0;
        srtp_rdbx_add_index(&stream->rtp_rdbx, 0);
    } else {
        srtp_rdbx_add_index(&stream->rtp_rdbx, delta);
    }

    /* decrease the packet length by the length of the auth tag */
    *pkt_octet_len -= tag_len;

    /* decrease the packet length by the mki size */
    *pkt_octet_len -= mki_size;

    return srtp_err_status_ok;
}
/*
 * srtp_init() brings up the library: it initializes the crypto kernel
 * and then registers the srtp debug module with it.  Returns the first
 * failing status, or srtp_err_status_ok.
 */
srtp_err_status_t srtp_init()
{
    /* initialize the crypto kernel first; only load the srtp debug
       module once that has succeeded */
    srtp_err_status_t status = srtp_crypto_kernel_init();

    if (status == srtp_err_status_ok) {
        status = srtp_crypto_kernel_load_debug_module(&mod_srtp);
    }

    return status;
}
/*
 * srtp_shutdown() tears down the crypto kernel; shutting it down also
 * frees the srtp debug module registered in srtp_init().
 */
srtp_err_status_t srtp_shutdown()
{
    /* propagate the kernel's status directly (ok == 0) */
    return srtp_crypto_kernel_shutdown();
}
/*
* The following code is under consideration for removal. See
* SRTP_MAX_TRAILER_LEN
*/
#if 0
/*
* srtp_get_trailer_length(&a) returns the number of octets that will
* be added to an RTP packet by the SRTP processing. This value
* is constant for a given srtp_stream_t (i.e. between initializations).
*/
int
srtp_get_trailer_length(const srtp_stream_t s) {
return srtp_auth_get_tag_length(s->rtp_auth);
}
#endif
/*
* srtp_get_stream(ssrc) returns a pointer to the stream corresponding
* to ssrc, or NULL if no stream exists for that ssrc
*
* this is an internal function
*/
/*
 * srtp_get_stream(srtp, ssrc) - return the stream context whose SSRC
 * matches the given (network-byte-order) value, or NULL if the session
 * has no stream for that SSRC.
 *
 * this is an internal function
 */
srtp_stream_ctx_t *srtp_get_stream(srtp_t srtp, uint32_t ssrc)
{
    srtp_stream_ctx_t *cur;

    /* linear scan of the session's singly-linked stream list */
    for (cur = srtp->stream_list; cur != NULL; cur = cur->next) {
        if (cur->ssrc == ssrc) {
            return cur;
        }
    }

    /* ssrc not present in this session */
    return NULL;
}
/*
 * srtp_dealloc(session) - free all streams, the template stream (if
 * any), and finally the session context itself.
 *
 * Teardown is conservative: the first stream that fails to deallocate
 * aborts the process and its error is returned, leaving the remaining
 * memory untouched.
 */
srtp_err_status_t srtp_dealloc(srtp_t session)
{
    srtp_stream_ctx_t *cur = session->stream_list;
    srtp_err_status_t rc;

    /* release each stream on the list, saving the next pointer first */
    while (cur != NULL) {
        srtp_stream_t nxt = cur->next;
        rc = srtp_stream_dealloc(cur, session->stream_template);
        if (rc != srtp_err_status_ok) {
            return rc;
        }
        cur = nxt;
    }

    /* the template, if present, goes after the clones */
    if (session->stream_template != NULL) {
        rc = srtp_stream_dealloc(session->stream_template, NULL);
        if (rc != srtp_err_status_ok) {
            return rc;
        }
    }

    /* finally release the session context itself */
    srtp_crypto_free(session);
    return srtp_err_status_ok;
}
/*
 * srtp_add_stream(session, policy) - allocate and initialize a stream
 * from the given policy and attach it to the session.
 *
 * A wildcard SSRC (any_inbound/any_outbound) installs the stream as
 * the session template (only one template is permitted - a second one
 * is a bad_param error).  A specific SSRC pushes the stream onto the
 * session's stream list.  ssrc_undefined is rejected.
 */
srtp_err_status_t srtp_add_stream(srtp_t session, const srtp_policy_t *policy)
{
    srtp_err_status_t rc;
    srtp_stream_t stream;

    /* reject null arguments and inconsistent master-key settings */
    if (session == NULL || policy == NULL ||
        !srtp_validate_policy_master_keys(policy)) {
        return srtp_err_status_bad_param;
    }

    /* build the stream: allocate, then initialize from the policy */
    rc = srtp_stream_alloc(&stream, policy);
    if (rc != srtp_err_status_ok) {
        return rc;
    }
    rc = srtp_stream_init(stream, policy);
    if (rc != srtp_err_status_ok) {
        srtp_stream_dealloc(stream, NULL);
        return rc;
    }

    if (policy->ssrc.type == ssrc_any_outbound ||
        policy->ssrc.type == ssrc_any_inbound) {
        /* wildcard SSRC: this stream becomes the session template */
        if (session->stream_template != NULL) {
            /* a template already exists - the policy is inconsistent */
            srtp_stream_dealloc(stream, NULL);
            return srtp_err_status_bad_param;
        }
        session->stream_template = stream;
        session->stream_template->direction =
            (policy->ssrc.type == ssrc_any_outbound) ? dir_srtp_sender
                                                     : dir_srtp_receiver;
    } else if (policy->ssrc.type == ssrc_specific) {
        /* concrete SSRC: push onto the head of the stream list */
        stream->next = session->stream_list;
        session->stream_list = stream;
    } else {
        /* ssrc_undefined or anything unrecognized */
        srtp_stream_dealloc(stream, NULL);
        return srtp_err_status_bad_param;
    }

    return srtp_err_status_ok;
}
/*
 * srtp_create(session, policy) - allocate a session context and add
 * one stream per element of the policy list.  On any stream failure
 * the partially-built session is torn down and *session is NULLed.
 */
srtp_err_status_t srtp_create(srtp_t *session, /* handle for session */
                              const srtp_policy_t *policy)
{ /* SRTP policy (list) */
    srtp_ctx_t *ctx;
    const srtp_policy_t *p;
    srtp_err_status_t rc;

    /* we need somewhere to store the handle */
    if (session == NULL) {
        return srtp_err_status_bad_param;
    }

    /* allocate the context and hand it to the caller right away */
    ctx = (srtp_ctx_t *)srtp_crypto_alloc(sizeof(srtp_ctx_t));
    if (ctx == NULL) {
        return srtp_err_status_alloc_fail;
    }
    *session = ctx;

    /* start with an empty session */
    ctx->stream_template = NULL;
    ctx->stream_list = NULL;
    ctx->user_data = NULL;

    /* one stream per policy-list element */
    for (p = policy; p != NULL; p = p->next) {
        rc = srtp_add_stream(ctx, p);
        if (rc != srtp_err_status_ok) {
            /* undo everything built so far */
            srtp_dealloc(*session);
            *session = NULL;
            return rc;
        }
    }

    return srtp_err_status_ok;
}
/*
 * srtp_remove_stream(session, ssrc) - unlink the stream with the given
 * SSRC from the session's list and deallocate it.  Returns
 * srtp_err_status_no_ctx when no such stream exists.
 */
srtp_err_status_t srtp_remove_stream(srtp_t session, uint32_t ssrc)
{
    srtp_stream_ctx_t *cur, *prev;
    srtp_err_status_t rc;

    if (session == NULL) {
        return srtp_err_status_bad_param;
    }

    /* locate the stream, keeping track of its predecessor */
    prev = NULL;
    for (cur = session->stream_list; cur != NULL; cur = cur->next) {
        if (cur->ssrc == ssrc) {
            break;
        }
        prev = cur;
    }
    if (cur == NULL) {
        return srtp_err_status_no_ctx;
    }

    /* unlink: patch either the list head or the predecessor's next */
    if (prev == NULL) {
        session->stream_list = cur->next;
    } else {
        prev->next = cur->next;
    }

    /* release the stream's resources */
    rc = srtp_stream_dealloc(cur, session->stream_template);
    if (rc != srtp_err_status_ok) {
        return rc;
    }
    return srtp_err_status_ok;
}
/*
 * srtp_update(session, policy) - rekey the session by applying
 * srtp_update_stream() to every element of the policy list; the first
 * failure stops the walk and is returned.
 */
srtp_err_status_t srtp_update(srtp_t session, const srtp_policy_t *policy)
{
    srtp_err_status_t rc;

    /* reject null arguments and inconsistent master-key settings */
    if (session == NULL || policy == NULL ||
        !srtp_validate_policy_master_keys(policy)) {
        return srtp_err_status_bad_param;
    }

    /* apply each policy-list element in turn */
    for (; policy != NULL; policy = policy->next) {
        rc = srtp_update_stream(session, policy);
        if (rc != srtp_err_status_ok) {
            return rc;
        }
    }
    return srtp_err_status_ok;
}
/*
 * update_template_streams(session, policy) - rekey the session's
 * template stream and every stream that was cloned from it.
 *
 * A stream is recognized as "templated" when its first session key
 * still shares the template's rtp_auth pointer.  Each such stream is
 * removed, re-cloned from a freshly initialized template, and its
 * RTP/RTCP replay state is carried over so sequence handling continues
 * seamlessly across the rekey.
 *
 * Returns srtp_err_status_bad_param when the session has no template;
 * on mid-rekey failure, streams cloned so far are freed and the error
 * is returned (already-removed old streams are NOT restored).
 */
static srtp_err_status_t update_template_streams(srtp_t session,
                                                 const srtp_policy_t *policy)
{
    srtp_err_status_t status;
    srtp_stream_t new_stream_template;
    srtp_stream_t new_stream_list = NULL;
    if (session->stream_template == NULL) {
        return srtp_err_status_bad_param;
    }
    /* allocate new template stream */
    status = srtp_stream_alloc(&new_stream_template, policy);
    if (status) {
        return status;
    }
    /* initialize new template stream */
    status = srtp_stream_init(new_stream_template, policy);
    if (status) {
        srtp_crypto_free(new_stream_template);
        return status;
    }
    /* for all old templated streams */
    for (;;) {
        srtp_stream_t stream;
        uint32_t ssrc;
        srtp_xtd_seq_num_t old_index;
        srtp_rdb_t old_rtcp_rdb;
        /* find the next stream still keyed from the OLD template:
         * a shared rtp_auth pointer marks it as a template clone */
        stream = session->stream_list;
        while ((stream != NULL) &&
               (stream->session_keys[0].rtp_auth !=
                session->stream_template->session_keys[0].rtp_auth)) {
            stream = stream->next;
        }
        if (stream == NULL) {
            /* no more templated streams */
            break;
        }
        /* save old extended seq and RTCP replay db before removal */
        ssrc = stream->ssrc;
        old_index = stream->rtp_rdbx.index;
        old_rtcp_rdb = stream->rtcp_rdb;
        /* remove stream */
        status = srtp_remove_stream(session, ssrc);
        if (status) {
            /* free new allocations made so far, then the new template */
            while (new_stream_list != NULL) {
                srtp_stream_t next = new_stream_list->next;
                srtp_stream_dealloc(new_stream_list, new_stream_template);
                new_stream_list = next;
            }
            srtp_stream_dealloc(new_stream_template, NULL);
            return status;
        }
        /* allocate and initialize a new stream from the new template */
        status = srtp_stream_clone(new_stream_template, ssrc, &stream);
        if (status) {
            /* free new allocations made so far, then the new template */
            while (new_stream_list != NULL) {
                srtp_stream_t next = new_stream_list->next;
                srtp_stream_dealloc(new_stream_list, new_stream_template);
                new_stream_list = next;
            }
            srtp_stream_dealloc(new_stream_template, NULL);
            return status;
        }
        /* add new stream to the head of the new_stream_list; the new
         * list keeps clones separate until the rekey completes */
        stream->next = new_stream_list;
        new_stream_list = stream;
        /* restore old extended seq and RTCP replay db */
        stream->rtp_rdbx.index = old_index;
        stream->rtcp_rdb = old_rtcp_rdb;
    }
    /* dealloc old template */
    srtp_stream_dealloc(session->stream_template, NULL);
    /* set new template */
    session->stream_template = new_stream_template;
    /* splice the rebuilt clones onto the front of the stream list */
    if (new_stream_list) {
        srtp_stream_t tail = new_stream_list;
        while (tail->next) {
            tail = tail->next;
        }
        tail->next = session->stream_list;
        session->stream_list = new_stream_list;
    }
    return status;
}
/*
 * update_stream(session, policy) - rekey one specific-SSRC stream by
 * removing it and re-adding it from the new policy, while preserving
 * its RTP extended sequence number and RTCP replay database.
 */
static srtp_err_status_t update_stream(srtp_t session,
                                       const srtp_policy_t *policy)
{
    uint32_t net_ssrc = htonl(policy->ssrc.value);
    srtp_xtd_seq_num_t saved_index;
    srtp_rdb_t saved_rtcp_rdb;
    srtp_stream_t stream;
    srtp_err_status_t rc;

    stream = srtp_get_stream(session, net_ssrc);
    if (stream == NULL) {
        return srtp_err_status_bad_param;
    }

    /* remember the replay state so the rekeyed stream can continue it */
    saved_index = stream->rtp_rdbx.index;
    saved_rtcp_rdb = stream->rtcp_rdb;

    /* replace the stream: drop the old one, add one from the policy */
    rc = srtp_remove_stream(session, net_ssrc);
    if (rc != srtp_err_status_ok) {
        return rc;
    }
    rc = srtp_add_stream(session, policy);
    if (rc != srtp_err_status_ok) {
        return rc;
    }

    stream = srtp_get_stream(session, net_ssrc);
    if (stream == NULL) {
        return srtp_err_status_fail;
    }

    /* carry the saved replay state over to the new stream */
    stream->rtp_rdbx.index = saved_index;
    stream->rtcp_rdb = saved_rtcp_rdb;
    return srtp_err_status_ok;
}
/*
 * srtp_update_stream(session, policy) - dispatch a single-policy rekey:
 * wildcard SSRC types rekey the template (and its clones), a specific
 * SSRC rekeys just that stream, anything else is a bad parameter.
 */
srtp_err_status_t srtp_update_stream(srtp_t session,
                                     const srtp_policy_t *policy)
{
    /* reject null arguments and inconsistent master-key settings */
    if (session == NULL || policy == NULL ||
        !srtp_validate_policy_master_keys(policy)) {
        return srtp_err_status_bad_param;
    }

    if (policy->ssrc.type == ssrc_any_outbound ||
        policy->ssrc.type == ssrc_any_inbound) {
        return update_template_streams(session, policy);
    }
    if (policy->ssrc.type == ssrc_specific) {
        return update_stream(session, policy);
    }
    /* ssrc_undefined or unknown */
    return srtp_err_status_bad_param;
}
/*
* The default policy - provides a convenient way for callers to use
* the default security policy
*
* The default policy is defined in RFC 3711
* (Section 5. Default and mandatory-to-implement Transforms)
*
*/
/*
* NOTE: cipher_key_len is really key len (128 bits) plus salt len
* (112 bits)
*/
/* There are hard-coded 16's for base_key_len in the key generation code */
/* fill p with the RFC 3711 mandatory-to-implement RTP transform set */
void srtp_crypto_policy_set_rtp_default(srtp_crypto_policy_t *p)
{
    p->sec_serv = sec_serv_conf_and_auth;
    p->cipher_type = SRTP_AES_ICM_128;
    p->cipher_key_len =
        SRTP_AES_ICM_128_KEY_LEN_WSALT; /* 128-bit key plus 112-bit salt */
    p->auth_type = SRTP_HMAC_SHA1;
    p->auth_key_len = 20; /* 160-bit HMAC-SHA1 key (RFC 3711 default) */
    p->auth_tag_len = 10; /* 80-bit tag (RFC 3711 default) */
}
/* fill p with the RFC 3711 defaults for the RTCP half of the policy */
void srtp_crypto_policy_set_rtcp_default(srtp_crypto_policy_t *p)
{
    p->sec_serv = sec_serv_conf_and_auth;
    p->cipher_type = SRTP_AES_ICM_128;
    p->cipher_key_len =
        SRTP_AES_ICM_128_KEY_LEN_WSALT; /* 128-bit key plus 112-bit salt */
    p->auth_type = SRTP_HMAC_SHA1;
    p->auth_key_len = 20; /* 160-bit HMAC-SHA1 key (RFC 3711 default) */
    p->auth_tag_len = 10; /* 80-bit tag (RFC 3711 default) */
}
/*
 * AES_CM_128_HMAC_SHA1_32 from RFC 4568.
 *
 * note that this crypto policy is intended for SRTP, but not SRTCP
 */
void srtp_crypto_policy_set_aes_cm_128_hmac_sha1_32(srtp_crypto_policy_t *p)
{
    p->sec_serv = sec_serv_conf_and_auth;
    p->cipher_type = SRTP_AES_ICM_128;
    p->cipher_key_len =
        SRTP_AES_ICM_128_KEY_LEN_WSALT; /* 128-bit key, 112-bit salt */
    p->auth_type = SRTP_HMAC_SHA1;
    p->auth_key_len = 20; /* 160-bit key */
    p->auth_tag_len = 4;  /* 32-bit tag */
}
/*
 * AES_CM_128_NULL_AUTH from RFC 4568: confidentiality only.
 *
 * note that this crypto policy is intended for SRTP, but not SRTCP
 */
void srtp_crypto_policy_set_aes_cm_128_null_auth(srtp_crypto_policy_t *p)
{
    p->sec_serv = sec_serv_conf;
    p->cipher_type = SRTP_AES_ICM_128;
    p->cipher_key_len =
        SRTP_AES_ICM_128_KEY_LEN_WSALT; /* 128-bit key, 112-bit salt */
    p->auth_type = SRTP_NULL_AUTH;      /* no message authentication */
    p->auth_key_len = 0;
    p->auth_tag_len = 0;
}
/* NULL_CIPHER_HMAC_SHA1_80 from RFC 4568: authentication only */
void srtp_crypto_policy_set_null_cipher_hmac_sha1_80(srtp_crypto_policy_t *p)
{
    p->sec_serv = sec_serv_auth;
    p->cipher_type = SRTP_NULL_CIPHER; /* no encryption */
    p->cipher_key_len = 0;
    p->auth_type = SRTP_HMAC_SHA1;
    p->auth_key_len = 20; /* 160-bit key */
    p->auth_tag_len = 10; /* 80-bit tag */
}
/* no encryption, no authentication - should only be used for testing */
void srtp_crypto_policy_set_null_cipher_hmac_null(srtp_crypto_policy_t *p)
{
    p->sec_serv = sec_serv_none;
    p->cipher_type = SRTP_NULL_CIPHER;
    p->cipher_key_len = 0;
    p->auth_type = SRTP_NULL_AUTH;
    p->auth_key_len = 0;
    p->auth_tag_len = 0;
}
/* AES_256_CM_HMAC_SHA1_80 from RFC 6188 */
void srtp_crypto_policy_set_aes_cm_256_hmac_sha1_80(srtp_crypto_policy_t *p)
{
    p->sec_serv = sec_serv_conf_and_auth;
    p->cipher_type = SRTP_AES_ICM_256;
    p->cipher_key_len = SRTP_AES_ICM_256_KEY_LEN_WSALT;
    p->auth_type = SRTP_HMAC_SHA1;
    p->auth_key_len = 20; /* 160-bit key (RFC 3711 default) */
    p->auth_tag_len = 10; /* 80-bit tag (RFC 3711 default) */
}
void srtp_crypto_policy_set_aes_cm_256_hmac_sha1_32(srtp_crypto_policy_t *p)
{
    /*
     * corresponds to RFC 6188 (AES_256_CM_HMAC_SHA1_32)
     *
     * note that this crypto policy is intended for SRTP, but not SRTCP
     */
    p->cipher_type = SRTP_AES_ICM_256;
    p->cipher_key_len = SRTP_AES_ICM_256_KEY_LEN_WSALT;
    p->auth_type = SRTP_HMAC_SHA1;
    p->auth_key_len = 20; /* default 160 bits per RFC 3711 */
    p->auth_tag_len = 4;  /* 32 bit tag per RFC 6188 (was mislabeled 80) */
    p->sec_serv = sec_serv_conf_and_auth;
}
/*
 * AES-256 counter mode with no authentication: confidentiality only.
 */
void srtp_crypto_policy_set_aes_cm_256_null_auth(srtp_crypto_policy_t *p)
{
    p->sec_serv = sec_serv_conf;
    p->cipher_type = SRTP_AES_ICM_256;
    p->cipher_key_len = SRTP_AES_ICM_256_KEY_LEN_WSALT;
    p->auth_type = SRTP_NULL_AUTH;
    p->auth_key_len = 0;
    p->auth_tag_len = 0;
}
/* AES_192_CM_HMAC_SHA1_80 from RFC 6188 */
void srtp_crypto_policy_set_aes_cm_192_hmac_sha1_80(srtp_crypto_policy_t *p)
{
    p->sec_serv = sec_serv_conf_and_auth;
    p->cipher_type = SRTP_AES_ICM_192;
    p->cipher_key_len = SRTP_AES_ICM_192_KEY_LEN_WSALT;
    p->auth_type = SRTP_HMAC_SHA1;
    p->auth_key_len = 20; /* 160-bit key (RFC 3711 default) */
    p->auth_tag_len = 10; /* 80-bit tag (RFC 3711 default) */
}
void srtp_crypto_policy_set_aes_cm_192_hmac_sha1_32(srtp_crypto_policy_t *p)
{
    /*
     * corresponds to RFC 6188 (AES_192_CM_HMAC_SHA1_32)
     *
     * note that this crypto policy is intended for SRTP, but not SRTCP
     */
    p->cipher_type = SRTP_AES_ICM_192;
    p->cipher_key_len = SRTP_AES_ICM_192_KEY_LEN_WSALT;
    p->auth_type = SRTP_HMAC_SHA1;
    p->auth_key_len = 20; /* default 160 bits per RFC 3711 */
    p->auth_tag_len = 4;  /* 32 bit tag per RFC 6188 (was mislabeled 80) */
    p->sec_serv = sec_serv_conf_and_auth;
}
/*
 * AES-192 counter mode with no authentication: confidentiality only.
 */
void srtp_crypto_policy_set_aes_cm_192_null_auth(srtp_crypto_policy_t *p)
{
    p->sec_serv = sec_serv_conf;
    p->cipher_type = SRTP_AES_ICM_192;
    p->cipher_key_len = SRTP_AES_ICM_192_KEY_LEN_WSALT;
    p->auth_type = SRTP_NULL_AUTH;
    p->auth_key_len = 0;
    p->auth_tag_len = 0;
}
/*
 * AES-128 GCM mode with an 8-octet auth tag.  GCM is an AEAD cipher,
 * so the separate auth function is NULL.
 */
void srtp_crypto_policy_set_aes_gcm_128_8_auth(srtp_crypto_policy_t *p)
{
    p->sec_serv = sec_serv_conf_and_auth;
    p->cipher_type = SRTP_AES_GCM_128;
    p->cipher_key_len = SRTP_AES_GCM_128_KEY_LEN_WSALT;
    p->auth_type = SRTP_NULL_AUTH; /* GCM provides authentication itself */
    p->auth_key_len = 0;
    p->auth_tag_len = 8; /* 8-octet tag */
}
/*
 * AES-256 GCM mode with an 8-octet auth tag.  GCM is an AEAD cipher,
 * so the separate auth function is NULL.
 */
void srtp_crypto_policy_set_aes_gcm_256_8_auth(srtp_crypto_policy_t *p)
{
    p->sec_serv = sec_serv_conf_and_auth;
    p->cipher_type = SRTP_AES_GCM_256;
    p->cipher_key_len = SRTP_AES_GCM_256_KEY_LEN_WSALT;
    p->auth_type = SRTP_NULL_AUTH; /* GCM provides authentication itself */
    p->auth_key_len = 0;
    p->auth_tag_len = 8; /* 8-octet tag */
}
/*
 * AES-128 GCM mode with an 8-octet auth tag, no RTCP encryption.
 */
void srtp_crypto_policy_set_aes_gcm_128_8_only_auth(srtp_crypto_policy_t *p)
{
    p->sec_serv = sec_serv_auth; /* auth-only applies to RTCP */
    p->cipher_type = SRTP_AES_GCM_128;
    p->cipher_key_len = SRTP_AES_GCM_128_KEY_LEN_WSALT;
    p->auth_type = SRTP_NULL_AUTH; /* GCM provides authentication itself */
    p->auth_key_len = 0;
    p->auth_tag_len = 8; /* 8-octet tag */
}
/*
 * AES-256 GCM mode with an 8-octet auth tag, no RTCP encryption.
 */
void srtp_crypto_policy_set_aes_gcm_256_8_only_auth(srtp_crypto_policy_t *p)
{
    p->sec_serv = sec_serv_auth; /* auth-only applies to RTCP */
    p->cipher_type = SRTP_AES_GCM_256;
    p->cipher_key_len = SRTP_AES_GCM_256_KEY_LEN_WSALT;
    p->auth_type = SRTP_NULL_AUTH; /* GCM provides authentication itself */
    p->auth_key_len = 0;
    p->auth_tag_len = 8; /* 8-octet tag */
}
/*
 * AES-128 GCM mode with a full 16-octet auth tag.
 */
void srtp_crypto_policy_set_aes_gcm_128_16_auth(srtp_crypto_policy_t *p)
{
    p->sec_serv = sec_serv_conf_and_auth;
    p->cipher_type = SRTP_AES_GCM_128;
    p->cipher_key_len = SRTP_AES_GCM_128_KEY_LEN_WSALT;
    p->auth_type = SRTP_NULL_AUTH; /* GCM provides authentication itself */
    p->auth_key_len = 0;
    p->auth_tag_len = 16; /* 16-octet tag */
}
/*
 * AES-256 GCM mode with a full 16-octet auth tag.
 */
void srtp_crypto_policy_set_aes_gcm_256_16_auth(srtp_crypto_policy_t *p)
{
    p->sec_serv = sec_serv_conf_and_auth;
    p->cipher_type = SRTP_AES_GCM_256;
    p->cipher_key_len = SRTP_AES_GCM_256_KEY_LEN_WSALT;
    p->auth_type = SRTP_NULL_AUTH; /* GCM provides authentication itself */
    p->auth_key_len = 0;
    p->auth_tag_len = 16; /* 16-octet tag */
}
/*
* secure rtcp functions
*/
/*
* AEAD uses a new IV formation method. This function implements
* section 9.1 (SRTCP IV Formation for AES-GCM) from RFC7714.
* The calculation is defined as, where (+) is the xor operation:
*
* 0 1 2 3 4 5 6 7 8 9 10 11
* +--+--+--+--+--+--+--+--+--+--+--+--+
* |00|00| SSRC |00|00|0+SRTCP Idx|---+
* +--+--+--+--+--+--+--+--+--+--+--+--+ |
* |
* +--+--+--+--+--+--+--+--+--+--+--+--+ |
* | Encryption Salt |->(+)
* +--+--+--+--+--+--+--+--+--+--+--+--+ |
* |
* +--+--+--+--+--+--+--+--+--+--+--+--+ |
* | Initialization Vector |<--+
* +--+--+--+--+--+--+--+--+--+--+--+--+*
*
* Input: *session_keys - pointer to SRTP stream context session keys,
* used to retrieve the SALT
 *              *iv - Pointer to receive the calculated IV
* seq_num - The SEQ value to use for the IV calculation.
* *hdr - The RTP header, used to get the SSRC value
*
* Returns: srtp_err_status_ok if no error or srtp_err_status_bad_param
* if seq_num is invalid
*
*/
static srtp_err_status_t srtp_calc_aead_iv_srtcp(
    srtp_session_keys_t *session_keys,
    v128_t *iv,
    uint32_t seq_num,
    srtcp_hdr_t *hdr)
{
    v128_t in;   /* IV pre-image: 00 00 | SSRC | 00 00 | SRTCP index */
    v128_t salt; /* first 12 octets of the session encryption salt */
    memset(&in, 0, sizeof(v128_t));
    memset(&salt, 0, sizeof(v128_t));
    in.v16[0] = 0;                     /* octets 0-1 are zero */
    memcpy(&in.v16[1], &hdr->ssrc, 4); /* octets 2-5: SSRC, still in
                                          network order! */
    in.v16[3] = 0;                     /* octets 6-7 are zero */
    /*
     * The SRTCP index (seq_num) spans bits 0 through 30 inclusive.
     * The most significant bit should be zero.
     */
    if (seq_num & 0x80000000UL) {
        return srtp_err_status_bad_param;
    }
    in.v32[2] = htonl(seq_num); /* octets 8-11: index in network order */
    debug_print(mod_srtp, "Pre-salted RTCP IV = %s\n", v128_hex_string(&in));
    /*
     * Get the SALT value from the context (only the first 12 octets
     * are used; the rest of 'salt' stays zero)
     */
    memcpy(salt.v8, session_keys->c_salt, 12);
    debug_print(mod_srtp, "RTCP SALT = %s\n", v128_hex_string(&salt));
    /*
     * Finally, apply the SALT to the input, per RFC 7714 section 9.1
     */
    v128_xor(iv, &in, &salt);
    return srtp_err_status_ok;
}
/*
 * srtp_protect_rtcp_aead() - AEAD (AES-GCM 128/256) protection of an
 * outgoing RTCP compound packet.
 *
 * On entry *pkt_octet_len is the plaintext packet length; on success
 * the packet has been encrypted/authenticated in place and
 * *pkt_octet_len grown by tag_len + sizeof(srtcp_trailer_t) + mki_size.
 * The caller must have reserved that extra room after the packet.
 */
static srtp_err_status_t srtp_protect_rtcp_aead(
    srtp_t ctx,
    srtp_stream_ctx_t *stream,
    void *rtcp_hdr,
    unsigned int *pkt_octet_len,
    srtp_session_keys_t *session_keys,
    unsigned int use_mki)
{
    srtcp_hdr_t *hdr = (srtcp_hdr_t *)rtcp_hdr;
    uint32_t *enc_start; /* pointer to start of encrypted portion */
    uint32_t *trailer_p; /* pointer to start of trailer */
    uint32_t trailer;    /* trailer value */
    unsigned int enc_octet_len = 0; /* number of octets in encrypted portion */
    uint8_t *auth_tag = NULL; /* location of auth_tag within packet */
    srtp_err_status_t status;
    uint32_t tag_len;
    uint32_t seq_num;
    v128_t iv;
    uint32_t tseq;
    unsigned int mki_size = 0;
    /* get tag length from stream context */
    tag_len = srtp_auth_get_tag_length(session_keys->rtcp_auth);
    /*
     * set encryption start and encryption length - if we're not
     * providing confidentiality, set enc_start to NULL
     */
    enc_start = (uint32_t *)hdr + uint32s_in_rtcp_header;
    enc_octet_len = *pkt_octet_len - octets_in_rtcp_header;
    /* NOTE: hdr->length is not usable - it refers to only the first
     * RTCP report in the compound packet!
     */
    /* trailer goes after the payload and the (not yet written) tag */
    trailer_p = (uint32_t *)((char *)enc_start + enc_octet_len + tag_len);
    if (stream->rtcp_services & sec_serv_conf) {
        trailer = htonl(SRTCP_E_BIT); /* set encrypt bit */
    } else {
        enc_start = NULL;
        enc_octet_len = 0;
        /* 0 is network-order independant */
        trailer = 0x00000000; /* E-bit clear: no encryption */
    }
    /* write the MKI (if enabled) after payload + tag + trailer */
    mki_size = srtp_inject_mki((uint8_t *)hdr + *pkt_octet_len + tag_len +
                                   sizeof(srtcp_trailer_t),
                               session_keys, use_mki);
    /*
     * set the auth_tag pointer to the proper location, which is after
     * the payload, but before the trailer
     * (note that srtpc *always* provides authentication, unlike srtp)
     */
    /* Note: This would need to change for optional mikey data */
    auth_tag = (uint8_t *)hdr + *pkt_octet_len;
    /*
     * check sequence number for overruns, and copy it into the packet
     * if its value isn't too big
     */
    status = srtp_rdb_increment(&stream->rtcp_rdb);
    if (status) {
        return status;
    }
    seq_num = srtp_rdb_get_value(&stream->rtcp_rdb);
    trailer |= htonl(seq_num); /* merge index into the E-bit word */
    debug_print(mod_srtp, "srtcp index: %x", seq_num);
    memcpy(trailer_p, &trailer, sizeof(trailer));
    /*
     * Calculate and set the IV (RFC 7714 SRTCP IV formation)
     */
    status = srtp_calc_aead_iv_srtcp(session_keys, &iv, seq_num, hdr);
    if (status) {
        return srtp_err_status_cipher_fail;
    }
    status = srtp_cipher_set_iv(session_keys->rtcp_cipher, (uint8_t *)&iv,
                                srtp_direction_encrypt);
    if (status) {
        return srtp_err_status_cipher_fail;
    }
    /*
     * Set the AAD for GCM mode
     */
    if (enc_start) {
        /*
         * If payload encryption is enabled, then the AAD consist of
         * the RTCP header and the seq# at the end of the packet
         */
        status = srtp_cipher_set_aad(session_keys->rtcp_cipher, (uint8_t *)hdr,
                                     octets_in_rtcp_header);
        if (status) {
            return (srtp_err_status_cipher_fail);
        }
    } else {
        /*
         * Since payload encryption is not enabled, we must authenticate
         * the entire packet as described in RFC 7714 (Section 9.3. Data
         * Types in Unencrypted SRTCP Compound Packets)
         */
        status = srtp_cipher_set_aad(session_keys->rtcp_cipher, (uint8_t *)hdr,
                                     *pkt_octet_len);
        if (status) {
            return (srtp_err_status_cipher_fail);
        }
    }
    /*
     * Process the sequence# as AAD (tseq is already network order)
     */
    tseq = trailer;
    status = srtp_cipher_set_aad(session_keys->rtcp_cipher, (uint8_t *)&tseq,
                                 sizeof(srtcp_trailer_t));
    if (status) {
        return (srtp_err_status_cipher_fail);
    }
    /* if we're encrypting, exor keystream into the message */
    if (enc_start) {
        status = srtp_cipher_encrypt(session_keys->rtcp_cipher,
                                     (uint8_t *)enc_start, &enc_octet_len);
        if (status) {
            return srtp_err_status_cipher_fail;
        }
        /*
         * Get the tag and append that to the output
         */
        status = srtp_cipher_get_tag(session_keys->rtcp_cipher,
                                     (uint8_t *)auth_tag, &tag_len);
        if (status) {
            return (srtp_err_status_cipher_fail);
        }
        enc_octet_len += tag_len;
    } else {
        /*
         * Even though we're not encrypting the payload, we need
         * to run the cipher to get the auth tag.
         */
        unsigned int nolen = 0;
        status = srtp_cipher_encrypt(session_keys->rtcp_cipher, NULL, &nolen);
        if (status) {
            return srtp_err_status_cipher_fail;
        }
        /*
         * Get the tag and append that to the output
         */
        status = srtp_cipher_get_tag(session_keys->rtcp_cipher,
                                     (uint8_t *)auth_tag, &tag_len);
        if (status) {
            return (srtp_err_status_cipher_fail);
        }
        enc_octet_len += tag_len;
    }
    /* increase the packet length by the length of the auth tag and seq_num*/
    *pkt_octet_len += (tag_len + sizeof(srtcp_trailer_t));
    /* increase the packet by the mki_size */
    *pkt_octet_len += mki_size;
    return srtp_err_status_ok;
}
/*
 * srtp_unprotect_rtcp_aead() - AEAD (AES-GCM) verification/decryption
 * of an incoming SRTCP packet.  The auth tag sits at the end of the
 * ciphertext and is checked implicitly by GCM while decrypting.
 *
 * On success the packet is plaintext in place and *pkt_octet_len has
 * been reduced by tag_len + sizeof(srtcp_trailer_t) + mki_size.  A
 * packet that authenticates against the template stream causes a new
 * stream to be cloned and added to the session.
 */
static srtp_err_status_t srtp_unprotect_rtcp_aead(
    srtp_t ctx,
    srtp_stream_ctx_t *stream,
    void *srtcp_hdr,
    unsigned int *pkt_octet_len,
    srtp_session_keys_t *session_keys,
    unsigned int use_mki)
{
    srtcp_hdr_t *hdr = (srtcp_hdr_t *)srtcp_hdr;
    uint32_t *enc_start; /* pointer to start of encrypted portion */
    uint32_t *trailer_p; /* pointer to start of trailer */
    uint32_t trailer;    /* trailer value */
    unsigned int enc_octet_len = 0; /* number of octets in encrypted portion */
    uint8_t *auth_tag = NULL; /* location of auth_tag within packet */
    srtp_err_status_t status;
    int tag_len;
    unsigned int tmp_len;
    uint32_t seq_num;
    v128_t iv;
    uint32_t tseq;
    unsigned int mki_size = 0;
    /* get tag length from stream context */
    tag_len = srtp_auth_get_tag_length(session_keys->rtcp_auth);
    if (use_mki) {
        mki_size = session_keys->mki_size;
    }
    /*
     * set encryption start, encryption length, and trailer
     */
    /* index & E (encryption) bit follow normal data. hdr->len is the number of
     * words (32-bit) in the normal packet minus 1
     */
    /* This should point trailer to the word past the end of the normal data. */
    /* This would need to be modified for optional mikey data */
    trailer_p = (uint32_t *)((char *)hdr + *pkt_octet_len -
                             sizeof(srtcp_trailer_t) - mki_size);
    memcpy(&trailer, trailer_p, sizeof(trailer));
    /*
     * We pass the tag down to the cipher when doing GCM mode, so the
     * "encrypted portion" includes the tag but not the trailer/MKI
     */
    enc_octet_len = *pkt_octet_len - (octets_in_rtcp_header +
                                      sizeof(srtcp_trailer_t) + mki_size);
    auth_tag = (uint8_t *)hdr + *pkt_octet_len - tag_len - mki_size -
               sizeof(srtcp_trailer_t);
    /* high bit of the trailer's first octet is the E (encrypted) bit */
    if (*((unsigned char *)trailer_p) & SRTCP_E_BYTE_BIT) {
        enc_start = (uint32_t *)hdr + uint32s_in_rtcp_header;
    } else {
        enc_octet_len = 0;
        enc_start = NULL; /* this indicates that there's no encryption */
    }
    /*
     * check the sequence number for replays
     */
    /* this is easier than dealing with bitfield access */
    seq_num = ntohl(trailer) & SRTCP_INDEX_MASK;
    debug_print(mod_srtp, "srtcp index: %x", seq_num);
    status = srtp_rdb_check(&stream->rtcp_rdb, seq_num);
    if (status) {
        return status;
    }
    /*
     * Calculate and set the IV (RFC 7714 SRTCP IV formation)
     */
    status = srtp_calc_aead_iv_srtcp(session_keys, &iv, seq_num, hdr);
    if (status) {
        return srtp_err_status_cipher_fail;
    }
    status = srtp_cipher_set_iv(session_keys->rtcp_cipher, (uint8_t *)&iv,
                                srtp_direction_decrypt);
    if (status) {
        return srtp_err_status_cipher_fail;
    }
    /*
     * Set the AAD for GCM mode
     */
    if (enc_start) {
        /*
         * If payload encryption is enabled, then the AAD consist of
         * the RTCP header and the seq# at the end of the packet
         */
        status = srtp_cipher_set_aad(session_keys->rtcp_cipher, (uint8_t *)hdr,
                                     octets_in_rtcp_header);
        if (status) {
            return (srtp_err_status_cipher_fail);
        }
    } else {
        /*
         * Since payload encryption is not enabled, we must authenticate
         * the entire packet as described in RFC 7714 (Section 9.3. Data
         * Types in Unencrypted SRTCP Compound Packets)
         */
        status = srtp_cipher_set_aad(
            session_keys->rtcp_cipher, (uint8_t *)hdr,
            (*pkt_octet_len - tag_len - sizeof(srtcp_trailer_t) - mki_size));
        if (status) {
            return (srtp_err_status_cipher_fail);
        }
    }
    /*
     * Process the sequence# as AAD (tseq is already network order)
     */
    tseq = trailer;
    status = srtp_cipher_set_aad(session_keys->rtcp_cipher, (uint8_t *)&tseq,
                                 sizeof(srtcp_trailer_t));
    if (status) {
        return (srtp_err_status_cipher_fail);
    }
    /* if we're decrypting, exor keystream into the message */
    if (enc_start) {
        status = srtp_cipher_decrypt(session_keys->rtcp_cipher,
                                     (uint8_t *)enc_start, &enc_octet_len);
        if (status) {
            return status;
        }
    } else {
        /*
         * Still need to run the cipher to check the tag
         */
        tmp_len = tag_len;
        status = srtp_cipher_decrypt(session_keys->rtcp_cipher,
                                     (uint8_t *)auth_tag, &tmp_len);
        if (status) {
            return status;
        }
    }
    /* decrease the packet length by the length of the auth tag and seq_num*/
    *pkt_octet_len -= (tag_len + sizeof(srtcp_trailer_t) + mki_size);
    /*
     * verify that stream is for received traffic - this check will
     * detect SSRC collisions, since a stream that appears in both
     * srtp_protect() and srtp_unprotect() will fail this test in one of
     * those functions.
     *
     * we do this check *after* the authentication check, so that the
     * latter check will catch any attempts to fool us into thinking
     * that we've got a collision
     */
    if (stream->direction != dir_srtp_receiver) {
        if (stream->direction == dir_unknown) {
            stream->direction = dir_srtp_receiver;
        } else {
            srtp_handle_event(ctx, stream, event_ssrc_collision);
        }
    }
    /*
     * if the stream is a 'provisional' one, in which the template context
     * is used, then we need to allocate a new stream at this point, since
     * the authentication passed
     */
    if (stream == ctx->stream_template) {
        srtp_stream_ctx_t *new_stream;
        /*
         * allocate and initialize a new stream
         *
         * note that we indicate failure if we can't allocate the new
         * stream, and some implementations will want to not return
         * failure here
         */
        status =
            srtp_stream_clone(ctx->stream_template, hdr->ssrc, &new_stream);
        if (status) {
            return status;
        }
        /* add new stream to the head of the stream_list */
        new_stream->next = ctx->stream_list;
        ctx->stream_list = new_stream;
        /* set stream (the pointer used in this function) */
        stream = new_stream;
    }
    /* we've passed the authentication check, so add seq_num to the rdb */
    srtp_rdb_add_index(&stream->rtcp_rdb, seq_num);
    return srtp_err_status_ok;
}
/*
 * srtp_protect_rtcp() - protect an outgoing RTCP packet; convenience
 * wrapper around srtp_protect_rtcp_mki() with the MKI disabled.
 */
srtp_err_status_t srtp_protect_rtcp(srtp_t ctx,
                                    void *rtcp_hdr,
                                    int *pkt_octet_len)
{
    return srtp_protect_rtcp_mki(ctx, rtcp_hdr, pkt_octet_len, 0, 0);
}
/*
 * srtp_protect_rtcp_mki() - protect an outgoing RTCP compound packet,
 * optionally appending the MKI selected by mki_index.
 *
 * On entry *pkt_octet_len is the plaintext length; on success the
 * packet is protected in place and *pkt_octet_len grows by
 * tag_len + sizeof(srtcp_trailer_t) + mki_size.  The caller must have
 * reserved that much extra room after the packet.  AEAD (GCM) streams
 * are dispatched to srtp_protect_rtcp_aead().
 */
srtp_err_status_t srtp_protect_rtcp_mki(srtp_t ctx,
                                        void *rtcp_hdr,
                                        int *pkt_octet_len,
                                        unsigned int use_mki,
                                        unsigned int mki_index)
{
    srtcp_hdr_t *hdr = (srtcp_hdr_t *)rtcp_hdr;
    uint32_t *enc_start;  /* pointer to start of encrypted portion */
    uint32_t *auth_start; /* pointer to start of auth. portion */
    uint32_t *trailer_p;  /* pointer to start of trailer */
    uint32_t trailer;     /* trailer value */
    unsigned int enc_octet_len = 0; /* number of octets in encrypted portion */
    uint8_t *auth_tag = NULL; /* location of auth_tag within packet */
    srtp_err_status_t status;
    int tag_len;
    srtp_stream_ctx_t *stream;
    uint32_t prefix_len;
    uint32_t seq_num;
    unsigned int mki_size = 0;
    srtp_session_keys_t *session_keys = NULL;
    /* we assume the hdr is 32-bit aligned to start */
    /* check the packet length - it must at least contain a full header */
    if (*pkt_octet_len < octets_in_rtcp_header)
        return srtp_err_status_bad_param;
    /*
     * look up ssrc in srtp_stream list, and process the packet with
     * the appropriate stream. if we haven't seen this stream before,
     * there's only one key for this srtp_session, and the cipher
     * supports key-sharing, then we assume that a new stream using
     * that key has just started up
     */
    stream = srtp_get_stream(ctx, hdr->ssrc);
    if (stream == NULL) {
        if (ctx->stream_template != NULL) {
            srtp_stream_ctx_t *new_stream;
            /* allocate and initialize a new stream cloned from the
             * template, keyed by this packet's SSRC */
            status =
                srtp_stream_clone(ctx->stream_template, hdr->ssrc, &new_stream);
            if (status)
                return status;
            /* add new stream to the head of the stream_list */
            new_stream->next = ctx->stream_list;
            ctx->stream_list = new_stream;
            /* set stream (the pointer used in this function) */
            stream = new_stream;
        } else {
            /* no template stream, so we return an error */
            return srtp_err_status_no_ctx;
        }
    }
    /*
     * verify that stream is for sending traffic - this check will
     * detect SSRC collisions, since a stream that appears in both
     * srtp_protect() and srtp_unprotect() will fail this test in one of
     * those functions.
     */
    if (stream->direction != dir_srtp_sender) {
        if (stream->direction == dir_unknown) {
            stream->direction = dir_srtp_sender;
        } else {
            srtp_handle_event(ctx, stream, event_ssrc_collision);
        }
    }
    session_keys =
        srtp_get_session_keys_with_mki_index(stream, use_mki, mki_index);
    if (session_keys == NULL)
        return srtp_err_status_bad_mki;
    /*
     * Check if this is an AEAD stream (GCM mode). If so, then dispatch
     * the request to our AEAD handler.
     * NOTE(review): this inspects rtp_cipher (not rtcp_cipher) -
     * presumably both halves are configured with the same algorithm
     * family from one policy; confirm before changing.
     */
    if (session_keys->rtp_cipher->algorithm == SRTP_AES_GCM_128 ||
        session_keys->rtp_cipher->algorithm == SRTP_AES_GCM_256) {
        return srtp_protect_rtcp_aead(ctx, stream, rtcp_hdr,
                                      (unsigned int *)pkt_octet_len,
                                      session_keys, use_mki);
    }
    /* get tag length from stream context */
    tag_len = srtp_auth_get_tag_length(session_keys->rtcp_auth);
    /*
     * set encryption start and encryption length - if we're not
     * providing confidentiality, set enc_start to NULL
     */
    enc_start = (uint32_t *)hdr + uint32s_in_rtcp_header;
    enc_octet_len = *pkt_octet_len - octets_in_rtcp_header;
    /* all of the packet, except the header, gets encrypted */
    /*
     * NOTE: hdr->length is not usable - it refers to only the first RTCP report
     * in the compound packet!
     */
    trailer_p = (uint32_t *)((char *)enc_start + enc_octet_len);
    if (stream->rtcp_services & sec_serv_conf) {
        trailer = htonl(SRTCP_E_BIT); /* set encrypt bit */
    } else {
        enc_start = NULL;
        enc_octet_len = 0;
        /* 0 is network-order independant */
        trailer = 0x00000000; /* E-bit clear: no encryption */
    }
    /* write the MKI (if enabled) after the trailer */
    mki_size = srtp_inject_mki((uint8_t *)hdr + *pkt_octet_len +
                                   sizeof(srtcp_trailer_t),
                               session_keys, use_mki);
    /*
     * set the auth_start and auth_tag pointers to the proper locations
     * (note that srtpc *always* provides authentication, unlike srtp)
     */
    /* Note: This would need to change for optional mikey data */
    auth_start = (uint32_t *)hdr;
    auth_tag =
        (uint8_t *)hdr + *pkt_octet_len + sizeof(srtcp_trailer_t) + mki_size;
    /* perform EKT processing if needed */
    srtp_ekt_write_data(stream->ekt, auth_tag, tag_len, pkt_octet_len,
                        srtp_rdbx_get_packet_index(&stream->rtp_rdbx));
    /*
     * check sequence number for overruns, and copy it into the packet
     * if its value isn't too big
     */
    status = srtp_rdb_increment(&stream->rtcp_rdb);
    if (status)
        return status;
    seq_num = srtp_rdb_get_value(&stream->rtcp_rdb);
    trailer |= htonl(seq_num); /* merge index into the E-bit word */
    debug_print(mod_srtp, "srtcp index: %x", seq_num);
    memcpy(trailer_p, &trailer, sizeof(trailer));
    /*
     * if we're using Rijndael counter mode, set nonce and seq
     */
    if (session_keys->rtcp_cipher->type->id == SRTP_AES_ICM_128 ||
        session_keys->rtcp_cipher->type->id == SRTP_AES_ICM_192 ||
        session_keys->rtcp_cipher->type->id == SRTP_AES_ICM_256) {
        v128_t iv;
        iv.v32[0] = 0;
        iv.v32[1] = hdr->ssrc; /* still in network order! */
        iv.v32[2] = htonl(seq_num >> 16);
        iv.v32[3] = htonl(seq_num << 16);
        status = srtp_cipher_set_iv(session_keys->rtcp_cipher, (uint8_t *)&iv,
                                    srtp_direction_encrypt);
    } else {
        v128_t iv;
        /* otherwise, just set the index to seq_num */
        iv.v32[0] = 0;
        iv.v32[1] = 0;
        iv.v32[2] = 0;
        iv.v32[3] = htonl(seq_num);
        status = srtp_cipher_set_iv(session_keys->rtcp_cipher, (uint8_t *)&iv,
                                    srtp_direction_encrypt);
    }
    if (status)
        return srtp_err_status_cipher_fail;
    /*
     * if we're authenticating using a universal hash, put the keystream
     * prefix into the authentication tag
     */
    /* if auth_start is non-null, then put keystream into tag */
    if (auth_start) {
        /* put keystream prefix into auth_tag (prefix_len may be 0) */
        prefix_len = srtp_auth_get_prefix_length(session_keys->rtcp_auth);
        status = srtp_cipher_output(session_keys->rtcp_cipher, auth_tag,
                                    &prefix_len);
        debug_print(mod_srtp, "keystream prefix: %s",
                    srtp_octet_string_hex_string(auth_tag, prefix_len));
        if (status)
            return srtp_err_status_cipher_fail;
    }
    /* if we're encrypting, exor keystream into the message */
    if (enc_start) {
        status = srtp_cipher_encrypt(session_keys->rtcp_cipher,
                                     (uint8_t *)enc_start, &enc_octet_len);
        if (status)
            return srtp_err_status_cipher_fail;
    }
    /* initialize auth func context */
    status = srtp_auth_start(session_keys->rtcp_auth);
    if (status)
        return status;
    /*
     * run auth func over packet (including trailer), and write the
     * result at auth_tag
     */
    status =
        srtp_auth_compute(session_keys->rtcp_auth, (uint8_t *)auth_start,
                          (*pkt_octet_len) + sizeof(srtcp_trailer_t), auth_tag);
    debug_print(mod_srtp, "srtcp auth tag: %s",
                srtp_octet_string_hex_string(auth_tag, tag_len));
    if (status)
        return srtp_err_status_auth_fail;
    /* increase the packet length by the length of the auth tag and seq_num*/
    *pkt_octet_len += (tag_len + sizeof(srtcp_trailer_t));
    /* increase the packet by the mki_size */
    *pkt_octet_len += mki_size;
    return srtp_err_status_ok;
}
/*
 * srtp_unprotect_rtcp() - verify/decrypt an incoming SRTCP packet;
 * convenience wrapper around srtp_unprotect_rtcp_mki() with the MKI
 * disabled.
 */
srtp_err_status_t srtp_unprotect_rtcp(srtp_t ctx,
                                      void *srtcp_hdr,
                                      int *pkt_octet_len)
{
    return srtp_unprotect_rtcp_mki(ctx, srtcp_hdr, pkt_octet_len, 0);
}
/*
 * srtp_unprotect_rtcp_mki() - verify and decrypt an SRTCP packet in place.
 *
 * ctx           - SRTP session context
 * srtcp_hdr     - start of the SRTCP packet (assumed 32-bit aligned)
 * pkt_octet_len - in: protected packet length; out: plaintext RTCP length
 *                 after the auth tag, trailer, MKI, and any EKT data are
 *                 stripped
 * use_mki       - nonzero if an MKI field is present and selects the
 *                 session keys
 *
 * On any non-ok return the packet must be discarded by the caller.  The
 * replay database is only updated after authentication succeeds, and a
 * provisional (template-derived) stream is only committed at that point.
 */
srtp_err_status_t srtp_unprotect_rtcp_mki(srtp_t ctx,
                                          void *srtcp_hdr,
                                          int *pkt_octet_len,
                                          unsigned int use_mki)
{
    srtcp_hdr_t *hdr = (srtcp_hdr_t *)srtcp_hdr;
    uint32_t *enc_start;     /* pointer to start of encrypted portion */
    uint32_t *auth_start;    /* pointer to start of auth. portion */
    uint32_t *trailer_p;     /* pointer to start of trailer */
    uint32_t trailer;        /* trailer value */
    unsigned int enc_octet_len = 0; /* number of octets in encrypted portion */
    uint8_t *auth_tag = NULL; /* location of auth_tag within packet */
    uint8_t tmp_tag[SRTP_MAX_TAG_LEN];
    uint8_t tag_copy[SRTP_MAX_TAG_LEN];
    srtp_err_status_t status;
    unsigned int auth_len;
    int tag_len;
    srtp_stream_ctx_t *stream;
    uint32_t prefix_len;
    uint32_t seq_num;
    int e_bit_in_packet;          /* whether the E-bit was found in the packet */
    int sec_serv_confidentiality; /* whether confidentiality was requested */
    unsigned int mki_size = 0;
    srtp_session_keys_t *session_keys = NULL;

    /* we assume the hdr is 32-bit aligned to start */

    if (*pkt_octet_len < 0)
        return srtp_err_status_bad_param;

    /*
     * check that the length value is sane; we'll check again once we
     * know the tag length, but we at least want to know that it is
     * a positive value
     */
    if ((unsigned int)(*pkt_octet_len) <
        octets_in_rtcp_header + sizeof(srtcp_trailer_t))
        return srtp_err_status_bad_param;

    /*
     * look up ssrc in srtp_stream list, and process the packet with
     * the appropriate stream.  if we haven't seen this stream before,
     * there's only one key for this srtp_session, and the cipher
     * supports key-sharing, then we assume that a new stream using
     * that key has just started up
     */
    stream = srtp_get_stream(ctx, hdr->ssrc);
    if (stream == NULL) {
        if (ctx->stream_template != NULL) {
            stream = ctx->stream_template;

            /*
             * check to see if stream_template has an EKT data structure, in
             * which case we initialize the template using the EKT policy
             * referenced by that data (which consists of decrypting the
             * master key from the EKT field)
             *
             * this function initializes a *provisional* stream, and this
             * stream should not be accepted until and unless the packet
             * passes its authentication check
             */
            if (stream->ekt != NULL) {
                status = srtp_stream_init_from_ekt(stream, srtcp_hdr,
                                                   *pkt_octet_len);
                if (status)
                    return status;
            }

            debug_print(mod_srtp,
                        "srtcp using provisional stream (SSRC: 0x%08x)",
                        ntohl(hdr->ssrc));
        } else {
            /* no template stream, so we return an error */
            return srtp_err_status_no_ctx;
        }
    }

    /*
     * Determine if MKI is being used and what session keys should be used
     */
    if (use_mki) {
        session_keys = srtp_get_session_keys(
            stream, (uint8_t *)hdr, (const unsigned int *)pkt_octet_len,
            &mki_size);

        if (session_keys == NULL)
            return srtp_err_status_bad_mki;
    } else {
        session_keys = &stream->session_keys[0];
    }

    /* get tag length from stream context */
    tag_len = srtp_auth_get_tag_length(session_keys->rtcp_auth);

    /* check the packet length - it must contain at least a full RTCP
       header, an auth tag (if applicable), and the SRTCP encrypted flag
       and 31-bit index value */
    if (*pkt_octet_len < (int)(octets_in_rtcp_header + tag_len + mki_size +
                               sizeof(srtcp_trailer_t))) {
        return srtp_err_status_bad_param;
    }

    /*
     * Check if this is an AEAD stream (GCM mode).  If so, then dispatch
     * the request to our AEAD handler.
     */
    /* NOTE(review): the dispatch inspects rtp_cipher->algorithm rather than
       rtcp_cipher - presumably both are provisioned with the same algorithm;
       confirm against key-setup code. */
    if (session_keys->rtp_cipher->algorithm == SRTP_AES_GCM_128 ||
        session_keys->rtp_cipher->algorithm == SRTP_AES_GCM_256) {
        return srtp_unprotect_rtcp_aead(ctx, stream, srtcp_hdr,
                                        (unsigned int *)pkt_octet_len,
                                        session_keys, mki_size);
    }

    sec_serv_confidentiality = stream->rtcp_services == sec_serv_conf ||
                               stream->rtcp_services == sec_serv_conf_and_auth;

    /*
     * set encryption start, encryption length, and trailer
     */
    enc_octet_len = *pkt_octet_len - (octets_in_rtcp_header + tag_len +
                                      mki_size + sizeof(srtcp_trailer_t));
    /*
     *index & E (encryption) bit follow normal data. hdr->len is the number of
     * words (32-bit) in the normal packet minus 1
     */
    /* This should point trailer to the word past the end of the normal data. */
    /* This would need to be modified for optional mikey data */
    trailer_p = (uint32_t *)((char *)hdr + *pkt_octet_len -
                             (tag_len + mki_size + sizeof(srtcp_trailer_t)));
    /* memcpy avoids a potentially misaligned 32-bit load of the trailer */
    memcpy(&trailer, trailer_p, sizeof(trailer));

    e_bit_in_packet =
        (*((unsigned char *)trailer_p) & SRTCP_E_BYTE_BIT) == SRTCP_E_BYTE_BIT;
    /* the E bit in the packet must agree with the locally configured
       confidentiality service, otherwise we cannot check the packet */
    if (e_bit_in_packet != sec_serv_confidentiality) {
        return srtp_err_status_cant_check;
    }
    if (sec_serv_confidentiality) {
        enc_start = (uint32_t *)hdr + uint32s_in_rtcp_header;
    } else {
        enc_octet_len = 0;
        enc_start = NULL; /* this indicates that there's no encryption */
    }

    /*
     * set the auth_start and auth_tag pointers to the proper locations
     * (note that srtcp *always* uses authentication, unlike srtp)
     */
    auth_start = (uint32_t *)hdr;

    /*
     * The location of the auth tag in the packet needs to know MKI
     * could be present.  The data needed to calculate the Auth tag
     * must not include the MKI
     */
    auth_len = *pkt_octet_len - tag_len - mki_size;
    auth_tag = (uint8_t *)hdr + auth_len + mki_size;

    /*
     * if EKT is in use, then we make a copy of the tag from the packet,
     * and then zeroize the location of the base tag
     *
     * we first re-position the auth_tag pointer so that it points to
     * the base tag
     */
    if (stream->ekt) {
        auth_tag -= srtp_ekt_octets_after_base_tag(stream->ekt);
        memcpy(tag_copy, auth_tag, tag_len);
        octet_string_set_to_zero(auth_tag, tag_len);
        auth_tag = tag_copy;
        auth_len += tag_len;
    }

    /*
     * check the sequence number for replays
     */
    /* this is easier than dealing with bitfield access */
    seq_num = ntohl(trailer) & SRTCP_INDEX_MASK;
    debug_print(mod_srtp, "srtcp index: %x", seq_num);
    status = srtp_rdb_check(&stream->rtcp_rdb, seq_num);
    if (status)
        return status;

    /*
     * if we're using aes counter mode, set nonce and seq
     */
    if (session_keys->rtcp_cipher->type->id == SRTP_AES_ICM_128 ||
        session_keys->rtcp_cipher->type->id == SRTP_AES_ICM_192 ||
        session_keys->rtcp_cipher->type->id == SRTP_AES_ICM_256) {
        v128_t iv;

        iv.v32[0] = 0;
        iv.v32[1] = hdr->ssrc; /* still in network order! */
        iv.v32[2] = htonl(seq_num >> 16);
        iv.v32[3] = htonl(seq_num << 16);
        status = srtp_cipher_set_iv(session_keys->rtcp_cipher, (uint8_t *)&iv,
                                    srtp_direction_decrypt);

    } else {
        v128_t iv;

        /* otherwise, just set the index to seq_num */
        iv.v32[0] = 0;
        iv.v32[1] = 0;
        iv.v32[2] = 0;
        iv.v32[3] = htonl(seq_num);
        status = srtp_cipher_set_iv(session_keys->rtcp_cipher, (uint8_t *)&iv,
                                    srtp_direction_decrypt);
    }
    if (status)
        return srtp_err_status_cipher_fail;

    /* initialize auth func context */
    status = srtp_auth_start(session_keys->rtcp_auth);
    if (status)
        return status;

    /* run auth func over packet, put result into tmp_tag */
    status = srtp_auth_compute(session_keys->rtcp_auth, (uint8_t *)auth_start,
                               auth_len, tmp_tag);
    debug_print(mod_srtp, "srtcp computed tag:       %s",
                srtp_octet_string_hex_string(tmp_tag, tag_len));
    if (status)
        return srtp_err_status_auth_fail;

    /* compare the tag just computed with the one in the packet */
    debug_print(mod_srtp, "srtcp tag from packet:    %s",
                srtp_octet_string_hex_string(auth_tag, tag_len));
    if (srtp_octet_string_is_eq(tmp_tag, auth_tag, tag_len))
        return srtp_err_status_auth_fail;

    /*
     * if we're authenticating using a universal hash, put the keystream
     * prefix into the authentication tag
     */
    prefix_len = srtp_auth_get_prefix_length(session_keys->rtcp_auth);
    if (prefix_len) {
        status = srtp_cipher_output(session_keys->rtcp_cipher, auth_tag,
                                    &prefix_len);
        debug_print(mod_srtp, "keystream prefix: %s",
                    srtp_octet_string_hex_string(auth_tag, prefix_len));
        if (status)
            return srtp_err_status_cipher_fail;
    }

    /* if we're decrypting, exor keystream into the message */
    if (enc_start) {
        status = srtp_cipher_decrypt(session_keys->rtcp_cipher,
                                     (uint8_t *)enc_start, &enc_octet_len);
        if (status)
            return srtp_err_status_cipher_fail;
    }

    /* decrease the packet length by the length of the auth tag and seq_num */
    *pkt_octet_len -= (tag_len + sizeof(srtcp_trailer_t));

    /* decrease the packet length by the length of the mki_size */
    *pkt_octet_len -= mki_size;

    /*
     * if EKT is in effect, subtract the EKT data out of the packet
     * length
     */
    *pkt_octet_len -= srtp_ekt_octets_after_base_tag(stream->ekt);

    /*
     * verify that stream is for received traffic - this check will
     * detect SSRC collisions, since a stream that appears in both
     * srtp_protect() and srtp_unprotect() will fail this test in one of
     * those functions.
     *
     * we do this check *after* the authentication check, so that the
     * latter check will catch any attempts to fool us into thinking
     * that we've got a collision
     */
    if (stream->direction != dir_srtp_receiver) {
        if (stream->direction == dir_unknown) {
            stream->direction = dir_srtp_receiver;
        } else {
            srtp_handle_event(ctx, stream, event_ssrc_collision);
        }
    }

    /*
     * if the stream is a 'provisional' one, in which the template context
     * is used, then we need to allocate a new stream at this point, since
     * the authentication passed
     */
    if (stream == ctx->stream_template) {
        srtp_stream_ctx_t *new_stream;

        /*
         * allocate and initialize a new stream
         *
         * note that we indicate failure if we can't allocate the new
         * stream, and some implementations will want to not return
         * failure here
         */
        status =
            srtp_stream_clone(ctx->stream_template, hdr->ssrc, &new_stream);
        if (status)
            return status;

        /* add new stream to the head of the stream_list */
        new_stream->next = ctx->stream_list;
        ctx->stream_list = new_stream;

        /* set stream (the pointer used in this function) */
        stream = new_stream;
    }

    /* we've passed the authentication check, so add seq_num to the rdb */
    srtp_rdb_add_index(&stream->rtcp_rdb, seq_num);

    return srtp_err_status_ok;
}
/*
* user data within srtp_t context
*/
/*
 * srtp_set_user_data() - attach an opaque, caller-owned pointer to the
 * session context.  The library never dereferences or frees it.
 */
void srtp_set_user_data(srtp_t ctx, void *data)
{
    ctx->user_data = data;
}
/*
 * srtp_get_user_data() - return the pointer previously stored with
 * srtp_set_user_data() (NULL if none was set).
 */
void *srtp_get_user_data(srtp_t ctx)
{
    return ctx->user_data;
}
/*
 * srtp_crypto_policy_set_from_profile_for_rtp() - fill in *policy for the
 * RTP direction from a DTLS-SRTP protection profile identifier.
 *
 * Returns srtp_err_status_bad_param for profiles that are unknown or not
 * supported by this build (GCM profiles require compilation with GCM).
 */
srtp_err_status_t srtp_crypto_policy_set_from_profile_for_rtp(
    srtp_crypto_policy_t *policy,
    srtp_profile_t profile)
{
    /* set SRTP policy from the SRTP profile in the key set */
    switch (profile) {
    case srtp_profile_aes128_cm_sha1_80:
        srtp_crypto_policy_set_aes_cm_128_hmac_sha1_80(policy);
        break;
    case srtp_profile_aes128_cm_sha1_32:
        srtp_crypto_policy_set_aes_cm_128_hmac_sha1_32(policy);
        break;
    case srtp_profile_null_sha1_80:
        srtp_crypto_policy_set_null_cipher_hmac_sha1_80(policy);
        break;
#ifdef GCM
    case srtp_profile_aead_aes_128_gcm:
        srtp_crypto_policy_set_aes_gcm_128_16_auth(policy);
        break;
    case srtp_profile_aead_aes_256_gcm:
        srtp_crypto_policy_set_aes_gcm_256_16_auth(policy);
        break;
#endif
    /* the following profiles are not (yet) supported */
    case srtp_profile_null_sha1_32:
    default:
        return srtp_err_status_bad_param;
    }

    return srtp_err_status_ok;
}
/*
 * srtp_crypto_policy_set_from_profile_for_rtcp() - fill in *policy for the
 * RTCP direction from a DTLS-SRTP protection profile identifier.
 *
 * Note that the 32-bit-tag profile deliberately maps to the 80-bit-tag
 * policy for RTCP (see inline comment); RFC 3711 requires the full-length
 * tag on RTCP.
 */
srtp_err_status_t srtp_crypto_policy_set_from_profile_for_rtcp(
    srtp_crypto_policy_t *policy,
    srtp_profile_t profile)
{
    /* set SRTP policy from the SRTP profile in the key set */
    switch (profile) {
    case srtp_profile_aes128_cm_sha1_80:
        srtp_crypto_policy_set_aes_cm_128_hmac_sha1_80(policy);
        break;
    case srtp_profile_aes128_cm_sha1_32:
        /* We do not honor the 32-bit auth tag request since
         * this is not compliant with RFC 3711 */
        srtp_crypto_policy_set_aes_cm_128_hmac_sha1_80(policy);
        break;
    case srtp_profile_null_sha1_80:
        srtp_crypto_policy_set_null_cipher_hmac_sha1_80(policy);
        break;
#ifdef GCM
    case srtp_profile_aead_aes_128_gcm:
        srtp_crypto_policy_set_aes_gcm_128_16_auth(policy);
        break;
    case srtp_profile_aead_aes_256_gcm:
        srtp_crypto_policy_set_aes_gcm_256_16_auth(policy);
        break;
#endif
    /* the following profiles are not (yet) supported */
    case srtp_profile_null_sha1_32:
    default:
        return srtp_err_status_bad_param;
    }

    return srtp_err_status_ok;
}
/*
 * srtp_append_salt_to_key() - append the master salt directly after the
 * master key, forming the concatenated key/salt buffer the key-derivation
 * code expects.
 *
 * NOTE(review): the caller must guarantee that `key` has room for
 * bytes_in_key + bytes_in_salt octets - no bound is (or can be) checked
 * here.
 */
void srtp_append_salt_to_key(uint8_t *key,
                             unsigned int bytes_in_key,
                             uint8_t *salt,
                             unsigned int bytes_in_salt)
{
    memcpy(key + bytes_in_key, salt, bytes_in_salt);
}
/*
 * srtp_profile_get_master_key_length() - return the master key length in
 * octets for the given SRTP protection profile.
 *
 * Returns 0 for unknown or unsupported profiles; callers must treat a
 * zero return as an error indication.
 *
 * Cleanup: the original had an unreachable `break` after every `return`
 * (dead code) and repeated the identical body for each AES-128 profile;
 * those cases are now merged.
 */
unsigned int srtp_profile_get_master_key_length(srtp_profile_t profile)
{
    switch (profile) {
    /* all AES-128-keyed profiles share the same master key length */
    case srtp_profile_aes128_cm_sha1_80:
    case srtp_profile_aes128_cm_sha1_32:
    case srtp_profile_null_sha1_80:
    case srtp_profile_aead_aes_128_gcm:
        return SRTP_AES_128_KEY_LEN;
    case srtp_profile_aead_aes_256_gcm:
        return SRTP_AES_256_KEY_LEN;
    /* the following profiles are not (yet) supported */
    case srtp_profile_null_sha1_32:
    default:
        return 0; /* indicate error by returning a zero */
    }
}
/*
 * srtp_profile_get_master_salt_length() - return the master salt length in
 * octets for the given SRTP protection profile.
 *
 * Returns 0 for unknown or unsupported profiles; callers must treat a
 * zero return as an error indication.
 *
 * Cleanup: removed unreachable `break` statements after `return` and
 * merged the duplicated case bodies (ICM/null profiles use the standard
 * salt, AEAD profiles use the AEAD salt).
 */
unsigned int srtp_profile_get_master_salt_length(srtp_profile_t profile)
{
    switch (profile) {
    case srtp_profile_aes128_cm_sha1_80:
    case srtp_profile_aes128_cm_sha1_32:
    case srtp_profile_null_sha1_80:
        return SRTP_SALT_LEN;
    case srtp_profile_aead_aes_128_gcm:
    case srtp_profile_aead_aes_256_gcm:
        return SRTP_AEAD_SALT_LEN;
    /* the following profiles are not (yet) supported */
    case srtp_profile_null_sha1_32:
    default:
        return 0; /* indicate error by returning a zero */
    }
}
/*
 * stream_get_protect_trailer_length() - compute, for one stream, the
 * number of octets srtp_protect()/srtp_protect_rtcp() will append to a
 * packet: auth tag, plus the SRTCP trailer for RTCP, plus the MKI field
 * when MKI is in use.
 *
 * Returns srtp_err_status_bad_mki if mki_index does not select a valid
 * master key; *length is 0 in that case.
 */
srtp_err_status_t stream_get_protect_trailer_length(srtp_stream_ctx_t *stream,
                                                    uint32_t is_rtp,
                                                    uint32_t use_mki,
                                                    uint32_t mki_index,
                                                    uint32_t *length)
{
    srtp_session_keys_t *keys;
    uint32_t total = 0;

    *length = 0;

    if (use_mki) {
        if (mki_index >= stream->num_master_keys)
            return srtp_err_status_bad_mki;
        keys = &stream->session_keys[mki_index];
        /* the MKI itself is transmitted in the packet */
        total += keys->mki_size;
    } else {
        keys = &stream->session_keys[0];
    }

    if (is_rtp) {
        total += srtp_auth_get_tag_length(keys->rtp_auth);
    } else {
        /* SRTCP also carries the 4-octet E-bit/index trailer */
        total += srtp_auth_get_tag_length(keys->rtcp_auth);
        total += sizeof(srtcp_trailer_t);
    }

    *length = total;
    return srtp_err_status_ok;
}
/*
 * get_protect_trailer_length() - return in *length the worst-case number
 * of trailing octets protection will add for ANY stream in the session
 * (template plus per-SSRC streams), so callers can size their buffers once.
 *
 * Returns bad_param if session is NULL or has no streams at all.
 */
srtp_err_status_t get_protect_trailer_length(srtp_t session,
                                             uint32_t is_rtp,
                                             uint32_t use_mki,
                                             uint32_t mki_index,
                                             uint32_t *length)
{
    srtp_stream_ctx_t *stream;

    if (session == NULL) {
        return srtp_err_status_bad_param;
    }

    if (session->stream_template == NULL && session->stream_list == NULL) {
        return srtp_err_status_bad_param;
    }

    *length = 0;

    stream = session->stream_template;
    if (stream != NULL) {
        /* NOTE(review): the return status of this call is ignored; on a bad
           mki_index the template contributes 0 - presumably intentional so
           that per-stream entries below still get a chance. Confirm. */
        stream_get_protect_trailer_length(stream, is_rtp, use_mki, mki_index,
                                          length);
    }

    /* take the maximum over every concrete stream in the session */
    stream = session->stream_list;
    while (stream != NULL) {
        uint32_t temp_length;
        if (stream_get_protect_trailer_length(stream, is_rtp, use_mki,
                                              mki_index, &temp_length) ==
            srtp_err_status_ok) {
            if (temp_length > *length) {
                *length = temp_length;
            }
        }
        stream = stream->next;
    }

    return srtp_err_status_ok;
}
/*
 * srtp_get_protect_trailer_length() - public wrapper: worst-case octets
 * appended by srtp_protect() for this session (is_rtp == 1).
 */
srtp_err_status_t srtp_get_protect_trailer_length(srtp_t session,
                                                  uint32_t use_mki,
                                                  uint32_t mki_index,
                                                  uint32_t *length)
{
    return get_protect_trailer_length(session, 1, use_mki, mki_index, length);
}
/*
 * srtp_get_protect_rtcp_trailer_length() - public wrapper: worst-case
 * octets appended by srtp_protect_rtcp() for this session (is_rtp == 0).
 */
srtp_err_status_t srtp_get_protect_rtcp_trailer_length(srtp_t session,
                                                       uint32_t use_mki,
                                                       uint32_t mki_index,
                                                       uint32_t *length)
{
    return get_protect_trailer_length(session, 0, use_mki, mki_index, length);
}
/*
* SRTP debug interface
*/
/*
 * srtp_set_debug_module() - enable (v != 0) or disable (v == 0) the named
 * debug module in the crypto kernel.
 */
srtp_err_status_t srtp_set_debug_module(const char *mod_name, int v)
{
    return srtp_crypto_kernel_set_debug_module(mod_name, v);
}
/*
 * srtp_list_debug_modules() - print the registered debug modules via the
 * crypto kernel's listing facility.
 */
srtp_err_status_t srtp_list_debug_modules(void)
{
    return srtp_crypto_kernel_list_debug_modules();
}
/*
* srtp_log_handler is a global variable holding a pointer to the
* log handler function; this function is called for any log
* output.
*/
static srtp_log_handler_func_t *srtp_log_handler = NULL;
static void *srtp_log_handler_data = NULL;
/*
 * srtp_err_handler() - bridge from the internal error-reporting facility
 * to the user-installed log handler.  Translates the reporting level to
 * the public log level and forwards the message; does nothing when no
 * handler is installed.
 */
void srtp_err_handler(srtp_err_reporting_level_t level, const char *msg)
{
    srtp_log_level_t log_level;

    if (srtp_log_handler == NULL)
        return;

    switch (level) {
    case srtp_err_level_warning:
        log_level = srtp_log_level_warning;
        break;
    case srtp_err_level_info:
        log_level = srtp_log_level_info;
        break;
    case srtp_err_level_debug:
        log_level = srtp_log_level_debug;
        break;
    case srtp_err_level_error:
    default:
        /* unknown levels are reported as errors, matching the original
           fall-through behavior */
        log_level = srtp_log_level_error;
        break;
    }

    srtp_log_handler(log_level, msg, srtp_log_handler_data);
}
/*
 * srtp_install_log_handler() - install (or, with func == NULL, remove) the
 * user log handler.  `data` is passed back verbatim on every log call.
 * The internal error-report handler is detached before swapping and only
 * re-attached if a new handler is present.
 */
srtp_err_status_t srtp_install_log_handler(srtp_log_handler_func_t func,
                                           void *data)
{
    /*
     * note that we accept NULL arguments intentionally -- calling this
     * function with a NULL arguments removes a log handler that's
     * been previously installed
     */
    if (srtp_log_handler) {
        srtp_install_err_report_handler(NULL);
    }
    srtp_log_handler = func;
    srtp_log_handler_data = data;
    if (srtp_log_handler) {
        srtp_install_err_report_handler(srtp_err_handler);
    }
    return srtp_err_status_ok;
}
/*
 * srtp_set_stream_roc() - stage a rollover counter for the stream with the
 * given SSRC (host byte order).  The value is stored in pending_roc, i.e.
 * it takes effect later rather than overwriting the live counter here.
 *
 * Returns bad_param if no stream with that SSRC exists yet.
 */
srtp_err_status_t srtp_set_stream_roc(srtp_t session,
                                      uint32_t ssrc,
                                      uint32_t roc)
{
    srtp_stream_t stream;

    stream = srtp_get_stream(session, htonl(ssrc));
    if (stream == NULL)
        return srtp_err_status_bad_param;

    stream->pending_roc = roc;

    return srtp_err_status_ok;
}
/*
 * srtp_get_stream_roc() - read the stream's current rollover counter from
 * the replay database.
 *
 * NOTE(review): this reads the live rdbx ROC while srtp_set_stream_roc()
 * writes pending_roc, so a value just set may not be visible here until
 * it has been applied - confirm this asymmetry is intended.
 */
srtp_err_status_t srtp_get_stream_roc(srtp_t session,
                                      uint32_t ssrc,
                                      uint32_t *roc)
{
    srtp_stream_t stream;

    stream = srtp_get_stream(session, htonl(ssrc));
    if (stream == NULL)
        return srtp_err_status_bad_param;

    *roc = srtp_rdbx_get_roc(&stream->rtp_rdbx);

    return srtp_err_status_ok;
}
|
87791.c | // SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) Wipro Technologies Ltd, 2002. All Rights Reserved.
* Copyright (c) 2014 Cyril Hrubis <[email protected]>
* Author: Nirmala Devi Dhanasekar <[email protected]>
*
* Check for basic errors returned by umount(2) system call.
*
* Verify that umount(2) returns -1 and sets errno to
* 1) EBUSY if it cannot be umounted, because dir is still busy.
* 2) EFAULT if specialfile or device file points to invalid address space.
* 3) ENOENT if pathname was empty or has a nonexistent component.
* 4) EINVAL if specialfile or device is invalid or not a mount point.
* 5) ENAMETOOLONG if pathname was longer than MAXPATHLEN.
*/
#include <errno.h>
#include <string.h>
#include <sys/mount.h>
#include "tst_test.h"
#define MNTPOINT "mntpoint"
static char long_path[PATH_MAX + 2];
static int mount_flag;
static int fd;
/*
 * Error scenarios: each entry names the expected failure, the path handed
 * to umount(2), and the errno the call must set.
 */
static struct tcase {
	const char *err_desc;  /* human-readable scenario description */
	const char *mntpoint;  /* argument passed to umount(2) */
	int exp_errno;         /* errno expected from the failing call */
} tcases[] = {
	{"Already mounted/busy", MNTPOINT, EBUSY},
	{"Invalid address", NULL, EFAULT},
	{"Directory not found", "nonexistent", ENOENT},
	{"Invalid device", "./", EINVAL},
	{"Pathname too long", long_path, ENAMETOOLONG}
};
/*
 * Run one table entry: call umount(2) on the scenario's path and check
 * that it fails with exactly the expected errno.
 */
static void verify_umount(unsigned int n)
{
	struct tcase *tc = &tcases[n];

	TEST(umount(tc->mntpoint));

	/* the call must fail; a success is an outright test failure */
	if (TST_RET != -1) {
		tst_res(TFAIL, "umount() succeeds unexpectedly");
		return;
	}

	if (tc->exp_errno != TST_ERR) {
		tst_res(TFAIL | TTERRNO, "umount() should fail with %s",
			tst_strerrno(tc->exp_errno));
		return;
	}

	tst_res(TPASS | TTERRNO, "umount() fails as expected: %s",
		tc->err_desc);
}
/*
 * Test setup: build the over-long path (long_path is static, so the byte
 * after the PATH_MAX+1 'a's is already NUL), mount the prepared device on
 * MNTPOINT, and keep a file open on it so the EBUSY case really is busy.
 */
static void setup(void)
{
	memset(long_path, 'a', PATH_MAX + 1);

	SAFE_MKDIR(MNTPOINT, 0775);
	SAFE_MOUNT(tst_device->dev, MNTPOINT, tst_device->fs_type, 0, NULL);
	mount_flag = 1;

	/* an open fd inside the mount keeps it busy for the EBUSY case */
	fd = SAFE_CREAT(MNTPOINT "/file", 0777);
}
/*
 * Test teardown: close the busy-keeping fd first (otherwise the umount
 * below would fail with EBUSY), then unmount if setup got that far.
 */
static void cleanup(void)
{
	if (fd > 0)
		SAFE_CLOSE(fd);

	if (mount_flag)
		tst_umount(MNTPOINT);
}
/*
 * LTP test descriptor: one invocation of verify_umount() per tcases
 * entry, run as root on a freshly formatted scratch device.
 */
static struct tst_test test = {
	.tcnt = ARRAY_SIZE(tcases),
	.needs_root = 1,
	.format_device = 1,
	.setup = setup,
	.cleanup = cleanup,
	.test = verify_umount,
};
|
884557.c | /* $Id: interrupt.c,v 1.1.1.1 2007-05-25 06:50:09 bruce Exp $
*
* Copyright (C) 1996 SpellCaster Telecommunications Inc.
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* For more information, please contact [email protected] or write:
*
* SpellCaster Telecommunications Inc.
* 5621 Finch Avenue East, Unit #3
* Scarborough, Ontario Canada
* M1B 2T9
* +1 (416) 297-8565
* +1 (416) 297-6433 Facsimile
*/
#include "includes.h"
#include "hardware.h"
#include "message.h"
#include "card.h"
#include <linux/interrupt.h>
/*
 * Map an IRQ number back to the index of the registered adapter that owns
 * it.  Returns -1 when no adapter uses this IRQ.
 */
static int get_card_from_irq(int irq)
{
	int card;

	for (card = 0; card < cinst; card++) {
		if (sc_adapter[card]->interrupt == irq)
			return card;
	}
	return -1;
}
/*
*
*/
/*
 * interrupt_handler() - top-half IRQ handler for the SpellCaster adapter.
 *
 * Maps the IRQ back to a card index, then drains every response message
 * queued by the board, dispatching on message type: received packets,
 * transmit ACKs, connect/disconnect indications, engine-up notification,
 * phy status, and frame-format negotiation.  Unrecognized messages are
 * logged and dropped.
 *
 * Returns IRQ_NONE if the IRQ does not belong to any known card,
 * IRQ_HANDLED otherwise.
 */
irqreturn_t interrupt_handler(int interrupt, void *cardptr)
{
	RspMessage rcvmsg;
	int channel;
	int card;

	card = get_card_from_irq(interrupt);

	if(!IS_VALID_CARD(card)) {
		pr_debug("Invalid param: %d is not a valid card id\n", card);
		return IRQ_NONE;
	}

	pr_debug("%s: Entered Interrupt handler\n",
			sc_adapter[card]->devicename);

	/*
	 * Pull all of the waiting messages off the response queue
	 */
	while (!receivemessage(card, &rcvmsg)) {
		/*
		 * Push the message to the adapter structure for
		 * send_and_receive to snoop
		 */
		if(sc_adapter[card]->want_async_messages)
			memcpy(&(sc_adapter[card]->async_msg),
					&rcvmsg, sizeof(RspMessage));

		channel = (unsigned int) rcvmsg.phy_link_no;

		/*
		 * Trap Invalid request messages
		 */
		if(IS_CM_MESSAGE(rcvmsg, 0, 0, Invalid)) {
			pr_debug("%s: Invalid request Message, rsp_status = %d\n",
					sc_adapter[card]->devicename,
					rcvmsg.rsp_status);
			break;
		}

		/*
		 * Check for a linkRead message
		 */
		if (IS_CE_MESSAGE(rcvmsg, Lnk, 1, Read))
		{
			pr_debug("%s: Received packet 0x%x bytes long at 0x%lx\n",
					sc_adapter[card]->devicename,
					rcvmsg.msg_data.response.msg_len,
					rcvmsg.msg_data.response.buff_offset);
			rcvpkt(card, &rcvmsg);
			continue;

		}

		/*
		 * Handle a write acknoledgement
		 */
		if(IS_CE_MESSAGE(rcvmsg, Lnk, 1, Write)) {
			pr_debug("%s: Packet Send ACK on channel %d\n",
					sc_adapter[card]->devicename,
					rcvmsg.phy_link_no);
			/* a send buffer is free again for this B-channel */
			sc_adapter[card]->channel[rcvmsg.phy_link_no-1].free_sendbufs++;
			continue;
		}

		/*
		 * Handle a connection message
		 */
		if (IS_CE_MESSAGE(rcvmsg, Phy, 1, Connect))
		{
			unsigned int callid;
			setup_parm setup;
			pr_debug("%s: Connect message: line %d: status %d: cause 0x%x\n",
					sc_adapter[card]->devicename,
					rcvmsg.phy_link_no,
					rcvmsg.rsp_status,
					rcvmsg.msg_data.byte_array[2]);

			memcpy(&callid,rcvmsg.msg_data.byte_array,sizeof(int));
			/* call IDs 0x8000-0xFFFF are outgoing, 0x0000-0x7FFF incoming */
			if(callid>=0x8000 && callid<=0xFFFF)
			{
				pr_debug("%s: Got Dial-Out Rsp\n",
						sc_adapter[card]->devicename);
				indicate_status(card, ISDN_STAT_DCONN,
						(unsigned long)rcvmsg.phy_link_no-1,NULL);

			}
			else if(callid>=0x0000 && callid<=0x7FFF)
			{
				pr_debug("%s: Got Incoming Call\n",
						sc_adapter[card]->devicename);
				/* NOTE(review): strcpy from the board's byte_array into
				   the fixed-size setup.phone / setup.eazmsn fields has no
				   bound check - assumes the firmware NUL-terminates within
				   the field size; confirm. */
				strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
				strcpy(setup.eazmsn,
						sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
				setup.si1 = 7;
				setup.si2 = 0;
				setup.plan = 0;
				setup.screen = 0;

				indicate_status(card, ISDN_STAT_ICALL,(unsigned long)rcvmsg.phy_link_no-1,(char *)&setup);
				indicate_status(card, ISDN_STAT_DCONN,(unsigned long)rcvmsg.phy_link_no-1,NULL);
			}
			continue;
		}

		/*
		 * Handle a disconnection message
		 */
		if (IS_CE_MESSAGE(rcvmsg, Phy, 1, Disconnect))
		{
			pr_debug("%s: disconnect message: line %d: status %d: cause 0x%x\n",
					sc_adapter[card]->devicename,
					rcvmsg.phy_link_no,
					rcvmsg.rsp_status,
					rcvmsg.msg_data.byte_array[2]);

			indicate_status(card, ISDN_STAT_BHUP,(unsigned long)rcvmsg.phy_link_no-1,NULL);
			indicate_status(card, ISDN_STAT_DHUP,(unsigned long)rcvmsg.phy_link_no-1,NULL);
			continue;

		}

		/*
		 * Handle a startProc engine up message
		 */
		if (IS_CM_MESSAGE(rcvmsg, 5, 0, MiscEngineUp)) {
			pr_debug("%s: Received EngineUp message\n",
					sc_adapter[card]->devicename);
			sc_adapter[card]->EngineUp = 1;
			sendmessage(card, CEPID,ceReqTypeCall,ceReqClass0,ceReqCallGetMyNumber,1,0,NULL);
			sendmessage(card, CEPID,ceReqTypeCall,ceReqClass0,ceReqCallGetMyNumber,2,0,NULL);
			/* start the periodic phy-status poll (legacy init_timer API,
			   pre-4.15 kernels) */
			init_timer(&sc_adapter[card]->stat_timer);
			sc_adapter[card]->stat_timer.function = check_phystat;
			sc_adapter[card]->stat_timer.data = card;
			sc_adapter[card]->stat_timer.expires = jiffies + CHECKSTAT_TIME;
			add_timer(&sc_adapter[card]->stat_timer);
			continue;
		}

		/*
		 * Start proc response
		 */
		if (IS_CM_MESSAGE(rcvmsg, 2, 0, StartProc)) {
			pr_debug("%s: StartProc Response Status %d\n",
					sc_adapter[card]->devicename,
					rcvmsg.rsp_status);
			continue;
		}

		/*
		 * Handle a GetMyNumber Rsp
		 */
		if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
			strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
			continue;
		}

		/*
		 * PhyStatus response
		 */
		if(IS_CE_MESSAGE(rcvmsg, Phy, 2, Status)) {
			unsigned int b1stat, b2stat;

			/*
			 * Covert the message data to the adapter->phystat code
			 */
			b1stat = (unsigned int) rcvmsg.msg_data.byte_array[0];
			b2stat = (unsigned int) rcvmsg.msg_data.byte_array[1];

			sc_adapter[card]->nphystat = (b2stat >> 8) | b1stat; /* endian?? */
			pr_debug("%s: PhyStat is 0x%2x\n",
					sc_adapter[card]->devicename,
					sc_adapter[card]->nphystat);
			continue;
		}

		/*
		 * Handle a GetFramFormat
		 */
		if(IS_CE_MESSAGE(rcvmsg, Call, 0, GetFrameFormat)) {
			if(rcvmsg.msg_data.byte_array[0] != HDLC_PROTO) {
				unsigned int proto = HDLC_PROTO;
				/*
				 * Set board format to HDLC if it wasn't already
				 */
				pr_debug("%s: current frame format: 0x%x, will change to HDLC\n",
						sc_adapter[card]->devicename,
						rcvmsg.msg_data.byte_array[0]);
				sendmessage(card, CEPID, ceReqTypeCall,
						ceReqClass0,
						ceReqCallSetFrameFormat,
						(unsigned char) channel +1,
						1,&proto);
			}
			continue;
		}

		/*
		 * Hmm...
		 */
		pr_debug("%s: Received unhandled message (%d,%d,%d) link %d\n",
				sc_adapter[card]->devicename,
				rcvmsg.type, rcvmsg.class, rcvmsg.code,
				rcvmsg.phy_link_no);

	}	/* while */

	pr_debug("%s: Exiting Interrupt Handler\n",
			sc_adapter[card]->devicename);
	return IRQ_HANDLED;
}
|
296920.c | /*
* prof_get.c --- routines that expose the public interfaces for
* querying items from the profile.
*
*/
#include "prof_int.h"
#include <stdio.h>
#include <string.h>
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#include <errno.h>
#include <limits.h>
/*
* Solaris Kerberos: The following functions are made public so that other
* profile functions can call upon these basic routines:
* init_list(), end_list(), and add_to_list().
* Note: That profile_string_list is moved to prof_int.h as a result.
*
* These functions --- init_list(), end_list(), and add_to_list() are
* publicy exported functions used to build up a null-terminated char ** list
* of strings to be returned by functions like profile_get_values.
*
* The publicly exported interface for freeing char** list is
* profile_free_list().
*/
/*
* Initialize the string list abstraction.
*/
/*
 * Set up an empty, NULL-terminated string list with room for an initial
 * batch of entries.  Returns ENOMEM if the backing array cannot be
 * allocated.
 */
errcode_t init_list(struct profile_string_list *list)
{
    enum { INITIAL_CAPACITY = 10 };

    list->num = 0;
    list->max = INITIAL_CAPACITY;
    list->list = malloc(INITIAL_CAPACITY * sizeof(*list->list));
    if (list->list == NULL)
        return ENOMEM;
    list->list[0] = NULL;
    return 0;
}
/*
* Free any memory left over in the string abstraction, returning the
* built up list in *ret_list if it is non-null.
*/
/*
 * Tear down a string list.  If ret_list is non-NULL, ownership of the
 * accumulated vector is transferred to *ret_list (caller frees it with
 * profile_free_list); otherwise every string and the vector itself are
 * freed here.
 *
 * Fix: the transfer path used to return early without clearing
 * list->list/num/max, leaving the struct holding a dangling pointer to
 * memory now owned by the caller.  Both paths now leave *list empty.
 */
void end_list(struct profile_string_list *list, char ***ret_list)
{
    char **cp;

    if (list == 0)
        return;

    if (ret_list) {
        /* hand the vector to the caller instead of freeing it */
        *ret_list = list->list;
    } else {
        for (cp = list->list; *cp; cp++)
            free(*cp);
        free(list->list);
    }
    list->num = list->max = 0;
    list->list = 0;
}
/*
* Add a string to the list.
*/
/*
 * Append a copy of str to the list, growing the backing array as needed
 * and keeping the vector NULL-terminated.  Returns ENOMEM on allocation
 * failure (the list is left valid and unchanged in that case).
 */
errcode_t add_to_list(struct profile_string_list *list, const char *str)
{
    char *copy;

    /* grow when fewer than two free slots remain: one for the new entry
       and one for the NULL terminator */
    if (list->num + 1 >= list->max) {
        int bigger = list->max + 10;
        char **grown = realloc(list->list, bigger * sizeof(char *));
        if (grown == NULL)
            return ENOMEM;
        list->max = bigger;
        list->list = grown;
    }

    copy = malloc(strlen(str) + 1);
    if (copy == NULL)
        return ENOMEM;
    strcpy(copy, str);

    list->list[list->num++] = copy;
    list->list[list->num] = NULL;
    return 0;
}
/*
* Return TRUE if the string is already a member of the list.
*/
/*
 * Return nonzero if str (compared with strcmp) is already present in the
 * list; zero otherwise, including when the list has no backing array.
 */
static int is_list_member(struct profile_string_list *list, const char *str)
{
    char **entry;

    if (list->list == NULL)
        return 0;
    for (entry = list->list; *entry != NULL; entry++) {
        if (strcmp(*entry, str) == 0)
            return 1;
    }
    return 0;
}
/*
* This function frees a null-terminated list as returned by
* profile_get_values.
*/
/*
 * Free a NULL-terminated string vector as returned by profile_get_values
 * and friends: each element, then the vector itself.  A NULL list is a
 * no-op.
 */
void KRB5_CALLCONV profile_free_list(char **list)
{
    char **entry;

    if (list == NULL)
        return;
    for (entry = list; *entry != NULL; entry++)
        free(*entry);
    free(list);
}
/*
 * profile_get_values() - collect every value bound to the relation named
 * by the NULL-terminated `names` path into a newly allocated, NULL-
 * terminated vector (*ret_values, freed with profile_free_list).
 *
 * Returns PROF_NO_RELATION if no value was found.
 *
 * Fixes: (1) the iterator leaked when init_list() failed; it is now
 * freed on that path.  (2) an ENOMEM from add_to_list() was silently
 * ignored, which could return a truncated list as success; it now
 * aborts with the error.
 */
errcode_t KRB5_CALLCONV
profile_get_values(profile_t profile, const char *const *names,
                   char ***ret_values)
{
    errcode_t retval;
    void *state;
    char *value;
    struct profile_string_list values;

    if ((retval = profile_node_iterator_create(profile, names,
                                               PROFILE_ITER_RELATIONS_ONLY,
                                               &state)))
        return retval;

    if ((retval = init_list(&values))) {
        profile_node_iterator_free(&state);
        return retval;
    }

    do {
        if ((retval = profile_node_iterator(&state, 0, 0, &value)))
            goto cleanup;
        if (value) {
            if ((retval = add_to_list(&values, value))) {
                profile_node_iterator_free(&state);
                goto cleanup;
            }
        }
    } while (state);

    if (values.num == 0) {
        retval = PROF_NO_RELATION;
        goto cleanup;
    }

    end_list(&values, ret_values);
    return 0;

cleanup:
    end_list(&values, 0);
    return retval;
}
/*
* This function only gets the first value from the file; it is a
* helper function for profile_get_string, profile_get_integer, etc.
*/
/*
 * profile_get_value() - fetch only the FIRST value bound to the relation
 * named by `names`; helper for profile_get_string/integer/boolean.
 *
 * On success *ret_value points at storage owned by the profile iterator
 * machinery - the caller must copy it before further profile calls, and
 * must not free it.  Returns PROF_NO_RELATION when no value exists.
 */
errcode_t profile_get_value(profile_t profile, const char **names,
                            const char **ret_value)
{
    errcode_t retval;
    void *state;
    char *value;

    if ((retval = profile_node_iterator_create(profile, names,
                                               PROFILE_ITER_RELATIONS_ONLY,
                                               &state)))
        return retval;

    if ((retval = profile_node_iterator(&state, 0, 0, &value)))
        goto cleanup;

    if (value)
        *ret_value = value;
    else
        retval = PROF_NO_RELATION;

cleanup:
    /* the iterator is always released, success or failure */
    profile_node_iterator_free(&state);
    return retval;
}
/*
 * profile_get_string() - look up a string value under the section path
 * name/subname/subsubname, falling back to def_val when the section or
 * relation is absent (or when profile itself is NULL).
 *
 * On success *ret_string holds a freshly malloc'd copy (caller frees via
 * profile_release_string), or NULL when the effective value is NULL.
 */
errcode_t KRB5_CALLCONV
profile_get_string(profile_t profile, const char *name, const char *subname,
                   const char *subsubname, const char *def_val,
                   char **ret_string)
{
    const char *value;
    errcode_t retval;
    const char *names[4];

    if (profile == NULL) {
        value = def_val;
    } else {
        names[0] = name;
        names[1] = subname;
        names[2] = subsubname;
        names[3] = NULL;
        retval = profile_get_value(profile, names, &value);
        if (retval == PROF_NO_SECTION || retval == PROF_NO_RELATION)
            value = def_val;
        else if (retval)
            return retval;
    }

    if (value == NULL) {
        *ret_string = NULL;
        return 0;
    }

    *ret_string = malloc(strlen(value) + 1);
    if (*ret_string == NULL)
        return ENOMEM;
    strcpy(*ret_string, value);
    return 0;
}
/*
 * profile_get_integer() - look up an integer value under the section path
 * name/subname/subsubname, storing def_val in *ret_int when the profile
 * is NULL or the section/relation is absent.
 *
 * The string value is parsed with strtol (base 10) and must consume the
 * whole string, fit in an int, and not overflow long; otherwise
 * PROF_BAD_INTEGER is returned.
 */
errcode_t KRB5_CALLCONV
profile_get_integer(profile_t profile, const char *name, const char *subname,
                    const char *subsubname, int def_val, int *ret_int)
{
    const char *value;
    errcode_t retval;
    const char *names[4];
    char *end_value;
    long ret_long;

    *ret_int = def_val;
    if (profile == 0)
        return 0;

    names[0] = name;
    names[1] = subname;
    names[2] = subsubname;
    names[3] = 0;
    retval = profile_get_value(profile, names, &value);
    if (retval == PROF_NO_SECTION || retval == PROF_NO_RELATION) {
        *ret_int = def_val;
        return 0;
    } else if (retval)
        return retval;

    if (value[0] == 0)
        /* Empty string is no good.  */
        return PROF_BAD_INTEGER;
    /* clear errno so a post-call ERANGE check is unambiguous */
    errno = 0;
    ret_long = strtol (value, &end_value, 10);

    /* Overflow or underflow.  */
    if ((ret_long == LONG_MIN || ret_long == LONG_MAX) && errno != 0)
        return PROF_BAD_INTEGER;
    /* Value outside "int" range.  */
    if ((long) (int) ret_long != ret_long)
        return PROF_BAD_INTEGER;
    /* Garbage in string.  */
    if (end_value != value + strlen (value))
        return PROF_BAD_INTEGER;

    *ret_int = ret_long;
    return 0;
}
static const char *const conf_yes[] = {
"y", "yes", "true", "t", "1", "on",
0,
};
static const char *const conf_no[] = {
"n", "no", "false", "nil", "0", "off",
0,
};
/*
 * Parse a boolean spelling against the conf_yes/conf_no tables
 * (case-insensitive).  Stores 1 or 0 in *ret_boolean on a match;
 * returns PROF_BAD_BOOLEAN for unrecognized input and PROF_EINVAL
 * when ret_boolean is NULL.
 */
static errcode_t
profile_parse_boolean(const char *s, int *ret_boolean)
{
    const char *const *candidate;

    if (ret_boolean == NULL)
        return PROF_EINVAL;

    for (candidate = conf_yes; *candidate != NULL; candidate++) {
        if (strcasecmp(*candidate, s) == 0) {
            *ret_boolean = 1;
            return 0;
        }
    }

    for (candidate = conf_no; *candidate != NULL; candidate++) {
        if (strcasecmp(*candidate, s) == 0) {
            *ret_boolean = 0;
            return 0;
        }
    }

    return PROF_BAD_BOOLEAN;
}
/*
 * profile_get_boolean() - look up a boolean value under the section path
 * name/subname/subsubname, storing def_val in *ret_boolean when the
 * profile is NULL or the section/relation is absent.  Unrecognized
 * spellings yield PROF_BAD_BOOLEAN (see profile_parse_boolean).
 */
errcode_t KRB5_CALLCONV
profile_get_boolean(profile_t profile, const char *name, const char *subname,
                    const char *subsubname, int def_val, int *ret_boolean)
{
    const char *value;
    errcode_t retval;
    const char *names[4];

    if (profile == 0) {
        *ret_boolean = def_val;
        return 0;
    }
    names[0] = name;
    names[1] = subname;
    names[2] = subsubname;
    names[3] = 0;
    retval = profile_get_value(profile, names, &value);
    if (retval == PROF_NO_SECTION || retval == PROF_NO_RELATION) {
        *ret_boolean = def_val;
        return 0;
    } else if (retval)
        return retval;

    return profile_parse_boolean (value, ret_boolean);
}
/*
* This function will return the list of the names of subections in the
* under the specified section name.
*/
/*
 * profile_get_subsection_names() - return, in a newly allocated NULL-
 * terminated vector (*ret_names, freed with profile_free_list), the names
 * of the subsections directly under the section named by `names`.
 *
 * Fixes (mirroring profile_get_values): the iterator leaked when
 * init_list() failed, and an ENOMEM from add_to_list() was silently
 * ignored; both paths now propagate the error and release the iterator.
 */
errcode_t KRB5_CALLCONV
profile_get_subsection_names(profile_t profile, const char **names,
                             char ***ret_names)
{
    errcode_t retval;
    void *state;
    char *name;
    struct profile_string_list values;

    if ((retval = profile_node_iterator_create(profile, names,
                   PROFILE_ITER_LIST_SECTION | PROFILE_ITER_SECTIONS_ONLY,
                   &state)))
        return retval;

    if ((retval = init_list(&values))) {
        profile_node_iterator_free(&state);
        return retval;
    }

    do {
        if ((retval = profile_node_iterator(&state, 0, &name, 0)))
            goto cleanup;
        if (name) {
            if ((retval = add_to_list(&values, name))) {
                profile_node_iterator_free(&state);
                goto cleanup;
            }
        }
    } while (state);

    end_list(&values, ret_names);
    return 0;

cleanup:
    end_list(&values, 0);
    return retval;
}
/*
* This function will return the list of the names of relations in the
* under the specified section name.
*/
/*
 * profile_get_relation_names() - return, in a newly allocated NULL-
 * terminated vector (*ret_names, freed with profile_free_list), the
 * distinct relation names under the section named by `names`
 * (duplicates are filtered via is_list_member).
 *
 * Fixes (mirroring profile_get_values): the iterator leaked when
 * init_list() failed, and an ENOMEM from add_to_list() was silently
 * ignored; both paths now propagate the error and release the iterator.
 */
errcode_t KRB5_CALLCONV
profile_get_relation_names(profile_t profile, const char **names,
                           char ***ret_names)
{
    errcode_t retval;
    void *state;
    char *name;
    struct profile_string_list values;

    if ((retval = profile_node_iterator_create(profile, names,
                   PROFILE_ITER_LIST_SECTION | PROFILE_ITER_RELATIONS_ONLY,
                   &state)))
        return retval;

    if ((retval = init_list(&values))) {
        profile_node_iterator_free(&state);
        return retval;
    }

    do {
        if ((retval = profile_node_iterator(&state, 0, &name, 0)))
            goto cleanup;
        if (name && !is_list_member(&values, name)) {
            if ((retval = add_to_list(&values, name))) {
                profile_node_iterator_free(&state);
                goto cleanup;
            }
        }
    } while (state);

    end_list(&values, ret_names);
    return 0;

cleanup:
    end_list(&values, 0);
    return retval;
}
/*
 * Public wrapper: expose the internal node-iterator constructor under the
 * stable profile_* API name. *ret_iter receives the opaque iterator state.
 */
errcode_t KRB5_CALLCONV
profile_iterator_create(profile_t profile, const char *const *names, int flags,
void **ret_iter)
{
return profile_node_iterator_create(profile, names, flags, ret_iter);
}
/*
 * Public wrapper: release an iterator made by profile_iterator_create.
 */
void KRB5_CALLCONV
profile_iterator_free(void **iter_p)
{
profile_node_iterator_free(iter_p);
}
/*
 * Advance the iterator and hand back malloc'd copies of the current
 * relation's name and value. Either output pointer may be NULL if the
 * caller is not interested in it; a NULL is stored when the underlying
 * iterator yields no string. On ENOMEM nothing is leaked: if the value
 * copy fails, the name copy is freed and *ret_name is reset to NULL.
 * Callers free the results with profile_release_string().
 */
errcode_t KRB5_CALLCONV
profile_iterator(void **iter_p, char **ret_name, char **ret_value)
{
    char *name = NULL, *value = NULL;
    char *name_copy = NULL, *value_copy = NULL;
    errcode_t retval;

    retval = profile_node_iterator(iter_p, 0, &name, &value);
    if (retval)
        return retval;

    if (ret_name && name) {
        name_copy = malloc(strlen(name) + 1);
        if (name_copy == NULL)
            return ENOMEM;
        strcpy(name_copy, name);
    }
    if (ret_value && value) {
        value_copy = malloc(strlen(value) + 1);
        if (value_copy == NULL) {
            free(name_copy);
            if (ret_name)
                *ret_name = NULL;
            return ENOMEM;
        }
        strcpy(value_copy, value);
    }
    if (ret_name)
        *ret_name = name_copy;
    if (ret_value)
        *ret_value = value_copy;
    return 0;
}
/*
 * Free a string previously returned by profile_iterator(); accepts NULL.
 */
void KRB5_CALLCONV
profile_release_string(char *str)
{
free(str);
}
|
41619.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -msve-vector-bits=512 -fallow-half-arguments-and-returns -fno-experimental-new-pass-manager -S -O1 -emit-llvm -o - %s | FileCheck %s
#include <arm_sve.h>
// Vector length in bits, fixed by -msve-vector-bits=512 on the RUN line.
#define N __ARM_FEATURE_SVE_BITS
// Fixed-length (VLST) aliases of the sizeless SVE types, pinned to N bits.
typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));
typedef svfloat64_t fixed_float64_t __attribute__((arm_sve_vector_bits(N)));
typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
//===----------------------------------------------------------------------===//
// Test caller/callee with VLST <-> VLAT
//===----------------------------------------------------------------------===//
// CHECK-LABEL: @sizeless_callee(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <vscale x 4 x i32> [[X:%.*]]
//
// Identity on a sizeless (scalable) SVE vector; callee side of the VLST<->VLAT tests.
svint32_t sizeless_callee(svint32_t x) {
return x;
}
// CHECK-LABEL: @fixed_caller(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[X:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[X]] to <vscale x 4 x i32>*
// CHECK-NEXT: store <vscale x 4 x i32> [[X_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
// CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, <16 x i32>* [[X]], align 16, [[TBAA6:!tbaa !.*]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[X1]], i64 0)
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[CASTSCALABLESVE]], i64 0)
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// Fixed-length caller passing a VLST argument to a sizeless callee and back.
fixed_int32_t fixed_caller(fixed_int32_t x) {
return sizeless_callee(x);
}
// CHECK-LABEL: @fixed_callee(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[X:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[X]] to <vscale x 4 x i32>*
// CHECK-NEXT: store <vscale x 4 x i32> [[X_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
// CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, <16 x i32>* [[X]], align 16, [[TBAA6]]
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
// CHECK-NEXT: store <16 x i32> [[X1]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// Identity on a fixed-length (VLST) vector; called from sizeless_caller below.
fixed_int32_t fixed_callee(fixed_int32_t x) {
return x;
}
// CHECK-LABEL: @sizeless_caller(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[COERCE_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT: [[COERCE1:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X:%.*]], i64 0)
// CHECK-NEXT: [[COERCE_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[COERCE_COERCE]] to <16 x i32>*
// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[COERCE_0__SROA_CAST]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[COERCE_COERCE]], align 16
// CHECK-NEXT: [[CALL:%.*]] = call <vscale x 4 x i32> @fixed_callee(<vscale x 4 x i32> [[TMP0]])
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* [[COERCE1]] to <vscale x 4 x i32>*
// CHECK-NEXT: store <vscale x 4 x i32> [[CALL]], <vscale x 4 x i32>* [[TMP1]], align 16
// CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, [[TBAA6]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP2]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
// Sizeless caller invoking a fixed-length callee (VLAT -> VLST -> VLAT round trip).
svint32_t sizeless_caller(svint32_t x) {
return fixed_callee(x);
}
//===----------------------------------------------------------------------===//
// fixed, fixed
//===----------------------------------------------------------------------===//
// CHECK-LABEL: @call_int32_ff(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OP1:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT: [[OP2:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[OP1]] to <vscale x 4 x i32>*
// CHECK-NEXT: store <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
// CHECK-NEXT: [[OP11:%.*]] = load <16 x i32>, <16 x i32>* [[OP1]], align 16, [[TBAA6]]
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* [[OP2]] to <vscale x 4 x i32>*
// CHECK-NEXT: store <vscale x 4 x i32> [[OP2_COERCE:%.*]], <vscale x 4 x i32>* [[TMP1]], align 16
// CHECK-NEXT: [[OP22:%.*]] = load <16 x i32>, <16 x i32>* [[OP2]], align 16, [[TBAA6]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP11]], i64 0)
// CHECK-NEXT: [[CASTSCALABLESVE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP22]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP2]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[CASTSCALABLESVE3]])
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP3]], i64 0)
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
//
// svsel with two fixed-length i32 operands (fixed, fixed).
fixed_int32_t call_int32_ff(svbool_t pg, fixed_int32_t op1, fixed_int32_t op2) {
return svsel(pg, op1, op2);
}
// CHECK-LABEL: @call_float64_ff(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OP1:%.*]] = alloca <8 x double>, align 16
// CHECK-NEXT: [[OP2:%.*]] = alloca <8 x double>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x double>* [[OP1]] to <vscale x 2 x double>*
// CHECK-NEXT: store <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double>* [[TMP0]], align 16
// CHECK-NEXT: [[OP11:%.*]] = load <8 x double>, <8 x double>* [[OP1]], align 16, [[TBAA6]]
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x double>* [[OP2]] to <vscale x 2 x double>*
// CHECK-NEXT: store <vscale x 2 x double> [[OP2_COERCE:%.*]], <vscale x 2 x double>* [[TMP1]], align 16
// CHECK-NEXT: [[OP22:%.*]] = load <8 x double>, <8 x double>* [[OP2]], align 16, [[TBAA6]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP11]], i64 0)
// CHECK-NEXT: [[CASTSCALABLESVE3:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP22]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP2]], <vscale x 2 x double> [[CASTSCALABLESVE]], <vscale x 2 x double> [[CASTSCALABLESVE3]])
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP3]], i64 0)
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
// CHECK-NEXT: store <8 x double> [[CASTFIXEDSVE]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP4]]
//
// svsel with two fixed-length f64 operands (fixed, fixed).
fixed_float64_t call_float64_ff(svbool_t pg, fixed_float64_t op1, fixed_float64_t op2) {
return svsel(pg, op1, op2);
}
// CHECK-LABEL: @call_bool_ff(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OP1:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT: [[OP2:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT: [[OP1_ADDR:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT: [[OP2_ADDR:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT: [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
// CHECK-NEXT: store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
// CHECK-NEXT: [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, [[TBAA6]]
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8>* [[OP2]] to <vscale x 16 x i1>*
// CHECK-NEXT: store <vscale x 16 x i1> [[OP2_COERCE:%.*]], <vscale x 16 x i1>* [[TMP1]], align 16
// CHECK-NEXT: [[OP22:%.*]] = load <8 x i8>, <8 x i8>* [[OP2]], align 16, [[TBAA6]]
// CHECK-NEXT: store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, [[TBAA6]]
// CHECK-NEXT: store <8 x i8> [[OP22]], <8 x i8>* [[OP2_ADDR]], align 16, [[TBAA6]]
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
// CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP2]], align 16, [[TBAA6]]
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i8>* [[OP2_ADDR]] to <vscale x 16 x i1>*
// CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP4]], align 16, [[TBAA6]]
// CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]])
// CHECK-NEXT: store <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9:!tbaa !.*]]
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
// CHECK-NEXT: [[TMP7:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
// CHECK-NEXT: store <8 x i8> [[TMP7]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP8]]
//
// svsel with two fixed-length predicate operands (fixed, fixed).
fixed_bool_t call_bool_ff(svbool_t pg, fixed_bool_t op1, fixed_bool_t op2) {
return svsel(pg, op1, op2);
}
//===----------------------------------------------------------------------===//
// fixed, scalable
//===----------------------------------------------------------------------===//
// CHECK-LABEL: @call_int32_fs(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OP1:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[OP1]] to <vscale x 4 x i32>*
// CHECK-NEXT: store <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
// CHECK-NEXT: [[OP11:%.*]] = load <16 x i32>, <16 x i32>* [[OP1]], align 16, [[TBAA6]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP11]], i64 0)
// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP1]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP2]], i64 0)
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
//
// svsel mixing a fixed-length and a scalable i32 operand (fixed, scalable).
fixed_int32_t call_int32_fs(svbool_t pg, fixed_int32_t op1, svint32_t op2) {
return svsel(pg, op1, op2);
}
// CHECK-LABEL: @call_float64_fs(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OP1:%.*]] = alloca <8 x double>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x double>* [[OP1]] to <vscale x 2 x double>*
// CHECK-NEXT: store <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double>* [[TMP0]], align 16
// CHECK-NEXT: [[OP11:%.*]] = load <8 x double>, <8 x double>* [[OP1]], align 16, [[TBAA6]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP11]], i64 0)
// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[CASTSCALABLESVE]], <vscale x 2 x double> [[OP2:%.*]])
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP2]], i64 0)
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
// CHECK-NEXT: store <8 x double> [[CASTFIXEDSVE]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP3]]
//
// svsel mixing a fixed-length and a scalable f64 operand (fixed, scalable).
fixed_float64_t call_float64_fs(svbool_t pg, fixed_float64_t op1, svfloat64_t op2) {
return svsel(pg, op1, op2);
}
// CHECK-LABEL: @call_bool_fs(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OP1:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT: [[OP1_ADDR:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT: [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
// CHECK-NEXT: store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
// CHECK-NEXT: [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, [[TBAA6]]
// CHECK-NEXT: store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, [[TBAA6]]
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, [[TBAA6]]
// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[OP2:%.*]])
// CHECK-NEXT: store <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
// CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
// CHECK-NEXT: store <8 x i8> [[TMP4]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP5]]
//
// svsel mixing a fixed-length and a scalable predicate operand (fixed, scalable).
fixed_bool_t call_bool_fs(svbool_t pg, fixed_bool_t op1, svbool_t op2) {
return svsel(pg, op1, op2);
}
//===----------------------------------------------------------------------===//
// scalable, scalable
//===----------------------------------------------------------------------===//
// CHECK-LABEL: @call_int32_ss(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP1]], i64 0)
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// svsel on two scalable i32 operands with a fixed-length return (scalable, scalable).
fixed_int32_t call_int32_ss(svbool_t pg, svint32_t op1, svint32_t op2) {
return svsel(pg, op1, op2);
}
// CHECK-LABEL: @call_float64_ss(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]])
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP1]], i64 0)
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
// CHECK-NEXT: store <8 x double> [[CASTFIXEDSVE]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP2]]
//
// svsel on two scalable f64 operands with a fixed-length return (scalable, scalable).
fixed_float64_t call_float64_ss(svbool_t pg, svfloat64_t op1, svfloat64_t op2) {
return svsel(pg, op1, op2);
}
// CHECK-LABEL: @call_bool_ss(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]])
// CHECK-NEXT: store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
// CHECK-NEXT: store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
//
// svsel on two scalable predicate operands with a fixed-length return (scalable, scalable).
fixed_bool_t call_bool_ss(svbool_t pg, svbool_t op1, svbool_t op2) {
return svsel(pg, op1, op2);
}
|
239162.c | /* $Xorg: resource.c,v 1.4 2001/02/09 02:05:42 xorgcvs Exp $ */
/*
Copyright 1987, 1998 The Open Group
Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of The Open Group shall not be
used in advertising or otherwise to promote the sale, use or other dealings
in this Software without prior written authorization from The Open Group.
* Copyright 1990, 1991 Network Computing Devices;
* Portions Copyright 1987 by Digital Equipment Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the names of Network Computing Devices,
* or Digital not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. Network Computing Devices, or Digital
* make no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* NETWORK COMPUTING DEVICES, AND DIGITAL DISCLAIM ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS, IN NO EVENT SHALL NETWORK COMPUTING DEVICES, OR DIGITAL BE
* LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* %W% %G%
*
*/
/* $XFree86: xc/programs/xfs/difs/resource.c,v 3.8tsi Exp $ */
/*
* a resource is a 32 bit quantity. the upper 12 bits are client id.
* client provides a 19 bit resource id. this is "hashed" by me by
* taking the 10 lower bits and xor'ing with the mid 10 bits.
*
* It is sometimes necessary for the server to create an ID that looks
* like it belongs to a client. This ID, however, must not be one
* the client actually can create, or we have the potential for conflict.
* The 20th bit of the ID is resevered for the server's use for this
* purpose. By setting CLIENT_ID(id) to the client, the SERVER_BIT to
* 1, and an otherwise unused ID in the low 19 bits, we can create a
* resource "owned" by the client.
*
* The following IDs are currently reserved for siccing on the client:
* 1 - allocated color to be freed when the client dies
*/
#include <X11/fonts/FS.h>
#include "misc.h"
#include "os.h"
#include "fsresource.h"
#include "clientstr.h"
#include "dispatch.h"
#include "globals.h"
/* Forward declaration; defined below, invoked from AddResource. */
static void rebuild_table(int client);
/* Initial bucket count (64 == 1 << INITHASHSIZE) and hash-size bounds. */
#define INITBUCKETS 64
#define INITHASHSIZE 6
#define MAXHASHSIZE 11
/* One entry in a per-client resource hash chain. */
typedef struct _Resource {
struct _Resource *next;
FSID id;
RESTYPE type;
pointer value;
} ResourceRec, *ResourcePtr;
#define NullResource ((ResourcePtr)NULL)
/* Per-client hash table of resources plus fake-ID allocation state. */
typedef struct _ClientResource {
ResourcePtr *resources;
int elements;
int buckets;
int hashsize; /* log(2)(buckets) */
FSID fakeID;
FSID endFakeID;
FSID expectID;
} ClientResourceRec;
/* Highest resource type registered so far. */
static RESTYPE lastResourceType;
#ifdef NOTYET
static RESTYPE lastResourceClass;
#endif
/* Mask selecting the type bits of a RESTYPE. */
static RESTYPE TypeMask;
/* Per-type destructor signature; table below is indexed by (type & TypeMask). */
typedef int (*DeleteType) (void *, FSID);
extern int CloseClientFont(ClientPtr, FSID);
static DeleteType *DeleteFuncs = (DeleteType *) NULL;
#ifdef NOTYET
/*
 * Register a new resource type with its deletion hook. Grows the
 * DeleteFuncs table by one slot and returns the new type value, or 0 if
 * the type space would collide with the class bits or memory runs out.
 */
RESTYPE
CreateNewResourceType(DeleteType deleteFunc)
{
    RESTYPE candidate = lastResourceType + 1;
    DeleteType *table;

    /* Refuse to grow the type space into the class-bit range. */
    if (candidate & lastResourceClass)
        return 0;
    table = (DeleteType *) fsrealloc(DeleteFuncs,
                                     (candidate + 1) * sizeof(DeleteType));
    if (!table)
        return 0;
    lastResourceType = candidate;
    DeleteFuncs = table;
    DeleteFuncs[candidate] = deleteFunc;
    return candidate;
}
/*
 * Claim the next resource class bit (one below the last) and shrink
 * TypeMask to match. Returns the new class bit, or 0 if it would
 * collide with the registered type values.
 */
RESTYPE
CreateNewResourceClass(void)
{
    RESTYPE bit = lastResourceClass >> 1;

    if (bit & lastResourceType)
        return 0;
    lastResourceClass = bit;
    TypeMask = bit - 1;
    return bit;
}
#endif /* NOTYET */
/* Per-client resource tables, indexed by client->index. */
ClientResourceRec clientTable[MAXCLIENTS];
/*****************
* InitClientResources
* When a new client is created, call this to allocate space
* in resource table
*****************/
/*
 * Deletion hook for RT_NONE resources: nothing to tear down. Both
 * parameters are unused but required by the DeleteType signature.
 */
int
NoneDeleteFunc (void *ptr, FSID id)
{
return FSSuccess;
}
/*
 * Allocate and reset the resource hash table for a new client. The very
 * first call (for serverClient) also (re)builds the global DeleteFuncs
 * dispatch table. Returns FALSE on allocation failure, TRUE otherwise.
 */
Bool
InitClientResources(ClientPtr client)
{
register int i,
j;
/* Server start-up: initialize the type space and destructor table. */
if (client == serverClient) {
lastResourceType = RT_LASTPREDEF;
#ifdef NOTYET
lastResourceClass = RC_LASTPREDEF;
#endif
TypeMask = RC_LASTPREDEF - 1;
/* Drop any table left over from a previous server generation. */
if (DeleteFuncs)
fsfree(DeleteFuncs);
DeleteFuncs = (DeleteType *) fsalloc((lastResourceType + 1) *
sizeof(DeleteType));
if (!DeleteFuncs)
return FALSE;
DeleteFuncs[RT_NONE & TypeMask] = NoneDeleteFunc;
DeleteFuncs[RT_FONT & TypeMask] = (DeleteType)CloseClientFont;
DeleteFuncs[RT_AUTHCONT & TypeMask] = (DeleteType)DeleteAuthCont;
}
/* Fresh, empty bucket array for this client. */
clientTable[i = client->index].resources =
(ResourcePtr *) fsalloc(INITBUCKETS * sizeof(ResourcePtr));
if (!clientTable[i].resources)
return FALSE;
clientTable[i].buckets = INITBUCKETS;
clientTable[i].elements = 0;
clientTable[i].hashsize = INITHASHSIZE;
/* Fake (server-generated) IDs for this client start at SERVER_BIT and
* run to the end of the resource-ID space. */
clientTable[i].fakeID = SERVER_BIT;
clientTable[i].endFakeID = (clientTable[i].fakeID | RESOURCE_ID_MASK) + 1;
for (j = 0; j < INITBUCKETS; j++) {
clientTable[i].resources[j] = NullResource;
}
return TRUE;
}
/*
 * Map a resource ID to a bucket index for the given client's table by
 * xor-folding the masked ID down to hashsize bits. Each table width has
 * its own folding recipe; an unexpected hashsize yields -1.
 */
static int
hash(int client, FSID id)
{
    int bits = clientTable[client].hashsize;

    id &= RESOURCE_ID_MASK;
    if (bits == 6)
        return (int) (0x03F & (id ^ (id >> 6) ^ (id >> 12)));
    if (bits == 7)
        return (int) (0x07F & (id ^ (id >> 7) ^ (id >> 13)));
    if (bits == 8)
        return (int) (0x0FF & (id ^ (id >> 8) ^ (id >> 16)));
    if (bits == 9)
        return (int) (0x1FF & (id ^ (id >> 9)));
    if (bits == 10)
        return (int) (0x3FF & (id ^ (id >> 10)));
    if (bits == 11)
        return (int) (0x7FF & (id ^ (id >> 11)));
    return -1;
}
static Font
AvailableID(
register int client,
register FSID id,
register FSID maxid,
register FSID goodid)
{
register ResourcePtr res;
if ((goodid >= id) && (goodid <= maxid))
return goodid;
for (; id <= maxid; id++)
{
res = clientTable[client].resources[hash(client, id)];
while (res && (res->id != id))
res = res->next;
if (!res)
return id;
}
return 0;
}
/*
* Return the next usable fake client ID.
*
* Normally this is just the next one in line, but if we've used the last
* in the range, we need to find a new range of safe IDs to avoid
* over-running another client.
*/
FSID
FakeClientID(int client)
{
register FSID id, maxid;
register ResourcePtr *resp;
register ResourcePtr res;
register int i;
FSID goodid;
/* Fast path: hand out the next ID from the precomputed safe range. */
id = clientTable[client].fakeID++;
if (id != clientTable[client].endFakeID)
return id;
/* Range exhausted: rescan this client's entire SERVER_BIT ID space to
* find the largest window free of existing resources. */
id = ((Mask)client << CLIENTOFFSET) | SERVER_BIT;
maxid = id | RESOURCE_ID_MASK;
goodid = 0;
for (resp = clientTable[client].resources, i = clientTable[client].buckets;
--i >= 0;)
{
for (res = *resp++; res; res = res->next)
{
/* Resources outside the fake-ID window don't constrain us. */
if ((res->id < id) || (res->id > maxid))
continue;
/* Shrink [id, maxid] toward whichever side of this in-use ID still
* holds a free ID; AvailableID probes for one and caches goodid. */
if (((res->id - id) >= (maxid - res->id)) ?
(goodid = AvailableID(client, id, res->id - 1, goodid)) :
!(goodid = AvailableID(client, res->id + 1, maxid, goodid)))
maxid = res->id - 1;
else
id = res->id + 1;
}
}
if (id > maxid) {
/* No free fake IDs remain for this client. */
if (!client)
FatalError("FakeClientID: server internal ids exhausted\n");
MarkClientException(clients[client]);
/* Fall back to a SERVER_BIT*3 range — presumably a last-resort range
* that may collide; see the exception marked above. TODO confirm. */
id = ((Mask)client << CLIENTOFFSET) | (SERVER_BIT * 3);
maxid = id | RESOURCE_ID_MASK;
}
/* Remember the new safe range for subsequent fast-path calls. */
clientTable[client].fakeID = id + 1;
clientTable[client].endFakeID = maxid + 1;
return id;
}
/*
 * Insert (id, type, value) into client cid's resource hash table, growing
 * the table when the load factor reaches 4 (up to MAXHASHSIZE). On
 * allocation failure the type's delete function is invoked on value
 * (ownership was transferred to us) and FALSE is returned.
 *
 * Fix: the diagnostic ErrorF passed FSID, RESTYPE, and a pointer to "%x"
 * conversions — undefined behavior where those types are wider than int
 * (e.g. LP64). The arguments are now cast and printed with %lx / %p.
 */
Bool
AddResource(
    int cid,
    FSID id,
    RESTYPE type,
    pointer value)
{
    register ClientResourceRec *rrec;
    register ResourcePtr res,
            *head;

    rrec = &clientTable[cid];
    if (!rrec->buckets) {
        ErrorF("AddResource(%lx, %lx, %p), client=%d \n",
               (unsigned long) id, (unsigned long) type,
               (void *) value, cid);
        FatalError("client not in use\n");
    }
    /* Double the table before inserting if it is 4x overloaded. */
    if ((rrec->elements >= 4 * rrec->buckets) &&
        (rrec->hashsize < MAXHASHSIZE))
        rebuild_table(cid);
    head = &rrec->resources[hash(cid, id)];
    res = (ResourcePtr) fsalloc(sizeof(ResourceRec));
    if (!res) {
        (*DeleteFuncs[type & TypeMask]) (value, id);
        return FALSE;
    }
    /* Push onto the front of the bucket chain. */
    res->next = *head;
    res->id = id;
    res->type = type;
    res->value = value;
    *head = res;
    rrec->elements++;
    /* Track one past the highest client-generated (non-SERVER_BIT) ID. */
    if (!(id & SERVER_BIT) && (id >= rrec->expectID))
        rrec->expectID = id + 1;
    return TRUE;
}
/*
 * Double the client's bucket array and rehash every resource into it.
 * Allocation failure is tolerated silently: the table simply stays at
 * its current size and AddResource will try again later.
 */
static void
rebuild_table(int client)
{
register int j;
register ResourcePtr res,
next;
ResourcePtr **tails,
*resources;
register ResourcePtr **tptr,
*rptr;
/*
* For now, preserve insertion order, since some ddx layers depend on
* resources being free in the opposite order they are added.
*/
j = 2 * clientTable[client].buckets;
/* tails[b] tracks the next link slot to fill in bucket b's chain, so
* each append is O(1) and chain order is preserved. */
tails = (ResourcePtr **) ALLOCATE_LOCAL(j * sizeof(ResourcePtr *));
if (!tails)
return;
resources = (ResourcePtr *) fsalloc(j * sizeof(ResourcePtr));
if (!resources) {
DEALLOCATE_LOCAL(tails);
return;
}
/* Every new bucket starts empty with its tail at the head slot. */
for (rptr = resources, tptr = tails; --j >= 0; rptr++, tptr++) {
*rptr = NullResource;
*tptr = rptr;
}
/* Bump hashsize first: hash() below must use the new table width. */
clientTable[client].hashsize++;
for (j = clientTable[client].buckets,
rptr = clientTable[client].resources;
--j >= 0;
rptr++) {
/* Append each old entry to the tail of its new bucket. */
for (res = *rptr; res; res = next) {
next = res->next;
res->next = NullResource;
tptr = &tails[hash(client, res->id)];
**tptr = res;
*tptr = &res->next;
}
}
DEALLOCATE_LOCAL(tails);
clientTable[client].buckets *= 2;
fsfree(clientTable[client].resources);
clientTable[client].resources = resources;
}
/*
 * Remove every resource with the given id from client cid's table,
 * invoking each one's delete function unless its type matches
 * skipDeleteFuncType. It is a fatal error for the id to be absent.
 *
 * Fix: FatalError passed an FSID to a "%X" conversion — undefined
 * behavior where FSID is wider than unsigned int (e.g. LP64). The value
 * is now cast to unsigned long and printed with %lX.
 */
void
FreeResource(
    int cid,
    FSID id,
    RESTYPE skipDeleteFuncType)
{
    register ResourcePtr res;
    register ResourcePtr *prev,
            *head;
    register int *eltptr;
    int elements;
    Bool gotOne = FALSE;

    if (clientTable[cid].buckets) {
        head = &clientTable[cid].resources[hash(cid, id)];
        eltptr = &clientTable[cid].elements;
        prev = head;
        while ((res = *prev) != (ResourcePtr) 0) {
            if (res->id == id) {
                RESTYPE rtype = res->type;

                *prev = res->next;
                elements = --*eltptr;
                if (rtype != skipDeleteFuncType)
                    (*DeleteFuncs[rtype & TypeMask]) (res->value, res->id);
                fsfree(res);
                /* The delete function may have freed other resources in
                 * this bucket; if the element count moved, restart from
                 * the head because prev may no longer be valid. */
                if (*eltptr != elements)
                    prev = head;
                gotOne = TRUE;
            } else
                prev = &res->next;
        }
    }
    if (!gotOne)
        FatalError("freeing resource id=%lX which isn't there\n",
                   (unsigned long) id);
}
#ifdef NOTYET
/*
 * Remove the first resource matching both id and type from client cid's
 * table. Unless skipFree is set, the type's delete function is invoked
 * on the value. A missing resource is silently ignored.
 *
 * Fix: the loop condition was the bare assignment `while (res = *prev)`,
 * which draws -Wparentheses and obscures intent; it is now an explicit
 * parenthesized assignment-and-compare.
 */
void
FreeResourceByType(
    int cid,
    FSID id,
    RESTYPE type,
    Bool skipFree)
{
    register ResourcePtr res;
    register ResourcePtr *prev,
            *head;

    if (clientTable[cid].buckets) {
        head = &clientTable[cid].resources[hash(cid, id)];
        prev = head;
        while ((res = *prev) != NullResource) {
            if (res->id == id && res->type == type) {
                *prev = res->next;
                if (!skipFree)
                    (*DeleteFuncs[type & TypeMask]) (res->value, res->id);
                fsfree(res);
                break;
            } else
                prev = &res->next;
        }
    }
}
/*
* Change the value associated with a resource id. Caller
* is responsible for "doing the right thing" with the old
* data
*/
/*
 * Replace the value stored for resource (cid, id) of the given type.
 * The previous value is NOT freed here; the caller owns it.
 * Returns TRUE on success, FALSE when no matching resource exists.
 */
Bool
ChangeResourceValue(
    int cid,
    FSID id,
    RESTYPE rtype,
    pointer value)
{
    register ResourcePtr r;

    if (!clientTable[cid].buckets)
        return FALSE;
    for (r = clientTable[cid].resources[hash(cid, id)]; r; r = r->next) {
        if ((r->id == id) && (r->type == rtype)) {
            r->value = value;
            return TRUE;
        }
    }
    return FALSE;
}
#endif /* NOTYET */
/*
 * Release every resource owned by the given client, invoking each
 * resource type's delete callback, then free the bucket array itself and
 * mark the client's table empty.
 */
void
FreeClientResources(ClientPtr client)
{
    register ResourcePtr *resources;
    register ResourcePtr this;
    int j;

    /*
     * This routine shouldn't be called with a null client, but just in case
     * ...
     */
    if (!client)
        return;
    resources = clientTable[client->index].resources;
    for (j = 0; j < clientTable[client->index].buckets; j++) {
        /*
         * It may seem silly to update the head of this resource list as we
         * delete the members, since the entire list will be deleted any way,
         * but there are some resource deletion functions "FreeClientPixels"
         * for one which do a LookupID on another resource id (a Colormap id
         * in this case), so the resource list must be kept valid up to the
         * point that it is deleted, so every time we delete a resource, we
         * must update the head, just like in free_resource. I hope that this
         * doesn't slow down mass deletion appreciably. PRH
         */
        ResourcePtr *head;

        head = &resources[j];
        for (this = *head; this; this = *head) {
            RESTYPE rtype = this->type;

            *head = this->next;
            (*DeleteFuncs[rtype & TypeMask]) (this->value, this->id);
            fsfree(this);
        }
    }
    fsfree(clientTable[client->index].resources);
    /* buckets == 0 marks the table as torn down for FreeAllResources() */
    clientTable[client->index].buckets = 0;
}
void
FreeAllResources(void)
{
int i;
for (i = 0; i < currentMaxClients; i++) {
if (clientTable[i].buckets)
FreeClientResources(clients[i]);
}
}
/*
* lookup_id_by_type returns the object with the given id and type, else NULL.
*/
/*
 * lookup_id_by_type: return the value of the resource with the given id
 * and exact type owned by client cid, or NULL when none exists.
 */
pointer
LookupIDByType(
    int cid,
    FSID id,
    RESTYPE rtype)
{
    register ResourcePtr r;

    if (!clientTable[cid].buckets)
        return (pointer) NULL;
    for (r = clientTable[cid].resources[hash(cid, id)]; r; r = r->next) {
        if ((r->id == id) && (r->type == rtype))
            return r->value;
    }
    return (pointer) NULL;
}
#ifdef NOTYET
/*
* lookup_ID_by_class returns the object with the given id and any one of the
* given classes, else NULL.
*/
/*
 * Return the value of the resource with the given id whose type matches
 * any of the requested class bits, or NULL when none exists.  The owning
 * client is derived from the id itself.
 */
pointer
LookupIDByClass(
    FSID id,
    RESTYPE classes)
{
    int cid = CLIENT_ID(id);
    register ResourcePtr r;

    if (cid >= MAXCLIENTS || !clientTable[cid].buckets)
        return (pointer) NULL;
    for (r = clientTable[cid].resources[hash(cid, id)]; r; r = r->next) {
        if ((r->id == id) && (r->type & classes))
            return r->value;
    }
    return (pointer) NULL;
}
#endif /* NOTYET */
|
536669.c | int __attribute__((aligned(4096))) arr[1024];
void exit(int code);
int write(int fd, char *buf, int len);
/*
 * Freestanding entry point: verify that the page-aligned BSS array is
 * zero-initialized by the loader, then report SUCCESS/FAIL on stdout and
 * exit with the matching status code.
 */
void _start()
{
    int idx = 0;

    while (idx < 1024) {
        if (arr[idx] != 0) {
            write(1, "FAIL\n", 5);
            exit(1);
        }
        idx++;
    }
    write(1, "SUCCESS\n", 8);
    exit(0);
}
|
317651.c | /*++
Copyright (c) Microsoft Corporation. All rights reserved.
THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR
PURPOSE.
Module Name:
install.c
Abstract:
Win32 routines to dynamically load and unload a Windows NT kernel-mode
driver using the Service Control Manager APIs.
Environment:
User mode only
--*/
#include <windows.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strsafe.h>
#include "sdma.h"
BOOLEAN
InstallDriver(
_In_ SC_HANDLE SchSCManager,
_In_ LPCTSTR DriverName,
_In_ LPCTSTR ServiceExe
);
BOOLEAN
RemoveDriver(
_In_ SC_HANDLE SchSCManager,
_In_ LPCTSTR DriverName
);
BOOLEAN
StartDriver(
_In_ SC_HANDLE SchSCManager,
_In_ LPCTSTR DriverName
);
BOOLEAN
StopDriver(
_In_ SC_HANDLE SchSCManager,
_In_ LPCTSTR DriverName
);
BOOLEAN
InstallDriver(
    _In_ SC_HANDLE SchSCManager,
    _In_ LPCTSTR DriverName,
    _In_ LPCTSTR ServiceExe
    )
/*++
Routine Description:
    Creates a Service Control Manager entry for a standalone kernel-mode
    driver (demand-start, LocalSystem).  An already-existing service of the
    same name is treated as success.
Arguments:
    SchSCManager - open handle to the SCM database
    DriverName   - service and display name for the driver
    ServiceExe   - full path to the driver binary (.sys)
Return Value:
    TRUE on success (including ERROR_SERVICE_EXISTS), FALSE otherwise.
--*/
{
    SC_HANDLE schService;
    DWORD err;
    //
    // NOTE: This creates an entry for a standalone driver. If this
    //       is modified for use with a driver that requires a Tag,
    //       Group, and/or Dependencies, it may be necessary to
    //       query the registry for existing driver information
    //       (in order to determine a unique Tag, etc.).
    //
    //
    // Create a new a service object.
    //
    schService = CreateService(SchSCManager,           // handle of service control manager database
                               DriverName,             // address of name of service to start
                               DriverName,             // address of display name
                               SERVICE_ALL_ACCESS,     // type of access to service
                               SERVICE_KERNEL_DRIVER,  // type of service
                               SERVICE_DEMAND_START,   // when to start service
                               SERVICE_ERROR_NORMAL,   // severity if service fails to start
                               ServiceExe,             // address of name of binary file
                               NULL,                   // service does not belong to a group
                               NULL,                   // no tag requested
                               NULL,                   // no dependency names
                               NULL,                   // use LocalSystem account
                               NULL                    // no password for service account
                               );
    if (schService == NULL) {
        err = GetLastError();
        if (err == ERROR_SERVICE_EXISTS) {
            //
            // Ignore this error.
            //
            return TRUE;
        } else {
            printf("CreateService failed!  Error = %d \n", (int)err );
            //
            // Indicate an error.
            //
            return FALSE;
        }
    }
    //
    // Close the service object.
    //
    CloseServiceHandle(schService);
    //
    // Indicate success.
    //
    return TRUE;
}   // InstallDriver
/*
 * Dispatcher for driver lifecycle operations.  Opens the SCM, then either
 * installs+starts (DRIVER_FUNC_INSTALL) or stops+removes
 * (DRIVER_FUNC_REMOVE) the named kernel driver.  Removal ignores errors.
 */
BOOLEAN
ManageDriver(
    _In_ LPCTSTR DriverName,
    _In_ LPCTSTR ServiceName,
    _In_ USHORT Function
    )
{
    SC_HANDLE schSCManager;
    BOOLEAN rCode = TRUE;
    //
    // Insure (somewhat) that the driver and service names are valid.
    //
    if (!DriverName || !ServiceName) {
        printf("Invalid Driver or Service provided to ManageDriver() \n");
        return FALSE;
    }
    //
    // Connect to the Service Control Manager and open the Services database.
    //
    schSCManager = OpenSCManager(NULL,                   // local machine
                                 NULL,                   // local database
                                 SC_MANAGER_ALL_ACCESS   // access required
                                 );
    if (!schSCManager) {
        printf("Open SC Manager failed! Error = %d \n", (int)GetLastError());
        return FALSE;
    }
    //
    // Do the requested function.
    //
    switch( Function ) {
        case DRIVER_FUNC_INSTALL:
            //
            // Install the driver service.
            //
            if (InstallDriver(schSCManager,
                              DriverName,
                              ServiceName
                              )) {
                //
                // Start the driver service (i.e. start the driver).
                //
                rCode = StartDriver(schSCManager,
                                    DriverName
                                    );
            } else {
                //
                // Indicate an error.
                //
                rCode = FALSE;
            }
            break;
        case DRIVER_FUNC_REMOVE:
            //
            // Stop the driver.
            //
            StopDriver(schSCManager,
                       DriverName
                       );
            //
            // Remove the driver service.
            //
            RemoveDriver(schSCManager,
                         DriverName
                         );
            //
            // Ignore all errors.
            //
            rCode = TRUE;
            break;
        default:
            printf("Unknown ManageDriver() function. \n");
            rCode = FALSE;
            break;
    }
    //
    // Close handle to service control manager.
    //
    CloseServiceHandle(schSCManager);
    return rCode;
}   // ManageDriver
/*
 * Mark the named kernel-driver service for deletion from the SCM
 * database.  Returns TRUE when the service was marked, FALSE otherwise.
 * The service handle is closed on every path.
 */
BOOLEAN
RemoveDriver(
    _In_ SC_HANDLE SchSCManager,
    _In_ LPCTSTR DriverName
    )
{
    SC_HANDLE schService;
    BOOLEAN deleted;

    schService = OpenService(SchSCManager, DriverName, SERVICE_ALL_ACCESS);
    if (schService == NULL) {
        printf("OpenService failed!  Error = %d \n", (int)GetLastError());
        return FALSE;
    }

    // Request deletion; the SCM removes the entry once all handles close.
    deleted = DeleteService(schService) ? TRUE : FALSE;
    if (!deleted) {
        printf("DeleteService failed!  Error = %d \n", (int)GetLastError());
    }

    CloseServiceHandle(schService);
    return deleted;
}   // RemoveDriver
/*
 * Start the named kernel-driver service.  ERROR_SERVICE_ALREADY_RUNNING
 * is treated as success.  Returns TRUE on success, FALSE otherwise.
 *
 * BUGFIX: the original returned directly from both branches of the
 * StartService failure path, leaking the SC_HANDLE even though its own
 * comment promised to "fall through to properly close the service
 * handle".  The handle is now closed on every path.
 */
BOOLEAN
StartDriver(
    _In_ SC_HANDLE SchSCManager,
    _In_ LPCTSTR DriverName
    )
{
    SC_HANDLE schService;
    DWORD err;
    BOOLEAN rCode = TRUE;

    //
    // Open the handle to the existing service.
    //
    schService = OpenService(SchSCManager,
                             DriverName,
                             SERVICE_ALL_ACCESS
                             );
    if (schService == NULL) {
        printf("OpenService failed!  Error = %d \n", (int)GetLastError());
        return FALSE;
    }

    //
    // Start the execution of the service (i.e. start the driver).
    //
    if (!StartService(schService,     // service identifier
                      0,              // number of arguments
                      NULL            // pointer to arguments
                      )) {
        err = GetLastError();
        if (err == ERROR_SERVICE_ALREADY_RUNNING) {
            //
            // Already running is fine; keep rCode == TRUE.
            //
        } else {
            printf("StartService failure! Error = %d \n", (int)err );
            rCode = FALSE;
        }
    }

    //
    // Close the service object on all paths.
    //
    CloseServiceHandle(schService);
    return rCode;
}   // StartDriver
/*
 * Ask the SCM to stop the named kernel-driver service.  Returns TRUE when
 * the stop request was accepted, FALSE otherwise.  The service handle is
 * closed on every path.
 */
BOOLEAN
StopDriver(
    _In_ SC_HANDLE SchSCManager,
    _In_ LPCTSTR DriverName
    )
{
    SC_HANDLE schService;
    SERVICE_STATUS serviceStatus;
    BOOLEAN stopped;

    schService = OpenService(SchSCManager, DriverName, SERVICE_ALL_ACCESS);
    if (schService == NULL) {
        printf("OpenService failed!  Error = %d \n", (int)GetLastError());
        return FALSE;
    }

    // Send the stop control; serviceStatus receives the post-control state.
    stopped = ControlService(schService,
                             SERVICE_CONTROL_STOP,
                             &serviceStatus) ? TRUE : FALSE;
    if (!stopped) {
        printf("ControlService failed!  Error = %d \n", (int)GetLastError() );
    }

    CloseServiceHandle (schService);
    return stopped;
}   //  StopDriver
/*
 * Build the full path "<cwd>\<DRIVER_NAME>.sys" into DriverLocation and
 * verify the file exists.  Returns TRUE when the path was built and the
 * file is present, FALSE otherwise.
 *
 * BUGFIX: the original failure test was (driverLocLen < BufferLength),
 * which is the SUCCESS condition of GetCurrentDirectory — every good call
 * was rejected and a too-small buffer was accepted.  The original also
 * overwrote the last character of the directory with '\0', corrupting the
 * path; GetCurrentDirectory already NUL-terminates on success.
 */
BOOLEAN
SetupDriverName(
    _Inout_updates_bytes_all_(BufferLength) PCHAR DriverLocation,
    _In_ ULONG BufferLength
    )
{
    HANDLE fileHandle;
    DWORD driverLocLen = 0;
    //
    // Get the current directory.  On success the return value is the
    // number of characters written (excluding the NUL); on failure it is
    // 0; when the buffer is too small it is the required size, which is
    // >= BufferLength.
    //
    driverLocLen = GetCurrentDirectory(BufferLength,
                                       DriverLocation
                                       );
    if (driverLocLen == 0 || driverLocLen >= BufferLength) {
        printf("GetCurrentDirectory failed!  Error = %d \n", (int)GetLastError());
        return FALSE;
    }
    //
    // Setup path name to driver file.
    //
    if (FAILED( StringCbCat(DriverLocation, BufferLength, "\\"DRIVER_NAME".sys") )) {
        return FALSE;
    }
    //
    // Insure driver file is in the specified directory.
    //
    if ((fileHandle = CreateFile(DriverLocation,
                                 GENERIC_READ,
                                 0,
                                 NULL,
                                 OPEN_EXISTING,
                                 FILE_ATTRIBUTE_NORMAL,
                                 NULL
                                 )) == INVALID_HANDLE_VALUE) {
        printf("%s.sys is not loaded.\n", DRIVER_NAME);
        //
        // Indicate failure.
        //
        return FALSE;
    }
    //
    // Close open file handle.
    //
    CloseHandle(fileHandle);
    //
    // Indicate success.
    //
    return TRUE;
}   // SetupDriverName
|
330117.c | /***************************************************************************
video.c
Functions to emulate the video hardware of the machine.
***************************************************************************/
#include "driver.h"
static tilemap *bg_tilemap;
/*
 * Ponttehk palette decode: for each palette entry, a 4-bit value per
 * channel is read from three PROM banks (R at +0, G at +total_colors,
 * B at +2*total_colors) and converted to 8-bit with the resistor weights
 * 0x0e/0x1f/0x43/0x8f for bits 0..3.
 */
PALETTE_INIT( ponttehk )
{
    int i;
    for ( i = 0; i < machine->drv->total_colors; i++ )
    {
        int bit0,bit1,bit2,bit3,r,g,b;
        /* red component */
        bit0 = (color_prom[0] >> 0) & 0x01;
        bit1 = (color_prom[0] >> 1) & 0x01;
        bit2 = (color_prom[0] >> 2) & 0x01;
        bit3 = (color_prom[0] >> 3) & 0x01;
        r = 0x0e * bit0 + 0x1f * bit1 + 0x43 * bit2 + 0x8f * bit3;
        /* green component */
        bit0 = (color_prom[machine->drv->total_colors] >> 0) & 0x01;
        bit1 = (color_prom[machine->drv->total_colors] >> 1) & 0x01;
        bit2 = (color_prom[machine->drv->total_colors] >> 2) & 0x01;
        bit3 = (color_prom[machine->drv->total_colors] >> 3) & 0x01;
        g = 0x0e * bit0 + 0x1f * bit1 + 0x43 * bit2 + 0x8f * bit3;
        /* blue component */
        bit0 = (color_prom[2*machine->drv->total_colors] >> 0) & 0x01;
        bit1 = (color_prom[2*machine->drv->total_colors] >> 1) & 0x01;
        bit2 = (color_prom[2*machine->drv->total_colors] >> 2) & 0x01;
        bit3 = (color_prom[2*machine->drv->total_colors] >> 3) & 0x01;
        b = 0x0e * bit0 + 0x1f * bit1 + 0x43 * bit2 + 0x8f * bit3;
        palette_set_color(machine,i,MAKE_RGB(r,g,b));
        /* color_prom walks entry-by-entry; bank offsets are relative */
        color_prom++;
    }
}
/*
 * Lovely Cards variant of the same PROM decode.
 * NOTE(review): the bit masks here are 0x11 (bits 0 and 4) instead of the
 * conventional 0x01 used by the ponttehk decode above.  This matches the
 * "ever so slightly different" comment, but bits 4..7 of a PROM nibble
 * read would normally be zero — confirm against the PROM dumps before
 * "fixing" it.
 */
PALETTE_INIT( lvcards ) //Ever so slightly different, but different enough.
{
    int i;
    for ( i = 0; i < machine->drv->total_colors; i++ )
    {
        int bit0,bit1,bit2,bit3,r,g,b;
        /* red component */
        bit0 = (color_prom[0] >> 0) & 0x11;
        bit1 = (color_prom[0] >> 1) & 0x11;
        bit2 = (color_prom[0] >> 2) & 0x11;
        bit3 = (color_prom[0] >> 3) & 0x11;
        r = 0x0e * bit0 + 0x1f * bit1 + 0x43 * bit2 + 0x8f * bit3;
        /* green component */
        bit0 = (color_prom[machine->drv->total_colors] >> 0) & 0x11;
        bit1 = (color_prom[machine->drv->total_colors] >> 1) & 0x11;
        bit2 = (color_prom[machine->drv->total_colors] >> 2) & 0x11;
        bit3 = (color_prom[machine->drv->total_colors] >> 3) & 0x11;
        g = 0x0e * bit0 + 0x1f * bit1 + 0x43 * bit2 + 0x8f * bit3;
        /* blue component */
        bit0 = (color_prom[2*machine->drv->total_colors] >> 0) & 0x11;
        bit1 = (color_prom[2*machine->drv->total_colors] >> 1) & 0x11;
        bit2 = (color_prom[2*machine->drv->total_colors] >> 2) & 0x11;
        bit3 = (color_prom[2*machine->drv->total_colors] >> 3) & 0x11;
        b = 0x0e * bit0 + 0x1f * bit1 + 0x43 * bit2 + 0x8f * bit3;
        palette_set_color(machine,i,MAKE_RGB(r,g,b));
        color_prom++;
    }
}
/* Write a tile code byte and mark the affected tile for redraw. */
WRITE8_HANDLER( lvcards_videoram_w )
{
    videoram[offset] = data;
    tilemap_mark_tile_dirty(bg_tilemap, offset);
}
/* Write a tile attribute byte and mark the affected tile for redraw. */
WRITE8_HANDLER( lvcards_colorram_w )
{
    colorram[offset] = data;
    tilemap_mark_tile_dirty(bg_tilemap, offset);
}
/*
 * Decode one background tile: attribute bits 4-5 and 7 extend the tile
 * code, bits 0-3 select the color, bit 6 flips the tile horizontally.
 */
static TILE_GET_INFO( get_bg_tile_info )
{
    int attr = colorram[tile_index];
    int code = videoram[tile_index] + ((attr & 0x30) << 4) + ((attr & 0x80) << 3);
    int color = attr & 0x0f;
    int flags = (attr & 0x40) ? TILE_FLIPX : 0;
    SET_TILE_INFO(0, code, color, flags);
}
/* Create the 32x32 grid of 8x8 background tiles in row-major scan order. */
VIDEO_START( lvcards )
{
    bg_tilemap = tilemap_create(get_bg_tile_info, tilemap_scan_rows,
        TILEMAP_TYPE_PEN, 8, 8, 32, 32);
}
/* Render the (only) background layer into the frame bitmap. */
VIDEO_UPDATE( lvcards )
{
    tilemap_draw(bitmap, cliprect, bg_tilemap, 0, 0);
    return 0;
}
|
170345.c | /*
* Copyright (c) 2021 Arm Limited. All rights reserved.
*/
#include <stddef.h>
#include "audio_drv.h"
#include "arm_vsi.h"
#ifdef _RTE_
#include "RTE_Components.h"
#endif
#include CMSIS_device_header
/* Audio Peripheral definitions */
#define AudioIn ARM_VSI0 /* Audio Input access struct */
#define AudioIn_IRQn ARM_VSI0_IRQn /* Audio Input Interrupt number */
#define AudioIn_Handler ARM_VSI0_Handler /* Audio Input Interrupt handler */
/* Audio Peripheral registers */
#define CONTROL Regs[0] /* Control receiver */
#define CHANNELS Regs[1] /* Number of channels */
#define SAMPLE_BITS Regs[2] /* Sample number of bits (8..32) */
#define SAMPLE_RATE Regs[3] /* Sample rate (samples per second) */
/* Audio Control register definitions */
#define CONTROL_ENABLE_Pos 0U /* CONTROL: ENABLE Position */
#define CONTROL_ENABLE_Msk (1UL << CONTROL_ENABLE_Pos) /* CONTROL: ENABLE Mask */
/* Driver State */
static uint8_t Initialized = 0U;
/* Event Callback */
static AudioDrv_Event_t CB_Event = NULL;
/* Audio Input Interrupt Handler */
/* Audio Input Interrupt Handler: acknowledge the VSI IRQ, then notify the
 * registered callback (if any) that received data is available. */
void AudioIn_Handler (void) {

  AudioIn->IRQ = 0U;  /* Clear IRQ */
  /* Barriers ensure the IRQ clear completes before leaving the handler */
  __ISB();
  __DSB();
  if (CB_Event != NULL) {
    CB_Event(AUDIO_DRV_EVENT_RX_DATA);
  }
}
/* Initialize Audio Interface */
/* Initialize Audio Interface: register the event callback, reset the VSI
 * timer/DMA/IRQ/control registers, enable the input interrupt in the NVIC,
 * and mark the driver initialized.  Always returns AUDIO_DRV_OK. */
int32_t AudioDrv_Initialize (AudioDrv_Event_t cb_event) {

  CB_Event = cb_event;

  /* Quiesce the peripheral before enabling its interrupt */
  AudioIn->Timer.Control = 0U;
  AudioIn->DMA.Control   = 0U;
  AudioIn->IRQ           = 0U;
  AudioIn->CONTROL       = 0U;

  //NVIC_EnableIRQ(AudioIn_IRQn);
  /* Direct ISER write: equivalent to NVIC_EnableIRQ for this IRQ number */
  NVIC->ISER[(((uint32_t)AudioIn_IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)AudioIn_IRQn) & 0x1FUL));
  __ISB();
  __DSB();

  Initialized = 1U;

  return AUDIO_DRV_OK;
}
/* De-initialize Audio Interface */
/* De-initialize Audio Interface: disable the NVIC interrupt, reset the VSI
 * registers and clear the initialized flag.  Always returns AUDIO_DRV_OK. */
int32_t AudioDrv_Uninitialize (void) {

  //NVIC_DisableIRQ(AudioIn_IRQn);
  /* Direct ICER write: equivalent to NVIC_DisableIRQ for this IRQ number */
  NVIC->ICER[(((uint32_t)AudioIn_IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)AudioIn_IRQn) & 0x1FUL));
  __DSB();
  __ISB();

  AudioIn->Timer.Control = 0U;
  AudioIn->DMA.Control   = 0U;
  AudioIn->IRQ           = 0U;
  AudioIn->CONTROL       = 0U;

  Initialized = 0U;

  return AUDIO_DRV_OK;
}
/* Configure Audio Interface */
/* Configure Audio Interface.
 *   interface:   AUDIO_DRV_INTERFACE_RX (TX is unsupported by this driver)
 *   channels:    1..32
 *   sample_bits: 8..32
 *   sample_rate: > 0 (samples per second)
 * Returns AUDIO_DRV_OK, or AUDIO_DRV_ERROR / AUDIO_DRV_ERROR_PARAMETER /
 * AUDIO_DRV_ERROR_UNSUPPORTED.  (The unused local 'format' of the
 * original has been removed.)
 */
int32_t AudioDrv_Configure (uint32_t interface, uint32_t channels, uint32_t sample_bits, uint32_t sample_rate) {

  if (Initialized == 0U) {
    return AUDIO_DRV_ERROR;
  }

  /* Validate all parameter ranges up front */
  if ((channels <  1U) ||
      (channels > 32U) ||
      (sample_bits <  8U) ||
      (sample_bits > 32U) ||
      (sample_rate == 0U)) {
    return AUDIO_DRV_ERROR_PARAMETER;
  }

  switch (interface) {
    case AUDIO_DRV_INTERFACE_TX:
      return AUDIO_DRV_ERROR_UNSUPPORTED;
    case AUDIO_DRV_INTERFACE_RX:
      /* Reject reconfiguration while the receiver is enabled */
      if ((AudioIn->CONTROL & CONTROL_ENABLE_Msk) != 0U) {
        return AUDIO_DRV_ERROR;
      }
      AudioIn->CHANNELS    = channels;
      AudioIn->SAMPLE_BITS = sample_bits;
      AudioIn->SAMPLE_RATE = sample_rate;
      break;
    default:
      return AUDIO_DRV_ERROR_PARAMETER;
  }

  return AUDIO_DRV_OK;
}
/* Set Audio Interface buffer */
/* Set Audio Interface buffer: program the VSI DMA with the buffer base
 * address and its block layout.  Fails while DMA is enabled.  Only the RX
 * interface is supported. */
int32_t AudioDrv_SetBuf (uint32_t interface, void *buf, uint32_t block_num, uint32_t block_size) {

  if (Initialized == 0U) {
    return AUDIO_DRV_ERROR;
  }

  switch (interface) {
    case AUDIO_DRV_INTERFACE_TX:
      return AUDIO_DRV_ERROR_UNSUPPORTED;
      break;
    case AUDIO_DRV_INTERFACE_RX:
      /* Buffer layout must not change while the DMA is running */
      if ((AudioIn->DMA.Control & ARM_VSI_DMA_Enable_Msk) != 0U) {
        return AUDIO_DRV_ERROR;
      }
      AudioIn->DMA.Address   = (uint32_t)buf;
      AudioIn->DMA.BlockNum  = block_num;
      AudioIn->DMA.BlockSize = block_size;
      break;
    default:
      return AUDIO_DRV_ERROR_PARAMETER;
  }

  return AUDIO_DRV_OK;
}
/* Control Audio Interface */
/* Control Audio Interface: enable or disable the receive path.
 * On enable, the VSI timer interval is derived from the DMA block size,
 * the per-frame sample size and the sample rate so that one timer tick
 * corresponds to one filled block; DMA and periodic IRQ are then armed. */
int32_t AudioDrv_Control (uint32_t control) {
  uint32_t sample_size;
  uint32_t sample_rate;
  uint32_t block_size;

  if (Initialized == 0U) {
    return AUDIO_DRV_ERROR;
  }

  //if ((control & AUDIO_DRV_CONTROL_TX_DISABLE) != 0U) {
  //} else if ((control & AUDIO_DRV_CONTROL_TX_ENABLE) != 0U) {
  //}

  if ((control & AUDIO_DRV_CONTROL_RX_DISABLE) != 0U) {
    AudioIn->Timer.Control = 0U;
    AudioIn->DMA.Control   = 0U;
    AudioIn->CONTROL       = 0U;
  } else if ((control & AUDIO_DRV_CONTROL_RX_ENABLE) != 0U) {
    /* bytes per frame = channels * bytes per sample (rounded up) */
    sample_size = AudioIn->CHANNELS * ((AudioIn->SAMPLE_BITS + 7U) / 8U);
    sample_rate = AudioIn->SAMPLE_RATE;
    if ((sample_size == 0U) || (sample_rate == 0U)) {
      /* unconfigured: park the timer at the maximum interval */
      AudioIn->Timer.Interval = 0xFFFFFFFFU;
    } else {
      block_size = AudioIn->DMA.BlockSize;
      /* microseconds needed to fill one DMA block */
      AudioIn->Timer.Interval = (1000000U * (block_size / sample_size)) / sample_rate;
    }
    AudioIn->DMA.Control = ARM_VSI_DMA_Direction_P2M |
                           ARM_VSI_DMA_Enable_Msk;
    AudioIn->CONTROL = CONTROL_ENABLE_Msk;
    AudioIn->Timer.Control = ARM_VSI_Timer_Trig_IRQ_Msk |
                             ARM_VSI_Timer_Periodic_Msk |
                             ARM_VSI_Timer_Run_Msk;
  }

  return AUDIO_DRV_OK;
}
/* Get transmitted block count */
/* Get transmitted block count: TX is unsupported, so always 0. */
uint32_t AudioDrv_GetTxCount (void) {
  return (0UL);  // Unsupported
}
/* Get received block count */
/* Get received block count: one timer tick corresponds to one DMA block
 * (see AudioDrv_Control), so the timer count is the block count. */
uint32_t AudioDrv_GetRxCount (void) {
  return (AudioIn->Timer.Count);
}
/* Get Audio Interface status */
/* Get Audio Interface status: tx_active is always 0 (TX unsupported);
 * rx_active reflects the CONTROL enable bit.  (The unused local 'sr' of
 * the original has been removed.) */
AudioDrv_Status_t AudioDrv_GetStatus (void) {
  AudioDrv_Status_t status;

  status.tx_active = 0U;  // Unsupported
  status.rx_active = ((AudioIn->CONTROL & CONTROL_ENABLE_Msk) != 0U) ? 1U : 0U;

  return (status);
}
|
471890.c | // Uppercases string using ctype library (and an unnecessary condition)
#include <cs50.h>
#include <ctype.h>
#include <stdio.h>
#include <string.h>
// Uppercase a string read from the user.
// Improvements over the original (which the file's own header comment
// already flagged): toupper() leaves non-lowercase characters unchanged,
// so the islower() branch was redundant; and the argument is cast to
// unsigned char because passing a possibly-negative plain char to a
// <ctype.h> function is undefined behavior.
int main(void)
{
    string s = get_string("Before: ");
    printf("After: ");
    for (int i = 0, n = strlen(s); i < n; i++)
    {
        printf("%c", toupper((unsigned char) s[i]));
    }
    printf("\n");
}
|
157403.c | /*
* Copyright 2011-2020 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
#include <string.h>
#include <openssl/crypto.h>
#include "internal/endian.h"
#include "crypto/modes.h"
#ifndef STRICT_ALIGNMENT
# ifdef __GNUC__
typedef u64 u64_a1 __attribute((__aligned__(1)));
# else
typedef u64 u64_a1;
# endif
#endif
/* (duplicate STRICT_ALIGNMENT typedef block removed — identical copy appears immediately above) */
/*
 * XTS-AES mode over a generic 16-byte block cipher.
 * ctx->block2/key2 encrypt the sector IV into the initial tweak;
 * ctx->block1/key1 process the data blocks.  Whole blocks are handled in
 * the main loop; a trailing partial block (len % 16 != 0) is handled with
 * ciphertext stealing.  Returns 0 on success, -1 when len < 16.
 */
int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx,
                          const unsigned char iv[16],
                          const unsigned char *inp, unsigned char *out,
                          size_t len, int enc)
{
    DECLARE_IS_ENDIAN;
    union {
        u64 u[2];
        u32 d[4];
        u8 c[16];
    } tweak, scratch;
    unsigned int i;

    if (len < 16)
        return -1;

    /* tweak = E_{key2}(iv) */
    memcpy(tweak.c, iv, 16);
    (*ctx->block2) (tweak.c, tweak.c, ctx->key2);

    /* decrypt with a partial tail: hold back the LAST FULL block so the
     * stealing step below can process it with the correct tweak order */
    if (!enc && (len % 16))
        len -= 16;

    while (len >= 16) {
#if defined(STRICT_ALIGNMENT)
        memcpy(scratch.c, inp, 16);
        scratch.u[0] ^= tweak.u[0];
        scratch.u[1] ^= tweak.u[1];
#else
        scratch.u[0] = ((u64_a1 *)inp)[0] ^ tweak.u[0];
        scratch.u[1] = ((u64_a1 *)inp)[1] ^ tweak.u[1];
#endif
        (*ctx->block1) (scratch.c, scratch.c, ctx->key1);
#if defined(STRICT_ALIGNMENT)
        scratch.u[0] ^= tweak.u[0];
        scratch.u[1] ^= tweak.u[1];
        memcpy(out, scratch.c, 16);
#else
        ((u64_a1 *)out)[0] = scratch.u[0] ^= tweak.u[0];
        ((u64_a1 *)out)[1] = scratch.u[1] ^= tweak.u[1];
#endif
        inp += 16;
        out += 16;
        len -= 16;

        if (len == 0)
            return 0;

        /* advance the tweak: multiply by x in GF(2^128) mod x^128+x^7+x^2+x+1 */
        if (IS_LITTLE_ENDIAN) {
            unsigned int carry, res;

            res = 0x87 & (((int)tweak.d[3]) >> 31);
            carry = (unsigned int)(tweak.u[0] >> 63);
            tweak.u[0] = (tweak.u[0] << 1) ^ res;
            tweak.u[1] = (tweak.u[1] << 1) | carry;
        } else {
            size_t c;

            for (c = 0, i = 0; i < 16; ++i) {
                /*
                 * + substitutes for |, because c is 1 bit
                 */
                c += ((size_t)tweak.c[i]) << 1;
                tweak.c[i] = (u8)c;
                c = c >> 8;
            }
            tweak.c[0] ^= (u8)(0x87 & (0 - c));
        }
    }
    /* only reached with a partial final block: ciphertext stealing */
    if (enc) {
        for (i = 0; i < len; ++i) {
            u8 c = inp[i];
            out[i] = scratch.c[i];
            scratch.c[i] = c;
        }
        scratch.u[0] ^= tweak.u[0];
        scratch.u[1] ^= tweak.u[1];
        (*ctx->block1) (scratch.c, scratch.c, ctx->key1);
        scratch.u[0] ^= tweak.u[0];
        scratch.u[1] ^= tweak.u[1];
        memcpy(out - 16, scratch.c, 16);
    } else {
        union {
            u64 u[2];
            u8 c[16];
        } tweak1;

        /* tweak1 is the tweak for the held-back full block (one step ahead) */
        if (IS_LITTLE_ENDIAN) {
            unsigned int carry, res;

            res = 0x87 & (((int)tweak.d[3]) >> 31);
            carry = (unsigned int)(tweak.u[0] >> 63);
            tweak1.u[0] = (tweak.u[0] << 1) ^ res;
            tweak1.u[1] = (tweak.u[1] << 1) | carry;
        } else {
            size_t c;

            for (c = 0, i = 0; i < 16; ++i) {
                /*
                 * + substitutes for |, because c is 1 bit
                 */
                c += ((size_t)tweak.c[i]) << 1;
                tweak1.c[i] = (u8)c;
                c = c >> 8;
            }
            tweak1.c[0] ^= (u8)(0x87 & (0 - c));
        }
#if defined(STRICT_ALIGNMENT)
        memcpy(scratch.c, inp, 16);
        scratch.u[0] ^= tweak1.u[0];
        scratch.u[1] ^= tweak1.u[1];
#else
        scratch.u[0] = ((u64_a1 *)inp)[0] ^ tweak1.u[0];
        scratch.u[1] = ((u64_a1 *)inp)[1] ^ tweak1.u[1];
#endif
        (*ctx->block1) (scratch.c, scratch.c, ctx->key1);
        scratch.u[0] ^= tweak1.u[0];
        scratch.u[1] ^= tweak1.u[1];

        for (i = 0; i < len; ++i) {
            u8 c = inp[16 + i];
            out[16 + i] = scratch.c[i];
            scratch.c[i] = c;
        }
        scratch.u[0] ^= tweak.u[0];
        scratch.u[1] ^= tweak.u[1];
        (*ctx->block1) (scratch.c, scratch.c, ctx->key1);
#if defined(STRICT_ALIGNMENT)
        scratch.u[0] ^= tweak.u[0];
        scratch.u[1] ^= tweak.u[1];
        memcpy(out, scratch.c, 16);
#else
        ((u64_a1 *)out)[0] = scratch.u[0] ^ tweak.u[0];
        ((u64_a1 *)out)[1] = scratch.u[1] ^ tweak.u[1];
#endif
    }

    return 0;
}
|
658308.c | #include <stdio.h>
/* copy its input and return this as output. replace tabs,
backspaces and backslashes with their visual representation */
/* Copy stdin to stdout, replacing tab, backspace and backslash with their
 * visible escape sequences (\t, \b, \\). */
int main(){
    int ch;

    while ((ch = getchar()) != EOF){
        switch (ch) {
        case '\t':
            putchar('\\');
            putchar('t');
            break;
        case '\b':
            putchar('\\');
            putchar('b');
            break;
        case '\\':
            putchar('\\');
            putchar('\\');
            break;
        default:
            putchar(ch);
            break;
        }
    }
}
|
493085.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "../../include/mt19937.h"
uint32_t generate_recent_mt();
/*
 * Demonstrate recovering a time-based MT19937 seed: generate one MT output
 * from a seed 40..1000 seconds in the past, then brute-force that same
 * window of candidate timestamps until the first output matches.
 */
int main(){
    uint32_t i;
    uint32_t current_time_seed = (uint32_t) time(NULL);
    uint32_t found_past_seed;
    uint32_t recent_rng_result = generate_recent_mt();
    uint32_t current_rng_result;
    /* the 40..1000 search window mirrors generate_recent_mt()'s range */
    for(i = 40; i <= 1000; i++){
        mt_seed(current_time_seed-i);
        current_rng_result = mt_rand();
        if(current_rng_result == recent_rng_result){
            found_past_seed = current_time_seed-i;
            printf("Found seed %u seconds in the past: %u (0x%.8X)\n", i, found_past_seed, found_past_seed);
            break;
        }
    }
    /* NOTE(review): if no candidate matches, nothing is printed and
     * found_past_seed stays uninitialized (it is only read inside the
     * match branch, so this is currently harmless). */
    return 0;
}
/* Generate a MT seed using a time between 40 and 1000 seconds, inclusive, in the past,
* then return the first RNG value.
*/
/* Generate a MT seed using a time between 40 and 1000 seconds, inclusive, in the past,
 * then return the first RNG value.
 * (rand() % 961 yields 0..960; adding 40 gives the 40..1000 offset.)
 */
uint32_t generate_recent_mt(){
    uint32_t recent_time_seed;
    srand(time(NULL));
    recent_time_seed = (uint32_t) time(NULL) - ((rand() % 961) + 40);
    mt_seed(recent_time_seed);
    return mt_rand();
}
|
632419.c | /* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
*
* Copyright (C) 1997 University of Chicago.
* See COPYRIGHT notice in top-level directory.
*/
#include "mpioimpl.h"
#include "adio_extern.h"
#ifdef HAVE_WEAK_SYMBOLS
#if defined(HAVE_PRAGMA_WEAK)
#pragma weak MPI_File_get_errhandler = PMPI_File_get_errhandler
#elif defined(HAVE_PRAGMA_HP_SEC_DEF)
#pragma _HP_SECONDARY_DEF PMPI_File_get_errhandler MPI_File_get_errhandler
#elif defined(HAVE_PRAGMA_CRI_DUP)
#pragma _CRI duplicate MPI_File_get_errhandler as PMPI_File_get_errhandler
/* end of weak pragmas */
#endif
/* Include mapping from MPI->PMPI */
#define MPIO_BUILD_PROFILING
#include "mpioprof.h"
#endif
/*@
MPI_File_get_errhandler - Returns the error handler for a file
Input Parameters:
. fh - file handle (handle)
Output Parameters:
. errhandler - error handler (handle)
.N fortran
@*/
/*
 * Return the error handler attached to a file handle.  MPI_FILE_NULL maps
 * to the default handler; otherwise the handle is resolved and validated
 * before its err_handler field is read.
 */
int MPI_File_get_errhandler(MPI_File mpi_fh, MPI_Errhandler *errhandler)
{
    int error_code = MPI_SUCCESS;
    ADIO_File fh;
    static char myname[] = "MPI_FILE_GET_ERRHANDLER";
    MPIU_THREAD_SINGLE_CS_ENTER("io");
    if (mpi_fh == MPI_FILE_NULL) {
        *errhandler = ADIOI_DFLT_ERR_HANDLER;
    }
    else {
        fh = MPIO_File_resolve(mpi_fh);
        /* --BEGIN ERROR HANDLING-- */
        if ((fh <= (MPI_File) 0) || ((fh)->cookie != ADIOI_FILE_COOKIE))
        {
            error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                              myname, __LINE__, MPI_ERR_ARG,
                                              "**iobadfh", 0);
            error_code = MPIO_Err_return_file(MPI_FILE_NULL, error_code);
            goto fn_exit;
        }
        /* --END ERROR HANDLING-- */
        *errhandler = fh->err_handler;
    }
fn_exit:
    MPIU_THREAD_SINGLE_CS_EXIT("io");
    /* NOTE(review): MPI_SUCCESS is returned even on the bad-handle path;
     * error_code is reported through MPIO_Err_return_file and then
     * discarded — confirm this matches the intended error semantics. */
    return MPI_SUCCESS;
}
|
253673.c | /******************************************************************************
*
* Copyright (C) 2000 Pierangelo Masarati, <[email protected]>
* All rights reserved.
*
* Permission is granted to anyone to use this software for any purpose
* on any computer system, and to alter it and redistribute it, subject
* to the following restrictions:
*
* 1. The author is not responsible for the consequences of use of this
* software, no matter how awful, even if they arise from flaws in it.
*
* 2. The origin of this software must not be misrepresented, either by
* explicit claim or by omission. Since few users ever read sources,
* credits should appear in the documentation.
*
* 3. Altered versions must be plainly marked as such, and must not be
* misrepresented as being the original software. Since few users
* ever read sources, credits should appear in the documentation.
*
* 4. This notice may not be removed or altered.
*
******************************************************************************/
#include <portable.h>
#include <stdio.h>
#include "rewrite-int.h"
/*
 * Tokenize one config line in place: NUL-terminate each whitespace- or
 * quote-delimited token inside buf and store pointers to them in argv
 * (at most maxargs entries).  Both ' and " quote; a backslash escapes the
 * next character.  Returns 0 for comment lines (leading '#'), -1 when a
 * quote opens mid-token, and 1 otherwise with *argc set to the count.
 * Note: a token is only emitted at a closing quote or at whitespace, so
 * the trailing '\n' from fgets() is what flushes the final token.
 */
static int
parse_line(
        char **argv,
        int *argc,
        int maxargs,
        char *buf
)
{
    char *p, *begin;
    int in_quoted_field = 0, cnt = 0;
    char quote = '\0';

    /* skip leading whitespace; '#' means the whole line is a comment */
    for ( p = buf; isspace( (unsigned char) p[ 0 ] ); p++ );

    if ( p[ 0 ] == '#' ) {
        return 0;
    }

    for ( begin = p;  p[ 0 ] != '\0'; p++ ) {
        if ( p[ 0 ] == '\\' && p[ 1 ] != '\0' ) {
            /* escaped character: never treated as quote or delimiter */
            p++;
        } else if ( p[ 0 ] == '\'' || p[ 0 ] == '\"') {
            if ( in_quoted_field && p[ 0 ] == quote ) {
                /* closing quote: terminate and emit the token */
                in_quoted_field = 1 - in_quoted_field;
                quote = '\0';
                p[ 0 ] = '\0';
                argv[ cnt ] = begin;
                if ( ++cnt == maxargs ) {
                    *argc = cnt;
                    return 1;
                }
                for ( p++; isspace( (unsigned char) p[ 0 ] ); p++ );
                begin = p;
                p--;  /* compensate for the loop's p++ */

            } else if ( !in_quoted_field ) {
                if ( p != begin ) {
                    /* quote in the middle of a bare token: syntax error */
                    return -1;
                }
                begin++;  /* token starts after the opening quote */
                in_quoted_field = 1 - in_quoted_field;
                quote = p[ 0 ];
            }
        } else if ( isspace( (unsigned char) p[ 0 ] ) && !in_quoted_field ) {
            /* unquoted whitespace: terminate and emit the token */
            p[ 0 ] = '\0';
            argv[ cnt ] = begin;
            if ( ++cnt == maxargs ) {
                *argc = cnt;
                return 1;
            }
            for ( p++; isspace( (unsigned char) p[ 0 ] ); p++ );
            begin = p;
            p--;  /* compensate for the loop's p++ */
        }
    }

    *argc = cnt;
    return 1;
}
/*
 * Read a rewrite configuration stream line by line, feeding every line
 * whose first token is "rewrite..." to rewrite_parse().
 *
 * BUGFIX: the original passed ( sizeof( argv ) - 1 ) as the maximum token
 * count, i.e. the array's BYTE size minus one (87 on LP64), not its
 * element count, letting parse_line() write far past the end of argv[].
 * It also dereferenced argv[ 0 ] for blank lines (argc == 0), reading an
 * uninitialized pointer.
 */
int
rewrite_read(
        FILE *fin,
        struct rewrite_info *info
)
{
    char buf[ 1024 ];
    char *argv[ 11 ];
    int argc, lineno;

    /*
     * Empty rule at the beginning of the context
     */
    for ( lineno = 0; fgets( buf, sizeof( buf ), fin ); lineno++ ) {
        switch ( parse_line( argv, &argc,
                    sizeof( argv ) / sizeof( argv[ 0 ] ) - 1, buf ) ) {
        case -1:
            return REWRITE_ERR;
        case 0:
            break;
        case 1:
            if ( argc > 0
                    && strncasecmp( argv[ 0 ], "rewrite", 7 ) == 0 ) {
                int rc;
                rc = rewrite_parse( info, "file", lineno,
                        argc, argv );
                if ( rc != REWRITE_SUCCESS ) {
                    return rc;
                }
            }
            break;
        }
    }

    return REWRITE_SUCCESS;
}
|
274135.c | #include "parsec.h"
#include "parsec/arena.h"
#include "parsec/data_dist/matrix/matrix.h"
#include "parsec/data_dist/matrix/two_dim_rectangle_cyclic.h"
#include "parsec/interfaces/dtd/insert_function_internal.h"
// The file is not compiled if CUDA is not present or CUBLAS is not found
#include "parsec/mca/device/cuda/device_cuda.h"
#include "cublas_v2.h"
#if defined(HAVE_BLAS)
// If our CMake finds a BLAS library, it defines HAVE_BLAS
// BLAS does not guarantee there is a cblas.h, we define our own prototype
typedef enum CBLAS_LAYOUT {CblasRowMajor=101, CblasColMajor=102} CBLAS_LAYOUT;
typedef enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113} CBLAS_TRANSPOSE;
typedef enum CBLAS_UPLO {CblasUpper=121, CblasLower=122} CBLAS_UPLO;
typedef enum CBLAS_DIAG {CblasNonUnit=131, CblasUnit=132} CBLAS_DIAG;
typedef enum CBLAS_SIDE {CblasLeft=141, CblasRight=142} CBLAS_SIDE;
#define CBLAS_INDEX int
extern void cblas_dgemm(const CBLAS_LAYOUT layout, const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const CBLAS_INDEX M, const CBLAS_INDEX N,
const CBLAS_INDEX K, const double alpha, const double *A,
const CBLAS_INDEX lda, const double *B, const CBLAS_INDEX ldb,
const double beta, double *C, const CBLAS_INDEX ldc);
#endif
#if defined(PARSEC_HAVE_MPI)
#include <mpi.h>
#endif /* defined(PARSEC_HAVE_MPI) */
#include <unistd.h>
#include <getopt.h>
static int TILE_FULL = -1;
static parsec_info_id_t CuHI = -1;
static parsec_info_id_t Cu1 = -1;
static int verbose = 0;
static int device = PARSEC_DEV_CUDA;
static int P = -1;
static int Q = -1;
#define Rnd64_A 6364136223846793005ULL
#define Rnd64_C 1ULL
#define RndF_Mul 5.4210108624275222e-20f
#define RndD_Mul 5.4210108624275222e-20
#define NBELEM 1
/*
 * Jump the 64-bit LCG (x -> Rnd64_A * x + Rnd64_C) ahead by n steps from
 * seed in O(log n): square the affine map bit by bit, applying it to the
 * state whenever the corresponding bit of n is set.
 */
static unsigned long long int Rnd64_jump(unsigned long long int n, unsigned long long int seed)
{
    unsigned long long int mult, add, state;

    mult  = Rnd64_A;
    add   = Rnd64_C;
    state = seed;
    while (n) {
        if (n & 1)
            state = mult * state + add;
        /* compose the affine map with itself: (a,c) -> (a^2, c*(a+1)) */
        add  *= (mult + 1);
        mult *= mult;
        n >>= 1;
    }
    return state;
}
/*
 * CPU task body: fill one (mb x nb) tile of a larger (M-row) matrix with
 * reproducible pseudo-random doubles.  The RNG stream position for each
 * column is derived from the tile's global (m, n) position and the seed,
 * so the same seed always produces the same global matrix regardless of
 * tiling or distribution.
 */
int initialize_tile(parsec_execution_stream_t *es, parsec_task_t *this_task)
{
    (void)es;
    double *data;
    int i, j, mb, nb, m, n, M, ld;
    unsigned int seed;
    unsigned long long jump, ran;

    parsec_dtd_unpack_args(this_task, &data, &m, &n, &mb, &nb, &M, &ld, &seed);

    /* global linear index of the tile's first element */
    jump = (unsigned long long int)m + (unsigned long long int)n * (unsigned long long int)M;

    for( j = 0; j < nb; j++ ) {
        ran = Rnd64_jump(NBELEM * jump, seed);
        for( i = 0; i < mb; i++ ) {
            *data = 0.5f - ran * RndF_Mul;
            ran = Rnd64_A * ran + Rnd64_C;
            data++;
        }
        data += ld - i;  /* skip the leading-dimension padding */
        jump += M;       /* next column starts M elements later */
    }
    return PARSEC_HOOK_RETURN_DONE;
}
/*
 * Build and run a DTD taskpool that initializes every tile of `mat` with
 * initialize_tile().  When running on CUDA devices, each local tile is
 * also advised to prefer one of the GPUs (round-robin over nb_gpus) so
 * later GEMM tasks find their data resident.  Blocks until all
 * initialization tasks have completed.  Returns 0.
 */
int initialize_matrix(parsec_context_t *parsec_context, int rank, parsec_matrix_block_cyclic_t *mat, unsigned int seed,
                      const char *name, int *gpu_device_index, int nb_gpus)
{
    parsec_taskpool_t *tp = parsec_dtd_taskpool_new();
    parsec_data_key_t key;
    int perr;
    parsec_task_class_t *init_tc;

    perr = parsec_context_start(parsec_context);
    PARSEC_CHECK_ERROR(perr, "parsec_context_start");

    // Registering the dtd_handle with PARSEC context
    perr = parsec_context_add_taskpool(parsec_context, tp);
    PARSEC_CHECK_ERROR(perr, "parsec_context_add_taskpool");

    /* task class signature mirrors initialize_tile's unpack order */
    init_tc = parsec_dtd_create_task_class(tp, "init",
                                           PASSED_BY_REF, PARSEC_INOUT | TILE_FULL | PARSEC_AFFINITY,
                                           sizeof(int), PARSEC_VALUE, /* m */
                                           sizeof(int), PARSEC_VALUE, /* n */
                                           sizeof(int), PARSEC_VALUE, /* mb */
                                           sizeof(int), PARSEC_VALUE, /* nb */
                                           sizeof(int), PARSEC_VALUE, /* M */
                                           sizeof(int), PARSEC_VALUE, /* ld */
                                           sizeof(unsigned int), PARSEC_VALUE, /* seed */
                                           PARSEC_DTD_ARG_END);
    parsec_dtd_task_class_add_chore(tp, init_tc, PARSEC_DEV_CPU, initialize_tile);

    int g = 0;
    for( int i = 0; i < mat->super.mt; i++ ) {
        for( int j = 0; j < mat->super.nt; j++ ) {
            key = mat->super.super.data_key(&mat->super.super, i, j);
            parsec_dtd_insert_task_with_task_class(tp, init_tc, 1, PARSEC_DEV_CPU,
                                                   PARSEC_PUSHOUT, PARSEC_DTD_TILE_OF_KEY(&mat->super.super, key),
                                                   PARSEC_DTD_EMPTY_FLAG, &i,
                                                   PARSEC_DTD_EMPTY_FLAG, &j,
                                                   PARSEC_DTD_EMPTY_FLAG, &mat->super.mb,
                                                   PARSEC_DTD_EMPTY_FLAG, &mat->super.nb,
                                                   PARSEC_DTD_EMPTY_FLAG, &mat->super.m,
                                                   PARSEC_DTD_EMPTY_FLAG, &mat->super.mb,
                                                   PARSEC_DTD_EMPTY_FLAG, &seed,
                                                   PARSEC_DTD_ARG_END);
            /* only advise placement for tiles this rank actually owns */
            if(PARSEC_DEV_CUDA == device &&
               (int)mat->super.super.rank_of_key(&mat->super.super, key) == rank ) {
                if( verbose ) {
                    fprintf(stderr, "Advice %s(%d, %d) to prefer GPU device %d (parsec device %d) of rank %d\n",
                            name, i, j, g, gpu_device_index[g], (int)mat->super.super.rank_of_key(&mat->super.super, key));
                }
                parsec_advise_data_on_device(mat->super.super.data_of_key(&mat->super.super, key),
                                             gpu_device_index[g],
                                             PARSEC_DEV_DATA_ADVICE_PREFERRED_DEVICE);
            }
            g = (g + 1) % nb_gpus;  /* round-robin tiles over the GPUs */
        }
    }

    parsec_dtd_data_flush_all(tp, &mat->super.super);

    // Wait for task completion
    perr = parsec_dtd_taskpool_wait(tp);
    PARSEC_CHECK_ERROR(perr, "parsec_dtd_taskpool_wait");

    perr = parsec_context_wait(parsec_context);
    PARSEC_CHECK_ERROR(perr, "parsec_context_wait");

    parsec_dtd_task_class_release(tp, init_tc);
    parsec_taskpool_free(tp);

    return 0;
}
/* GPU chore for the GEMM task class: C += A * B on one tile triple using
 * cuBLAS (column-major, no transpose). Unpacks the same positional argument
 * list declared in simple_gemm(). Returns PARSEC_HOOK_RETURN_DONE on
 * success, PARSEC_HOOK_RETURN_ERROR if cublasDgemm_v2 failed. */
int gemm_kernel_cuda(parsec_device_gpu_module_t *gpu_device,
parsec_gpu_task_t *gpu_task,
parsec_gpu_exec_stream_t *gpu_stream)
{
double *A, *B, *C;
int m, n, k, mb, nb, kb;
parsec_task_t *this_task = gpu_task->ec;
cublasStatus_t status;
cublasHandle_t handle;
double *one_device = NULL;
struct timeval start, end, diff;
double delta;
double *a_gpu, *b_gpu, *c_gpu;
/* Leftover unused-parameter silencers; both parameters ARE used below —
 * harmless, kept for byte-compatibility with compilers that warn. */
(void)gpu_stream;
(void)gpu_device;
parsec_dtd_unpack_args(this_task,
&A, &B, &C,
&m, &n, &k,
&mb, &nb, &kb);
/* Host pointers A/B/C are unused on this path; fetch the device copies. */
a_gpu = parsec_dtd_get_dev_ptr(this_task, 0);
b_gpu = parsec_dtd_get_dev_ptr(this_task, 1);
c_gpu = parsec_dtd_get_dev_ptr(this_task, 2);
/* Per-stream cuBLAS handle and per-device constant 1.0, registered in
 * main() under the CuHI / Cu1 info keys. */
handle = parsec_info_get(&gpu_stream->infos, CuHI);
assert(NULL != handle);
one_device = parsec_info_get(&gpu_device->super.infos, Cu1);
assert(NULL != one_device);
gettimeofday(&start, NULL);
/* alpha and beta are the device-resident 1.0 — assumes the handle is in
 * CUBLAS_POINTER_MODE_DEVICE; NOTE(review): confirm where the handles
 * created by create_cublas_handle() get that pointer mode set. */
status = cublasDgemm_v2(handle,
CUBLAS_OP_N, CUBLAS_OP_N,
mb, nb, kb,
one_device, a_gpu, mb,
b_gpu, kb,
one_device, c_gpu, mb);
gettimeofday(&end, NULL);
timersub(&end, &start, &diff);
delta = (double)diff.tv_sec + (double)diff.tv_usec/1e6;
/* The call is asynchronous: this measures submission time, not GEMM time
 * (the log message says "submitted" accordingly). */
if(verbose)
fprintf(stderr, "GEMM(%d, %d, %d) with tiles of %dx%d, %dx%d, %dx%d on node %d, GPU %s submitted in %g s\n",
m, n, k, mb, kb, kb, nb, mb, kb,
this_task->taskpool->context->my_rank,
gpu_stream->name, delta);
PARSEC_CUDA_CHECK_ERROR("cublasDgemm_v2 ", status,
{ return PARSEC_HOOK_RETURN_ERROR; });
return PARSEC_HOOK_RETURN_DONE;
}
#if defined(HAVE_BLAS)
/* CPU chore for the GEMM task class: C += A * B on one tile triple via
 * CBLAS. A is mb x kb, B is kb x nb, C is mb x nb; all tiles are stored
 * column-major (PaRSEC tile storage), hence lda = mb, ldb = kb, ldc = mb.
 * Always returns PARSEC_HOOK_RETURN_DONE. */
int gemm_kernel_cpu(parsec_execution_stream_t *es,
parsec_task_t *this_task)
{
    double *A, *B, *C;
    int m, n, k, mb, nb, kb;
    double alpha = 1.0;
    double beta = 1.0;
    double delta;
    struct timeval start, end, diff;
    (void)es;
    parsec_dtd_unpack_args(this_task,
                           &A, &B, &C,
                           &m, &n, &k,
                           &mb, &nb, &kb);
    gettimeofday(&start, NULL);
    /* BUG FIX: the order must be CblasColMajor. The leading dimensions
     * passed (mb, kb, mb) are the column-major ones; with CblasRowMajor
     * they are invalid whenever mb != kb (lda must be >= the row length),
     * and even for square tiles the row-major call computes the transposed
     * product, disagreeing with the column-major cublasDgemm_v2 GPU path. */
    cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, mb, nb, kb, alpha, A, mb, B, kb, beta, C, mb);
    gettimeofday(&end, NULL);
    timersub(&end, &start, &diff);
    delta = (double)diff.tv_sec + (double)diff.tv_usec/1e6;
    if( verbose )
        fprintf(stderr, "GEMM(%d, %d, %d) with tiles of %dx%d, %dx%d, %dx%d on node %d, on core %d: %g s\n",
                m, n, k, mb, kb, kb, nb, mb, kb,
                this_task->taskpool->context->my_rank,
                es->core_id,
                delta);
    return PARSEC_HOOK_RETURN_DONE;
}
#endif
/* Submit the full tiled product C += A * B as one DTD taskpool and wait for
 * it. For each C(i,j) the k-loop inserts a chain of GEMM tasks serialized by
 * the INOUT dependence on the C tile; only the last task of the chain pushes
 * the result back out (PARSEC_PUSHOUT). Returns 0. */
int simple_gemm(parsec_context_t *parsec_context, parsec_matrix_block_cyclic_t *A, parsec_matrix_block_cyclic_t *B, parsec_matrix_block_cyclic_t *C)
{
parsec_taskpool_t *tp = parsec_dtd_taskpool_new();
parsec_data_key_t keyA, keyB, keyC;
int perr;
parsec_task_class_t *gemm_tc;
perr = parsec_context_start(parsec_context);
PARSEC_CHECK_ERROR(perr, "parsec_context_start");
// Registering the dtd_handle with PARSEC context
perr = parsec_context_add_taskpool(parsec_context, tp);
PARSEC_CHECK_ERROR(perr, "parsec_context_add_taskpool");
/* Positional argument list; must match both kernel bodies' unpack order:
 * (A, B, C, m, n, k, mb, nb, kb). */
gemm_tc = parsec_dtd_create_task_class(tp, "GEMM",
PASSED_BY_REF, PARSEC_INPUT | TILE_FULL, /* A */
PASSED_BY_REF, PARSEC_INPUT | TILE_FULL, /* B */
PASSED_BY_REF, PARSEC_INOUT | TILE_FULL | PARSEC_AFFINITY, /* C */
sizeof(int), PARSEC_VALUE, /* m */
sizeof(int), PARSEC_VALUE, /* n */
sizeof(int), PARSEC_VALUE, /* k */
sizeof(int), PARSEC_VALUE, /* mb */
sizeof(int), PARSEC_VALUE, /* nb */
sizeof(int), PARSEC_VALUE, /* kb */
PARSEC_DTD_ARG_END);
/* GPU chore is always registered; the CPU chore only exists when a BLAS
 * was found at configure time. Which one runs is steered by 'device'. */
parsec_dtd_task_class_add_chore(tp, gemm_tc, PARSEC_DEV_CUDA, gemm_kernel_cuda);
#if defined(HAVE_BLAS)
parsec_dtd_task_class_add_chore(tp, gemm_tc, PARSEC_DEV_CPU, gemm_kernel_cpu);
#endif
for( int i = 0; i < C->super.mt; i++ ) {
for( int j = 0; j < C->super.nt; j++ ) {
keyC = C->super.super.data_key(&C->super.super, i, j);
for( int k = 0; k < A->super.nt; k++ ) {
keyA = A->super.super.data_key(&A->super.super, i, k);
keyB = B->super.super.data_key(&B->super.super, k, j);
/* Third argument is the task priority: earlier (i,j) chains get
 * higher priority so C tiles complete roughly in order. */
parsec_dtd_insert_task_with_task_class(tp, gemm_tc, C->super.mt*C->super.nt*A->super.nt - i*C->super.nt + j, device,
PARSEC_INPUT, PARSEC_DTD_TILE_OF_KEY(&A->super.super, keyA),
PARSEC_INPUT, PARSEC_DTD_TILE_OF_KEY(&B->super.super, keyB),
k == A->super.nt - 1 ? (PARSEC_INOUT | PARSEC_PUSHOUT) : PARSEC_INOUT,
PARSEC_DTD_TILE_OF_KEY(&C->super.super, keyC),
PARSEC_DTD_EMPTY_FLAG, &i,
PARSEC_DTD_EMPTY_FLAG, &j,
PARSEC_DTD_EMPTY_FLAG, &k,
PARSEC_DTD_EMPTY_FLAG, &C->super.mb,
PARSEC_DTD_EMPTY_FLAG, &C->super.nb,
PARSEC_DTD_EMPTY_FLAG, &B->super.mb,
PARSEC_DTD_ARG_END);
}
}
}
parsec_dtd_data_flush_all(tp, &A->super.super);
parsec_dtd_data_flush_all(tp, &B->super.super);
parsec_dtd_data_flush_all(tp, &C->super.super);
// Wait for task completion
perr = parsec_dtd_taskpool_wait(tp);
PARSEC_CHECK_ERROR(perr, "parsec_dtd_taskpool_wait");
perr = parsec_context_wait(parsec_context);
PARSEC_CHECK_ERROR(perr, "parsec_context_wait");
parsec_dtd_task_class_release(tp, gemm_tc);
parsec_taskpool_free(tp);
return 0;
}
int get_nb_gpu_devices()
{
int nb = 0;
for( int dev = 0; dev < (int)parsec_nb_devices; dev++ ) {
parsec_device_module_t *device = parsec_mca_device_get(dev);
if( PARSEC_DEV_CUDA == device->type ) {
nb++;
}
}
return nb;
}
/* Return a freshly malloc'ed array of the PaRSEC device indices of all CUDA
 * devices, in discovery order. The array is sized for parsec_nb_devices
 * entries but only the first get_nb_gpu_devices() are filled. Ownership
 * transfers to the caller (free()). Returns NULL on allocation failure. */
int *get_gpu_device_index()
{
    /* sizeof *dev_index instead of sizeof(int), and no cast on malloc. */
    int *dev_index = malloc(parsec_nb_devices * sizeof *dev_index);
    if (NULL == dev_index)   /* previously unchecked: would crash on OOM */
        return NULL;
    int i = 0;
    for( int dev = 0; dev < (int)parsec_nb_devices; dev++ ) {
        parsec_device_module_t *device = parsec_mca_device_get(dev);
        if( PARSEC_DEV_CUDA == device->type ) {
            dev_index[i++] = device->device_index;
        }
    }
    return dev_index;
}
/* parsec_info destructor for the per-stream cuBLAS handle; a no-op when
 * built without CUDA support. */
static void destroy_cublas_handle(void *handle, void *userdata)
{
#if defined(PARSEC_HAVE_CUDA)
    cublasDestroy_v2((cublasHandle_t)handle);
#endif
    (void)handle;
    (void)userdata;
}
/* parsec_info constructor: create a cuBLAS handle bound to the CUDA stream
 * of the given parsec execution stream. Returns NULL when built without
 * CUDA support. */
static void *create_cublas_handle(void *obj, void *p)
{
#if defined(PARSEC_HAVE_CUDA)
    parsec_cuda_exec_stream_t *stream = (parsec_cuda_exec_stream_t *)obj;
    cublasHandle_t handle;
    cublasStatus_t status;

    (void)p;
    /* No need to call cudaSetDevice, as this has been done by PaRSEC before calling the task body */
    status = cublasCreate(&handle);
    assert(CUBLAS_STATUS_SUCCESS == status);
    status = cublasSetStream(handle, stream->cuda_stream);
    assert(CUBLAS_STATUS_SUCCESS == status);
    (void)status;
    return (void *)handle;
#else
    (void)obj;
    (void)p;
    return NULL;
#endif
}
/* parsec_info destructor for the device-resident constant allocated by
 * allocate_one_on_device(); a no-op without CUDA support. */
static void destroy_one_on_device(void *ptr, void *userdata)
{
#if defined(PARSEC_HAVE_CUDA)
    cudaFree(ptr);
#endif
    (void)ptr;
    (void)userdata;
}
/* parsec_info constructor: place the constant 1.0 in CUDA managed memory so
 * cuBLAS can read alpha/beta from the device. Returns NULL on any CUDA
 * error, or always when built without CUDA support. */
static void *allocate_one_on_device(void *obj, void *p)
{
    (void)obj;
    (void)p;
#if defined(PARSEC_HAVE_CUDA)
    const double one_host = 1.0;
    void *one_device;
    cudaError_t cr;

    cr = cudaMallocManaged(&one_device, sizeof(double), cudaMemAttachGlobal);
    PARSEC_CUDA_CHECK_ERROR("cudaMalloc ", cr,
                            { return NULL; });
    cr = cudaMemcpy(one_device, &one_host, sizeof(double), cudaMemcpyHostToDevice);
    PARSEC_CUDA_CHECK_ERROR("cudaMemcpy ", cr,
                            { return NULL; });
    return one_device;
#else
    return NULL;
#endif
}
/* Allocate a block-cyclic MxN matrix descriptor (mb x nb tiles) distributed
 * over the file-global P x Q process grid, allocate its local tile storage,
 * register it with the DTD runtime, and fill it with pseudo-random data via
 * initialize_matrix(). Returns a calloc'ed descriptor the caller releases
 * with destroy_matrix(). NOTE(review): calloc/parsec_data_allocate results
 * are not checked — confirm abort-on-OOM is the intended policy here. */
static parsec_matrix_block_cyclic_t *create_initialize_matrix(parsec_context_t *parsec_context, int rank, unsigned int seed, const char *name, int mb, int nb, int M, int N, int *gpu_device_index, int nbgpus)
{
parsec_matrix_block_cyclic_t *dc;
dc = calloc(1, sizeof(parsec_matrix_block_cyclic_t));
/* P and Q are file-globals set from the command line in main(). */
parsec_matrix_block_cyclic_init(dc, PARSEC_MATRIX_DOUBLE, PARSEC_MATRIX_TILE, rank,
mb, nb,
M, N,
0, 0,
M, N,
P, Q,
1, 1,
0, 0);
parsec_data_collection_t *A = &dc->super.super;
parsec_data_collection_set_key(A, name);
/* One contiguous allocation for all local tiles. */
dc->mat = parsec_data_allocate((size_t)dc->super.nb_local_tiles *
(size_t)dc->super.bsiz *
(size_t)parsec_datadist_getsizeoftype(dc->super.mtype));
parsec_dtd_data_collection_init(A);
initialize_matrix(parsec_context, rank, dc, seed, name, gpu_device_index, nbgpus);
return dc;
}
/* Release everything created by create_initialize_matrix(): DTD
 * registration, tile storage, descriptor internals, and the descriptor. */
static void destroy_matrix(parsec_matrix_block_cyclic_t *dc)
{
    parsec_dtd_data_collection_fini(&dc->super.super);
    if (dc->mat)
        parsec_data_free(dc->mat);
    parsec_tiled_matrix_destroy_data(&dc->super);
    parsec_data_collection_destroy(&dc->super.super);
    free(dc);
}
/* Benchmark driver: parse options, initialize MPI + PaRSEC, build random
 * A (MxK), B (KxN), C (MxN) matrices, run runs+1 iterations of the tiled
 * GEMM (first one is warm-up, not reported), then tear everything down.
 * Exit code is 0 unless a fatal setup error aborts earlier. */
int main(int argc, char **argv)
{
int ret = 0, rc, nbgpus = 0;
parsec_context_t *parsec_context = NULL;
int rank, world;
/* Defaults: 1024^3 tiles, 16x16 tiles per dimension. */
int mb = 1024, nb = 1024, kb = 1024;
int M = 16 * mb, N = 16 * nb, K = 16 * kb;
double min_perf=0.0;
int runs = 5;
int debug=-1;
#if defined(PARSEC_HAVE_MPI)
{
int provided;
MPI_Init_thread(&argc, &argv, MPI_THREAD_SERIALIZED, &provided);
}
MPI_Comm_size(MPI_COMM_WORLD, &world);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#else
world = 1;
rank = 0;
#endif
/* ---- Command-line parsing (everything after "--" goes to PaRSEC). ---- */
while( 1 ) {
int option_index = 0;
static struct option long_options[] = {
{"M", required_argument, 0, 'M'},
{"N", required_argument, 0, 'N'},
{"K", required_argument, 0, 'K'},
{"mb", required_argument, 0, 'm'},
{"nb", required_argument, 0, 'n'},
{"kb", required_argument, 0, 'k'},
{"P", required_argument, 0, 'P'},
{"Q", required_argument, 0, 'Q'},
{"device", required_argument, 0, 'd'},
{"nruns", required_argument, 0, 't'},
{"verbose", no_argument, 0, 'v'},
{"Debug", required_argument, 0, 'D'},
{"Alarm", required_argument, 0, 'A'},
{"help", no_argument, 0, 'h'},
{0, 0, 0, 0}
};
int c = getopt_long(argc, argv, "M:N:K:m:n:k:P:Q:t:d:D:A:vh",
long_options, &option_index);
if( c == -1 )
break;
switch( c ) {
case 'M':
M = atoi(optarg);
break;
case 'N':
N = atoi(optarg);
break;
case 'K':
K = atoi(optarg);
break;
case 'm':
mb = atoi(optarg);
break;
case 'n':
nb = atoi(optarg);
break;
case 'k':
kb = atoi(optarg);
break;
case 'P':
P = atoi(optarg);
break;
case 'Q':
Q = atoi(optarg);
break;
case 't':
runs = atoi(optarg);
break;
case 'v':
verbose = !verbose;
break;
case 'd':
/* 'device' is the file-global chore selector used by simple_gemm. */
if(strcmp(optarg, "GPU") == 0) {
device=PARSEC_DEV_CUDA;
} else if(strcmp(optarg, "CPU") == 0) {
#if defined(HAVE_BLAS)
device=PARSEC_DEV_CPU;
#else
fprintf(stderr, "Error: requested to run on CPU (--device=CPU), but no BLAS library has been found at configure time\n");
exit(1);
#endif
} else {
fprintf(stderr, "Error: device parameter should either be 'GPU' or 'CPU' (got '%s')\n", optarg);
exit(1);
}
break;
case 'D':
debug = atoi(optarg);
break;
case 'A':
min_perf = strtod(optarg, NULL);
break;
case 'h':
case '?':
fprintf(stderr,
"Usage %s [flags] [-- <parsec options>]\n"
" Compute pdgemm on a process grid of PxQ, using all available GPUs on each\n"
" node (modulo parsec options), using DTD. Compute C += AxB, where A is MxK\n"
" tiled in mb x kb, B is KxN tiled in kb x nb, and C is MxN tiled in mb x nb\n"
" Executes nruns+1 executions of the GEMM operation, and display the last\n"
" nruns timing and performance.\n"
" flags:\n"
" --M|-M / --K|-K / --N|-N: set M, K and N (resp.)\n"
" --mb|-m / --kb/-k / --nb|-n: set mb, kb and nb (resp.)\n"
" --nruns|-t: set the number of runs to do\n"
" --device|-d: which device to use (CPU or GPU)\n"
" --verbose|-v: display which GEMM runs on which GPU\n"
" as execution is unfolding\n"
" --help|-h|-?: display this help\n"
" --debug|-D: blocks the process passed as parameter and\n"
" waits for gdb to connect to it\n"
" --Alarm|-A: sets the expected minimum performance for a\n"
" single GPU (kills the process if it takes longer\n"
" than the time corresponding to the expected\n"
" performance to complete the product)\n"
"\n",
argv[0]);
break;
}
}
/* Rebuild an argv for parsec_init from the leftover (post "--") args.
 * NOTE(review): pargv is never freed — acceptable for a benchmark, but
 * worth noting. */
int pargc = argc - optind + 1;
char **pargv = (char **)malloc((pargc + 1) * sizeof(char *));
pargv[0] = argv[0];
for( int i = 0; i < argc - optind; i++ )
pargv[i + 1] = argv[optind + i];
pargv[pargc] = NULL;
/* Derive a P x Q grid covering all ranks; the loop always terminates
 * since P = 1, Q = world is a valid factorization. */
if( -1 == P )
P = (int)sqrt(world);
if( -1 == Q )
Q = world / P;
while( P * Q != world ) {
P--;
Q = world / P;
}
/* Optional gdb attach point: rank 'debug' spins until a debugger flips
 * 'loop' to 0. */
if(debug == rank) {
int loop=1;
char hostname[64];
gethostname(hostname, 64);
fprintf(stderr, "ssh -t %s gdb -p %d\n", hostname, getpid());
while(loop) { sleep(1); }
}
// Number of CPU cores involved
int ncores = -1; // Use all available cores
parsec_context = parsec_init(ncores, &pargc, &pargv);
int *gpu_device_index = NULL;
if( PARSEC_DEV_CUDA == device ) {
nbgpus = get_nb_gpu_devices();
rc = !(nbgpus >= 1);
if( rc != 0 ) {
fprintf(stderr, "Rank %d doesn't have CUDA accelerators\n", rank);
MPI_Abort(MPI_COMM_WORLD, 0);
return -1;
}
gpu_device_index = get_gpu_device_index();
// Prepare CUBLAS Handle marshaller
CuHI = parsec_info_register(&parsec_per_stream_infos, "CUBLAS::HANDLE",
destroy_cublas_handle, NULL,
create_cublas_handle, NULL,
NULL);
assert(CuHI != -1);
/* Device-resident constant 1.0 used as alpha/beta by the GPU kernel. */
Cu1 = parsec_info_register(&parsec_per_device_infos, "DEVICE::ONE",
destroy_one_on_device, NULL,
allocate_one_on_device, NULL,
NULL);
assert(Cu1 != -1);
}
// Create datatypes
parsec_arena_datatype_t *adt = parsec_dtd_create_arena_datatype(parsec_context, &TILE_FULL);
parsec_add2arena_rect(adt, parsec_datatype_double_t, mb, nb, mb);
// Create and initialize the data
parsec_matrix_block_cyclic_t *dcA = create_initialize_matrix(parsec_context, rank, 1789, "A", mb, kb, M, K,
gpu_device_index, nbgpus);
parsec_matrix_block_cyclic_t *dcB = create_initialize_matrix(parsec_context, rank, 1805, "B", kb, nb, K, N,
gpu_device_index, nbgpus);
parsec_matrix_block_cyclic_t *dcC = create_initialize_matrix(parsec_context, rank, 1901, "C", mb, nb, M, N,
gpu_device_index, nbgpus);
/* runs+1 iterations: r == 0 is warm-up and is not reported. */
for( int r = 0; r < runs + 1; r++ ) {
double gflop = 2.0 * M * N * K / 1e9;
double maxtime = 0.0;
/* Optional watchdog: if --Alarm gave a minimum GFlop/s, compute the
 * corresponding time budget (at least 60s) and arm SIGALRM. */
if(min_perf > 0.0)
maxtime = gflop/world/nbgpus/min_perf;
struct timeval start, end, diff;
if(maxtime > 0.0 && maxtime < 60.0) maxtime=60.0;
if(rank == 0 && maxtime > 0.0) fprintf(stderr, "watchdog: %d seconds\n", (int)maxtime);
if(maxtime > 0.0) alarm((int)maxtime);
gettimeofday(&start, NULL);
simple_gemm(parsec_context, dcA, dcB, dcC);
gettimeofday(&end, NULL);
timersub(&end, &start, &diff);
double t = (double)diff.tv_sec + (double)diff.tv_usec / 1e6;
double gflops = gflop / t;
if( 0 == rank && r > 0 ) {
fprintf(stderr, "DTD_GEMM PxQxg: %d %d %d M: %d N: %d K: %d mb: %d nb: %d kb: %d Time(s): %g gflops: %10g\n",
P, Q, nbgpus, M, N, K, mb, nb, kb, t, gflops);
}
}
// deactivate the alarm if it was set
alarm(0);
if(PARSEC_DEV_CUDA == device) {
// Cleanup data and parsec data structures
parsec_info_unregister(&parsec_per_stream_infos, CuHI, NULL);
parsec_info_unregister(&parsec_per_device_infos, Cu1, NULL);
}
parsec_type_free(&adt->opaque_dtt);
PARSEC_OBJ_RELEASE(adt->arena);
parsec_dtd_destroy_arena_datatype(parsec_context, TILE_FULL);
destroy_matrix(dcA);
destroy_matrix(dcB);
destroy_matrix(dcC);
parsec_fini(&parsec_context);
#if defined(PARSEC_HAVE_MPI)
MPI_Finalize();
#endif
return ret;
}
|
454285.c | /* (C) 2013-2015, The Regents of The University of Michigan
All rights reserved.
This software may be available under alternative licensing
terms. Contact Edwin Olson, [email protected], for more information.
An unlimited license is granted to use, adapt, modify, or embed the 2D
barcodes into any medium.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
*/
#include <stdlib.h>
#include <string.h>

#include "apriltag.h"
/* Build the 36-bit "artoolkit" tag family: 512 codewords over a 6x6 payload
 * grid (d = 6) with a 1-cell black border. The code table below is
 * pre-generated; each entry packs the 36 payload bits of one tag ID.
 * The returned family and all its fields are heap-allocated; release with
 * tag36artoolkit_destroy(). NOTE(review): strdup requires <string.h>;
 * calloc/strdup results are unchecked, matching the other tag families —
 * confirm abort-on-OOM is acceptable here. */
apriltag_family_t *tag36artoolkit_create()
{
apriltag_family_t *tf = calloc(1, sizeof(apriltag_family_t));
tf->name = strdup("artoolkit");
tf->black_border = 1;
tf->d = 6;
tf->h = 7; // not sure.
tf->ncodes = 512;
tf->codes = calloc(512, sizeof(uint64_t));
tf->codes[0] = 0x0006dc269c27UL;
tf->codes[1] = 0x0006d4229e26UL;
tf->codes[2] = 0x0006cc2e9825UL;
tf->codes[3] = 0x0006c42a9a24UL;
tf->codes[4] = 0x0006fc369423UL;
tf->codes[5] = 0x0006f4329622UL;
tf->codes[6] = 0x0006ec3e9021UL;
tf->codes[7] = 0x0006e43a9220UL;
tf->codes[8] = 0x00069c068c2fUL;
tf->codes[9] = 0x000694028e2eUL;
tf->codes[10] = 0x00068c0e882dUL;
tf->codes[11] = 0x0006840a8a2cUL;
tf->codes[12] = 0x0006bc16842bUL;
tf->codes[13] = 0x0006b412862aUL;
tf->codes[14] = 0x0006ac1e8029UL;
tf->codes[15] = 0x0006a41a8228UL;
tf->codes[16] = 0x00065c66bc37UL;
tf->codes[17] = 0x00065462be36UL;
tf->codes[18] = 0x00064c6eb835UL;
tf->codes[19] = 0x0006446aba34UL;
tf->codes[20] = 0x00067c76b433UL;
tf->codes[21] = 0x00067472b632UL;
tf->codes[22] = 0x00066c7eb031UL;
tf->codes[23] = 0x0006647ab230UL;
tf->codes[24] = 0x00061c46ac3fUL;
tf->codes[25] = 0x00061442ae3eUL;
tf->codes[26] = 0x00060c4ea83dUL;
tf->codes[27] = 0x0006044aaa3cUL;
tf->codes[28] = 0x00063c56a43bUL;
tf->codes[29] = 0x00063452a63aUL;
tf->codes[30] = 0x00062c5ea039UL;
tf->codes[31] = 0x0006245aa238UL;
tf->codes[32] = 0x0007dca6dc07UL;
tf->codes[33] = 0x0007d4a2de06UL;
tf->codes[34] = 0x0007ccaed805UL;
tf->codes[35] = 0x0007c4aada04UL;
tf->codes[36] = 0x0007fcb6d403UL;
tf->codes[37] = 0x0007f4b2d602UL;
tf->codes[38] = 0x0007ecbed001UL;
tf->codes[39] = 0x0007e4bad200UL;
tf->codes[40] = 0x00079c86cc0fUL;
tf->codes[41] = 0x00079482ce0eUL;
tf->codes[42] = 0x00078c8ec80dUL;
tf->codes[43] = 0x0007848aca0cUL;
tf->codes[44] = 0x0007bc96c40bUL;
tf->codes[45] = 0x0007b492c60aUL;
tf->codes[46] = 0x0007ac9ec009UL;
tf->codes[47] = 0x0007a49ac208UL;
tf->codes[48] = 0x00075ce6fc17UL;
tf->codes[49] = 0x000754e2fe16UL;
tf->codes[50] = 0x00074ceef815UL;
tf->codes[51] = 0x000744eafa14UL;
tf->codes[52] = 0x00077cf6f413UL;
tf->codes[53] = 0x000774f2f612UL;
tf->codes[54] = 0x00076cfef011UL;
tf->codes[55] = 0x000764faf210UL;
tf->codes[56] = 0x00071cc6ec1fUL;
tf->codes[57] = 0x000714c2ee1eUL;
tf->codes[58] = 0x00070ccee81dUL;
tf->codes[59] = 0x000704caea1cUL;
tf->codes[60] = 0x00073cd6e41bUL;
tf->codes[61] = 0x000734d2e61aUL;
tf->codes[62] = 0x00072cdee019UL;
tf->codes[63] = 0x000724dae218UL;
tf->codes[64] = 0x0004dd261c67UL;
tf->codes[65] = 0x0004d5221e66UL;
tf->codes[66] = 0x0004cd2e1865UL;
tf->codes[67] = 0x0004c52a1a64UL;
tf->codes[68] = 0x0004fd361463UL;
tf->codes[69] = 0x0004f5321662UL;
tf->codes[70] = 0x0004ed3e1061UL;
tf->codes[71] = 0x0004e53a1260UL;
tf->codes[72] = 0x00049d060c6fUL;
tf->codes[73] = 0x000495020e6eUL;
tf->codes[74] = 0x00048d0e086dUL;
tf->codes[75] = 0x0004850a0a6cUL;
tf->codes[76] = 0x0004bd16046bUL;
tf->codes[77] = 0x0004b512066aUL;
tf->codes[78] = 0x0004ad1e0069UL;
tf->codes[79] = 0x0004a51a0268UL;
tf->codes[80] = 0x00045d663c77UL;
tf->codes[81] = 0x000455623e76UL;
tf->codes[82] = 0x00044d6e3875UL;
tf->codes[83] = 0x0004456a3a74UL;
tf->codes[84] = 0x00047d763473UL;
tf->codes[85] = 0x000475723672UL;
tf->codes[86] = 0x00046d7e3071UL;
tf->codes[87] = 0x0004657a3270UL;
tf->codes[88] = 0x00041d462c7fUL;
tf->codes[89] = 0x000415422e7eUL;
tf->codes[90] = 0x00040d4e287dUL;
tf->codes[91] = 0x0004054a2a7cUL;
tf->codes[92] = 0x00043d56247bUL;
tf->codes[93] = 0x00043552267aUL;
tf->codes[94] = 0x00042d5e2079UL;
tf->codes[95] = 0x0004255a2278UL;
tf->codes[96] = 0x0005dda65c47UL;
tf->codes[97] = 0x0005d5a25e46UL;
tf->codes[98] = 0x0005cdae5845UL;
tf->codes[99] = 0x0005c5aa5a44UL;
tf->codes[100] = 0x0005fdb65443UL;
tf->codes[101] = 0x0005f5b25642UL;
tf->codes[102] = 0x0005edbe5041UL;
tf->codes[103] = 0x0005e5ba5240UL;
tf->codes[104] = 0x00059d864c4fUL;
tf->codes[105] = 0x000595824e4eUL;
tf->codes[106] = 0x00058d8e484dUL;
tf->codes[107] = 0x0005858a4a4cUL;
tf->codes[108] = 0x0005bd96444bUL;
tf->codes[109] = 0x0005b592464aUL;
tf->codes[110] = 0x0005ad9e4049UL;
tf->codes[111] = 0x0005a59a4248UL;
tf->codes[112] = 0x00055de67c57UL;
tf->codes[113] = 0x000555e27e56UL;
tf->codes[114] = 0x00054dee7855UL;
tf->codes[115] = 0x000545ea7a54UL;
tf->codes[116] = 0x00057df67453UL;
tf->codes[117] = 0x000575f27652UL;
tf->codes[118] = 0x00056dfe7051UL;
tf->codes[119] = 0x000565fa7250UL;
tf->codes[120] = 0x00051dc66c5fUL;
tf->codes[121] = 0x000515c26e5eUL;
tf->codes[122] = 0x00050dce685dUL;
tf->codes[123] = 0x000505ca6a5cUL;
tf->codes[124] = 0x00053dd6645bUL;
tf->codes[125] = 0x000535d2665aUL;
tf->codes[126] = 0x00052dde6059UL;
tf->codes[127] = 0x000525da6258UL;
tf->codes[128] = 0x0002de279ca7UL;
tf->codes[129] = 0x0002d6239ea6UL;
tf->codes[130] = 0x0002ce2f98a5UL;
tf->codes[131] = 0x0002c62b9aa4UL;
tf->codes[132] = 0x0002fe3794a3UL;
tf->codes[133] = 0x0002f63396a2UL;
tf->codes[134] = 0x0002ee3f90a1UL;
tf->codes[135] = 0x0002e63b92a0UL;
tf->codes[136] = 0x00029e078cafUL;
tf->codes[137] = 0x000296038eaeUL;
tf->codes[138] = 0x00028e0f88adUL;
tf->codes[139] = 0x0002860b8aacUL;
tf->codes[140] = 0x0002be1784abUL;
tf->codes[141] = 0x0002b61386aaUL;
tf->codes[142] = 0x0002ae1f80a9UL;
tf->codes[143] = 0x0002a61b82a8UL;
tf->codes[144] = 0x00025e67bcb7UL;
tf->codes[145] = 0x00025663beb6UL;
tf->codes[146] = 0x00024e6fb8b5UL;
tf->codes[147] = 0x0002466bbab4UL;
tf->codes[148] = 0x00027e77b4b3UL;
tf->codes[149] = 0x00027673b6b2UL;
tf->codes[150] = 0x00026e7fb0b1UL;
tf->codes[151] = 0x0002667bb2b0UL;
tf->codes[152] = 0x00021e47acbfUL;
tf->codes[153] = 0x00021643aebeUL;
tf->codes[154] = 0x00020e4fa8bdUL;
tf->codes[155] = 0x0002064baabcUL;
tf->codes[156] = 0x00023e57a4bbUL;
tf->codes[157] = 0x00023653a6baUL;
tf->codes[158] = 0x00022e5fa0b9UL;
tf->codes[159] = 0x0002265ba2b8UL;
tf->codes[160] = 0x0003dea7dc87UL;
tf->codes[161] = 0x0003d6a3de86UL;
tf->codes[162] = 0x0003ceafd885UL;
tf->codes[163] = 0x0003c6abda84UL;
tf->codes[164] = 0x0003feb7d483UL;
tf->codes[165] = 0x0003f6b3d682UL;
tf->codes[166] = 0x0003eebfd081UL;
tf->codes[167] = 0x0003e6bbd280UL;
tf->codes[168] = 0x00039e87cc8fUL;
tf->codes[169] = 0x00039683ce8eUL;
tf->codes[170] = 0x00038e8fc88dUL;
tf->codes[171] = 0x0003868bca8cUL;
tf->codes[172] = 0x0003be97c48bUL;
tf->codes[173] = 0x0003b693c68aUL;
tf->codes[174] = 0x0003ae9fc089UL;
tf->codes[175] = 0x0003a69bc288UL;
tf->codes[176] = 0x00035ee7fc97UL;
tf->codes[177] = 0x000356e3fe96UL;
tf->codes[178] = 0x00034eeff895UL;
tf->codes[179] = 0x000346ebfa94UL;
tf->codes[180] = 0x00037ef7f493UL;
tf->codes[181] = 0x000376f3f692UL;
tf->codes[182] = 0x00036efff091UL;
tf->codes[183] = 0x000366fbf290UL;
tf->codes[184] = 0x00031ec7ec9fUL;
tf->codes[185] = 0x000316c3ee9eUL;
tf->codes[186] = 0x00030ecfe89dUL;
tf->codes[187] = 0x000306cbea9cUL;
tf->codes[188] = 0x00033ed7e49bUL;
tf->codes[189] = 0x000336d3e69aUL;
tf->codes[190] = 0x00032edfe099UL;
tf->codes[191] = 0x000326dbe298UL;
tf->codes[192] = 0x0000df271ce7UL;
tf->codes[193] = 0x0000d7231ee6UL;
tf->codes[194] = 0x0000cf2f18e5UL;
tf->codes[195] = 0x0000c72b1ae4UL;
tf->codes[196] = 0x0000ff3714e3UL;
tf->codes[197] = 0x0000f73316e2UL;
tf->codes[198] = 0x0000ef3f10e1UL;
tf->codes[199] = 0x0000e73b12e0UL;
tf->codes[200] = 0x00009f070cefUL;
tf->codes[201] = 0x000097030eeeUL;
tf->codes[202] = 0x00008f0f08edUL;
tf->codes[203] = 0x0000870b0aecUL;
tf->codes[204] = 0x0000bf1704ebUL;
tf->codes[205] = 0x0000b71306eaUL;
tf->codes[206] = 0x0000af1f00e9UL;
tf->codes[207] = 0x0000a71b02e8UL;
tf->codes[208] = 0x00005f673cf7UL;
tf->codes[209] = 0x000057633ef6UL;
tf->codes[210] = 0x00004f6f38f5UL;
tf->codes[211] = 0x0000476b3af4UL;
tf->codes[212] = 0x00007f7734f3UL;
tf->codes[213] = 0x0000777336f2UL;
tf->codes[214] = 0x00006f7f30f1UL;
tf->codes[215] = 0x0000677b32f0UL;
tf->codes[216] = 0x00001f472cffUL;
tf->codes[217] = 0x000017432efeUL;
tf->codes[218] = 0x00000f4f28fdUL;
tf->codes[219] = 0x0000074b2afcUL;
tf->codes[220] = 0x00003f5724fbUL;
tf->codes[221] = 0x0000375326faUL;
tf->codes[222] = 0x00002f5f20f9UL;
tf->codes[223] = 0x0000275b22f8UL;
tf->codes[224] = 0x0001dfa75cc7UL;
tf->codes[225] = 0x0001d7a35ec6UL;
tf->codes[226] = 0x0001cfaf58c5UL;
tf->codes[227] = 0x0001c7ab5ac4UL;
tf->codes[228] = 0x0001ffb754c3UL;
tf->codes[229] = 0x0001f7b356c2UL;
tf->codes[230] = 0x0001efbf50c1UL;
tf->codes[231] = 0x0001e7bb52c0UL;
tf->codes[232] = 0x00019f874ccfUL;
tf->codes[233] = 0x000197834eceUL;
tf->codes[234] = 0x00018f8f48cdUL;
tf->codes[235] = 0x0001878b4accUL;
tf->codes[236] = 0x0001bf9744cbUL;
tf->codes[237] = 0x0001b79346caUL;
tf->codes[238] = 0x0001af9f40c9UL;
tf->codes[239] = 0x0001a79b42c8UL;
tf->codes[240] = 0x00015fe77cd7UL;
tf->codes[241] = 0x000157e37ed6UL;
tf->codes[242] = 0x00014fef78d5UL;
tf->codes[243] = 0x000147eb7ad4UL;
tf->codes[244] = 0x00017ff774d3UL;
tf->codes[245] = 0x000177f376d2UL;
tf->codes[246] = 0x00016fff70d1UL;
tf->codes[247] = 0x000167fb72d0UL;
tf->codes[248] = 0x00011fc76cdfUL;
tf->codes[249] = 0x000117c36edeUL;
tf->codes[250] = 0x00010fcf68ddUL;
tf->codes[251] = 0x000107cb6adcUL;
tf->codes[252] = 0x00013fd764dbUL;
tf->codes[253] = 0x000137d366daUL;
tf->codes[254] = 0x00012fdf60d9UL;
tf->codes[255] = 0x000127db62d8UL;
tf->codes[256] = 0x000ed8249d27UL;
tf->codes[257] = 0x000ed0209f26UL;
tf->codes[258] = 0x000ec82c9925UL;
tf->codes[259] = 0x000ec0289b24UL;
tf->codes[260] = 0x000ef8349523UL;
tf->codes[261] = 0x000ef0309722UL;
tf->codes[262] = 0x000ee83c9121UL;
tf->codes[263] = 0x000ee0389320UL;
tf->codes[264] = 0x000e98048d2fUL;
tf->codes[265] = 0x000e90008f2eUL;
tf->codes[266] = 0x000e880c892dUL;
tf->codes[267] = 0x000e80088b2cUL;
tf->codes[268] = 0x000eb814852bUL;
tf->codes[269] = 0x000eb010872aUL;
tf->codes[270] = 0x000ea81c8129UL;
tf->codes[271] = 0x000ea0188328UL;
tf->codes[272] = 0x000e5864bd37UL;
tf->codes[273] = 0x000e5060bf36UL;
tf->codes[274] = 0x000e486cb935UL;
tf->codes[275] = 0x000e4068bb34UL;
tf->codes[276] = 0x000e7874b533UL;
tf->codes[277] = 0x000e7070b732UL;
tf->codes[278] = 0x000e687cb131UL;
tf->codes[279] = 0x000e6078b330UL;
tf->codes[280] = 0x000e1844ad3fUL;
tf->codes[281] = 0x000e1040af3eUL;
tf->codes[282] = 0x000e084ca93dUL;
tf->codes[283] = 0x000e0048ab3cUL;
tf->codes[284] = 0x000e3854a53bUL;
tf->codes[285] = 0x000e3050a73aUL;
tf->codes[286] = 0x000e285ca139UL;
tf->codes[287] = 0x000e2058a338UL;
tf->codes[288] = 0x000fd8a4dd07UL;
tf->codes[289] = 0x000fd0a0df06UL;
tf->codes[290] = 0x000fc8acd905UL;
tf->codes[291] = 0x000fc0a8db04UL;
tf->codes[292] = 0x000ff8b4d503UL;
tf->codes[293] = 0x000ff0b0d702UL;
tf->codes[294] = 0x000fe8bcd101UL;
tf->codes[295] = 0x000fe0b8d300UL;
tf->codes[296] = 0x000f9884cd0fUL;
tf->codes[297] = 0x000f9080cf0eUL;
tf->codes[298] = 0x000f888cc90dUL;
tf->codes[299] = 0x000f8088cb0cUL;
tf->codes[300] = 0x000fb894c50bUL;
tf->codes[301] = 0x000fb090c70aUL;
tf->codes[302] = 0x000fa89cc109UL;
tf->codes[303] = 0x000fa098c308UL;
tf->codes[304] = 0x000f58e4fd17UL;
tf->codes[305] = 0x000f50e0ff16UL;
tf->codes[306] = 0x000f48ecf915UL;
tf->codes[307] = 0x000f40e8fb14UL;
tf->codes[308] = 0x000f78f4f513UL;
tf->codes[309] = 0x000f70f0f712UL;
tf->codes[310] = 0x000f68fcf111UL;
tf->codes[311] = 0x000f60f8f310UL;
tf->codes[312] = 0x000f18c4ed1fUL;
tf->codes[313] = 0x000f10c0ef1eUL;
tf->codes[314] = 0x000f08cce91dUL;
tf->codes[315] = 0x000f00c8eb1cUL;
tf->codes[316] = 0x000f38d4e51bUL;
tf->codes[317] = 0x000f30d0e71aUL;
tf->codes[318] = 0x000f28dce119UL;
tf->codes[319] = 0x000f20d8e318UL;
tf->codes[320] = 0x000cd9241d67UL;
tf->codes[321] = 0x000cd1201f66UL;
tf->codes[322] = 0x000cc92c1965UL;
tf->codes[323] = 0x000cc1281b64UL;
tf->codes[324] = 0x000cf9341563UL;
tf->codes[325] = 0x000cf1301762UL;
tf->codes[326] = 0x000ce93c1161UL;
tf->codes[327] = 0x000ce1381360UL;
tf->codes[328] = 0x000c99040d6fUL;
tf->codes[329] = 0x000c91000f6eUL;
tf->codes[330] = 0x000c890c096dUL;
tf->codes[331] = 0x000c81080b6cUL;
tf->codes[332] = 0x000cb914056bUL;
tf->codes[333] = 0x000cb110076aUL;
tf->codes[334] = 0x000ca91c0169UL;
tf->codes[335] = 0x000ca1180368UL;
tf->codes[336] = 0x000c59643d77UL;
tf->codes[337] = 0x000c51603f76UL;
tf->codes[338] = 0x000c496c3975UL;
tf->codes[339] = 0x000c41683b74UL;
tf->codes[340] = 0x000c79743573UL;
tf->codes[341] = 0x000c71703772UL;
tf->codes[342] = 0x000c697c3171UL;
tf->codes[343] = 0x000c61783370UL;
tf->codes[344] = 0x000c19442d7fUL;
tf->codes[345] = 0x000c11402f7eUL;
tf->codes[346] = 0x000c094c297dUL;
tf->codes[347] = 0x000c01482b7cUL;
tf->codes[348] = 0x000c3954257bUL;
tf->codes[349] = 0x000c3150277aUL;
tf->codes[350] = 0x000c295c2179UL;
tf->codes[351] = 0x000c21582378UL;
tf->codes[352] = 0x000dd9a45d47UL;
tf->codes[353] = 0x000dd1a05f46UL;
tf->codes[354] = 0x000dc9ac5945UL;
tf->codes[355] = 0x000dc1a85b44UL;
tf->codes[356] = 0x000df9b45543UL;
tf->codes[357] = 0x000df1b05742UL;
tf->codes[358] = 0x000de9bc5141UL;
tf->codes[359] = 0x000de1b85340UL;
tf->codes[360] = 0x000d99844d4fUL;
tf->codes[361] = 0x000d91804f4eUL;
tf->codes[362] = 0x000d898c494dUL;
tf->codes[363] = 0x000d81884b4cUL;
tf->codes[364] = 0x000db994454bUL;
tf->codes[365] = 0x000db190474aUL;
tf->codes[366] = 0x000da99c4149UL;
tf->codes[367] = 0x000da1984348UL;
tf->codes[368] = 0x000d59e47d57UL;
tf->codes[369] = 0x000d51e07f56UL;
tf->codes[370] = 0x000d49ec7955UL;
tf->codes[371] = 0x000d41e87b54UL;
tf->codes[372] = 0x000d79f47553UL;
tf->codes[373] = 0x000d71f07752UL;
tf->codes[374] = 0x000d69fc7151UL;
tf->codes[375] = 0x000d61f87350UL;
tf->codes[376] = 0x000d19c46d5fUL;
tf->codes[377] = 0x000d11c06f5eUL;
tf->codes[378] = 0x000d09cc695dUL;
tf->codes[379] = 0x000d01c86b5cUL;
tf->codes[380] = 0x000d39d4655bUL;
tf->codes[381] = 0x000d31d0675aUL;
tf->codes[382] = 0x000d29dc6159UL;
tf->codes[383] = 0x000d21d86358UL;
tf->codes[384] = 0x000ada259da7UL;
tf->codes[385] = 0x000ad2219fa6UL;
tf->codes[386] = 0x000aca2d99a5UL;
tf->codes[387] = 0x000ac2299ba4UL;
tf->codes[388] = 0x000afa3595a3UL;
tf->codes[389] = 0x000af23197a2UL;
tf->codes[390] = 0x000aea3d91a1UL;
tf->codes[391] = 0x000ae23993a0UL;
tf->codes[392] = 0x000a9a058dafUL;
tf->codes[393] = 0x000a92018faeUL;
tf->codes[394] = 0x000a8a0d89adUL;
tf->codes[395] = 0x000a82098bacUL;
tf->codes[396] = 0x000aba1585abUL;
tf->codes[397] = 0x000ab21187aaUL;
tf->codes[398] = 0x000aaa1d81a9UL;
tf->codes[399] = 0x000aa21983a8UL;
tf->codes[400] = 0x000a5a65bdb7UL;
tf->codes[401] = 0x000a5261bfb6UL;
tf->codes[402] = 0x000a4a6db9b5UL;
tf->codes[403] = 0x000a4269bbb4UL;
tf->codes[404] = 0x000a7a75b5b3UL;
tf->codes[405] = 0x000a7271b7b2UL;
tf->codes[406] = 0x000a6a7db1b1UL;
tf->codes[407] = 0x000a6279b3b0UL;
tf->codes[408] = 0x000a1a45adbfUL;
tf->codes[409] = 0x000a1241afbeUL;
tf->codes[410] = 0x000a0a4da9bdUL;
tf->codes[411] = 0x000a0249abbcUL;
tf->codes[412] = 0x000a3a55a5bbUL;
tf->codes[413] = 0x000a3251a7baUL;
tf->codes[414] = 0x000a2a5da1b9UL;
tf->codes[415] = 0x000a2259a3b8UL;
tf->codes[416] = 0x000bdaa5dd87UL;
tf->codes[417] = 0x000bd2a1df86UL;
tf->codes[418] = 0x000bcaadd985UL;
tf->codes[419] = 0x000bc2a9db84UL;
tf->codes[420] = 0x000bfab5d583UL;
tf->codes[421] = 0x000bf2b1d782UL;
tf->codes[422] = 0x000beabdd181UL;
tf->codes[423] = 0x000be2b9d380UL;
tf->codes[424] = 0x000b9a85cd8fUL;
tf->codes[425] = 0x000b9281cf8eUL;
tf->codes[426] = 0x000b8a8dc98dUL;
tf->codes[427] = 0x000b8289cb8cUL;
tf->codes[428] = 0x000bba95c58bUL;
tf->codes[429] = 0x000bb291c78aUL;
tf->codes[430] = 0x000baa9dc189UL;
tf->codes[431] = 0x000ba299c388UL;
tf->codes[432] = 0x000b5ae5fd97UL;
tf->codes[433] = 0x000b52e1ff96UL;
tf->codes[434] = 0x000b4aedf995UL;
tf->codes[435] = 0x000b42e9fb94UL;
tf->codes[436] = 0x000b7af5f593UL;
tf->codes[437] = 0x000b72f1f792UL;
tf->codes[438] = 0x000b6afdf191UL;
tf->codes[439] = 0x000b62f9f390UL;
tf->codes[440] = 0x000b1ac5ed9fUL;
tf->codes[441] = 0x000b12c1ef9eUL;
tf->codes[442] = 0x000b0acde99dUL;
tf->codes[443] = 0x000b02c9eb9cUL;
tf->codes[444] = 0x000b3ad5e59bUL;
tf->codes[445] = 0x000b32d1e79aUL;
tf->codes[446] = 0x000b2adde199UL;
tf->codes[447] = 0x000b22d9e398UL;
tf->codes[448] = 0x0008db251de7UL;
tf->codes[449] = 0x0008d3211fe6UL;
tf->codes[450] = 0x0008cb2d19e5UL;
tf->codes[451] = 0x0008c3291be4UL;
tf->codes[452] = 0x0008fb3515e3UL;
tf->codes[453] = 0x0008f33117e2UL;
tf->codes[454] = 0x0008eb3d11e1UL;
tf->codes[455] = 0x0008e33913e0UL;
tf->codes[456] = 0x00089b050defUL;
tf->codes[457] = 0x000893010feeUL;
tf->codes[458] = 0x00088b0d09edUL;
tf->codes[459] = 0x000883090becUL;
tf->codes[460] = 0x0008bb1505ebUL;
tf->codes[461] = 0x0008b31107eaUL;
tf->codes[462] = 0x0008ab1d01e9UL;
tf->codes[463] = 0x0008a31903e8UL;
tf->codes[464] = 0x00085b653df7UL;
tf->codes[465] = 0x000853613ff6UL;
tf->codes[466] = 0x00084b6d39f5UL;
tf->codes[467] = 0x000843693bf4UL;
tf->codes[468] = 0x00087b7535f3UL;
tf->codes[469] = 0x0008737137f2UL;
tf->codes[470] = 0x00086b7d31f1UL;
tf->codes[471] = 0x0008637933f0UL;
tf->codes[472] = 0x00081b452dffUL;
tf->codes[473] = 0x000813412ffeUL;
tf->codes[474] = 0x00080b4d29fdUL;
tf->codes[475] = 0x000803492bfcUL;
tf->codes[476] = 0x00083b5525fbUL;
tf->codes[477] = 0x0008335127faUL;
tf->codes[478] = 0x00082b5d21f9UL;
tf->codes[479] = 0x0008235923f8UL;
tf->codes[480] = 0x0009dba55dc7UL;
tf->codes[481] = 0x0009d3a15fc6UL;
tf->codes[482] = 0x0009cbad59c5UL;
tf->codes[483] = 0x0009c3a95bc4UL;
tf->codes[484] = 0x0009fbb555c3UL;
tf->codes[485] = 0x0009f3b157c2UL;
tf->codes[486] = 0x0009ebbd51c1UL;
tf->codes[487] = 0x0009e3b953c0UL;
tf->codes[488] = 0x00099b854dcfUL;
tf->codes[489] = 0x000993814fceUL;
tf->codes[490] = 0x00098b8d49cdUL;
tf->codes[491] = 0x000983894bccUL;
tf->codes[492] = 0x0009bb9545cbUL;
tf->codes[493] = 0x0009b39147caUL;
tf->codes[494] = 0x0009ab9d41c9UL;
tf->codes[495] = 0x0009a39943c8UL;
tf->codes[496] = 0x00095be57dd7UL;
tf->codes[497] = 0x000953e17fd6UL;
tf->codes[498] = 0x00094bed79d5UL;
tf->codes[499] = 0x000943e97bd4UL;
tf->codes[500] = 0x00097bf575d3UL;
tf->codes[501] = 0x000973f177d2UL;
tf->codes[502] = 0x00096bfd71d1UL;
tf->codes[503] = 0x000963f973d0UL;
tf->codes[504] = 0x00091bc56ddfUL;
tf->codes[505] = 0x000913c16fdeUL;
tf->codes[506] = 0x00090bcd69ddUL;
tf->codes[507] = 0x000903c96bdcUL;
tf->codes[508] = 0x00093bd565dbUL;
tf->codes[509] = 0x000933d167daUL;
tf->codes[510] = 0x00092bdd61d9UL;
tf->codes[511] = 0x000923d963d8UL;
return tf;
}
/* Release an apriltag family created by the matching create call,
 * freeing the owned buffers before the struct itself.
 */
void tag36artoolkit_destroy(apriltag_family_t *tf)
{
	free(tf->codes);
	free(tf->name);
	free(tf);
}
/*
* Copyright 2011 INRIA Saclay
* Copyright 2011 Sven Verdoolaege
* Copyright 2012-2014 Ecole Normale Superieure
* Copyright 2014 INRIA Rocquencourt
*
* Use of this software is governed by the MIT license
*
* Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
* Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
* 91893 Orsay, France
* and Ecole Normale Superieure, 45 rue d’Ulm, 75230 Paris, France
* and Inria Paris - Rocquencourt, Domaine de Voluceau - Rocquencourt,
* B.P. 105 - 78153 Le Chesnay, France
*/
#include <isl_ctx_private.h>
#define ISL_DIM_H
#include <isl_map_private.h>
#include <isl_union_map_private.h>
#include <isl_aff_private.h>
#include <isl_space_private.h>
#include <isl_local_space_private.h>
#include <isl_vec_private.h>
#include <isl_mat_private.h>
#include <isl/constraint.h>
#include <isl_seq.h>
#include <isl/set.h>
#include <isl_val_private.h>
#include <isl/deprecated/aff_int.h>
#include <isl_config.h>
#undef BASE
#define BASE aff
#include <isl_list_templ.c>
#undef BASE
#define BASE pw_aff
#include <isl_list_templ.c>
#undef BASE
#define BASE union_pw_aff
#include <isl_list_templ.c>
#undef BASE
#define BASE union_pw_multi_aff
#include <isl_list_templ.c>
/* Construct an isl_aff from a local space "ls" and a coefficient
 * vector "v", taking ownership of both.
 * On any failure, both arguments are freed and NULL is returned.
 */
__isl_give isl_aff *isl_aff_alloc_vec(__isl_take isl_local_space *ls,
	__isl_take isl_vec *v)
{
	isl_aff *aff = NULL;

	if (ls && v)
		aff = isl_calloc_type(v->ctx, struct isl_aff);
	if (!aff) {
		isl_local_space_free(ls);
		isl_vec_free(v);
		return NULL;
	}

	aff->ref = 1;
	aff->ls = ls;
	aff->v = v;

	return aff;
}
/* Allocate an isl_aff on the given local space, with room for
 * a denominator, a constant term and one coefficient per dimension,
 * but leave the coefficient vector uninitialized.
 * The local space must be a set space with known div definitions.
 */
__isl_give isl_aff *isl_aff_alloc(__isl_take isl_local_space *ls)
{
	isl_ctx *ctx;
	isl_vec *v;
	unsigned total;

	if (!ls)
		return NULL;

	ctx = isl_local_space_get_ctx(ls);
	if (!isl_local_space_divs_known(ls))
		isl_die(ctx, isl_error_invalid, "local space has unknown divs",
			goto error);
	if (!isl_local_space_is_set(ls))
		isl_die(ctx, isl_error_invalid,
			"domain of affine expression should be a set",
			goto error);
	total = isl_local_space_dim(ls, isl_dim_all);
	/* Element 0 holds the denominator, element 1 the constant term.
	 * A failed allocation (NULL v) is handled by isl_aff_alloc_vec. */
	v = isl_vec_alloc(ctx, 1 + 1 + total);
	return isl_aff_alloc_vec(ls, v);
error:
	isl_local_space_free(ls);
	return NULL;
}
/* Return an affine expression defined on the specified domain
 * that is equal to zero: denominator 1 and all remaining entries
 * (constant term and coefficients) cleared.
 */
__isl_give isl_aff *isl_aff_zero_on_domain(__isl_take isl_local_space *ls)
{
	isl_aff *aff;

	aff = isl_aff_alloc(ls);
	if (!aff)
		return NULL;

	isl_int_set_si(aff->v->el[0], 1);
	isl_seq_clr(aff->v->el + 1, aff->v->size - 1);

	return aff;
}

/* Return a piecewise affine expression defined on the specified domain
 * that is equal to zero.
 */
__isl_give isl_pw_aff *isl_pw_aff_zero_on_domain(__isl_take isl_local_space *ls)
{
	return isl_pw_aff_from_aff(isl_aff_zero_on_domain(ls));
}

/* Return an affine expression defined on the specified domain
 * that represents NaN.
 * NaN is encoded by clearing the whole vector, in particular
 * the denominator and the constant term (see isl_aff_is_nan).
 */
__isl_give isl_aff *isl_aff_nan_on_domain(__isl_take isl_local_space *ls)
{
	isl_aff *aff;

	aff = isl_aff_alloc(ls);
	if (!aff)
		return NULL;

	isl_seq_clr(aff->v->el, aff->v->size);

	return aff;
}

/* Return a piecewise affine expression defined on the specified domain
 * that represents NaN.
 */
__isl_give isl_pw_aff *isl_pw_aff_nan_on_domain(__isl_take isl_local_space *ls)
{
	return isl_pw_aff_from_aff(isl_aff_nan_on_domain(ls));
}
/* Return an affine expression that is equal to "val" on
 * domain local space "ls".
 * "val" must be rational; its numerator becomes the constant
 * term (element 1) and its denominator element 0.
 */
__isl_give isl_aff *isl_aff_val_on_domain(__isl_take isl_local_space *ls,
	__isl_take isl_val *val)
{
	isl_aff *aff;

	if (!ls || !val)
		goto error;
	if (!isl_val_is_rat(val))
		isl_die(isl_val_get_ctx(val), isl_error_invalid,
			"expecting rational value", goto error);

	aff = isl_aff_alloc(isl_local_space_copy(ls));
	if (!aff)
		goto error;

	/* Clear all variable coefficients, then install val = n/d. */
	isl_seq_clr(aff->v->el + 2, aff->v->size - 2);
	isl_int_set(aff->v->el[1], val->n);
	isl_int_set(aff->v->el[0], val->d);

	isl_local_space_free(ls);
	isl_val_free(val);
	return aff;
error:
	isl_local_space_free(ls);
	isl_val_free(val);
	return NULL;
}
/* Return an affine expression that is equal to the specified dimension
 * in "ls".
 */
__isl_give isl_aff *isl_aff_var_on_domain(__isl_take isl_local_space *ls,
	enum isl_dim_type type, unsigned pos)
{
	isl_space *space;
	isl_aff *aff;

	if (!ls)
		return NULL;

	space = isl_local_space_get_space(ls);
	if (!space)
		goto error;
	if (isl_space_is_map(space))
		isl_die(isl_space_get_ctx(space), isl_error_invalid,
			"expecting (parameter) set space", goto error);
	if (pos >= isl_local_space_dim(ls, type))
		isl_die(isl_space_get_ctx(space), isl_error_invalid,
			"position out of bounds", goto error);

	isl_space_free(space);
	aff = isl_aff_alloc(ls);
	if (!aff)
		return NULL;

	/* Denominator 1, all entries 0 except the selected variable. */
	pos += isl_local_space_offset(aff->ls, type);
	isl_int_set_si(aff->v->el[0], 1);
	isl_seq_clr(aff->v->el + 1, aff->v->size - 1);
	isl_int_set_si(aff->v->el[1 + pos], 1);

	return aff;
error:
	isl_local_space_free(ls);
	/* "space" may be NULL here; isl free functions accept NULL. */
	isl_space_free(space);
	return NULL;
}

/* Return a piecewise affine expression that is equal to
 * the specified dimension in "ls".
 */
__isl_give isl_pw_aff *isl_pw_aff_var_on_domain(__isl_take isl_local_space *ls,
	enum isl_dim_type type, unsigned pos)
{
	return isl_pw_aff_from_aff(isl_aff_var_on_domain(ls, type, pos));
}
/* Return an extra reference to "aff". */
__isl_give isl_aff *isl_aff_copy(__isl_keep isl_aff *aff)
{
	if (aff)
		aff->ref++;
	return aff;
}

/* Return a fresh isl_aff with the same local space and
 * coefficient vector as "aff".
 */
__isl_give isl_aff *isl_aff_dup(__isl_keep isl_aff *aff)
{
	if (!aff)
		return NULL;
	return isl_aff_alloc_vec(isl_local_space_copy(aff->ls),
				 isl_vec_copy(aff->v));
}

/* Return an isl_aff that is known not to be shared and
 * may therefore be modified in place.
 */
__isl_give isl_aff *isl_aff_cow(__isl_take isl_aff *aff)
{
	if (!aff || aff->ref == 1)
		return aff;
	aff->ref--;
	return isl_aff_dup(aff);
}

/* Drop a reference to "aff", freeing it when the last
 * reference disappears.  Always returns NULL.
 */
__isl_null isl_aff *isl_aff_free(__isl_take isl_aff *aff)
{
	if (aff && --aff->ref == 0) {
		isl_local_space_free(aff->ls);
		isl_vec_free(aff->v);
		free(aff);
	}
	return NULL;
}
/* Return the isl_ctx to which "aff" belongs, or NULL if "aff" is NULL. */
isl_ctx *isl_aff_get_ctx(__isl_keep isl_aff *aff)
{
	return aff ? isl_local_space_get_ctx(aff->ls) : NULL;
}

/* Return a hash value that digests "aff",
 * combining the hashes of its local space and its coefficient vector.
 */
uint32_t isl_aff_get_hash(__isl_keep isl_aff *aff)
{
	uint32_t hash, ls_hash, v_hash;

	if (!aff)
		return 0;

	hash = isl_hash_init();
	/* Keep the sub-hashes in named variables: isl_hash_hash is a
	 * macro that may evaluate its argument more than once. */
	ls_hash = isl_local_space_get_hash(aff->ls);
	isl_hash_hash(hash, ls_hash);
	v_hash = isl_vec_get_hash(aff->v);
	isl_hash_hash(hash, v_hash);

	return hash;
}
/* Externally, an isl_aff has a map space, but internally, the
 * ls field corresponds to the domain of that space.
 * There is always exactly one output dimension, and input
 * dimensions map to the set dimensions of the local space.
 */
int isl_aff_dim(__isl_keep isl_aff *aff, enum isl_dim_type type)
{
	if (!aff)
		return 0;
	switch (type) {
	case isl_dim_out:
		return 1;
	case isl_dim_in:
		return isl_local_space_dim(aff->ls, isl_dim_set);
	default:
		return isl_local_space_dim(aff->ls, type);
	}
}

/* Return the position of the dimension of the given type and name
 * in "aff".
 * Return -1 if no such dimension can be found.
 * The single output dimension cannot be looked up by name.
 */
int isl_aff_find_dim_by_name(__isl_keep isl_aff *aff, enum isl_dim_type type,
	const char *name)
{
	if (!aff || type == isl_dim_out)
		return -1;
	if (type == isl_dim_in)
		type = isl_dim_set;
	return isl_local_space_find_dim_by_name(aff->ls, type, name);
}
/* Return the space of the domain of "aff". */
__isl_give isl_space *isl_aff_get_domain_space(__isl_keep isl_aff *aff)
{
	return aff ? isl_local_space_get_space(aff->ls) : NULL;
}

/* Return the (map) space of "aff" itself: the domain space
 * extended with a single output dimension.
 */
__isl_give isl_space *isl_aff_get_space(__isl_keep isl_aff *aff)
{
	if (!aff)
		return NULL;
	return isl_space_add_dims(
		isl_space_from_domain(isl_local_space_get_space(aff->ls)),
		isl_dim_out, 1);
}

/* Return a copy of the local space of the domain of "aff". */
__isl_give isl_local_space *isl_aff_get_domain_local_space(
	__isl_keep isl_aff *aff)
{
	return aff ? isl_local_space_copy(aff->ls) : NULL;
}

/* Return the (map) local space of "aff" itself: the domain local
 * space extended with a single output dimension.
 */
__isl_give isl_local_space *isl_aff_get_local_space(__isl_keep isl_aff *aff)
{
	if (!aff)
		return NULL;
	return isl_local_space_add_dims(
		isl_local_space_from_domain(isl_local_space_copy(aff->ls)),
		isl_dim_out, 1);
}

/* Externally, an isl_aff has a map space, but internally, the
 * ls field corresponds to the domain of that space.
 * The output dimension has no name.
 */
const char *isl_aff_get_dim_name(__isl_keep isl_aff *aff,
	enum isl_dim_type type, unsigned pos)
{
	if (!aff || type == isl_dim_out)
		return NULL;
	if (type == isl_dim_in)
		type = isl_dim_set;
	return isl_local_space_get_dim_name(aff->ls, type, pos);
}
/* Replace the space of the domain of "aff" by "dim",
 * resetting the space of the underlying local space accordingly.
 */
__isl_give isl_aff *isl_aff_reset_domain_space(__isl_take isl_aff *aff,
	__isl_take isl_space *dim)
{
	aff = isl_aff_cow(aff);
	if (!aff || !dim)
		goto error;

	aff->ls = isl_local_space_reset_space(aff->ls, dim);
	if (!aff->ls)
		return isl_aff_free(aff);

	return aff;
error:
	isl_aff_free(aff);
	isl_space_free(dim);
	return NULL;
}

/* Reset the space of "aff". This function is called from isl_pw_templ.c
 * and doesn't know if the space of an element object is represented
 * directly or through its domain. It therefore passes along both.
 * For an isl_aff only the domain space is used; "space" is dropped.
 */
__isl_give isl_aff *isl_aff_reset_space_and_domain(__isl_take isl_aff *aff,
	__isl_take isl_space *space, __isl_take isl_space *domain)
{
	isl_space_free(space);
	return isl_aff_reset_domain_space(aff, domain);
}
/* Reorder the coefficients of the affine expression based
 * on the given reordering.
 * The reordering r is assumed to have been extended with the local
 * variables.
 */
static __isl_give isl_vec *vec_reorder(__isl_take isl_vec *vec,
	__isl_take isl_reordering *r, int n_div)
{
	isl_vec *res;
	int i;

	if (!vec || !r)
		goto error;

	res = isl_vec_alloc(vec->ctx,
			    2 + isl_space_dim(r->dim, isl_dim_all) + n_div);
	/* Bail out on allocation failure before dereferencing "res". */
	if (!res)
		goto error;
	/* Copy denominator and constant term, clear the rest and
	 * scatter the old coefficients to their new positions. */
	isl_seq_cpy(res->el, vec->el, 2);
	isl_seq_clr(res->el + 2, res->size - 2);
	for (i = 0; i < r->len; ++i)
		isl_int_set(res->el[2 + r->pos[i]], vec->el[2 + i]);

	isl_reordering_free(r);
	isl_vec_free(vec);
	return res;
error:
	isl_vec_free(vec);
	isl_reordering_free(r);
	return NULL;
}
/* Reorder the dimensions of the domain of "aff" according
 * to the given reordering.
 */
__isl_give isl_aff *isl_aff_realign_domain(__isl_take isl_aff *aff,
	__isl_take isl_reordering *r)
{
	aff = isl_aff_cow(aff);
	if (!aff)
		goto error;

	/* Extend the reordering with the local divs so that the
	 * coefficient vector and the local space stay in sync. */
	r = isl_reordering_extend(r, aff->ls->div->n_row);
	aff->v = vec_reorder(aff->v, isl_reordering_copy(r),
				aff->ls->div->n_row);
	aff->ls = isl_local_space_realign(aff->ls, r);

	if (!aff->v || !aff->ls)
		return isl_aff_free(aff);

	return aff;
error:
	isl_aff_free(aff);
	isl_reordering_free(r);
	return NULL;
}

/* Align the parameters of "aff" with those of "model",
 * realigning the domain of "aff" if the parameters do not
 * already match.
 */
__isl_give isl_aff *isl_aff_align_params(__isl_take isl_aff *aff,
	__isl_take isl_space *model)
{
	if (!aff || !model)
		goto error;

	if (!isl_space_match(aff->ls->dim, isl_dim_param,
			     model, isl_dim_param)) {
		isl_reordering *exp;

		/* Only the parameters of "model" are relevant here,
		 * so drop its input and output dimensions first. */
		model = isl_space_drop_dims(model, isl_dim_in,
					0, isl_space_dim(model, isl_dim_in));
		model = isl_space_drop_dims(model, isl_dim_out,
					0, isl_space_dim(model, isl_dim_out));
		exp = isl_parameter_alignment_reordering(aff->ls->dim, model);
		exp = isl_reordering_extend_space(exp,
					isl_aff_get_domain_space(aff));
		aff = isl_aff_realign_domain(aff, exp);
	}

	isl_space_free(model);
	return aff;
error:
	isl_space_free(model);
	isl_aff_free(aff);
	return NULL;
}
/* Is "aff" obviously equal to zero?
 *
 * If the denominator is zero, then "aff" is not equal to zero.
 */
isl_bool isl_aff_plain_is_zero(__isl_keep isl_aff *aff)
{
	int first;

	if (!aff)
		return isl_bool_error;
	if (isl_int_is_zero(aff->v->el[0]))
		return isl_bool_false;
	first = isl_seq_first_non_zero(aff->v->el + 1, aff->v->size - 1);
	return first < 0;
}

/* Does "aff" represent NaN?
 * NaN is encoded with both the denominator and the constant
 * term equal to zero.
 */
isl_bool isl_aff_is_nan(__isl_keep isl_aff *aff)
{
	if (!aff)
		return isl_bool_error;
	return isl_seq_first_non_zero(aff->v->el, 2) < 0;
}

/* Does "pa" involve any NaNs? */
isl_bool isl_pw_aff_involves_nan(__isl_keep isl_pw_aff *pa)
{
	int i;

	if (!pa)
		return isl_bool_error;

	/* Propagate the first error or NaN found among the pieces. */
	for (i = 0; i < pa->n; ++i) {
		isl_bool nan = isl_aff_is_nan(pa->p[i].aff);
		if (nan != isl_bool_false)
			return nan;
	}

	return isl_bool_false;
}
/* Are "aff1" and "aff2" obviously equal?
 *
 * NaN is not equal to anything, not even to another NaN.
 */
isl_bool isl_aff_plain_is_equal(__isl_keep isl_aff *aff1,
	__isl_keep isl_aff *aff2)
{
	isl_bool equal;

	if (!aff1 || !aff2)
		return isl_bool_error;

	if (isl_aff_is_nan(aff1) || isl_aff_is_nan(aff2))
		return isl_bool_false;

	/* The local spaces must agree before the coefficient
	 * vectors can be meaningfully compared. */
	equal = isl_local_space_is_equal(aff1->ls, aff2->ls);
	if (equal < 0 || !equal)
		return equal;

	return isl_vec_is_equal(aff1->v, aff2->v);
}
/* Return the common denominator of "aff" in "v".
 *
 * We cannot return anything meaningful in case of a NaN.
 */
int isl_aff_get_denominator(__isl_keep isl_aff *aff, isl_int *v)
{
	if (!aff)
		return -1;
	if (isl_aff_is_nan(aff))
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"cannot get denominator of NaN", return -1);
	/* The denominator lives in element 0 of the vector. */
	isl_int_set(*v, aff->v->el[0]);
	return 0;
}

/* Return the common denominator of "aff",
 * or NaN if "aff" itself is NaN.
 */
__isl_give isl_val *isl_aff_get_denominator_val(__isl_keep isl_aff *aff)
{
	isl_ctx *ctx;

	if (!aff)
		return NULL;

	ctx = isl_aff_get_ctx(aff);
	if (isl_aff_is_nan(aff))
		return isl_val_nan(ctx);
	return isl_val_int_from_isl_int(ctx, aff->v->el[0]);
}

/* Return the constant term of "aff" in "v".
 *
 * We cannot return anything meaningful in case of a NaN.
 */
int isl_aff_get_constant(__isl_keep isl_aff *aff, isl_int *v)
{
	if (!aff)
		return -1;
	if (isl_aff_is_nan(aff))
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"cannot get constant term of NaN", return -1);
	/* The numerator of the constant term lives in element 1. */
	isl_int_set(*v, aff->v->el[1]);
	return 0;
}

/* Return the constant term of "aff" as a normalized rational value,
 * or NaN if "aff" itself is NaN.
 */
__isl_give isl_val *isl_aff_get_constant_val(__isl_keep isl_aff *aff)
{
	isl_ctx *ctx;
	isl_val *v;

	if (!aff)
		return NULL;

	ctx = isl_aff_get_ctx(aff);
	if (isl_aff_is_nan(aff))
		return isl_val_nan(ctx);
	v = isl_val_rat_from_isl_int(ctx, aff->v->el[1], aff->v->el[0]);
	return isl_val_normalize(v);
}
/* Return the coefficient of the variable of type "type" at position "pos"
 * of "aff" in "v".
 *
 * We cannot return anything meaningful in case of a NaN.
 */
int isl_aff_get_coefficient(__isl_keep isl_aff *aff,
	enum isl_dim_type type, int pos, isl_int *v)
{
	if (!aff)
		return -1;

	if (type == isl_dim_out)
		isl_die(aff->v->ctx, isl_error_invalid,
			"output/set dimension does not have a coefficient",
			return -1);
	if (type == isl_dim_in)
		type = isl_dim_set;

	/* Also reject negative positions ("pos" is signed), for
	 * consistency with isl_aff_set_coefficient_si. */
	if (pos < 0 || pos >= isl_local_space_dim(aff->ls, type))
		isl_die(aff->v->ctx, isl_error_invalid,
			"position out of bounds", return -1);

	if (isl_aff_is_nan(aff))
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"cannot get coefficient of NaN", return -1);
	pos += isl_local_space_offset(aff->ls, type);
	isl_int_set(*v, aff->v->el[1 + pos]);

	return 0;
}
/* Return the coefficient of the variable of type "type" at position "pos"
 * of "aff" as a normalized rational value, or NaN if "aff" is NaN.
 */
__isl_give isl_val *isl_aff_get_coefficient_val(__isl_keep isl_aff *aff,
	enum isl_dim_type type, int pos)
{
	isl_ctx *ctx;
	isl_val *v;

	if (!aff)
		return NULL;

	ctx = isl_aff_get_ctx(aff);
	if (type == isl_dim_out)
		isl_die(ctx, isl_error_invalid,
			"output/set dimension does not have a coefficient",
			return NULL);
	if (type == isl_dim_in)
		type = isl_dim_set;

	/* Also reject negative positions ("pos" is signed), for
	 * consistency with isl_aff_set_coefficient_si. */
	if (pos < 0 || pos >= isl_local_space_dim(aff->ls, type))
		isl_die(ctx, isl_error_invalid,
			"position out of bounds", return NULL);

	if (isl_aff_is_nan(aff))
		return isl_val_nan(ctx);
	pos += isl_local_space_offset(aff->ls, type);
	v = isl_val_rat_from_isl_int(ctx, aff->v->el[1 + pos], aff->v->el[0]);
	return isl_val_normalize(v);
}
/* Return the sign of the coefficient of the variable of type "type"
 * at position "pos" of "aff".
 */
int isl_aff_coefficient_sgn(__isl_keep isl_aff *aff, enum isl_dim_type type,
	int pos)
{
	isl_ctx *ctx;

	if (!aff)
		return 0;

	ctx = isl_aff_get_ctx(aff);
	if (type == isl_dim_out)
		isl_die(ctx, isl_error_invalid,
			"output/set dimension does not have a coefficient",
			return 0);
	if (type == isl_dim_in)
		type = isl_dim_set;

	/* Also reject negative positions ("pos" is signed), for
	 * consistency with isl_aff_set_coefficient_si. */
	if (pos < 0 || pos >= isl_local_space_dim(aff->ls, type))
		isl_die(ctx, isl_error_invalid,
			"position out of bounds", return 0);

	pos += isl_local_space_offset(aff->ls, type);
	return isl_int_sgn(aff->v->el[1 + pos]);
}
/* Replace the denominator of "aff" by "v".
 *
 * A NaN is unaffected by this operation.
 */
__isl_give isl_aff *isl_aff_set_denominator(__isl_take isl_aff *aff, isl_int v)
{
	if (!aff)
		return NULL;
	if (isl_aff_is_nan(aff))
		return aff;
	/* Copy-on-write both the isl_aff and its vector before modifying. */
	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;

	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		return isl_aff_free(aff);

	isl_int_set(aff->v->el[0], v);

	return aff;
}

/* Replace the numerator of the constant term of "aff" by "v".
 *
 * A NaN is unaffected by this operation.
 */
__isl_give isl_aff *isl_aff_set_constant(__isl_take isl_aff *aff, isl_int v)
{
	if (!aff)
		return NULL;
	if (isl_aff_is_nan(aff))
		return aff;
	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;

	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		return isl_aff_free(aff);

	isl_int_set(aff->v->el[1], v);

	return aff;
}
/* Replace the constant term of "aff" by "v".
 *
 * A NaN is unaffected by this operation.
 */
__isl_give isl_aff *isl_aff_set_constant_val(__isl_take isl_aff *aff,
	__isl_take isl_val *v)
{
	if (!aff || !v)
		goto error;

	if (isl_aff_is_nan(aff)) {
		isl_val_free(v);
		return aff;
	}

	if (!isl_val_is_rat(v))
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"expecting rational value", goto error);

	/* Nothing to do if the constant term is already equal to v. */
	if (isl_int_eq(aff->v->el[1], v->n) &&
	    isl_int_eq(aff->v->el[0], v->d)) {
		isl_val_free(v);
		return aff;
	}

	aff = isl_aff_cow(aff);
	if (!aff)
		goto error;
	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		goto error;

	if (isl_int_eq(aff->v->el[0], v->d)) {
		/* Denominators already match: just replace the numerator. */
		isl_int_set(aff->v->el[1], v->n);
	} else if (isl_int_is_one(v->d)) {
		/* Integer value: express it on the current denominator. */
		isl_int_mul(aff->v->el[1], aff->v->el[0], v->n);
	} else {
		/* Bring everything to the common denominator and
		 * renormalize afterwards. */
		isl_seq_scale(aff->v->el + 1,
				aff->v->el + 1, v->d, aff->v->size - 1);
		isl_int_mul(aff->v->el[1], aff->v->el[0], v->n);
		isl_int_mul(aff->v->el[0], aff->v->el[0], v->d);
		aff->v = isl_vec_normalize(aff->v);
		if (!aff->v)
			goto error;
	}

	isl_val_free(v);
	return aff;
error:
	isl_aff_free(aff);
	isl_val_free(v);
	return NULL;
}
/* Add "v" to the constant term of "aff".
 *
 * A NaN is unaffected by this operation.
 */
__isl_give isl_aff *isl_aff_add_constant(__isl_take isl_aff *aff, isl_int v)
{
	if (isl_int_is_zero(v))
		return aff;

	if (!aff)
		return NULL;
	if (isl_aff_is_nan(aff))
		return aff;
	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;

	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		return isl_aff_free(aff);

	/* The numerator of the constant term grows by denominator * v. */
	isl_int_addmul(aff->v->el[1], aff->v->el[0], v);

	return aff;
}
/* Add "v" to the constant term of "aff".
 *
 * A NaN is unaffected by this operation, and adding zero is a no-op.
 */
__isl_give isl_aff *isl_aff_add_constant_val(__isl_take isl_aff *aff,
	__isl_take isl_val *v)
{
	if (!aff || !v)
		goto error;

	if (isl_aff_is_nan(aff) || isl_val_is_zero(v)) {
		isl_val_free(v);
		return aff;
	}

	if (!isl_val_is_rat(v))
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"expecting rational value", goto error);

	aff = isl_aff_cow(aff);
	if (!aff)
		goto error;
	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		goto error;

	if (isl_int_is_one(v->d)) {
		/* Integer value: numerator grows by denominator * n. */
		isl_int_addmul(aff->v->el[1], aff->v->el[0], v->n);
	} else if (isl_int_eq(aff->v->el[0], v->d)) {
		/* Denominators match: add numerators and renormalize. */
		isl_int_add(aff->v->el[1], aff->v->el[1], v->n);
		aff->v = isl_vec_normalize(aff->v);
		if (!aff->v)
			goto error;
	} else {
		/* Bring everything to the common denominator first. */
		isl_seq_scale(aff->v->el + 1,
				aff->v->el + 1, v->d, aff->v->size - 1);
		isl_int_addmul(aff->v->el[1], aff->v->el[0], v->n);
		isl_int_mul(aff->v->el[0], aff->v->el[0], v->d);
		aff->v = isl_vec_normalize(aff->v);
		if (!aff->v)
			goto error;
	}

	isl_val_free(v);
	return aff;
error:
	isl_aff_free(aff);
	isl_val_free(v);
	return NULL;
}

/* Add the integer "v" to the constant term of "aff". */
__isl_give isl_aff *isl_aff_add_constant_si(__isl_take isl_aff *aff, int v)
{
	isl_int t;

	isl_int_init(t);
	isl_int_set_si(t, v);
	aff = isl_aff_add_constant(aff, t);
	isl_int_clear(t);

	return aff;
}
/* Add "v" to the numerator of the constant term of "aff",
 * i.e., increase the constant term by v / denominator.
 *
 * A NaN is unaffected by this operation.
 */
__isl_give isl_aff *isl_aff_add_constant_num(__isl_take isl_aff *aff, isl_int v)
{
	if (isl_int_is_zero(v))
		return aff;

	if (!aff)
		return NULL;
	if (isl_aff_is_nan(aff))
		return aff;
	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;

	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		return isl_aff_free(aff);

	isl_int_add(aff->v->el[1], aff->v->el[1], v);

	return aff;
}

/* Add the integer "v" to the numerator of the constant term of "aff".
 *
 * A NaN is unaffected by this operation.
 */
__isl_give isl_aff *isl_aff_add_constant_num_si(__isl_take isl_aff *aff, int v)
{
	isl_int t;

	if (v == 0)
		return aff;

	isl_int_init(t);
	isl_int_set_si(t, v);
	aff = isl_aff_add_constant_num(aff, t);
	isl_int_clear(t);

	return aff;
}

/* Replace the numerator of the constant term of "aff" by "v".
 *
 * A NaN is unaffected by this operation.
 */
__isl_give isl_aff *isl_aff_set_constant_si(__isl_take isl_aff *aff, int v)
{
	if (!aff)
		return NULL;
	if (isl_aff_is_nan(aff))
		return aff;
	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;

	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		return isl_aff_free(aff);

	isl_int_set_si(aff->v->el[1], v);

	return aff;
}
/* Replace the numerator of the coefficient of the variable of type "type"
 * at position "pos" of "aff" by "v".
 *
 * A NaN is unaffected by this operation.
 */
__isl_give isl_aff *isl_aff_set_coefficient(__isl_take isl_aff *aff,
	enum isl_dim_type type, int pos, isl_int v)
{
	if (!aff)
		return NULL;

	if (type == isl_dim_out)
		isl_die(aff->v->ctx, isl_error_invalid,
			"output/set dimension does not have a coefficient",
			return isl_aff_free(aff));
	if (type == isl_dim_in)
		type = isl_dim_set;

	/* Also reject negative positions ("pos" is signed), for
	 * consistency with isl_aff_set_coefficient_si. */
	if (pos < 0 || pos >= isl_local_space_dim(aff->ls, type))
		isl_die(aff->v->ctx, isl_error_invalid,
			"position out of bounds", return isl_aff_free(aff));

	if (isl_aff_is_nan(aff))
		return aff;
	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;

	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		return isl_aff_free(aff);

	pos += isl_local_space_offset(aff->ls, type);
	isl_int_set(aff->v->el[1 + pos], v);

	return aff;
}
/* Replace the numerator of the coefficient of the variable of type "type"
 * at position "pos" of "aff" by "v".
 *
 * A NaN is unaffected by this operation.
 */
__isl_give isl_aff *isl_aff_set_coefficient_si(__isl_take isl_aff *aff,
	enum isl_dim_type type, int pos, int v)
{
	if (!aff)
		return NULL;

	if (type == isl_dim_out)
		isl_die(aff->v->ctx, isl_error_invalid,
			"output/set dimension does not have a coefficient",
			return isl_aff_free(aff));
	if (type == isl_dim_in)
		type = isl_dim_set;

	if (pos < 0 || pos >= isl_local_space_dim(aff->ls, type))
		isl_die(aff->v->ctx, isl_error_invalid,
			"position out of bounds", return isl_aff_free(aff));

	if (isl_aff_is_nan(aff))
		return aff;
	/* Avoid the copy-on-write if the coefficient already has
	 * the requested value. */
	pos += isl_local_space_offset(aff->ls, type);
	if (isl_int_cmp_si(aff->v->el[1 + pos], v) == 0)
		return aff;

	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;

	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		return isl_aff_free(aff);

	isl_int_set_si(aff->v->el[1 + pos], v);

	return aff;
}
/* Replace the coefficient of the variable of type "type" at position "pos"
 * of "aff" by "v".
 *
 * A NaN is unaffected by this operation.
 */
__isl_give isl_aff *isl_aff_set_coefficient_val(__isl_take isl_aff *aff,
	enum isl_dim_type type, int pos, __isl_take isl_val *v)
{
	if (!aff || !v)
		goto error;

	if (type == isl_dim_out)
		isl_die(aff->v->ctx, isl_error_invalid,
			"output/set dimension does not have a coefficient",
			goto error);
	if (type == isl_dim_in)
		type = isl_dim_set;

	/* Also reject negative positions ("pos" is signed), for
	 * consistency with isl_aff_set_coefficient_si. */
	if (pos < 0 || pos >= isl_local_space_dim(aff->ls, type))
		isl_die(aff->v->ctx, isl_error_invalid,
			"position out of bounds", goto error);

	if (isl_aff_is_nan(aff)) {
		isl_val_free(v);
		return aff;
	}
	if (!isl_val_is_rat(v))
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"expecting rational value", goto error);

	/* Nothing to do if the coefficient already equals v. */
	pos += isl_local_space_offset(aff->ls, type);
	if (isl_int_eq(aff->v->el[1 + pos], v->n) &&
	    isl_int_eq(aff->v->el[0], v->d)) {
		isl_val_free(v);
		return aff;
	}

	aff = isl_aff_cow(aff);
	if (!aff)
		goto error;
	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		goto error;

	if (isl_int_eq(aff->v->el[0], v->d)) {
		/* Denominators already match. */
		isl_int_set(aff->v->el[1 + pos], v->n);
	} else if (isl_int_is_one(v->d)) {
		/* Integer value: express it on the current denominator. */
		isl_int_mul(aff->v->el[1 + pos], aff->v->el[0], v->n);
	} else {
		/* Bring everything to the common denominator and
		 * renormalize afterwards. */
		isl_seq_scale(aff->v->el + 1,
				aff->v->el + 1, v->d, aff->v->size - 1);
		isl_int_mul(aff->v->el[1 + pos], aff->v->el[0], v->n);
		isl_int_mul(aff->v->el[0], aff->v->el[0], v->d);
		aff->v = isl_vec_normalize(aff->v);
		if (!aff->v)
			goto error;
	}

	isl_val_free(v);
	return aff;
error:
	isl_aff_free(aff);
	isl_val_free(v);
	return NULL;
}
/* Add "v" to the coefficient of the variable of type "type"
 * at position "pos" of "aff".
 *
 * A NaN is unaffected by this operation.
 */
__isl_give isl_aff *isl_aff_add_coefficient(__isl_take isl_aff *aff,
	enum isl_dim_type type, int pos, isl_int v)
{
	if (!aff)
		return NULL;

	if (type == isl_dim_out)
		isl_die(aff->v->ctx, isl_error_invalid,
			"output/set dimension does not have a coefficient",
			return isl_aff_free(aff));
	if (type == isl_dim_in)
		type = isl_dim_set;

	/* Also reject negative positions ("pos" is signed), for
	 * consistency with isl_aff_set_coefficient_si. */
	if (pos < 0 || pos >= isl_local_space_dim(aff->ls, type))
		isl_die(aff->v->ctx, isl_error_invalid,
			"position out of bounds", return isl_aff_free(aff));

	if (isl_aff_is_nan(aff))
		return aff;
	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;

	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		return isl_aff_free(aff);

	/* The numerator of the coefficient grows by denominator * v. */
	pos += isl_local_space_offset(aff->ls, type);
	isl_int_addmul(aff->v->el[1 + pos], aff->v->el[0], v);

	return aff;
}
/* Add "v" to the coefficient of the variable of type "type"
 * at position "pos" of "aff".
 *
 * A NaN is unaffected by this operation, and adding zero is a no-op.
 */
__isl_give isl_aff *isl_aff_add_coefficient_val(__isl_take isl_aff *aff,
	enum isl_dim_type type, int pos, __isl_take isl_val *v)
{
	if (!aff || !v)
		goto error;

	if (isl_val_is_zero(v)) {
		isl_val_free(v);
		return aff;
	}

	if (type == isl_dim_out)
		isl_die(aff->v->ctx, isl_error_invalid,
			"output/set dimension does not have a coefficient",
			goto error);
	if (type == isl_dim_in)
		type = isl_dim_set;

	/* Also reject negative positions ("pos" is signed), for
	 * consistency with isl_aff_set_coefficient_si. */
	if (pos < 0 || pos >= isl_local_space_dim(aff->ls, type))
		isl_die(aff->v->ctx, isl_error_invalid,
			"position out of bounds", goto error);

	if (isl_aff_is_nan(aff)) {
		isl_val_free(v);
		return aff;
	}
	if (!isl_val_is_rat(v))
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"expecting rational value", goto error);

	aff = isl_aff_cow(aff);
	if (!aff)
		goto error;

	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		goto error;

	pos += isl_local_space_offset(aff->ls, type);
	if (isl_int_is_one(v->d)) {
		/* Integer value: numerator grows by denominator * n. */
		isl_int_addmul(aff->v->el[1 + pos], aff->v->el[0], v->n);
	} else if (isl_int_eq(aff->v->el[0], v->d)) {
		/* Denominators match: add numerators and renormalize. */
		isl_int_add(aff->v->el[1 + pos], aff->v->el[1 + pos], v->n);
		aff->v = isl_vec_normalize(aff->v);
		if (!aff->v)
			goto error;
	} else {
		/* Bring everything to the common denominator first. */
		isl_seq_scale(aff->v->el + 1,
				aff->v->el + 1, v->d, aff->v->size - 1);
		isl_int_addmul(aff->v->el[1 + pos], aff->v->el[0], v->n);
		isl_int_mul(aff->v->el[0], aff->v->el[0], v->d);
		aff->v = isl_vec_normalize(aff->v);
		if (!aff->v)
			goto error;
	}

	isl_val_free(v);
	return aff;
error:
	isl_aff_free(aff);
	isl_val_free(v);
	return NULL;
}
/* Add the integer "v" to the coefficient of the variable of
 * type "type" at position "pos" of "aff".
 */
__isl_give isl_aff *isl_aff_add_coefficient_si(__isl_take isl_aff *aff,
	enum isl_dim_type type, int pos, int v)
{
	isl_int t;

	isl_int_init(t);
	isl_int_set_si(t, v);
	aff = isl_aff_add_coefficient(aff, type, pos, t);
	isl_int_clear(t);

	return aff;
}

/* Return the expression of the div at position "pos" in "aff". */
__isl_give isl_aff *isl_aff_get_div(__isl_keep isl_aff *aff, int pos)
{
	return aff ? isl_local_space_get_div(aff->ls, pos) : NULL;
}
/* Return the negation of "aff".
 *
 * As a special case, -NaN = NaN.
 */
__isl_give isl_aff *isl_aff_neg(__isl_take isl_aff *aff)
{
	if (!aff)
		return NULL;
	if (isl_aff_is_nan(aff))
		return aff;
	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;
	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		return isl_aff_free(aff);

	/* Negate all numerators; the denominator (element 0) is kept. */
	isl_seq_neg(aff->v->el + 1, aff->v->el + 1, aff->v->size - 1);

	return aff;
}
/* Remove divs from the local space that do not appear in the affine
 * expression.
 * We currently only remove divs at the end.
 * Some intermediate divs may also not appear directly in the affine
 * expression, but we would also need to check that no other divs are
 * defined in terms of them.
 */
__isl_give isl_aff *isl_aff_remove_unused_divs(__isl_take isl_aff *aff)
{
	int pos;
	int off;
	int n;

	if (!aff)
		return NULL;

	n = isl_local_space_dim(aff->ls, isl_dim_div);
	off = isl_local_space_offset(aff->ls, isl_dim_div);

	/* "pos" is one past the last div with a non-zero coefficient;
	 * everything from "pos" on can be dropped. */
	pos = isl_seq_last_non_zero(aff->v->el + 1 + off, n) + 1;
	if (pos == n)
		return aff;

	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;

	/* Drop the trailing divs from both the local space and
	 * the coefficient vector, keeping the two in sync. */
	aff->ls = isl_local_space_drop_dims(aff->ls, isl_dim_div, pos, n - pos);
	aff->v = isl_vec_drop_els(aff->v, 1 + off + pos, n - pos);
	if (!aff->ls || !aff->v)
		return isl_aff_free(aff);

	return aff;
}
/* Given two affine expressions "p" of length p_len (including the
 * denominator and the constant term) and "subs" of length subs_len,
 * plug in "subs" for the variable at position "pos".
 * The variables of "subs" and "p" are assumed to match up to subs_len,
 * but "p" may have additional variables.
 * "v" is an initialized isl_int that can be used internally.
 *
 * In particular, if "p" represents the expression
 *
 *	(a i + g)/m
 *
 * with i the variable at position "pos" and "subs" represents the expression
 *
 *	f/d
 *
 * then the result represents the expression
 *
 *	(a f + d g)/(m d)
 *
 */
void isl_seq_substitute(isl_int *p, int pos, isl_int *subs,
	int p_len, int subs_len, isl_int v)
{
	/* Save the coefficient a of the substituted variable and
	 * zero out its entry. */
	isl_int_set(v, p[1 + pos]);
	isl_int_set_si(p[1 + pos], 0);
	/* p <- d * g + a * f over the shared variables ... */
	isl_seq_combine(p + 1, subs[0], p + 1, v, subs + 1, subs_len - 1);
	/* ... scale the extra variables of p by d as well ... */
	isl_seq_scale(p + subs_len, p + subs_len, subs[0], p_len - subs_len);
	/* ... and multiply the denominator: m <- m * d. */
	isl_int_mul(p[0], p[0], subs[0]);
}
/* Look for any divs in the aff->ls with a denominator equal to one
 * and plug them into the affine expression and any subsequent divs
 * that may reference the div.
 */
static __isl_give isl_aff *plug_in_integral_divs(__isl_take isl_aff *aff)
{
	int i, n;
	int len;
	isl_int v;
	isl_vec *vec;
	isl_local_space *ls;
	unsigned pos;

	if (!aff)
		return NULL;

	n = isl_local_space_dim(aff->ls, isl_dim_div);
	len = aff->v->size;
	for (i = 0; i < n; ++i) {
		/* Only divs with denominator 1 are integral. */
		if (!isl_int_is_one(aff->ls->div->row[i][0]))
			continue;
		/* Substitute div i into the later div definitions ... */
		ls = isl_local_space_copy(aff->ls);
		ls = isl_local_space_substitute_seq(ls, isl_dim_div, i,
				aff->ls->div->row[i], len, i + 1, n - (i + 1));
		vec = isl_vec_copy(aff->v);
		vec = isl_vec_cow(vec);
		if (!ls || !vec)
			goto error;

		/* ... and into the affine expression itself. */
		isl_int_init(v);

		pos = isl_local_space_offset(aff->ls, isl_dim_div) + i;
		isl_seq_substitute(vec->el, pos, aff->ls->div->row[i],
					len, len, v);

		isl_int_clear(v);

		/* Install the updated vector and local space together. */
		isl_vec_free(aff->v);
		aff->v = vec;
		isl_local_space_free(aff->ls);
		aff->ls = ls;
	}

	return aff;
error:
	isl_vec_free(vec);
	isl_local_space_free(ls);
	return isl_aff_free(aff);
}
/* Look for any divs j that appear with a unit coefficient inside
 * the definitions of other divs i and plug them into the definitions
 * of the divs i.
 *
 * In particular, an expression of the form
 *
 *	floor((f(..) + floor(g(..)/n))/m)
 *
 * is simplified to
 *
 *	floor((n * f(..) + g(..))/(n * m))
 *
 * This simplification is correct because we can move the expression
 * f(..) into the inner floor in the original expression to obtain
 *
 *	floor(floor((n * f(..) + g(..))/n)/m)
 *
 * from which we can derive the simplified expression.
 */
static __isl_give isl_aff *plug_in_unit_divs(__isl_take isl_aff *aff)
{
	int i, j, n;
	int off;

	if (!aff)
		return NULL;

	n = isl_local_space_dim(aff->ls, isl_dim_div);
	off = isl_local_space_offset(aff->ls, isl_dim_div);
	/* Only earlier divs (j < i) can appear in the definition of
	 * div i, so a single forward pass suffices. */
	for (i = 1; i < n; ++i) {
		for (j = 0; j < i; ++j) {
			if (!isl_int_is_one(aff->ls->div->row[i][1 + off + j]))
				continue;
			aff->ls = isl_local_space_substitute_seq(aff->ls,
						isl_dim_div, j,
						aff->ls->div->row[j],
						aff->v->size, i, 1);
			if (!aff->ls)
				return isl_aff_free(aff);
		}
	}

	return aff;
}
/* Swap divs "a" and "b" in "aff", which is assumed to be non-NULL.
 *
 * Even though this function is only called on isl_affs with a single
 * reference, we are careful to only change aff->v and aff->ls together.
 */
static __isl_give isl_aff *swap_div(__isl_take isl_aff *aff, int a, int b)
{
	unsigned off = isl_local_space_offset(aff->ls, isl_dim_div);
	isl_local_space *ls;
	isl_vec *v;

	ls = isl_local_space_copy(aff->ls);
	ls = isl_local_space_swap_div(ls, a, b);
	v = isl_vec_copy(aff->v);
	v = isl_vec_cow(v);
	if (!ls || !v)
		goto error;

	/* Swap the corresponding coefficients as well. */
	isl_int_swap(v->el[1 + off + a], v->el[1 + off + b]);
	isl_vec_free(aff->v);
	aff->v = v;
	isl_local_space_free(aff->ls);
	aff->ls = ls;

	return aff;
error:
	isl_vec_free(v);
	isl_local_space_free(ls);
	return isl_aff_free(aff);
}

/* Merge divs "a" and "b" in "aff", which is assumed to be non-NULL.
 *
 * We currently do not actually remove div "b", but simply add its
 * coefficient to that of "a" and then zero it out.
 */
static __isl_give isl_aff *merge_divs(__isl_take isl_aff *aff, int a, int b)
{
	unsigned off = isl_local_space_offset(aff->ls, isl_dim_div);

	/* Nothing to do if div "b" has a zero coefficient already. */
	if (isl_int_is_zero(aff->v->el[1 + off + b]))
		return aff;

	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		return isl_aff_free(aff);

	isl_int_add(aff->v->el[1 + off + a],
		    aff->v->el[1 + off + a], aff->v->el[1 + off + b]);
	isl_int_set_si(aff->v->el[1 + off + b], 0);

	return aff;
}
/* Sort the divs in the local space of "aff" according to
 * the comparison function "cmp_row" in isl_local_space.c,
 * combining the coefficients of identical divs.
 *
 * Reordering divs does not change the semantics of "aff",
 * so there is no need to call isl_aff_cow.
 * Moreover, this function is currently only called on isl_affs
 * with a single reference.
 *
 * This is an insertion sort: div "i" bubbles down past its
 * predecessors until it is in place; equal neighbours are merged
 * (the merged-away div keeps a zero coefficient and is removed later).
 */
static __isl_give isl_aff *sort_divs(__isl_take isl_aff *aff)
{
	int i, j, n;
	if (!aff)
		return NULL;
	n = isl_aff_dim(aff, isl_dim_div);
	for (i = 1; i < n; ++i) {
		for (j = i - 1; j >= 0; --j) {
			/* Compare adjacent div definitions. */
			int cmp = isl_mat_cmp_div(aff->ls->div, j, j + 1);
			if (cmp < 0)
				break;
			if (cmp == 0)
				aff = merge_divs(aff, j, j + 1);
			else
				aff = swap_div(aff, j, j + 1);
			if (!aff)
				return NULL;
		}
	}
	return aff;
}
/* Bring "aff" into a canonical form: reduce the coefficient vector,
 * substitute away divs with known integral or unit values, sort the
 * remaining divs and drop the ones that are no longer referenced.
 *
 * Only call this on "new" isl_affs, i.e., objects with a single
 * reference, since the normalization rewrites them in place.
 */
__isl_give isl_aff *isl_aff_normalize(__isl_take isl_aff *aff)
{
	if (!aff)
		return NULL;

	aff->v = isl_vec_normalize(aff->v);
	if (!aff->v)
		return isl_aff_free(aff);

	aff = plug_in_integral_divs(aff);
	aff = plug_in_unit_divs(aff);
	aff = sort_divs(aff);
	return isl_aff_remove_unused_divs(aff);
}
/* Given f, return floor(f).
 * If f is an integer expression, then just return f.
 * If f is a constant, then return the constant floor(f).
 * Otherwise, if f = g/m, write g = q m + r,
 * create a new div d = [r/m] and return the expression q + d.
 * The coefficients in r are taken to lie between -m/2 and m/2.
 *
 * As a special case, floor(NaN) = NaN.
 */
__isl_give isl_aff *isl_aff_floor(__isl_take isl_aff *aff)
{
	int i;
	int size;
	isl_ctx *ctx;
	isl_vec *div;
	if (!aff)
		return NULL;
	if (isl_aff_is_nan(aff))
		return aff;
	/* Denominator 1: f is already an integer expression. */
	if (isl_int_is_one(aff->v->el[0]))
		return aff;
	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;
	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		return isl_aff_free(aff);
	/* Constant case: take the floor of the single constant term. */
	if (isl_aff_is_cst(aff)) {
		isl_int_fdiv_q(aff->v->el[1], aff->v->el[1], aff->v->el[0]);
		isl_int_set_si(aff->v->el[0], 1);
		return aff;
	}
	/* "div" will hold r/m: copy taken before el[0] is overwritten,
	 * so div->el[0] is still the denominator m. */
	div = isl_vec_copy(aff->v);
	div = isl_vec_cow(div);
	if (!div)
		return isl_aff_free(aff);
	ctx = isl_aff_get_ctx(aff);
	/* Reuse aff->v->el[0] as the threshold floor(m/2) for centering
	 * the remainders around zero. */
	isl_int_fdiv_q(aff->v->el[0], aff->v->el[0], ctx->two);
	for (i = 1; i < aff->v->size; ++i) {
		/* Split coefficient g_i into quotient q_i and
		 * remainder r_i with respect to m. */
		isl_int_fdiv_r(div->el[i], div->el[i], div->el[0]);
		isl_int_fdiv_q(aff->v->el[i], aff->v->el[i], div->el[0]);
		/* If r_i > m/2, shift to a negative remainder and
		 * compensate in the quotient. */
		if (isl_int_gt(div->el[i], aff->v->el[0])) {
			isl_int_sub(div->el[i], div->el[i], div->el[0]);
			isl_int_add_ui(aff->v->el[i], aff->v->el[i], 1);
		}
	}
	aff->ls = isl_local_space_add_div(aff->ls, div);
	if (!aff->ls)
		return isl_aff_free(aff);
	/* Append a coefficient of 1 for the new div d and reset the
	 * denominator to 1: the result is q + d. */
	size = aff->v->size;
	aff->v = isl_vec_extend(aff->v, size + 1);
	if (!aff->v)
		return isl_aff_free(aff);
	isl_int_set_si(aff->v->el[0], 1);
	isl_int_set_si(aff->v->el[size], 1);
	aff = isl_aff_normalize(aff);
	return aff;
}
/* Compute the residue of "aff" modulo "m":
 *
 *	aff mod m = aff - m * floor(aff/m)
 */
__isl_give isl_aff *isl_aff_mod(__isl_take isl_aff *aff, isl_int m)
{
	isl_aff *multiple;

	/* multiple = m * floor(aff/m) */
	multiple = isl_aff_copy(aff);
	multiple = isl_aff_scale_down(multiple, m);
	multiple = isl_aff_floor(multiple);
	multiple = isl_aff_scale(multiple, m);

	return isl_aff_sub(aff, multiple);
}
/* Compute the residue of "aff" modulo the integer value "m":
 *
 *	aff mod m = aff - m * floor(aff/m)
 */
__isl_give isl_aff *isl_aff_mod_val(__isl_take isl_aff *aff,
	__isl_take isl_val *m)
{
	isl_aff *multiple;

	if (!aff || !m)
		goto error;
	if (!isl_val_is_int(m))
		isl_die(isl_val_get_ctx(m), isl_error_invalid,
			"expecting integer modulo", goto error);

	/* multiple = m * floor(aff/m); "m" is consumed by the
	 * final scaling. */
	multiple = isl_aff_copy(aff);
	multiple = isl_aff_scale_down_val(multiple, isl_val_copy(m));
	multiple = isl_aff_floor(multiple);
	multiple = isl_aff_scale_val(multiple, m);

	return isl_aff_sub(aff, multiple);
error:
	isl_aff_free(aff);
	isl_val_free(m);
	return NULL;
}
/* Compute the residue of "pwaff" modulo "m":
 *
 *	pwaff mod m = pwaff - m * floor(pwaff/m)
 */
__isl_give isl_pw_aff *isl_pw_aff_mod(__isl_take isl_pw_aff *pwaff, isl_int m)
{
	isl_pw_aff *multiple;

	/* multiple = m * floor(pwaff/m) */
	multiple = isl_pw_aff_copy(pwaff);
	multiple = isl_pw_aff_scale_down(multiple, m);
	multiple = isl_pw_aff_floor(multiple);
	multiple = isl_pw_aff_scale(multiple, m);

	return isl_pw_aff_sub(pwaff, multiple);
}
/* Compute
 *
 *	pa mod m = pa - m * floor(pa/m)
 *
 * with m an integer value.
 */
__isl_give isl_pw_aff *isl_pw_aff_mod_val(__isl_take isl_pw_aff *pa,
	__isl_take isl_val *m)
{
	if (!pa || !m)
		goto error;
	if (!isl_val_is_int(m))
		isl_die(isl_pw_aff_get_ctx(pa), isl_error_invalid,
			"expecting integer modulo", goto error);
	/* "m" is known to be an integer here, so its numerator m->n
	 * is the plain isl_int modulus expected by isl_pw_aff_mod. */
	pa = isl_pw_aff_mod(pa, m->n);
	isl_val_free(m);
	return pa;
error:
	isl_pw_aff_free(pa);
	isl_val_free(m);
	return NULL;
}
/* Given f, return ceil(f).
 * If f is an integer expression, then just return f.
 * Otherwise, let f be the expression
 *
 *	e/m
 *
 * then return
 *
 *	floor((e + m - 1)/m)
 *
 * As a special case, ceil(NaN) = NaN.
 */
__isl_give isl_aff *isl_aff_ceil(__isl_take isl_aff *aff)
{
	if (!aff)
		return NULL;
	if (isl_aff_is_nan(aff))
		return aff;
	/* Denominator 1: f is already an integer expression. */
	if (isl_int_is_one(aff->v->el[0]))
		return aff;
	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;
	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		return isl_aff_free(aff);
	/* Add m - 1 to the constant term of the numerator
	 * (el[0] is the denominator m, el[1] the constant). */
	isl_int_add(aff->v->el[1], aff->v->el[1], aff->v->el[0]);
	isl_int_sub_ui(aff->v->el[1], aff->v->el[1], 1);
	aff = isl_aff_floor(aff);
	return aff;
}
/* Apply the expansion computed by isl_merge_divs.
 * The expansion itself is given by "exp" while the resulting
 * list of divs is given by "div".
 *
 * exp[j] gives the position in the merged div list of the j-th
 * original div of "aff"; coefficients are moved to their new
 * positions from the back so that the relocation can be done
 * in place, and the slots of newly introduced divs are zeroed.
 */
__isl_give isl_aff *isl_aff_expand_divs( __isl_take isl_aff *aff,
	__isl_take isl_mat *div, int *exp)
{
	int i, j;
	int old_n_div;
	int new_n_div;
	int offset;
	aff = isl_aff_cow(aff);
	if (!aff || !div)
		goto error;
	old_n_div = isl_local_space_dim(aff->ls, isl_dim_div);
	new_n_div = isl_mat_rows(div);
	/* A valid expansion can only add divs, never drop them. */
	if (new_n_div < old_n_div)
		isl_die(isl_mat_get_ctx(div), isl_error_invalid,
			"not an expansion", goto error);
	aff->v = isl_vec_extend(aff->v, aff->v->size + new_n_div - old_n_div);
	if (!aff->v)
		goto error;
	offset = 1 + isl_local_space_offset(aff->ls, isl_dim_div);
	/* Walk the merged list backwards, moving each surviving
	 * coefficient from its old slot j to its new slot exp[j]
	 * and zeroing the slots of brand-new divs. */
	j = old_n_div - 1;
	for (i = new_n_div - 1; i >= 0; --i) {
		if (j >= 0 && exp[j] == i) {
			if (i != j)
				isl_int_swap(aff->v->el[offset + i],
					     aff->v->el[offset + j]);
			j--;
		} else
			isl_int_set_si(aff->v->el[offset + i], 0);
	}
	aff->ls = isl_local_space_replace_divs(aff->ls, isl_mat_copy(div));
	if (!aff->ls)
		goto error;
	isl_mat_free(div);
	return aff;
error:
	isl_aff_free(aff);
	isl_mat_free(div);
	return NULL;
}
/* Add two affine expressions that live in the same local space.
 *
 * The two rational expressions are brought to their common
 * denominator lcm(d1, d2) = d1 * d2 / gcd(d1, d2) before the
 * numerators are added; "aff2" is consumed.
 */
static __isl_give isl_aff *add_expanded(__isl_take isl_aff *aff1,
	__isl_take isl_aff *aff2)
{
	isl_int gcd, f;
	aff1 = isl_aff_cow(aff1);
	if (!aff1 || !aff2)
		goto error;
	aff1->v = isl_vec_cow(aff1->v);
	if (!aff1->v)
		goto error;
	isl_int_init(gcd);
	isl_int_init(f);
	isl_int_gcd(gcd, aff1->v->el[0], aff2->v->el[0]);
	/* Scale aff1's numerator by d2/gcd. */
	isl_int_divexact(f, aff2->v->el[0], gcd);
	isl_seq_scale(aff1->v->el + 1, aff1->v->el + 1, f, aff1->v->size - 1);
	/* Add aff2's numerator scaled by d1/gcd. */
	isl_int_divexact(f, aff1->v->el[0], gcd);
	isl_seq_addmul(aff1->v->el + 1, f, aff2->v->el + 1, aff1->v->size - 1);
	/* The common denominator is d1 * (d2/gcd). */
	isl_int_divexact(f, aff2->v->el[0], gcd);
	isl_int_mul(aff1->v->el[0], aff1->v->el[0], f);
	isl_int_clear(f);
	isl_int_clear(gcd);
	isl_aff_free(aff2);
	return aff1;
error:
	isl_aff_free(aff1);
	isl_aff_free(aff2);
	return NULL;
}
/* Return the sum of "aff1" and "aff2".
 *
 * If either of the two is NaN, then the result is NaN.
 */
__isl_give isl_aff *isl_aff_add(__isl_take isl_aff *aff1,
	__isl_take isl_aff *aff2)
{
	isl_ctx *ctx;
	int *exp1 = NULL;
	int *exp2 = NULL;
	isl_mat *div;
	int n_div1, n_div2;
	if (!aff1 || !aff2)
		goto error;
	ctx = isl_aff_get_ctx(aff1);
	if (!isl_space_is_equal(aff1->ls->dim, aff2->ls->dim))
		isl_die(ctx, isl_error_invalid,
			"spaces don't match", goto error);
	/* NaN absorbs whatever is added to it. */
	if (isl_aff_is_nan(aff1)) {
		isl_aff_free(aff2);
		return aff1;
	}
	if (isl_aff_is_nan(aff2)) {
		isl_aff_free(aff1);
		return aff2;
	}
	n_div1 = isl_aff_dim(aff1, isl_dim_div);
	n_div2 = isl_aff_dim(aff2, isl_dim_div);
	/* Fast path: no integer divisions to align. */
	if (n_div1 == 0 && n_div2 == 0)
		return add_expanded(aff1, aff2);
	/* Otherwise merge the two div lists and expand both
	 * arguments to the merged list before adding. */
	exp1 = isl_alloc_array(ctx, int, n_div1);
	exp2 = isl_alloc_array(ctx, int, n_div2);
	if ((n_div1 && !exp1) || (n_div2 && !exp2))
		goto error;
	div = isl_merge_divs(aff1->ls->div, aff2->ls->div, exp1, exp2);
	aff1 = isl_aff_expand_divs(aff1, isl_mat_copy(div), exp1);
	aff2 = isl_aff_expand_divs(aff2, div, exp2);
	free(exp1);
	free(exp2);
	return add_expanded(aff1, aff2);
error:
	free(exp1);
	free(exp2);
	isl_aff_free(aff1);
	isl_aff_free(aff2);
	return NULL;
}
/* Return the difference "aff1" - "aff2", computed as aff1 + (-aff2). */
__isl_give isl_aff *isl_aff_sub(__isl_take isl_aff *aff1,
	__isl_take isl_aff *aff2)
{
	isl_aff *neg2;

	neg2 = isl_aff_neg(aff2);
	return isl_aff_add(aff1, neg2);
}
/* Return the result of scaling "aff" by a factor of "f".
 *
 * As a special case, f * NaN = NaN.
 */
__isl_give isl_aff *isl_aff_scale(__isl_take isl_aff *aff, isl_int f)
{
	isl_int gcd;
	if (!aff)
		return NULL;
	if (isl_aff_is_nan(aff))
		return aff;
	if (isl_int_is_one(f))
		return aff;
	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;
	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		return isl_aff_free(aff);
	/* If f exactly divides the denominator, just shrink it. */
	if (isl_int_is_pos(f) && isl_int_is_divisible_by(aff->v->el[0], f)) {
		isl_int_divexact(aff->v->el[0], aff->v->el[0], f);
		return aff;
	}
	/* Otherwise cancel gcd(denominator, f) against the denominator
	 * and scale the numerator by the remaining factor. */
	isl_int_init(gcd);
	isl_int_gcd(gcd, aff->v->el[0], f);
	isl_int_divexact(aff->v->el[0], aff->v->el[0], gcd);
	isl_int_divexact(gcd, f, gcd);
	isl_seq_scale(aff->v->el + 1, aff->v->el + 1, gcd, aff->v->size - 1);
	isl_int_clear(gcd);
	return aff;
}
/* Multiply "aff" by the rational value "v".
 */
__isl_give isl_aff *isl_aff_scale_val(__isl_take isl_aff *aff,
	__isl_take isl_val *v)
{
	if (!aff || !v)
		goto error;
	if (!isl_val_is_rat(v))
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"expecting rational factor", goto error);
	/* Multiplying by one is a no-op. */
	if (isl_val_is_one(v)) {
		isl_val_free(v);
		return aff;
	}

	/* Scale by the numerator and divide by the denominator. */
	aff = isl_aff_scale(aff, v->n);
	aff = isl_aff_scale_down(aff, v->d);

	isl_val_free(v);
	return aff;
error:
	isl_aff_free(aff);
	isl_val_free(v);
	return NULL;
}
/* Return the result of scaling "aff" down by a factor of "f".
 *
 * As a special case, NaN/f = NaN.
 */
__isl_give isl_aff *isl_aff_scale_down(__isl_take isl_aff *aff, isl_int f)
{
	isl_int gcd;
	if (!aff)
		return NULL;
	if (isl_aff_is_nan(aff))
		return aff;
	if (isl_int_is_one(f))
		return aff;
	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;
	if (isl_int_is_zero(f))
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"cannot scale down by zero", return isl_aff_free(aff));
	aff->v = isl_vec_cow(aff->v);
	if (!aff->v)
		return isl_aff_free(aff);
	/* Cancel gcd(numerator coefficients, f) against the numerator
	 * and multiply the denominator by the remaining factor. */
	isl_int_init(gcd);
	isl_seq_gcd(aff->v->el + 1, aff->v->size - 1, &gcd);
	isl_int_gcd(gcd, gcd, f);
	isl_seq_scale_down(aff->v->el + 1, aff->v->el + 1, gcd, aff->v->size - 1);
	isl_int_divexact(gcd, f, gcd);
	isl_int_mul(aff->v->el[0], aff->v->el[0], gcd);
	isl_int_clear(gcd);
	return aff;
}
/* Divide "aff" by the rational value "v", which is required
 * to be positive.
 */
__isl_give isl_aff *isl_aff_scale_down_val(__isl_take isl_aff *aff,
	__isl_take isl_val *v)
{
	if (!aff || !v)
		goto error;
	if (isl_val_is_one(v)) {
		isl_val_free(v);
		return aff;
	}
	if (!isl_val_is_rat(v))
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"expecting rational factor", goto error);
	if (!isl_val_is_pos(v))
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"factor needs to be positive", goto error);

	/* Dividing by n/d is multiplying by d/n. */
	aff = isl_aff_scale(aff, v->d);
	aff = isl_aff_scale_down(aff, v->n);

	isl_val_free(v);
	return aff;
error:
	isl_aff_free(aff);
	isl_val_free(v);
	return NULL;
}
/* Divide "aff" by the unsigned integer "f". */
__isl_give isl_aff *isl_aff_scale_down_ui(__isl_take isl_aff *aff, unsigned f)
{
	isl_int factor;

	if (f == 1)
		return aff;

	/* Wrap "f" in a temporary isl_int for isl_aff_scale_down. */
	isl_int_init(factor);
	isl_int_set_ui(factor, f);
	aff = isl_aff_scale_down(aff, factor);
	isl_int_clear(factor);

	return aff;
}
/* Set the name of the dimension of "aff" at position "pos" of type
 * "type" to "s".  The (one-dimensional) output cannot be renamed;
 * input dimensions are stored as set dimensions in the local space.
 */
__isl_give isl_aff *isl_aff_set_dim_name(__isl_take isl_aff *aff,
	enum isl_dim_type type, unsigned pos, const char *s)
{
	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;

	if (type == isl_dim_out)
		isl_die(aff->v->ctx, isl_error_invalid,
			"cannot set name of output/set dimension",
			return isl_aff_free(aff));
	if (type == isl_dim_in)
		type = isl_dim_set;

	aff->ls = isl_local_space_set_dim_name(aff->ls, type, pos, s);
	return aff->ls ? aff : isl_aff_free(aff);
}
/* Set the identifier of the dimension of "aff" at position "pos"
 * of type "type" to "id".  Consumes "id" even on error.
 * The output dimension cannot be renamed; input dimensions are
 * stored as set dimensions in the underlying local space.
 */
__isl_give isl_aff *isl_aff_set_dim_id(__isl_take isl_aff *aff,
	enum isl_dim_type type, unsigned pos, __isl_take isl_id *id)
{
	aff = isl_aff_cow(aff);
	if (!aff)
		goto error;
	if (type == isl_dim_out)
		isl_die(aff->v->ctx, isl_error_invalid,
			"cannot set name of output/set dimension",
			goto error);
	if (type == isl_dim_in)
		type = isl_dim_set;
	/* "id" is consumed here, so the error path below is only
	 * reached while "id" is still owned by this function. */
	aff->ls = isl_local_space_set_dim_id(aff->ls, type, pos, id);
	if (!aff->ls)
		return isl_aff_free(aff);
	return aff;
error:
	isl_id_free(id);
	isl_aff_free(aff);
	return NULL;
}
/* Replace the identifier of the input tuple of "aff" by "id".
 * "type" is currently required to be equal to isl_dim_in.
 *
 * Consumes "id" even on error.  Internally, the input tuple of an
 * isl_aff is stored as the set tuple of its local space.
 */
__isl_give isl_aff *isl_aff_set_tuple_id(__isl_take isl_aff *aff,
	enum isl_dim_type type, __isl_take isl_id *id)
{
	aff = isl_aff_cow(aff);
	if (!aff)
		goto error;
	/* Only the input tuple carries an identifier; rejecting
	 * anything else matches the documented contract above.
	 * (The previous check tested against isl_dim_out, which
	 * rejected the one legal value.) */
	if (type != isl_dim_in)
		isl_die(aff->v->ctx, isl_error_invalid,
			"can only set id of input tuple", goto error);
	aff->ls = isl_local_space_set_tuple_id(aff->ls, isl_dim_set, id);
	if (!aff->ls)
		return isl_aff_free(aff);
	return aff;
error:
	isl_id_free(id);
	isl_aff_free(aff);
	return NULL;
}
/* Exploit the equalities in "eq" to simplify the affine expression
 * and the expressions of the integer divisions in the local space.
 * The integer divisions in this local space are assumed to appear
 * as regular dimensions in "eq".
 */
static __isl_give isl_aff *isl_aff_substitute_equalities_lifted(
	__isl_take isl_aff *aff, __isl_take isl_basic_set *eq)
{
	int i, j;
	unsigned total;
	unsigned n_div;
	if (!eq)
		goto error;
	/* Without equalities there is nothing to substitute. */
	if (eq->n_eq == 0) {
		isl_basic_set_free(eq);
		return aff;
	}
	aff = isl_aff_cow(aff);
	if (!aff)
		goto error;
	/* First simplify the div definitions themselves. */
	aff->ls = isl_local_space_substitute_equalities(aff->ls,
							isl_basic_set_copy(eq));
	aff->v = isl_vec_cow(aff->v);
	if (!aff->ls || !aff->v)
		goto error;
	total = 1 + isl_space_dim(eq->dim, isl_dim_all);
	n_div = eq->n_div;
	/* Then use each equality to eliminate its last variable with a
	 * non-zero coefficient from the numerator of "aff", skipping
	 * equalities whose pivot is the constant term (j == 0) or one
	 * of the local divs of "eq" (j >= total). */
	for (i = 0; i < eq->n_eq; ++i) {
		j = isl_seq_last_non_zero(eq->eq[i], total + n_div);
		if (j < 0 || j == 0 || j >= total)
			continue;
		isl_seq_elim(aff->v->el + 1, eq->eq[i], j, total,
			     &aff->v->el[0]);
	}
	isl_basic_set_free(eq);
	aff = isl_aff_normalize(aff);
	return aff;
error:
	isl_basic_set_free(eq);
	isl_aff_free(aff);
	return NULL;
}
/* Exploit the equalities in "eq" to simplify the affine expression
 * and the expressions of the integer divisions in the local space.
 *
 * The divs of "aff" are first made visible as extra set dimensions
 * of "eq" so that the lifted variant above can be applied.
 */
__isl_give isl_aff *isl_aff_substitute_equalities(__isl_take isl_aff *aff,
	__isl_take isl_basic_set *eq)
{
	int n_div;
	if (!aff || !eq)
		goto error;
	n_div = isl_local_space_dim(aff->ls, isl_dim_div);
	if (n_div > 0)
		eq = isl_basic_set_add_dims(eq, isl_dim_set, n_div);
	return isl_aff_substitute_equalities_lifted(aff, eq);
error:
	isl_basic_set_free(eq);
	isl_aff_free(aff);
	return NULL;
}
/* Look for equalities among the variables shared by context and aff
 * and the integer divisions of aff, if any.
 * The equalities are then used to eliminate coefficients and/or integer
 * divisions from aff.
 */
__isl_give isl_aff *isl_aff_gist(__isl_take isl_aff *aff,
	__isl_take isl_set *context)
{
	isl_basic_set *hull;
	int n_div;
	if (!aff)
		goto error;
	n_div = isl_local_space_dim(aff->ls, isl_dim_div);
	if (n_div > 0) {
		isl_basic_set *bset;
		isl_local_space *ls;
		/* Expose the divs of "aff" as extra set dimensions of
		 * the context and constrain them by the div definitions
		 * (lifted from the local space and flattened). */
		context = isl_set_add_dims(context, isl_dim_set, n_div);
		ls = isl_aff_get_domain_local_space(aff);
		bset = isl_basic_set_from_local_space(ls);
		bset = isl_basic_set_lift(bset);
		bset = isl_basic_set_flatten(bset);
		context = isl_set_intersect(context,
					    isl_set_from_basic_set(bset));
	}
	/* The affine hull contains exactly the equalities implied
	 * by the context. */
	hull = isl_set_affine_hull(context);
	return isl_aff_substitute_equalities_lifted(aff, hull);
error:
	isl_aff_free(aff);
	isl_set_free(context);
	return NULL;
}
/* Simplify "aff" with respect to the parameter constraints in
 * "context" by extending the context to the full domain space
 * of "aff" and calling isl_aff_gist.
 */
__isl_give isl_aff *isl_aff_gist_params(__isl_take isl_aff *aff,
	__isl_take isl_set *context)
{
	isl_space *space;
	isl_set *dom_context;

	space = isl_aff_get_domain_space(aff);
	dom_context = isl_set_universe(space);
	dom_context = isl_set_intersect_params(dom_context, context);
	return isl_aff_gist(aff, dom_context);
}
/* Return a basic set containing those elements in the space
 * of aff where it is positive. "rational" should not be set.
 *
 * If "aff" is NaN, then it is not positive.
 */
static __isl_give isl_basic_set *aff_pos_basic_set(__isl_take isl_aff *aff,
	int rational)
{
	isl_constraint *ineq;
	isl_basic_set *bset;
	isl_val *c;
	if (!aff)
		return NULL;
	if (isl_aff_is_nan(aff)) {
		isl_space *space = isl_aff_get_domain_space(aff);
		isl_aff_free(aff);
		return isl_basic_set_empty(space);
	}
	if (rational)
		isl_die(isl_aff_get_ctx(aff), isl_error_unsupported,
			"rational sets not supported", goto error);
	/* Over the integers, aff > 0 is equivalent to aff - 1 >= 0,
	 * so lower the constant term of the inequality by one. */
	ineq = isl_inequality_from_aff(aff);
	c = isl_constraint_get_constant_val(ineq);
	c = isl_val_sub_ui(c, 1);
	ineq = isl_constraint_set_constant_val(ineq, c);
	bset = isl_basic_set_from_constraint(ineq);
	bset = isl_basic_set_simplify(bset);
	return bset;
error:
	isl_aff_free(aff);
	return NULL;
}
/* Return a basic set containing those elements in the space
 * of aff where it is non-negative.
 * If "rational" is set, then return a rational basic set.
 *
 * If "aff" is NaN, then it is not non-negative (it's not negative either).
 */
static __isl_give isl_basic_set *aff_nonneg_basic_set(
	__isl_take isl_aff *aff, int rational)
{
	isl_constraint *ineq;
	isl_basic_set *bset;
	if (!aff)
		return NULL;
	/* NaN satisfies no property: return the empty set. */
	if (isl_aff_is_nan(aff)) {
		isl_space *space = isl_aff_get_domain_space(aff);
		isl_aff_free(aff);
		return isl_basic_set_empty(space);
	}
	/* The set is described by the single constraint aff >= 0. */
	ineq = isl_inequality_from_aff(aff);
	bset = isl_basic_set_from_constraint(ineq);
	if (rational)
		bset = isl_basic_set_set_rational(bset);
	bset = isl_basic_set_simplify(bset);
	return bset;
}
/* Return the (integer) basic set of elements in the space of "aff"
 * on which "aff" takes a non-negative value.
 */
__isl_give isl_basic_set *isl_aff_nonneg_basic_set(__isl_take isl_aff *aff)
{
	int rational = 0;

	return aff_nonneg_basic_set(aff, rational);
}
/* Return a basic set containing those elements in the domain space
 * of "aff" where it is negative, i.e., where -aff - 1 is non-negative
 * (over the integers).
 */
__isl_give isl_basic_set *isl_aff_neg_basic_set(__isl_take isl_aff *aff)
{
	isl_aff *shifted;

	shifted = isl_aff_neg(aff);
	shifted = isl_aff_add_constant_num_si(shifted, -1);
	return isl_aff_nonneg_basic_set(shifted);
}
/* Return a basic set containing those elements in the space
 * of aff where it is zero.
 * If "rational" is set, then return a rational basic set.
 *
 * If "aff" is NaN, then it is not zero.
 */
static __isl_give isl_basic_set *aff_zero_basic_set(__isl_take isl_aff *aff,
	int rational)
{
	isl_constraint *ineq;
	isl_basic_set *bset;
	if (!aff)
		return NULL;
	/* NaN satisfies no property: return the empty set. */
	if (isl_aff_is_nan(aff)) {
		isl_space *space = isl_aff_get_domain_space(aff);
		isl_aff_free(aff);
		return isl_basic_set_empty(space);
	}
	/* The set is described by the single constraint aff = 0. */
	ineq = isl_equality_from_aff(aff);
	bset = isl_basic_set_from_constraint(ineq);
	if (rational)
		bset = isl_basic_set_set_rational(bset);
	bset = isl_basic_set_simplify(bset);
	return bset;
}
/* Return the (integer) basic set of elements in the space of "aff"
 * on which "aff" evaluates to zero.
 */
__isl_give isl_basic_set *isl_aff_zero_basic_set(__isl_take isl_aff *aff)
{
	int rational = 0;

	return aff_zero_basic_set(aff, rational);
}
/* Return a basic set containing those elements in the shared space
 * of "aff1" and "aff2" where aff1 >= aff2, i.e., where their
 * difference is non-negative.
 */
__isl_give isl_basic_set *isl_aff_ge_basic_set(__isl_take isl_aff *aff1,
	__isl_take isl_aff *aff2)
{
	isl_aff *diff;

	diff = isl_aff_sub(aff1, aff2);
	return isl_aff_nonneg_basic_set(diff);
}
/* Return a set containing those elements in the shared space
 * of "aff1" and "aff2" where aff1 >= aff2.
 */
__isl_give isl_set *isl_aff_ge_set(__isl_take isl_aff *aff1,
	__isl_take isl_aff *aff2)
{
	isl_basic_set *bset;

	bset = isl_aff_ge_basic_set(aff1, aff2);
	return isl_set_from_basic_set(bset);
}
/* Return a basic set containing those elements in the shared space
 * of "aff1" and "aff2" where aff1 <= aff2.
 *
 * aff1 <= aff2 holds precisely where aff2 >= aff1.
 */
__isl_give isl_basic_set *isl_aff_le_basic_set(__isl_take isl_aff *aff1,
	__isl_take isl_aff *aff2)
{
	return isl_aff_ge_basic_set(aff2, aff1);
}
/* Return a set containing those elements in the shared space
 * of "aff1" and "aff2" where aff1 <= aff2.
 *
 * aff1 <= aff2 holds precisely where aff2 >= aff1.
 */
__isl_give isl_set *isl_aff_le_set(__isl_take isl_aff *aff1,
	__isl_take isl_aff *aff2)
{
	return isl_aff_ge_set(aff2, aff1);
}
/* Return a basic set containing those elements in the shared space
 * of "aff1" and "aff2" where the two expressions are equal, i.e.,
 * where their difference is zero.
 */
__isl_give isl_basic_set *isl_aff_eq_basic_set(__isl_take isl_aff *aff1,
	__isl_take isl_aff *aff2)
{
	isl_aff *diff;

	diff = isl_aff_sub(aff1, aff2);
	return isl_aff_zero_basic_set(diff);
}
/* Return a set containing those elements in the shared space
 * of "aff1" and "aff2" where the two expressions are equal.
 */
__isl_give isl_set *isl_aff_eq_set(__isl_take isl_aff *aff1,
	__isl_take isl_aff *aff2)
{
	isl_basic_set *bset;

	bset = isl_aff_eq_basic_set(aff1, aff2);
	return isl_set_from_basic_set(bset);
}
/* Add "aff1" and "aff2" and simplify the result with respect to
 * the domain "dom" (which is only read, not consumed).
 */
__isl_give isl_aff *isl_aff_add_on_domain(__isl_keep isl_set *dom,
	__isl_take isl_aff *aff1, __isl_take isl_aff *aff2)
{
	isl_aff *sum;

	sum = isl_aff_add(aff1, aff2);
	return isl_aff_gist(sum, isl_set_copy(dom));
}
/* An isl_aff is never "empty"; report -1 only on NULL input. */
int isl_aff_is_empty(__isl_keep isl_aff *aff)
{
	return aff ? 0 : -1;
}
/* Check whether the given affine expression has non-zero coefficient
 * for any dimension in the given range or if any of these dimensions
 * appear with non-zero coefficients in any of the integer divisions
 * involved in the affine expression.
 */
isl_bool isl_aff_involves_dims(__isl_keep isl_aff *aff,
	enum isl_dim_type type, unsigned first, unsigned n)
{
	int i;
	isl_ctx *ctx;
	int *active = NULL;
	isl_bool involves = isl_bool_false;
	if (!aff)
		return isl_bool_error;
	if (n == 0)
		return isl_bool_false;
	ctx = isl_aff_get_ctx(aff);
	if (first + n > isl_aff_dim(aff, type))
		isl_die(ctx, isl_error_invalid,
			"range out of bounds", return isl_bool_error);
	/* "active" flags, per variable of the local space, whether it
	 * appears (directly or through a div) in the expression;
	 * el + 2 skips the denominator and the constant term. */
	active = isl_local_space_get_active(aff->ls, aff->v->el + 2);
	if (!active)
		goto error;
	/* Translate "first" to a position in the "active" array. */
	first += isl_local_space_offset(aff->ls, type) - 1;
	for (i = 0; i < n; ++i)
		if (active[first + i]) {
			involves = isl_bool_true;
			break;
		}
	free(active);
	return involves;
error:
	free(active);
	return isl_bool_error;
}
/* Drop the "n" dimensions of type "type" starting at "first" from "aff".
 * The (one-dimensional) output cannot be dropped; input dimensions are
 * stored as set dimensions in the local space.
 */
__isl_give isl_aff *isl_aff_drop_dims(__isl_take isl_aff *aff,
	enum isl_dim_type type, unsigned first, unsigned n)
{
	isl_ctx *ctx;
	if (!aff)
		return NULL;
	if (type == isl_dim_out)
		isl_die(aff->v->ctx, isl_error_invalid,
			"cannot drop output/set dimension",
			return isl_aff_free(aff));
	if (type == isl_dim_in)
		type = isl_dim_set;
	if (n == 0 && !isl_local_space_is_named_or_nested(aff->ls, type))
		return aff;
	ctx = isl_aff_get_ctx(aff);
	if (first + n > isl_local_space_dim(aff->ls, type))
		isl_die(ctx, isl_error_invalid, "range out of bounds",
			return isl_aff_free(aff));
	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;
	aff->ls = isl_local_space_drop_dims(aff->ls, type, first, n);
	if (!aff->ls)
		return isl_aff_free(aff);
	/* Drop the corresponding coefficients; the offset accounts
	 * for the denominator and constant term at the front of v. */
	first += 1 + isl_local_space_offset(aff->ls, type);
	aff->v = isl_vec_drop_els(aff->v, first, n);
	if (!aff->v)
		return isl_aff_free(aff);
	return aff;
}
/* Project the domain of the affine expression onto its parameter space.
 * The affine expression may not involve any of the domain dimensions.
 */
__isl_give isl_aff *isl_aff_project_domain_on_params(__isl_take isl_aff *aff)
{
	isl_space *space;
	unsigned n;
	int involves;
	n = isl_aff_dim(aff, isl_dim_in);
	involves = isl_aff_involves_dims(aff, isl_dim_in, 0, n);
	if (involves < 0)
		return isl_aff_free(aff);
	/* Dropping a dimension that is actually used would change
	 * the meaning of the expression, so refuse. */
	if (involves)
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
		    "affine expression involves some of the domain dimensions",
		    return isl_aff_free(aff));
	aff = isl_aff_drop_dims(aff, isl_dim_in, 0, n);
	space = isl_aff_get_domain_space(aff);
	space = isl_space_params(space);
	aff = isl_aff_reset_domain_space(aff, space);
	return aff;
}
/* Insert "n" dimensions of type "type" at position "first" into "aff",
 * with zero coefficients.  Output dimensions cannot be inserted;
 * input dimensions are stored as set dimensions in the local space.
 */
__isl_give isl_aff *isl_aff_insert_dims(__isl_take isl_aff *aff,
	enum isl_dim_type type, unsigned first, unsigned n)
{
	isl_ctx *ctx;
	if (!aff)
		return NULL;
	if (type == isl_dim_out)
		isl_die(aff->v->ctx, isl_error_invalid,
			"cannot insert output/set dimensions",
			return isl_aff_free(aff));
	if (type == isl_dim_in)
		type = isl_dim_set;
	if (n == 0 && !isl_local_space_is_named_or_nested(aff->ls, type))
		return aff;
	ctx = isl_aff_get_ctx(aff);
	if (first > isl_local_space_dim(aff->ls, type))
		isl_die(ctx, isl_error_invalid, "position out of bounds",
			return isl_aff_free(aff));
	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;
	aff->ls = isl_local_space_insert_dims(aff->ls, type, first, n);
	if (!aff->ls)
		return isl_aff_free(aff);
	/* Insert zero coefficients at the matching positions; the
	 * offset accounts for the denominator and constant term. */
	first += 1 + isl_local_space_offset(aff->ls, type);
	aff->v = isl_vec_insert_zero_els(aff->v, first, n);
	if (!aff->v)
		return isl_aff_free(aff);
	return aff;
}
/* Append "n" dimensions of type "type" to "aff" by inserting them
 * after the last existing dimension of that type.
 */
__isl_give isl_aff *isl_aff_add_dims(__isl_take isl_aff *aff,
	enum isl_dim_type type, unsigned n)
{
	unsigned end = isl_aff_dim(aff, type);

	return isl_aff_insert_dims(aff, type, end, n);
}
/* Append "n" dimensions of type "type" to "pwaff" by inserting them
 * after the last existing dimension of that type.
 */
__isl_give isl_pw_aff *isl_pw_aff_add_dims(__isl_take isl_pw_aff *pwaff,
	enum isl_dim_type type, unsigned n)
{
	unsigned end = isl_pw_aff_dim(pwaff, type);

	return isl_pw_aff_insert_dims(pwaff, type, end, n);
}
/* Move the "n" dimensions of "src_type" starting at "src_pos" of "aff"
 * to dimensions of "dst_type" at "dst_pos".
 *
 * We only support moving input dimensions to parameters and vice versa.
 */
__isl_give isl_aff *isl_aff_move_dims(__isl_take isl_aff *aff,
	enum isl_dim_type dst_type, unsigned dst_pos,
	enum isl_dim_type src_type, unsigned src_pos, unsigned n)
{
	unsigned g_dst_pos;
	unsigned g_src_pos;
	if (!aff)
		return NULL;
	if (n == 0 &&
	    !isl_local_space_is_named_or_nested(aff->ls, src_type) &&
	    !isl_local_space_is_named_or_nested(aff->ls, dst_type))
		return aff;
	if (dst_type == isl_dim_out || src_type == isl_dim_out)
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"cannot move output/set dimension",
			return isl_aff_free(aff));
	if (dst_type == isl_dim_div || src_type == isl_dim_div)
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"cannot move divs", return isl_aff_free(aff));
	if (dst_type == isl_dim_in)
		dst_type = isl_dim_set;
	if (src_type == isl_dim_in)
		src_type = isl_dim_set;
	if (src_pos + n > isl_local_space_dim(aff->ls, src_type))
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"range out of bounds", return isl_aff_free(aff));
	if (dst_type == src_type)
		isl_die(isl_aff_get_ctx(aff), isl_error_unsupported,
			"moving dims within the same type not supported",
			return isl_aff_free(aff));
	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;
	/* Global positions inside the coefficient vector (skipping
	 * the denominator and the constant term). */
	g_src_pos = 1 + isl_local_space_offset(aff->ls, src_type) + src_pos;
	g_dst_pos = 1 + isl_local_space_offset(aff->ls, dst_type) + dst_pos;
	/* If the destination block comes after the source block,
	 * its global position shifts down once the source is removed. */
	if (dst_type > src_type)
		g_dst_pos -= n;
	aff->v = isl_vec_move_els(aff->v, g_dst_pos, g_src_pos, n);
	aff->ls = isl_local_space_move_dims(aff->ls, dst_type, dst_pos,
					    src_type, src_pos, n);
	if (!aff->v || !aff->ls)
		return isl_aff_free(aff);
	/* Moving dims may have perturbed the canonical div order. */
	aff = sort_divs(aff);
	return aff;
}
/* Wrap "aff" in a piecewise expression defined on its entire domain. */
__isl_give isl_pw_aff *isl_pw_aff_from_aff(__isl_take isl_aff *aff)
{
	isl_space *space;
	isl_set *dom;

	space = isl_aff_get_domain_space(aff);
	dom = isl_set_universe(space);
	return isl_pw_aff_alloc(dom, aff);
}
#undef PW
#define PW isl_pw_aff
#undef EL
#define EL isl_aff
#undef EL_IS_ZERO
#define EL_IS_ZERO is_empty
#undef ZERO
#define ZERO empty
#undef IS_ZERO
#define IS_ZERO is_empty
#undef FIELD
#define FIELD aff
#undef DEFAULT_IS_ZERO
#define DEFAULT_IS_ZERO 0
#define NO_EVAL
#define NO_OPT
#define NO_LIFT
#define NO_MORPH
#include <isl_pw_templ.c>
#include <isl_pw_hash.c>
#include <isl_pw_union_opt.c>
#undef UNION
#define UNION isl_union_pw_aff
#undef PART
#define PART isl_pw_aff
#undef PARTS
#define PARTS pw_aff
#include <isl_union_single.c>
#include <isl_union_neg.c>
/* Align the parameters of the two isl_pw_aff arguments and then
 * apply the function "fn" (returning an isl_set) to them.
 * If the parameters are already aligned, "fn" is applied directly.
 */
static __isl_give isl_set *align_params_pw_pw_set_and(
	__isl_take isl_pw_aff *pwaff1, __isl_take isl_pw_aff *pwaff2,
	__isl_give isl_set *(*fn)(__isl_take isl_pw_aff *pwaff1,
				  __isl_take isl_pw_aff *pwaff2))
{
	if (!pwaff1 || !pwaff2)
		goto error;
	if (isl_space_match(pwaff1->dim, isl_dim_param,
			    pwaff2->dim, isl_dim_param))
		return fn(pwaff1, pwaff2);
	/* Unnamed parameters cannot be matched up across spaces. */
	if (!isl_space_has_named_params(pwaff1->dim) ||
	    !isl_space_has_named_params(pwaff2->dim))
		isl_die(isl_pw_aff_get_ctx(pwaff1), isl_error_invalid,
			"unaligned unnamed parameters", goto error);
	pwaff1 = isl_pw_aff_align_params(pwaff1, isl_pw_aff_get_space(pwaff2));
	pwaff2 = isl_pw_aff_align_params(pwaff2, isl_pw_aff_get_space(pwaff1));
	return fn(pwaff1, pwaff2);
error:
	isl_pw_aff_free(pwaff1);
	isl_pw_aff_free(pwaff2);
	return NULL;
}
/* Align the parameters of the two isl_pw_aff arguments and
 * then apply a function "fn" on them that returns an isl_map.
 * If the parameters are already aligned, "fn" is applied directly.
 */
static __isl_give isl_map *align_params_pw_pw_map_and(
	__isl_take isl_pw_aff *pa1, __isl_take isl_pw_aff *pa2,
	__isl_give isl_map *(*fn)(__isl_take isl_pw_aff *pa1,
				  __isl_take isl_pw_aff *pa2))
{
	if (!pa1 || !pa2)
		goto error;
	if (isl_space_match(pa1->dim, isl_dim_param, pa2->dim, isl_dim_param))
		return fn(pa1, pa2);
	/* Unnamed parameters cannot be matched up across spaces. */
	if (!isl_space_has_named_params(pa1->dim) ||
	    !isl_space_has_named_params(pa2->dim))
		isl_die(isl_pw_aff_get_ctx(pa1), isl_error_invalid,
			"unaligned unnamed parameters", goto error);
	pa1 = isl_pw_aff_align_params(pa1, isl_pw_aff_get_space(pa2));
	pa2 = isl_pw_aff_align_params(pa2, isl_pw_aff_get_space(pa1));
	return fn(pa1, pa2);
error:
	isl_pw_aff_free(pa1);
	isl_pw_aff_free(pa2);
	return NULL;
}
/* Compute a piecewise quasi-affine expression with a domain that
 * is the union of those of pwaff1 and pwaff2 and such that on each
 * cell, the quasi-affine expression is the maximum of those of pwaff1
 * and pwaff2. If only one of pwaff1 or pwaff2 is defined on a given
 * cell, then the associated expression is the defined one.
 *
 * The ">=" comparison selects the larger expression on shared cells.
 */
static __isl_give isl_pw_aff *pw_aff_union_max(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return isl_pw_aff_union_opt_cmp(pwaff1, pwaff2, &isl_aff_ge_set);
}
/* Union-max with parameter alignment: align the parameters of the
 * two arguments before computing the cell-wise maximum.
 */
__isl_give isl_pw_aff *isl_pw_aff_union_max(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return isl_pw_aff_align_params_pw_pw_and(pwaff1, pwaff2,
						 &pw_aff_union_max);
}
/* Compute a piecewise quasi-affine expression with a domain that
 * is the union of those of pwaff1 and pwaff2 and such that on each
 * cell, the quasi-affine expression is the minimum of those of pwaff1
 * and pwaff2. If only one of pwaff1 or pwaff2 is defined on a given
 * cell, then the associated expression is the defined one.
 *
 * The "<=" comparison selects the smaller expression on shared cells.
 */
static __isl_give isl_pw_aff *pw_aff_union_min(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return isl_pw_aff_union_opt_cmp(pwaff1, pwaff2, &isl_aff_le_set);
}
/* Union-min with parameter alignment: align the parameters of the
 * two arguments before computing the cell-wise minimum.
 */
__isl_give isl_pw_aff *isl_pw_aff_union_min(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return isl_pw_aff_align_params_pw_pw_and(pwaff1, pwaff2,
						 &pw_aff_union_min);
}
/* Compute the cell-wise maximum (if "max" is set) or minimum
 * (otherwise) of "pwaff1" and "pwaff2" on the union of their domains.
 */
__isl_give isl_pw_aff *isl_pw_aff_union_opt(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2, int max)
{
	return max ? isl_pw_aff_union_max(pwaff1, pwaff2)
		   : isl_pw_aff_union_min(pwaff1, pwaff2);
}
/* Construct a map with as domain the domain of pwaff and
 * one-dimensional range corresponding to the affine expressions.
 *
 * Each piece contributes the graph of its affine expression
 * restricted to the piece's cell; the cells are pairwise disjoint,
 * so the pieces can be combined with a disjoint union.
 */
static __isl_give isl_map *map_from_pw_aff(__isl_take isl_pw_aff *pwaff)
{
	int i;
	isl_space *dim;
	isl_map *map;
	if (!pwaff)
		return NULL;
	dim = isl_pw_aff_get_space(pwaff);
	map = isl_map_empty(dim);
	for (i = 0; i < pwaff->n; ++i) {
		isl_basic_map *bmap;
		isl_map *map_i;
		/* Graph of the expression on piece i, restricted
		 * to that piece's cell. */
		bmap = isl_basic_map_from_aff(isl_aff_copy(pwaff->p[i].aff));
		map_i = isl_map_from_basic_map(bmap);
		map_i = isl_map_intersect_domain(map_i,
						 isl_set_copy(pwaff->p[i].set));
		map = isl_map_union_disjoint(map, map_i);
	}
	isl_pw_aff_free(pwaff);
	return map;
}
/* Construct a map with as domain the domain of "pwaff" and a
 * one-dimensional range corresponding to the affine expressions.
 * The space of "pwaff" is required to be a map space.
 */
__isl_give isl_map *isl_map_from_pw_aff(__isl_take isl_pw_aff *pwaff)
{
	if (!pwaff)
		return NULL;
	if (!isl_space_is_set(pwaff->dim))
		return map_from_pw_aff(pwaff);
	isl_die(isl_pw_aff_get_ctx(pwaff), isl_error_invalid,
		"space of input is not a map", goto error);
error:
	isl_pw_aff_free(pwaff);
	return NULL;
}
/* Construct a one-dimensional set with as parameter domain the domain
 * of "pwaff" and the single set dimension corresponding to the affine
 * expressions.  The space of "pwaff" is required to be a set space.
 */
__isl_give isl_set *isl_set_from_pw_aff(__isl_take isl_pw_aff *pwaff)
{
	if (!pwaff)
		return NULL;
	if (isl_space_is_set(pwaff->dim))
		return map_from_pw_aff(pwaff);
	isl_die(isl_pw_aff_get_ctx(pwaff), isl_error_invalid,
		"space of input is not a set", goto error);
error:
	isl_pw_aff_free(pwaff);
	return NULL;
}
/* Return a set containing those elements in the domain
 * of "pwaff" where it satisfies "fn" (if complement is 0) or
 * does not satisfy "fn" (if complement is 1).
 *
 * The pieces with a NaN never belong to the result since
 * NaN does not satisfy any property.
 */
static __isl_give isl_set *pw_aff_locus(__isl_take isl_pw_aff *pwaff,
	__isl_give isl_basic_set *(*fn)(__isl_take isl_aff *aff, int rational),
	int complement)
{
	int i;
	isl_set *set;
	if (!pwaff)
		return NULL;
	set = isl_set_empty(isl_pw_aff_get_domain_space(pwaff));
	for (i = 0; i < pwaff->n; ++i) {
		isl_basic_set *bset;
		isl_set *set_i, *locus;
		int rational;
		/* NaN pieces satisfy nothing: skip them entirely. */
		if (isl_aff_is_nan(pwaff->p[i].aff))
			continue;
		rational = isl_set_has_rational(pwaff->p[i].set);
		/* Locus within the piece's cell where the property holds. */
		bset = fn(isl_aff_copy(pwaff->p[i].aff), rational);
		locus = isl_set_from_basic_set(bset);
		set_i = isl_set_copy(pwaff->p[i].set);
		if (complement)
			set_i = isl_set_subtract(set_i, locus);
		else
			set_i = isl_set_intersect(set_i, locus);
		/* Cells of a pw_aff are pairwise disjoint. */
		set = isl_set_union_disjoint(set, set_i);
	}
	isl_pw_aff_free(pwaff);
	return set;
}
/* Return a set containing those elements in the domain
 * of "pa" where it is positive.
 */
__isl_give isl_set *isl_pw_aff_pos_set(__isl_take isl_pw_aff *pa)
{
	return pw_aff_locus(pa, &aff_pos_basic_set, 0);
}
/* Return a set containing those elements in the domain
 * of pwaff where it is non-negative.
 */
__isl_give isl_set *isl_pw_aff_nonneg_set(__isl_take isl_pw_aff *pwaff)
{
	return pw_aff_locus(pwaff, &aff_nonneg_basic_set, 0);
}
/* Return a set containing those elements in the domain
 * of pwaff where it is zero.
 */
__isl_give isl_set *isl_pw_aff_zero_set(__isl_take isl_pw_aff *pwaff)
{
	return pw_aff_locus(pwaff, &aff_zero_basic_set, 0);
}
/* Return a set containing those elements in the domain
 * of pwaff where it is not zero.
 * Implemented as the complement of the zero locus within the domain.
 */
__isl_give isl_set *isl_pw_aff_non_zero_set(__isl_take isl_pw_aff *pwaff)
{
	return pw_aff_locus(pwaff, &aff_zero_basic_set, 1);
}
/* Return a set containing those elements in the shared domain
 * of pwaff1 and pwaff2 where pwaff1 is greater than (or equal) to pwaff2.
 *
 * We compute the difference on the shared domain and then construct
 * the set of values where this difference is non-negative.
 * If strict is set, we first subtract 1 from the difference.
 * If equal is set, we only return the elements where pwaff1 and pwaff2
 * are equal.
 */
static __isl_give isl_set *pw_aff_gte_set(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2, int strict, int equal)
{
	isl_set *set1, *set2;
	set1 = isl_pw_aff_domain(isl_pw_aff_copy(pwaff1));
	set2 = isl_pw_aff_domain(isl_pw_aff_copy(pwaff2));
	set1 = isl_set_intersect(set1, set2);
	pwaff1 = isl_pw_aff_intersect_domain(pwaff1, isl_set_copy(set1));
	pwaff2 = isl_pw_aff_intersect_domain(pwaff2, isl_set_copy(set1));
	/* diff = pwaff1 - pwaff2 on the shared domain */
	pwaff1 = isl_pw_aff_add(pwaff1, isl_pw_aff_neg(pwaff2));
	if (strict) {
		/* diff - 1 >= 0 encodes diff > 0 (strict comparison);
		 * the constant -1 is added as a pw_aff on the shared
		 * domain, which consumes set1. */
		isl_space *dim = isl_set_get_space(set1);
		isl_aff *aff;
		aff = isl_aff_zero_on_domain(isl_local_space_from_space(dim));
		aff = isl_aff_add_constant_si(aff, -1);
		pwaff1 = isl_pw_aff_add(pwaff1, isl_pw_aff_alloc(set1, aff));
	} else
		isl_set_free(set1);
	if (equal)
		return isl_pw_aff_zero_set(pwaff1);
	return isl_pw_aff_nonneg_set(pwaff1);
}
/* Return a set containing those elements in the shared domain
 * of pwaff1 and pwaff2 where pwaff1 is equal to pwaff2.
 */
static __isl_give isl_set *pw_aff_eq_set(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return pw_aff_gte_set(pwaff1, pwaff2, 0, 1);
}
/* Public wrapper: align the parameters of the two arguments and
 * compute the equality set.
 */
__isl_give isl_set *isl_pw_aff_eq_set(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return align_params_pw_pw_set_and(pwaff1, pwaff2, &pw_aff_eq_set);
}
/* Return a set containing those elements in the shared domain
 * of pwaff1 and pwaff2 where pwaff1 is greater than or equal to pwaff2.
 */
static __isl_give isl_set *pw_aff_ge_set(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return pw_aff_gte_set(pwaff1, pwaff2, 0, 0);
}
/* Public wrapper: align parameters, then compute the ">=" set. */
__isl_give isl_set *isl_pw_aff_ge_set(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return align_params_pw_pw_set_and(pwaff1, pwaff2, &pw_aff_ge_set);
}
/* Return a set containing those elements in the shared domain
 * of pwaff1 and pwaff2 where pwaff1 is strictly greater than pwaff2.
 */
static __isl_give isl_set *pw_aff_gt_set(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return pw_aff_gte_set(pwaff1, pwaff2, 1, 0);
}
/* Public wrapper: align parameters, then compute the ">" set. */
__isl_give isl_set *isl_pw_aff_gt_set(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return align_params_pw_pw_set_and(pwaff1, pwaff2, &pw_aff_gt_set);
}
/* "<=" is ">=" with the arguments swapped. */
__isl_give isl_set *isl_pw_aff_le_set(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return isl_pw_aff_ge_set(pwaff2, pwaff1);
}
/* "<" is ">" with the arguments swapped. */
__isl_give isl_set *isl_pw_aff_lt_set(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return isl_pw_aff_gt_set(pwaff2, pwaff1);
}
/* Return a map containing pairs of elements in the domains of "pa1" and "pa2"
 * where the function values are ordered in the same way as "order",
 * which returns a set in the shared domain of its two arguments.
 * The parameters of "pa1" and "pa2" are assumed to have been aligned.
 *
 * Let "pa1" and "pa2" be defined on domains A and B respectively.
 * We first pull back the two functions such that they are defined on
 * the domain [A -> B]. Then we apply "order", resulting in a set
 * in the space [A -> B]. Finally, we unwrap this set to obtain
 * a map in the space A -> B.
 */
static __isl_give isl_map *isl_pw_aff_order_map_aligned(
	__isl_take isl_pw_aff *pa1, __isl_take isl_pw_aff *pa2,
	__isl_give isl_set *(*order)(__isl_take isl_pw_aff *pa1,
		__isl_take isl_pw_aff *pa2))
{
	isl_space *space1, *space2;
	isl_multi_aff *ma;
	isl_set *set;
	space1 = isl_space_domain(isl_pw_aff_get_space(pa1));
	space2 = isl_space_domain(isl_pw_aff_get_space(pa2));
	space1 = isl_space_map_from_domain_and_range(space1, space2);
	/* Pull back pa1 through the projection [A -> B] -> A ... */
	ma = isl_multi_aff_domain_map(isl_space_copy(space1));
	pa1 = isl_pw_aff_pullback_multi_aff(pa1, ma);
	/* ... and pa2 through the projection [A -> B] -> B. */
	ma = isl_multi_aff_range_map(space1);
	pa2 = isl_pw_aff_pullback_multi_aff(pa2, ma);
	set = order(pa1, pa2);
	return isl_set_unwrap(set);
}
/* Return a map containing pairs of elements in the domains of "pa1" and "pa2"
 * where the function values are equal.
 * The parameters of "pa1" and "pa2" are assumed to have been aligned.
 */
static __isl_give isl_map *isl_pw_aff_eq_map_aligned(__isl_take isl_pw_aff *pa1,
	__isl_take isl_pw_aff *pa2)
{
	return isl_pw_aff_order_map_aligned(pa1, pa2, &isl_pw_aff_eq_set);
}
/* Return a map containing pairs of elements in the domains of "pa1" and "pa2"
 * where the function values are equal.
 */
__isl_give isl_map *isl_pw_aff_eq_map(__isl_take isl_pw_aff *pa1,
	__isl_take isl_pw_aff *pa2)
{
	return align_params_pw_pw_map_and(pa1, pa2, &isl_pw_aff_eq_map_aligned);
}
/* Return a map containing pairs of elements in the domains of "pa1" and "pa2"
 * where the function value of "pa1" is less than the function value of "pa2".
 * The parameters of "pa1" and "pa2" are assumed to have been aligned.
 */
static __isl_give isl_map *isl_pw_aff_lt_map_aligned(__isl_take isl_pw_aff *pa1,
	__isl_take isl_pw_aff *pa2)
{
	return isl_pw_aff_order_map_aligned(pa1, pa2, &isl_pw_aff_lt_set);
}
/* Return a map containing pairs of elements in the domains of "pa1" and "pa2"
 * where the function value of "pa1" is less than the function value of "pa2".
 */
__isl_give isl_map *isl_pw_aff_lt_map(__isl_take isl_pw_aff *pa1,
	__isl_take isl_pw_aff *pa2)
{
	return align_params_pw_pw_map_and(pa1, pa2, &isl_pw_aff_lt_map_aligned);
}
/* Return a map containing pairs of elements in the domains of "pa1" and "pa2"
 * where the function value of "pa1" is greater than the function value
 * of "pa2".
 * The parameters of "pa1" and "pa2" are assumed to have been aligned.
 */
static __isl_give isl_map *isl_pw_aff_gt_map_aligned(__isl_take isl_pw_aff *pa1,
	__isl_take isl_pw_aff *pa2)
{
	return isl_pw_aff_order_map_aligned(pa1, pa2, &isl_pw_aff_gt_set);
}
/* Return a map containing pairs of elements in the domains of "pa1" and "pa2"
 * where the function value of "pa1" is greater than the function value
 * of "pa2".
 */
__isl_give isl_map *isl_pw_aff_gt_map(__isl_take isl_pw_aff *pa1,
	__isl_take isl_pw_aff *pa2)
{
	return align_params_pw_pw_map_and(pa1, pa2, &isl_pw_aff_gt_map_aligned);
}
/* Return a set containing those elements in the shared domain
 * of the elements of list1 and list2 where each element in list1
 * has the relation specified by "fn" with each element in list2.
 *
 * The result is the intersection of fn(list1[i], list2[j]) over all
 * pairs (i, j); both lists must be non-empty.
 */
static __isl_give isl_set *pw_aff_list_set(__isl_take isl_pw_aff_list *list1,
	__isl_take isl_pw_aff_list *list2,
	__isl_give isl_set *(*fn)(__isl_take isl_pw_aff *pwaff1,
		__isl_take isl_pw_aff *pwaff2))
{
	int i, j;
	isl_ctx *ctx;
	isl_set *set;
	if (!list1 || !list2)
		goto error;
	ctx = isl_pw_aff_list_get_ctx(list1);
	if (list1->n < 1 || list2->n < 1)
		isl_die(ctx, isl_error_invalid,
			"list should contain at least one element", goto error);
	/* Start from the universe of the common domain space and
	 * intersect with each pairwise relation set. */
	set = isl_set_universe(isl_pw_aff_get_domain_space(list1->p[0]));
	for (i = 0; i < list1->n; ++i)
		for (j = 0; j < list2->n; ++j) {
			isl_set *set_ij;
			set_ij = fn(isl_pw_aff_copy(list1->p[i]),
					isl_pw_aff_copy(list2->p[j]));
			set = isl_set_intersect(set, set_ij);
		}
	isl_pw_aff_list_free(list1);
	isl_pw_aff_list_free(list2);
	return set;
error:
	isl_pw_aff_list_free(list1);
	isl_pw_aff_list_free(list2);
	return NULL;
}
/* Return a set containing those elements in the shared domain
 * of the elements of list1 and list2 where each element in list1
 * is equal to each element in list2.
 */
__isl_give isl_set *isl_pw_aff_list_eq_set(__isl_take isl_pw_aff_list *list1,
	__isl_take isl_pw_aff_list *list2)
{
	return pw_aff_list_set(list1, list2, &isl_pw_aff_ne_set);
}
/* Return a set containing those elements in the shared domain
 * of pwaff1 and pwaff2 where pwaff1 is not equal to pwaff2.
 *
 * Computed as the union of the "<" and ">" sets, which are disjoint
 * by construction, so the cheaper disjoint union applies.
 */
static __isl_give isl_set *pw_aff_ne_set(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	isl_set *set_lt, *set_gt;
	set_lt = isl_pw_aff_lt_set(isl_pw_aff_copy(pwaff1),
				   isl_pw_aff_copy(pwaff2));
	set_gt = isl_pw_aff_gt_set(pwaff1, pwaff2);
	return isl_set_union_disjoint(set_lt, set_gt);
}
/* Public wrapper: align parameters, then compute the "!=" set. */
__isl_give isl_set *isl_pw_aff_ne_set(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return align_params_pw_pw_set_and(pwaff1, pwaff2, &pw_aff_ne_set);
}
/* Divide each piece of "pwaff" by the (strictly positive) factor "v".
 * A factor of one is a no-op; a non-positive factor is rejected.
 */
__isl_give isl_pw_aff *isl_pw_aff_scale_down(__isl_take isl_pw_aff *pwaff,
	isl_int v)
{
	int i;
	if (isl_int_is_one(v))
		return pwaff;
	if (!isl_int_is_pos(v))
		isl_die(isl_pw_aff_get_ctx(pwaff), isl_error_invalid,
			"factor needs to be positive",
			return isl_pw_aff_free(pwaff));
	pwaff = isl_pw_aff_cow(pwaff);
	if (!pwaff)
		return NULL;
	if (pwaff->n == 0)
		return pwaff;
	for (i = 0; i < pwaff->n; ++i) {
		pwaff->p[i].aff = isl_aff_scale_down(pwaff->p[i].aff, v);
		if (!pwaff->p[i].aff)
			return isl_pw_aff_free(pwaff);
	}
	return pwaff;
}
/* Replace each affine piece of "pwaff" by its floor. */
__isl_give isl_pw_aff *isl_pw_aff_floor(__isl_take isl_pw_aff *pwaff)
{
	int pos;

	pwaff = isl_pw_aff_cow(pwaff);
	if (!pwaff)
		return NULL;
	if (pwaff->n == 0)
		return pwaff;
	for (pos = 0; pos < pwaff->n; ++pos) {
		isl_aff *rounded = isl_aff_floor(pwaff->p[pos].aff);
		pwaff->p[pos].aff = rounded;
		if (!rounded)
			return isl_pw_aff_free(pwaff);
	}
	return pwaff;
}
/* Replace each affine piece of "pwaff" by its ceiling. */
__isl_give isl_pw_aff *isl_pw_aff_ceil(__isl_take isl_pw_aff *pwaff)
{
	int pos;

	pwaff = isl_pw_aff_cow(pwaff);
	if (!pwaff)
		return NULL;
	if (pwaff->n == 0)
		return pwaff;
	for (pos = 0; pos < pwaff->n; ++pos) {
		isl_aff *rounded = isl_aff_ceil(pwaff->p[pos].aff);
		pwaff->p[pos].aff = rounded;
		if (!rounded)
			return isl_pw_aff_free(pwaff);
	}
	return pwaff;
}
/* Assuming that "cond1" and "cond2" are disjoint,
 * return an affine expression that is equal to pwaff1 on cond1
 * and to pwaff2 on cond2.
 *
 * After restricting each expression to its condition, the two
 * pieces live on disjoint cells, so the disjoint add concatenates
 * them into a single piecewise expression.
 */
static __isl_give isl_pw_aff *isl_pw_aff_select(
	__isl_take isl_set *cond1, __isl_take isl_pw_aff *pwaff1,
	__isl_take isl_set *cond2, __isl_take isl_pw_aff *pwaff2)
{
	pwaff1 = isl_pw_aff_intersect_domain(pwaff1, cond1);
	pwaff2 = isl_pw_aff_intersect_domain(pwaff2, cond2);
	return isl_pw_aff_add_disjoint(pwaff1, pwaff2);
}
/* Return an affine expression that is equal to pwaff_true for elements
 * where "cond" is non-zero and to pwaff_false for elements where "cond"
 * is zero.
 * That is, return cond ? pwaff_true : pwaff_false;
 *
 * If "cond" involves and NaN, then we conservatively return a NaN
 * on its entire domain. In principle, we could consider the pieces
 * where it is NaN separately from those where it is not.
 *
 * If "pwaff_true" and "pwaff_false" are obviously equal to each other,
 * then only use the domain of "cond" to restrict the domain.
 */
__isl_give isl_pw_aff *isl_pw_aff_cond(__isl_take isl_pw_aff *cond,
	__isl_take isl_pw_aff *pwaff_true, __isl_take isl_pw_aff *pwaff_false)
{
	isl_set *cond_true, *cond_false;
	isl_bool equal;
	if (!cond)
		goto error;
	if (isl_pw_aff_involves_nan(cond)) {
		isl_space *space = isl_pw_aff_get_domain_space(cond);
		isl_local_space *ls = isl_local_space_from_space(space);
		isl_pw_aff_free(cond);
		isl_pw_aff_free(pwaff_true);
		isl_pw_aff_free(pwaff_false);
		return isl_pw_aff_nan_on_domain(ls);
	}
	/* Align each branch against the other before comparing them. */
	pwaff_true = isl_pw_aff_align_params(pwaff_true,
					    isl_pw_aff_get_space(pwaff_false));
	pwaff_false = isl_pw_aff_align_params(pwaff_false,
					    isl_pw_aff_get_space(pwaff_true));
	equal = isl_pw_aff_plain_is_equal(pwaff_true, pwaff_false);
	if (equal < 0)
		goto error;
	if (equal) {
		/* Both branches agree; only the domain of "cond" matters. */
		isl_set *dom;
		dom = isl_set_coalesce(isl_pw_aff_domain(cond));
		isl_pw_aff_free(pwaff_false);
		return isl_pw_aff_intersect_domain(pwaff_true, dom);
	}
	cond_true = isl_pw_aff_non_zero_set(isl_pw_aff_copy(cond));
	cond_false = isl_pw_aff_zero_set(cond);
	return isl_pw_aff_select(cond_true, pwaff_true,
				 cond_false, pwaff_false);
error:
	isl_pw_aff_free(cond);
	isl_pw_aff_free(pwaff_true);
	isl_pw_aff_free(pwaff_false);
	return NULL;
}
/* Is "aff" a constant expression?
 * The first two entries of aff->v hold the denominator and the
 * constant term, so the expression is constant exactly when all
 * remaining coefficients are zero.
 */
isl_bool isl_aff_is_cst(__isl_keep isl_aff *aff)
{
	if (!aff)
		return isl_bool_error;
	return isl_seq_first_non_zero(aff->v->el + 2, aff->v->size - 2) == -1;
}
/* Check whether pwaff is a piecewise constant,
 * i.e. whether every one of its pieces is a constant expression.
 */
isl_bool isl_pw_aff_is_cst(__isl_keep isl_pw_aff *pwaff)
{
	int k;

	if (!pwaff)
		return isl_bool_error;

	for (k = 0; k < pwaff->n; ++k) {
		isl_bool cst = isl_aff_is_cst(pwaff->p[k].aff);
		/* Propagate both errors and a negative answer. */
		if (cst != isl_bool_true)
			return cst;
	}

	return isl_bool_true;
}
/* Are all elements of "mpa" piecewise constants? */
isl_bool isl_multi_pw_aff_is_cst(__isl_keep isl_multi_pw_aff *mpa)
{
	int k;

	if (!mpa)
		return isl_bool_error;

	for (k = 0; k < mpa->n; ++k) {
		isl_bool cst = isl_pw_aff_is_cst(mpa->p[k]);
		if (cst != isl_bool_true)
			return cst;
	}

	return isl_bool_true;
}
/* Return the product of "aff1" and "aff2".
 *
 * If either of the two is NaN, then the result is NaN.
 *
 * Otherwise, at least one of "aff1" or "aff2" needs to be a constant.
 */
__isl_give isl_aff *isl_aff_mul(__isl_take isl_aff *aff1,
	__isl_take isl_aff *aff2)
{
	if (!aff1 || !aff2)
		goto error;
	/* NaN absorbs the product; drop the other factor. */
	if (isl_aff_is_nan(aff1)) {
		isl_aff_free(aff2);
		return aff1;
	}
	if (isl_aff_is_nan(aff2)) {
		isl_aff_free(aff1);
		return aff2;
	}
	/* Normalize so that the constant factor ends up in aff2. */
	if (!isl_aff_is_cst(aff2) && isl_aff_is_cst(aff1))
		return isl_aff_mul(aff2, aff1);
	if (!isl_aff_is_cst(aff2))
		isl_die(isl_aff_get_ctx(aff1), isl_error_invalid,
			"at least one affine expression should be constant",
			goto error);
	aff1 = isl_aff_cow(aff1);
	if (!aff1 || !aff2)
		goto error;
	/* Multiply by the rational constant el[1]/el[0] of aff2
	 * (el[0] is the denominator, el[1] the constant term). */
	aff1 = isl_aff_scale(aff1, aff2->v->el[1]);
	aff1 = isl_aff_scale_down(aff1, aff2->v->el[0]);
	isl_aff_free(aff2);
	return aff1;
error:
	isl_aff_free(aff1);
	isl_aff_free(aff2);
	return NULL;
}
/* Divide "aff1" by "aff2", assuming "aff2" is a constant.
 *
 * If either of the two is NaN, then the result is NaN.
 *
 * Dividing by the rational constant el[1]/el[0] of "aff2" (el[0] is
 * the denominator, el[1] the constant term) amounts to scaling "aff1"
 * by el[0] and scaling it down by el[1].  Since isl_aff_scale_down
 * requires a positive factor, a negative constant is temporarily
 * negated (numerator and denominator together, preserving the value's
 * sign in the scale step) and restored afterwards.
 */
__isl_give isl_aff *isl_aff_div(__isl_take isl_aff *aff1,
	__isl_take isl_aff *aff2)
{
	int is_cst;
	int neg;
	if (!aff1 || !aff2)
		goto error;
	/* NaN absorbs the quotient; drop the other operand. */
	if (isl_aff_is_nan(aff1)) {
		isl_aff_free(aff2);
		return aff1;
	}
	if (isl_aff_is_nan(aff2)) {
		isl_aff_free(aff1);
		return aff2;
	}
	is_cst = isl_aff_is_cst(aff2);
	if (is_cst < 0)
		goto error;
	if (!is_cst)
		isl_die(isl_aff_get_ctx(aff2), isl_error_invalid,
			"second argument should be a constant", goto error);
	/* Note: aff2 was already checked for NULL above and is not
	 * reassigned, so no further NULL check is needed here. */
	neg = isl_int_is_neg(aff2->v->el[1]);
	if (neg) {
		isl_int_neg(aff2->v->el[0], aff2->v->el[0]);
		isl_int_neg(aff2->v->el[1], aff2->v->el[1]);
	}
	aff1 = isl_aff_scale(aff1, aff2->v->el[0]);
	aff1 = isl_aff_scale_down(aff1, aff2->v->el[1]);
	/* Undo the temporary negation before releasing aff2. */
	if (neg) {
		isl_int_neg(aff2->v->el[0], aff2->v->el[0]);
		isl_int_neg(aff2->v->el[1], aff2->v->el[1]);
	}
	isl_aff_free(aff2);
	return aff1;
error:
	isl_aff_free(aff1);
	isl_aff_free(aff2);
	return NULL;
}
/* Add two piecewise affine expressions on their shared domain. */
static __isl_give isl_pw_aff *pw_aff_add(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return isl_pw_aff_on_shared_domain(pwaff1, pwaff2, &isl_aff_add);
}
/* Public wrapper: align parameters, then add on the shared domain. */
__isl_give isl_pw_aff *isl_pw_aff_add(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return isl_pw_aff_align_params_pw_pw_and(pwaff1, pwaff2, &pw_aff_add);
}
/* Add on the union of the domains; delegates to the underlying
 * (template-generated) implementation. */
__isl_give isl_pw_aff *isl_pw_aff_union_add(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return isl_pw_aff_union_add_(pwaff1, pwaff2);
}
/* Multiply two piecewise affine expressions on their shared domain. */
static __isl_give isl_pw_aff *pw_aff_mul(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return isl_pw_aff_on_shared_domain(pwaff1, pwaff2, &isl_aff_mul);
}
/* Public wrapper: align parameters, then multiply on the shared domain. */
__isl_give isl_pw_aff *isl_pw_aff_mul(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return isl_pw_aff_align_params_pw_pw_and(pwaff1, pwaff2, &pw_aff_mul);
}
/* Divide piecewise affine expressions on their shared domain. */
static __isl_give isl_pw_aff *pw_aff_div(__isl_take isl_pw_aff *pa1,
	__isl_take isl_pw_aff *pa2)
{
	return isl_pw_aff_on_shared_domain(pa1, pa2, &isl_aff_div);
}
/* Divide "pa1" by "pa2", assuming "pa2" is a piecewise constant.
 */
__isl_give isl_pw_aff *isl_pw_aff_div(__isl_take isl_pw_aff *pa1,
	__isl_take isl_pw_aff *pa2)
{
	int is_cst;
	is_cst = isl_pw_aff_is_cst(pa2);
	if (is_cst < 0)
		goto error;
	if (!is_cst)
		isl_die(isl_pw_aff_get_ctx(pa2), isl_error_invalid,
			"second argument should be a piecewise constant",
			goto error);
	return isl_pw_aff_align_params_pw_pw_and(pa1, pa2, &pw_aff_div);
error:
	isl_pw_aff_free(pa1);
	isl_pw_aff_free(pa2);
	return NULL;
}
/* Compute the quotient of the integer division of "pa1" by "pa2"
 * with rounding towards zero.
 * "pa2" is assumed to be a piecewise constant.
 *
 * In particular, return
 *
 *	pa1 >= 0 ? floor(pa1/pa2) : ceil(pa1/pa2)
 *
 */
__isl_give isl_pw_aff *isl_pw_aff_tdiv_q(__isl_take isl_pw_aff *pa1,
	__isl_take isl_pw_aff *pa2)
{
	int is_cst;
	isl_set *cond;
	isl_pw_aff *f, *c;
	is_cst = isl_pw_aff_is_cst(pa2);
	if (is_cst < 0)
		goto error;
	if (!is_cst)
		isl_die(isl_pw_aff_get_ctx(pa2), isl_error_invalid,
			"second argument should be a piecewise constant",
			goto error);
	/* pa1 now holds the rational quotient pa1/pa2. */
	pa1 = isl_pw_aff_div(pa1, pa2);
	cond = isl_pw_aff_nonneg_set(isl_pw_aff_copy(pa1));
	f = isl_pw_aff_floor(isl_pw_aff_copy(pa1));
	c = isl_pw_aff_ceil(pa1);
	/* Select floor where the quotient is non-negative, ceil elsewhere;
	 * the indicator function turns the condition set into a 0/1
	 * expression usable by isl_pw_aff_cond. */
	return isl_pw_aff_cond(isl_set_indicator_function(cond), f, c);
error:
	isl_pw_aff_free(pa1);
	isl_pw_aff_free(pa2);
	return NULL;
}
/* Compute the remainder of the integer division of "pa1" by "pa2"
 * with rounding towards zero.
 * "pa2" is assumed to be a piecewise constant.
 *
 * In particular, return
 *
 *	pa1 - pa2 * (pa1 >= 0 ? floor(pa1/pa2) : ceil(pa1/pa2))
 *
 */
__isl_give isl_pw_aff *isl_pw_aff_tdiv_r(__isl_take isl_pw_aff *pa1,
	__isl_take isl_pw_aff *pa2)
{
	int is_cst;
	isl_pw_aff *res;
	is_cst = isl_pw_aff_is_cst(pa2);
	if (is_cst < 0)
		goto error;
	if (!is_cst)
		isl_die(isl_pw_aff_get_ctx(pa2), isl_error_invalid,
			"second argument should be a piecewise constant",
			goto error);
	/* res = tdiv_q(pa1, pa2); remainder = pa1 - pa2 * res */
	res = isl_pw_aff_tdiv_q(isl_pw_aff_copy(pa1), isl_pw_aff_copy(pa2));
	res = isl_pw_aff_mul(pa2, res);
	res = isl_pw_aff_sub(pa1, res);
	return res;
error:
	isl_pw_aff_free(pa1);
	isl_pw_aff_free(pa2);
	return NULL;
}
/* Minimum of two piecewise affine expressions: on the part of the
 * shared domain where pwaff1 <= pwaff2 select pwaff1, on the rest
 * select pwaff2.
 */
static __isl_give isl_pw_aff *pw_aff_min(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	isl_set *le;
	isl_set *dom;
	dom = isl_set_intersect(isl_pw_aff_domain(isl_pw_aff_copy(pwaff1)),
				isl_pw_aff_domain(isl_pw_aff_copy(pwaff2)));
	le = isl_pw_aff_le_set(isl_pw_aff_copy(pwaff1),
				isl_pw_aff_copy(pwaff2));
	/* dom becomes the complement of le inside the shared domain,
	 * so the two selection cells are disjoint. */
	dom = isl_set_subtract(dom, isl_set_copy(le));
	return isl_pw_aff_select(le, pwaff1, dom, pwaff2);
}
/* Public wrapper: align parameters, then take the minimum. */
__isl_give isl_pw_aff *isl_pw_aff_min(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return isl_pw_aff_align_params_pw_pw_and(pwaff1, pwaff2, &pw_aff_min);
}
/* Maximum of two piecewise affine expressions: on the part of the
 * shared domain where pwaff1 >= pwaff2 select pwaff1, on the rest
 * select pwaff2.
 */
static __isl_give isl_pw_aff *pw_aff_max(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	isl_set *ge;
	isl_set *dom;
	dom = isl_set_intersect(isl_pw_aff_domain(isl_pw_aff_copy(pwaff1)),
				isl_pw_aff_domain(isl_pw_aff_copy(pwaff2)));
	ge = isl_pw_aff_ge_set(isl_pw_aff_copy(pwaff1),
				isl_pw_aff_copy(pwaff2));
	dom = isl_set_subtract(dom, isl_set_copy(ge));
	return isl_pw_aff_select(ge, pwaff1, dom, pwaff2);
}
/* Public wrapper: align parameters, then take the maximum. */
__isl_give isl_pw_aff *isl_pw_aff_max(__isl_take isl_pw_aff *pwaff1,
	__isl_take isl_pw_aff *pwaff2)
{
	return isl_pw_aff_align_params_pw_pw_and(pwaff1, pwaff2, &pw_aff_max);
}
/* Left-fold "fn" over the (non-empty) list, i.e. compute
 * fn(...fn(fn(list[0], list[1]), list[2])..., list[n-1]).
 * Consumes the reference to "list".
 */
static __isl_give isl_pw_aff *pw_aff_list_reduce(
	__isl_take isl_pw_aff_list *list,
	__isl_give isl_pw_aff *(*fn)(__isl_take isl_pw_aff *pwaff1,
					__isl_take isl_pw_aff *pwaff2))
{
	int i;
	isl_ctx *ctx;
	isl_pw_aff *res;
	if (!list)
		return NULL;
	ctx = isl_pw_aff_list_get_ctx(list);
	if (list->n < 1)
		isl_die(ctx, isl_error_invalid,
			"list should contain at least one element", goto error);
	res = isl_pw_aff_copy(list->p[0]);
	for (i = 1; i < list->n; ++i)
		res = fn(res, isl_pw_aff_copy(list->p[i]));
	isl_pw_aff_list_free(list);
	return res;
error:
	isl_pw_aff_list_free(list);
	return NULL;
}
/* Return an isl_pw_aff that maps each element in the intersection of the
 * domains of the elements of list to the minimal corresponding affine
 * expression.
 */
__isl_give isl_pw_aff *isl_pw_aff_list_min(__isl_take isl_pw_aff_list *list)
{
	return pw_aff_list_reduce(list, &isl_pw_aff_min);
}
/* Return an isl_pw_aff that maps each element in the intersection of the
 * domains of the elements of list to the maximal corresponding affine
 * expression.
 */
__isl_give isl_pw_aff *isl_pw_aff_list_max(__isl_take isl_pw_aff_list *list)
{
	return pw_aff_list_reduce(list, &isl_pw_aff_max);
}
/* Mark the domain of every piece of "pwaff" as rational. */
__isl_give isl_pw_aff *isl_pw_aff_set_rational(__isl_take isl_pw_aff *pwaff)
{
	int pos;

	pwaff = isl_pw_aff_cow(pwaff);
	if (!pwaff)
		return NULL;
	if (pwaff->n == 0)
		return pwaff;
	for (pos = 0; pos < pwaff->n; ++pos) {
		isl_set *cell = isl_set_set_rational(pwaff->p[pos].set);
		pwaff->p[pos].set = cell;
		if (!cell)
			return isl_pw_aff_free(pwaff);
	}
	return pwaff;
}
/* Mark the domains of the elements of "list" as rational.
 * Each element is taken out of the list, converted and put back.
 */
__isl_give isl_pw_aff_list *isl_pw_aff_list_set_rational(
	__isl_take isl_pw_aff_list *list)
{
	int i, n;
	if (!list)
		return NULL;
	if (list->n == 0)
		return list;
	n = list->n;
	for (i = 0; i < n; ++i) {
		isl_pw_aff *pa;
		pa = isl_pw_aff_list_get_pw_aff(list, i);
		pa = isl_pw_aff_set_rational(pa);
		list = isl_pw_aff_list_set_pw_aff(list, i, pa);
	}
	return list;
}
/* Do the parameters of "aff" match those of "space"?
 * Returns 1 if they match, 0 if not and -1 on error.
 */
int isl_aff_matching_params(__isl_keep isl_aff *aff,
	__isl_keep isl_space *space)
{
	isl_space *dom_space;
	int equal;

	if (!aff || !space)
		return -1;

	dom_space = isl_aff_get_domain_space(aff);
	equal = isl_space_match(space, isl_dim_param,
				dom_space, isl_dim_param);
	isl_space_free(dom_space);

	return equal;
}
/* Check that the domain space of "aff" matches "space".
 *
 * Both the parameters and the (set) tuple of the domain are compared;
 * a mismatch in either raises an isl_error_invalid.
 *
 * Return 0 on success and -1 on error.
 */
int isl_aff_check_match_domain_space(__isl_keep isl_aff *aff,
	__isl_keep isl_space *space)
{
	isl_space *aff_space;
	int match;
	if (!aff || !space)
		return -1;
	aff_space = isl_aff_get_domain_space(aff);
	match = isl_space_match(space, isl_dim_param, aff_space, isl_dim_param);
	if (match < 0)
		goto error;
	if (!match)
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"parameters don't match", goto error);
	match = isl_space_tuple_is_equal(space, isl_dim_in,
					aff_space, isl_dim_set);
	if (match < 0)
		goto error;
	if (!match)
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"domains don't match", goto error);
	isl_space_free(aff_space);
	return 0;
error:
	isl_space_free(aff_space);
	return -1;
}
#undef BASE
#define BASE aff
#undef DOMBASE
#define DOMBASE set
#define NO_DOMAIN
#include <isl_multi_templ.c>
#include <isl_multi_apply_set.c>
#include <isl_multi_cmp.c>
#include <isl_multi_floor.c>
#include <isl_multi_gist.c>
#undef NO_DOMAIN
/* Remove any internal structure of the domain of "ma".
 * If there is any such internal structure in the input,
 * then the name of the corresponding space is also removed.
 */
__isl_give isl_multi_aff *isl_multi_aff_flatten_domain(
	__isl_take isl_multi_aff *ma)
{
	isl_space *space;
	if (!ma)
		return NULL;
	/* No nested domain structure: nothing to flatten. */
	if (!ma->space->nested[0])
		return ma;
	space = isl_multi_aff_get_space(ma);
	space = isl_space_flatten_domain(space);
	ma = isl_multi_aff_reset_space(ma, space);
	return ma;
}
/* Given a map space, return an isl_multi_aff that maps a wrapped copy
 * of the space to its domain.
 *
 * Each of the n_in output expressions is simply the identity on the
 * corresponding input dimension of the wrapped space.
 */
__isl_give isl_multi_aff *isl_multi_aff_domain_map(__isl_take isl_space *space)
{
	int i, n_in;
	isl_local_space *ls;
	isl_multi_aff *ma;
	if (!space)
		return NULL;
	if (!isl_space_is_map(space))
		isl_die(isl_space_get_ctx(space), isl_error_invalid,
			"not a map space", goto error);
	n_in = isl_space_dim(space, isl_dim_in);
	space = isl_space_domain_map(space);
	ma = isl_multi_aff_alloc(isl_space_copy(space));
	/* Zero-dimensional domain: nothing to fill in. */
	if (n_in == 0) {
		isl_space_free(space);
		return ma;
	}
	space = isl_space_domain(space);
	ls = isl_local_space_from_space(space);
	for (i = 0; i < n_in; ++i) {
		isl_aff *aff;
		aff = isl_aff_var_on_domain(isl_local_space_copy(ls),
						isl_dim_set, i);
		ma = isl_multi_aff_set_aff(ma, i, aff);
	}
	isl_local_space_free(ls);
	return ma;
error:
	isl_space_free(space);
	return NULL;
}
/* Given a map space, return an isl_multi_aff that maps a wrapped copy
 * of the space to its range.
 *
 * Each of the n_out output expressions selects the corresponding
 * range dimension of the wrapped space, i.e. dimension n_in + i.
 */
__isl_give isl_multi_aff *isl_multi_aff_range_map(__isl_take isl_space *space)
{
	int i, n_in, n_out;
	isl_local_space *ls;
	isl_multi_aff *ma;
	if (!space)
		return NULL;
	if (!isl_space_is_map(space))
		isl_die(isl_space_get_ctx(space), isl_error_invalid,
			"not a map space", goto error);
	n_in = isl_space_dim(space, isl_dim_in);
	n_out = isl_space_dim(space, isl_dim_out);
	space = isl_space_range_map(space);
	ma = isl_multi_aff_alloc(isl_space_copy(space));
	/* Zero-dimensional range: nothing to fill in. */
	if (n_out == 0) {
		isl_space_free(space);
		return ma;
	}
	space = isl_space_domain(space);
	ls = isl_local_space_from_space(space);
	for (i = 0; i < n_out; ++i) {
		isl_aff *aff;
		aff = isl_aff_var_on_domain(isl_local_space_copy(ls),
						isl_dim_set, n_in + i);
		ma = isl_multi_aff_set_aff(ma, i, aff);
	}
	isl_local_space_free(ls);
	return ma;
error:
	isl_space_free(space);
	return NULL;
}
/* Given a map space, return an isl_pw_multi_aff that maps a wrapped copy
 * of the space to its range (a single-piece wrapper around
 * isl_multi_aff_range_map).
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_range_map(
	__isl_take isl_space *space)
{
	return isl_pw_multi_aff_from_multi_aff(isl_multi_aff_range_map(space));
}
/* Given the space of a set and a range of set dimensions,
 * construct an isl_multi_aff that projects out those dimensions.
 *
 * The dimensions before "first" keep their position; the dimensions
 * after the removed range shift down by "n".
 */
__isl_give isl_multi_aff *isl_multi_aff_project_out_map(
	__isl_take isl_space *space, enum isl_dim_type type,
	unsigned first, unsigned n)
{
	int i, dim;
	isl_local_space *ls;
	isl_multi_aff *ma;
	if (!space)
		return NULL;
	if (!isl_space_is_set(space))
		isl_die(isl_space_get_ctx(space), isl_error_unsupported,
			"expecting set space", goto error);
	if (type != isl_dim_set)
		isl_die(isl_space_get_ctx(space), isl_error_invalid,
			"only set dimensions can be projected out", goto error);
	dim = isl_space_dim(space, isl_dim_set);
	if (first + n > dim)
		isl_die(isl_space_get_ctx(space), isl_error_invalid,
			"range out of bounds", goto error);
	space = isl_space_from_domain(space);
	space = isl_space_add_dims(space, isl_dim_out, dim - n);
	/* All dimensions projected out: the result has no output
	 * expressions to fill in. */
	if (dim == n)
		return isl_multi_aff_alloc(space);
	ma = isl_multi_aff_alloc(isl_space_copy(space));
	space = isl_space_domain(space);
	ls = isl_local_space_from_space(space);
	/* Identity on the dimensions before the removed range ... */
	for (i = 0; i < first; ++i) {
		isl_aff *aff;
		aff = isl_aff_var_on_domain(isl_local_space_copy(ls),
						isl_dim_set, i);
		ma = isl_multi_aff_set_aff(ma, i, aff);
	}
	/* ... and the dimensions after it, shifted down by n. */
	for (i = 0; i < dim - (first + n); ++i) {
		isl_aff *aff;
		aff = isl_aff_var_on_domain(isl_local_space_copy(ls),
						isl_dim_set, first + n + i);
		ma = isl_multi_aff_set_aff(ma, first + i, aff);
	}
	isl_local_space_free(ls);
	return ma;
error:
	isl_space_free(space);
	return NULL;
}
/* Given the space of a set and a range of set dimensions,
 * construct an isl_pw_multi_aff that projects out those dimensions
 * (single-piece wrapper around isl_multi_aff_project_out_map).
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_project_out_map(
	__isl_take isl_space *space, enum isl_dim_type type,
	unsigned first, unsigned n)
{
	isl_multi_aff *ma;
	ma = isl_multi_aff_project_out_map(space, type, first, n);
	return isl_pw_multi_aff_from_multi_aff(ma);
}
/* Create an isl_pw_multi_aff with the given isl_multi_aff on a universe
 * domain.
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_from_multi_aff(
	__isl_take isl_multi_aff *ma)
{
	isl_set *dom = isl_set_universe(isl_multi_aff_get_domain_space(ma));
	return isl_pw_multi_aff_alloc(dom, ma);
}
/* Create a piecewise multi-affine expression in the given space that maps each
 * input dimension to the corresponding output dimension.
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_identity(
	__isl_take isl_space *space)
{
	return isl_pw_multi_aff_from_multi_aff(isl_multi_aff_identity(space));
}
/* Exploit the equalities in "eq" to simplify the affine expressions.
 * Each element of "maff" is simplified independently against a copy
 * of "eq"; consumes both arguments.
 */
static __isl_give isl_multi_aff *isl_multi_aff_substitute_equalities(
	__isl_take isl_multi_aff *maff, __isl_take isl_basic_set *eq)
{
	int i;
	maff = isl_multi_aff_cow(maff);
	if (!maff || !eq)
		goto error;
	for (i = 0; i < maff->n; ++i) {
		maff->p[i] = isl_aff_substitute_equalities(maff->p[i],
						isl_basic_set_copy(eq));
		if (!maff->p[i])
			goto error;
	}
	isl_basic_set_free(eq);
	return maff;
error:
	isl_basic_set_free(eq);
	isl_multi_aff_free(maff);
	return NULL;
}
/* Scale every element of "maff" by the factor "f". */
__isl_give isl_multi_aff *isl_multi_aff_scale(__isl_take isl_multi_aff *maff,
	isl_int f)
{
	int pos;

	maff = isl_multi_aff_cow(maff);
	if (!maff)
		return NULL;

	for (pos = 0; pos < maff->n; ++pos) {
		isl_aff *scaled = isl_aff_scale(maff->p[pos], f);
		maff->p[pos] = scaled;
		if (!scaled)
			return isl_multi_aff_free(maff);
	}

	return maff;
}
/* Add "maff1" and "maff2" and simplify the result with respect to
 * the domain "dom" (which is only read, not consumed).
 */
__isl_give isl_multi_aff *isl_multi_aff_add_on_domain(__isl_keep isl_set *dom,
	__isl_take isl_multi_aff *maff1, __isl_take isl_multi_aff *maff2)
{
	maff1 = isl_multi_aff_add(maff1, maff2);
	maff1 = isl_multi_aff_gist(maff1, isl_set_copy(dom));
	return maff1;
}
/* An isl_multi_aff is never considered empty:
 * return 0 for any valid input and -1 on NULL.
 */
int isl_multi_aff_is_empty(__isl_keep isl_multi_aff *maff)
{
	return maff ? 0 : -1;
}
/* Return the set of domain elements where "ma1" is lexicographically
 * smaller than or equal to "ma2" (i.e. where "ma2" >= "ma1").
 */
__isl_give isl_set *isl_multi_aff_lex_le_set(__isl_take isl_multi_aff *ma1,
	__isl_take isl_multi_aff *ma2)
{
	return isl_multi_aff_lex_ge_set(ma2, ma1);
}
/* Return the set of domain elements where "ma1" is lexicographically
 * smaller than "ma2" (i.e. where "ma2" > "ma1").
 */
__isl_give isl_set *isl_multi_aff_lex_lt_set(__isl_take isl_multi_aff *ma1,
	__isl_take isl_multi_aff *ma2)
{
	return isl_multi_aff_lex_gt_set(ma2, ma1);
}
/* Return the set of domain elements where "ma1" and "ma2"
 * satisfy "order".
 *
 * The pair (ma1(x), ma2(x)) is built as the range product of the two
 * graphs; intersecting its range with the wrapped order relation and
 * projecting onto the domain yields the requested set.
 */
static __isl_give isl_set *isl_multi_aff_order_set(
	__isl_take isl_multi_aff *ma1, __isl_take isl_multi_aff *ma2,
	__isl_give isl_map *order(__isl_take isl_space *set_space))
{
	isl_space *space;
	isl_map *map1, *map2;
	isl_map *map, *ge;
	map1 = isl_map_from_multi_aff(ma1);
	map2 = isl_map_from_multi_aff(ma2);
	map = isl_map_range_product(map1, map2);
	space = isl_space_range(isl_map_get_space(map));
	space = isl_space_domain(isl_space_unwrap(space));
	ge = order(space);
	map = isl_map_intersect_range(map, isl_map_wrap(ge));
	return isl_map_domain(map);
}
/* Return the set of domain elements where "ma1" is lexicographically
 * greater than or equal to "ma2".
 */
__isl_give isl_set *isl_multi_aff_lex_ge_set(__isl_take isl_multi_aff *ma1,
	__isl_take isl_multi_aff *ma2)
{
	return isl_multi_aff_order_set(ma1, ma2, &isl_map_lex_ge);
}
/* Return the set of domain elements where "ma1" is lexicographically
 * greater than "ma2".
 */
__isl_give isl_set *isl_multi_aff_lex_gt_set(__isl_take isl_multi_aff *ma1,
	__isl_take isl_multi_aff *ma2)
{
	return isl_multi_aff_order_set(ma1, ma2, &isl_map_lex_gt);
}
/* Instantiate the generic "piecewise" templates for isl_multi_aff,
 * generating isl_pw_multi_aff and its operations.
 * PW/EL name the piecewise and element types, FIELD the struct member
 * holding the element in each piece, and EL_IS_ZERO/ZERO/IS_ZERO
 * configure how a "zero" (here: empty) element is detected and named.
 * The NO_* macros switch off template operations that are not
 * provided for this element type.
 */
#undef PW
#define PW isl_pw_multi_aff
#undef EL
#define EL isl_multi_aff
#undef EL_IS_ZERO
#define EL_IS_ZERO is_empty
#undef ZERO
#define ZERO empty
#undef IS_ZERO
#define IS_ZERO is_empty
#undef FIELD
#define FIELD maff
#undef DEFAULT_IS_ZERO
#define DEFAULT_IS_ZERO 0
#define NO_SUB
#define NO_EVAL
#define NO_OPT
#define NO_INVOLVES_DIMS
#define NO_INSERT_DIMS
#define NO_LIFT
#define NO_MORPH
/* Generate the isl_pw_multi_aff implementation from the templates. */
#include <isl_pw_templ.c>
#include <isl_pw_union_opt.c>
#undef NO_SUB
/* Instantiate the union templates, generating isl_union_pw_multi_aff
 * as a union of isl_pw_multi_aff objects (PART) over different spaces.
 */
#undef UNION
#define UNION isl_union_pw_multi_aff
#undef PART
#define PART isl_pw_multi_aff
#undef PARTS
#define PARTS pw_multi_aff
#include <isl_union_multi.c>
#include <isl_union_neg.c>
/* Compute the union lexmax of "pma1" and "pma2" assuming their
 * parameters have already been aligned: on overlapping cells pick,
 * pointwise, whichever expression is lexicographically greater
 * (selected via isl_multi_aff_lex_ge_set).
 */
static __isl_give isl_pw_multi_aff *pw_multi_aff_union_lexmax(
__isl_take isl_pw_multi_aff *pma1,
__isl_take isl_pw_multi_aff *pma2)
{
return isl_pw_multi_aff_union_opt_cmp(pma1, pma2,
&isl_multi_aff_lex_ge_set);
}
/* Given two piecewise multi affine expressions, return a piecewise
 * multi-affine expression defined on the union of the definition domains
 * of the inputs that is equal to the lexicographic maximum of the two
 * inputs on each cell. If only one of the two inputs is defined on
 * a given cell, then it is considered to be the maximum.
 *
 * Aligns the parameters of the two inputs and then defers to
 * pw_multi_aff_union_lexmax above.
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_union_lexmax(
__isl_take isl_pw_multi_aff *pma1,
__isl_take isl_pw_multi_aff *pma2)
{
return isl_pw_multi_aff_align_params_pw_pw_and(pma1, pma2,
&pw_multi_aff_union_lexmax);
}
/* Compute the union lexmin of "pma1" and "pma2" assuming their
 * parameters have already been aligned: on overlapping cells pick,
 * pointwise, whichever expression is lexicographically smaller
 * (selected via isl_multi_aff_lex_le_set).
 */
static __isl_give isl_pw_multi_aff *pw_multi_aff_union_lexmin(
__isl_take isl_pw_multi_aff *pma1,
__isl_take isl_pw_multi_aff *pma2)
{
return isl_pw_multi_aff_union_opt_cmp(pma1, pma2,
&isl_multi_aff_lex_le_set);
}
/* Given two piecewise multi affine expressions, return a piecewise
 * multi-affine expression defined on the union of the definition domains
 * of the inputs that is equal to the lexicographic minimum of the two
 * inputs on each cell. If only one of the two inputs is defined on
 * a given cell, then it is considered to be the minimum.
 *
 * Aligns the parameters of the two inputs and then defers to
 * pw_multi_aff_union_lexmin above.
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_union_lexmin(
__isl_take isl_pw_multi_aff *pma1,
__isl_take isl_pw_multi_aff *pma2)
{
return isl_pw_multi_aff_align_params_pw_pw_and(pma1, pma2,
&pw_multi_aff_union_lexmin);
}
/* Add "pma2" to "pma1" on the intersection of their domains,
 * assuming the parameters have already been aligned.
 */
static __isl_give isl_pw_multi_aff *pw_multi_aff_add(
__isl_take isl_pw_multi_aff *pma1, __isl_take isl_pw_multi_aff *pma2)
{
return isl_pw_multi_aff_on_shared_domain(pma1, pma2,
&isl_multi_aff_add);
}
/* Add "pma2" to "pma1" on the intersection of their domains,
 * after aligning the parameters of the two inputs.
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_add(
__isl_take isl_pw_multi_aff *pma1, __isl_take isl_pw_multi_aff *pma2)
{
return isl_pw_multi_aff_align_params_pw_pw_and(pma1, pma2,
&pw_multi_aff_add);
}
/* Subtract "pma2" from "pma1" on the intersection of their domains,
 * assuming the parameters have already been aligned.
 */
static __isl_give isl_pw_multi_aff *pw_multi_aff_sub(
__isl_take isl_pw_multi_aff *pma1, __isl_take isl_pw_multi_aff *pma2)
{
return isl_pw_multi_aff_on_shared_domain(pma1, pma2,
&isl_multi_aff_sub);
}
/* Subtract "pma2" from "pma1" and return the result.
 *
 * Aligns the parameters of the two inputs and then subtracts
 * on the shared domain via pw_multi_aff_sub above.
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_sub(
__isl_take isl_pw_multi_aff *pma1, __isl_take isl_pw_multi_aff *pma2)
{
return isl_pw_multi_aff_align_params_pw_pw_and(pma1, pma2,
&pw_multi_aff_sub);
}
/* Compute the sum of "pma1" and "pma2" on the union of their domains,
 * with the actual sum on the shared domain and the defined expression
 * on the symmetric difference.  Defers to the template-generated
 * implementation (the trailing-underscore variant).
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_union_add(
__isl_take isl_pw_multi_aff *pma1, __isl_take isl_pw_multi_aff *pma2)
{
return isl_pw_multi_aff_union_add_(pma1, pma2);
}
/* Compute the sum of "upa1" and "upa2" on the union of their domains,
 * with the actual sum on the shared domain and
 * the defined expression on the symmetric difference of the domains.
 *
 * Defers to the template-generated implementation
 * (the trailing-underscore variant).
 */
__isl_give isl_union_pw_aff *isl_union_pw_aff_union_add(
__isl_take isl_union_pw_aff *upa1, __isl_take isl_union_pw_aff *upa2)
{
return isl_union_pw_aff_union_add_(upa1, upa2);
}
/* Compute the sum of "upma1" and "upma2" on the union of their domains,
 * with the actual sum on the shared domain and
 * the defined expression on the symmetric difference of the domains.
 *
 * Defers to the template-generated implementation
 * (the trailing-underscore variant).
 */
__isl_give isl_union_pw_multi_aff *isl_union_pw_multi_aff_union_add(
__isl_take isl_union_pw_multi_aff *upma1,
__isl_take isl_union_pw_multi_aff *upma2)
{
return isl_union_pw_multi_aff_union_add_(upma1, upma2);
}
/* Given two piecewise multi-affine expressions A -> B and C -> D,
 * construct a piecewise multi-affine expression [A -> C] -> [B -> D].
 *
 * The result has one piece per pair of pieces of the inputs:
 * its cell is the product of the two cells and its expression
 * the product of the two expressions.
 * Note that isl_pw_multi_aff_free is a no-op on NULL, so both the
 * error path (NULL input, "prod" still NULL) and the regular path
 * can share the same cleanup code.
 */
static __isl_give isl_pw_multi_aff *pw_multi_aff_product(
__isl_take isl_pw_multi_aff *pma1, __isl_take isl_pw_multi_aff *pma2)
{
	int i, j;
	isl_space *space;
	isl_pw_multi_aff *prod = NULL;

	if (!pma1 || !pma2)
		goto done;

	space = isl_space_product(isl_space_copy(pma1->dim),
				isl_space_copy(pma2->dim));
	prod = isl_pw_multi_aff_alloc_size(space, pma1->n * pma2->n);

	for (i = 0; i < pma1->n; ++i)
		for (j = 0; j < pma2->n; ++j) {
			isl_set *cell;
			isl_multi_aff *ma;

			cell = isl_set_product(isl_set_copy(pma1->p[i].set),
						isl_set_copy(pma2->p[j].set));
			ma = isl_multi_aff_product(
					isl_multi_aff_copy(pma1->p[i].maff),
					isl_multi_aff_copy(pma2->p[j].maff));
			prod = isl_pw_multi_aff_add_piece(prod, cell, ma);
		}

done:
	isl_pw_multi_aff_free(pma1);
	isl_pw_multi_aff_free(pma2);
	return prod;
}
/* Given two piecewise multi-affine expressions A -> B and C -> D,
 * construct a piecewise multi-affine expression [A -> C] -> [B -> D],
 * after aligning the parameters of the two inputs.
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_product(
__isl_take isl_pw_multi_aff *pma1, __isl_take isl_pw_multi_aff *pma2)
{
return isl_pw_multi_aff_align_params_pw_pw_and(pma1, pma2,
&pw_multi_aff_product);
}
/* Construct a map mapping the domain of the piecewise multi-affine
 * expression to its range, with each dimension in the range equated to the
 * corresponding affine expression on its cell.
 *
 * Each piece contributes the graph of its multi-affine expression,
 * restricted to the piece's cell; the cells are pairwise disjoint,
 * so the contributions can be combined with a disjoint union.
 */
__isl_give isl_map *isl_map_from_pw_multi_aff(__isl_take isl_pw_multi_aff *pma)
{
	int i;
	isl_map *res;

	if (!pma)
		return NULL;

	res = isl_map_empty(isl_pw_multi_aff_get_space(pma));
	for (i = 0; i < pma->n; ++i) {
		isl_map *graph;

		graph = isl_map_from_basic_map(isl_basic_map_from_multi_aff(
					isl_multi_aff_copy(pma->p[i].maff)));
		graph = isl_map_intersect_domain(graph,
						isl_set_copy(pma->p[i].set));
		res = isl_map_union_disjoint(res, graph);
	}

	isl_pw_multi_aff_free(pma);
	return res;
}
/* Convert "pma" into an isl_set.
 * This is only valid when the space of "pma" is a set space;
 * otherwise an invalid-argument error is raised and NULL returned.
 * On success the conversion is performed by isl_map_from_pw_multi_aff
 * (an isl_set is a special case of an isl_map in isl).
 */
__isl_give isl_set *isl_set_from_pw_multi_aff(__isl_take isl_pw_multi_aff *pma)
{
if (!pma)
return NULL;
if (!isl_space_is_set(pma->dim))
isl_die(isl_pw_multi_aff_get_ctx(pma), isl_error_invalid,
"isl_pw_multi_aff cannot be converted into an isl_set",
goto error);
return isl_map_from_pw_multi_aff(pma);
error:
isl_pw_multi_aff_free(pma);
return NULL;
}
/* Subtract the initial "n" elements in "ma" with coefficients in "c" and
 * denominator "denom".
 * "denom" is allowed to be negative, in which case the actual denominator
 * is -denom and the expressions are added instead.
 *
 * Start at the first non-zero coefficient; if there is none,
 * return "aff" untouched.  Otherwise scale each referenced element
 * of "ma" by its coefficient, divide by |denom| and add or subtract
 * the term depending on the sign of "denom".
 */
static __isl_give isl_aff *subtract_initial(__isl_take isl_aff *aff,
__isl_keep isl_multi_aff *ma, int n, isl_int *c, isl_int denom)
{
	int pos;
	int add;
	isl_int abs_d;

	pos = isl_seq_first_non_zero(c, n);
	if (pos < 0)
		return aff;

	add = isl_int_is_neg(denom);
	isl_int_init(abs_d);
	isl_int_abs(abs_d, denom);
	for (; pos < n; ++pos) {
		isl_aff *term;

		if (isl_int_is_zero(c[pos]))
			continue;
		term = isl_multi_aff_get_aff(ma, pos);
		term = isl_aff_scale(term, c[pos]);
		term = isl_aff_scale_down(term, abs_d);
		aff = add ? isl_aff_add(aff, term) : isl_aff_sub(aff, term);
	}
	isl_int_clear(abs_d);

	return aff;
}
/* Extract an affine expression that expresses the output dimension "pos"
 * of "bmap" in terms of the parameters and input dimensions from
 * equality "eq".
 * Note that this expression may involve integer divisions defined
 * in terms of parameters and input dimensions.
 * The equality may also involve references to earlier (but not later)
 * output dimensions. These are replaced by the corresponding elements
 * in "ma".
 *
 * If the equality is of the form
 *
 * f(i) + h(j) + a x + g(i) = 0,
 *
 * with f(i) a linear combinations of the parameters and input dimensions,
 * g(i) a linear combination of integer divisions defined in terms of the same
 * and h(j) a linear combinations of earlier output dimensions,
 * then the affine expression is
 *
 * (-f(i) - g(i))/a - h(j)/a
 *
 * If the equality is of the form
 *
 * f(i) + h(j) - a x + g(i) = 0,
 *
 * then the affine expression is
 *
 * (f(i) + g(i))/a - h(j)/(-a)
 *
 *
 * If "div" refers to an integer division (i.e., it is smaller than
 * the number of integer divisions), then the equality constraint
 * does involve an integer division (the one at position "div") that
 * is defined in terms of output dimensions. However, this integer
 * division can be eliminated by exploiting a pair of constraints
 * x >= l and x <= l + n, with n smaller than the coefficient of "div"
 * in the equality constraint. "ineq" refers to inequality x >= l, i.e.,
 * -l + x >= 0.
 * In particular, let
 *
 * x = e(i) + m floor(...)
 *
 * with e(i) the expression derived above and floor(...) the integer
 * division involving output dimensions.
 * From
 *
 * l <= x <= l + n,
 *
 * we have
 *
 * 0 <= x - l <= n
 *
 * This means
 *
 * e(i) + m floor(...) - l = (e(i) + m floor(...) - l) mod m
 * = (e(i) - l) mod m
 *
 * Therefore,
 *
 * x - l = (e(i) - l) mod m
 *
 * or
 *
 * x = ((e(i) - l) mod m) + l
 *
 * The variable "shift" below contains the expression -l, which may
 * also involve a linear combination of earlier output dimensions.
 */
static __isl_give isl_aff *extract_aff_from_equality(
__isl_keep isl_basic_map *bmap, int pos, int eq, int div, int ineq,
__isl_keep isl_multi_aff *ma)
{
unsigned o_out;
unsigned n_div, n_out;
isl_ctx *ctx;
isl_local_space *ls;
isl_aff *aff, *shift;
isl_val *mod;
ctx = isl_basic_map_get_ctx(bmap);
/* The result lives on the domain of "bmap" (parameters, inputs, divs). */
ls = isl_basic_map_get_local_space(bmap);
ls = isl_local_space_domain(ls);
aff = isl_aff_alloc(isl_local_space_copy(ls));
if (!aff)
goto error;
o_out = isl_basic_map_offset(bmap, isl_dim_out);
n_out = isl_basic_map_dim(bmap, isl_dim_out);
n_div = isl_basic_map_dim(bmap, isl_dim_div);
/* Copy (or negate) the param/input and div coefficients of the equality,
 * choosing the sign such that the solved-for dimension ends up with
 * a positive coefficient (cases "-a x" vs "a x" in the comment above).
 * The output-dimension coefficients are skipped here; earlier outputs
 * are handled by subtract_initial below.
 */
if (isl_int_is_neg(bmap->eq[eq][o_out + pos])) {
isl_seq_cpy(aff->v->el + 1, bmap->eq[eq], o_out);
isl_seq_cpy(aff->v->el + 1 + o_out,
bmap->eq[eq] + o_out + n_out, n_div);
} else {
isl_seq_neg(aff->v->el + 1, bmap->eq[eq], o_out);
isl_seq_neg(aff->v->el + 1 + o_out,
bmap->eq[eq] + o_out + n_out, n_div);
}
/* Drop the div that is defined in terms of output dimensions;
 * it is reconstructed below through the mod computation.
 */
if (div < n_div)
isl_int_set_si(aff->v->el[1 + o_out + div], 0);
/* Denominator of the expression is |a|. */
isl_int_abs(aff->v->el[0], bmap->eq[eq][o_out + pos]);
/* Replace references to earlier output dimensions by "ma" entries. */
aff = subtract_initial(aff, ma, pos, bmap->eq[eq] + o_out,
bmap->eq[eq][o_out + pos]);
if (div < n_div) {
/* Build "shift" = -l from inequality "ineq" (-l + x >= 0) and
 * compute x = ((e(i) - l) mod m) + l as described above.
 */
shift = isl_aff_alloc(isl_local_space_copy(ls));
if (!shift)
goto error;
isl_seq_cpy(shift->v->el + 1, bmap->ineq[ineq], o_out);
isl_seq_cpy(shift->v->el + 1 + o_out,
bmap->ineq[ineq] + o_out + n_out, n_div);
isl_int_set_si(shift->v->el[0], 1);
shift = subtract_initial(shift, ma, pos,
bmap->ineq[ineq] + o_out, ctx->negone);
aff = isl_aff_add(aff, isl_aff_copy(shift));
mod = isl_val_int_from_isl_int(ctx,
bmap->eq[eq][o_out + n_out + div]);
mod = isl_val_abs(mod);
aff = isl_aff_mod_val(aff, mod);
aff = isl_aff_sub(aff, shift);
}
isl_local_space_free(ls);
return aff;
error:
isl_local_space_free(ls);
isl_aff_free(aff);
return NULL;
}
/* Given a basic map with output dimensions defined
 * in terms of the parameters input dimensions and earlier
 * output dimensions using an equality (and possibly a pair on inequalities),
 * extract an isl_aff that expresses output dimension "pos" in terms
 * of the parameters and input dimensions.
 * Note that this expression may involve integer divisions defined
 * in terms of parameters and input dimensions.
 * "ma" contains the expressions corresponding to earlier output dimensions.
 *
 * This function shares some similarities with
 * isl_basic_map_has_defining_equality and isl_constraint_get_bound.
 *
 * isl_basic_map_output_defining_equality is expected to return
 * bmap->n_eq (i.e., an out-of-range index) when no suitable equality
 * exists, which is turned into an error here.
 */
static __isl_give isl_aff *extract_isl_aff_from_basic_map(
__isl_keep isl_basic_map *bmap, int pos, __isl_keep isl_multi_aff *ma)
{
int eq, div, ineq;
isl_aff *aff;
if (!bmap)
return NULL;
eq = isl_basic_map_output_defining_equality(bmap, pos, &div, &ineq);
if (eq >= bmap->n_eq)
isl_die(isl_basic_map_get_ctx(bmap), isl_error_invalid,
"unable to find suitable equality", return NULL);
aff = extract_aff_from_equality(bmap, pos, eq, div, ineq, ma);
/* Divs that ended up unused (e.g. the one zeroed out during
 * extraction) are dropped from the local space.
 */
aff = isl_aff_remove_unused_divs(aff);
return aff;
}
/* Given a basic map where each output dimension is defined
 * in terms of the parameters and input dimensions using an equality,
 * extract an isl_multi_aff that expresses the output dimensions in terms
 * of the parameters and input dimensions.
 *
 * The components are extracted in order because the defining equality
 * of a dimension may refer to earlier output dimensions, whose
 * expressions are looked up in the partially built "ma".
 */
static __isl_give isl_multi_aff *extract_isl_multi_aff_from_basic_map(
__isl_take isl_basic_map *bmap)
{
	int pos;
	unsigned n_out;
	isl_multi_aff *ma;

	if (!bmap)
		return NULL;

	ma = isl_multi_aff_alloc(isl_basic_map_get_space(bmap));
	n_out = isl_basic_map_dim(bmap, isl_dim_out);
	for (pos = 0; pos < n_out; ++pos) {
		isl_aff *component;

		component = extract_isl_aff_from_basic_map(bmap, pos, ma);
		ma = isl_multi_aff_set_aff(ma, pos, component);
	}

	isl_basic_map_free(bmap);
	return ma;
}
/* Given a basic set where each set dimension is defined
 * in terms of the parameters using an equality,
 * extract an isl_multi_aff that expresses the set dimensions in terms
 * of the parameters.
 *
 * NOTE(review): the isl_basic_set is passed directly to a function
 * taking an isl_basic_map; presumably the two types are
 * layout-compatible here (a basic set being a basic map without
 * input dimensions) — confirm against the type definitions.
 */
__isl_give isl_multi_aff *isl_multi_aff_from_basic_set_equalities(
__isl_take isl_basic_set *bset)
{
return extract_isl_multi_aff_from_basic_map(bset);
}
/* Create an isl_pw_multi_aff that is equivalent to
 * isl_map_intersect_domain(isl_map_from_basic_map(bmap), domain).
 * The given basic map is such that each output dimension is defined
 * in terms of the parameters and input dimensions using an equality.
 *
 * Since some applications expect the result of isl_pw_multi_aff_from_map
 * to only contain integer affine expressions, we compute the floor
 * of the expression before returning.
 *
 * Remove all constraints involving local variables without
 * an explicit representation (resulting in the removal of those
 * local variables) prior to the actual extraction to ensure
 * that the local spaces in which the resulting affine expressions
 * are created do not contain any unknown local variables.
 * Removing such constraints is safe because constraints involving
 * unknown local variables are not used to determine whether
 * a basic map is obviously single-valued.
 */
static __isl_give isl_pw_multi_aff *plain_pw_multi_aff_from_map(
__isl_take isl_set *domain, __isl_take isl_basic_map *bmap)
{
isl_multi_aff *ma;
bmap = isl_basic_map_drop_constraint_involving_unknown_divs(bmap);
ma = extract_isl_multi_aff_from_basic_map(bmap);
/* Keep the result integral, as callers expect. */
ma = isl_multi_aff_floor(ma);
/* A single piece: "ma" on the whole of "domain". */
return isl_pw_multi_aff_alloc(domain, ma);
}
/* Try and create an isl_pw_multi_aff that is equivalent to the given isl_map.
 * This obviously only works if the input "map" is single-valued.
 * If so, we compute the lexicographic minimum of the image in the form
 * of an isl_pw_multi_aff. Since the image is unique, it is equal
 * to its lexicographic minimum.
 * If the input is not single-valued, we produce an error.
 */
static __isl_give isl_pw_multi_aff *pw_multi_aff_from_map_base(
__isl_take isl_map *map)
{
	int i;
	int single;
	isl_pw_multi_aff *res;

	single = isl_map_is_single_valued(map);
	if (single < 0)
		goto error;
	if (!single)
		isl_die(isl_map_get_ctx(map), isl_error_invalid,
			"map is not single-valued", goto error);
	map = isl_map_make_disjoint(map);
	if (!map)
		return NULL;

	res = isl_pw_multi_aff_empty(isl_map_get_space(map));

	for (i = 0; i < map->n; ++i) {
		isl_pw_multi_aff *piece;

		piece = isl_basic_map_lexmin_pw_multi_aff(
					isl_basic_map_copy(map->p[i]));
		res = isl_pw_multi_aff_add_disjoint(res, piece);
	}

	isl_map_free(map);
	return res;
error:
	isl_map_free(map);
	return NULL;
}
/* Try and create an isl_pw_multi_aff that is equivalent to the given isl_map,
 * taking into account that the output dimension at position "d"
 * can be represented as
 *
 * x = floor((e(...) + c1) / m)
 *
 * given that constraint "i" is of the form
 *
 * e(...) + c1 - m x >= 0
 *
 *
 * Let "map" be of the form
 *
 * A -> B
 *
 * We construct a mapping
 *
 * A -> [A -> x = floor(...)]
 *
 * apply that to the map, obtaining
 *
 * [A -> x = floor(...)] -> B
 *
 * and equate dimension "d" to x.
 * We then compute a isl_pw_multi_aff representation of the resulting map
 * and plug in the mapping above.
 */
static __isl_give isl_pw_multi_aff *pw_multi_aff_from_map_div(
__isl_take isl_map *map, __isl_take isl_basic_map *hull, int d, int i)
{
isl_ctx *ctx;
isl_space *space;
isl_local_space *ls;
isl_multi_aff *ma;
isl_aff *aff;
isl_vec *v;
isl_map *insert;
int offset;
int n;
int n_in;
isl_pw_multi_aff *pma;
int is_set;
is_set = isl_map_is_set(map);
offset = isl_basic_map_offset(hull, isl_dim_out);
ctx = isl_map_get_ctx(map);
space = isl_space_domain(isl_map_get_space(map));
n_in = isl_space_dim(space, isl_dim_set);
n = isl_space_dim(space, isl_dim_all);
/* Build the coefficient vector of (e(...) + c1)/m from constraint "i":
 * denominator m (= -coefficient of x, which is negative in "i"),
 * then constant and coefficients.
 */
v = isl_vec_alloc(ctx, 1 + 1 + n);
if (v) {
isl_int_neg(v->el[0], hull->ineq[i][offset + d]);
isl_seq_cpy(v->el + 1, hull->ineq[i], 1 + n);
}
isl_basic_map_free(hull);
ls = isl_local_space_from_space(isl_space_copy(space));
/* Note: if the allocation above failed, v is NULL here; presumably
 * isl_aff_alloc_vec propagates the failure — confirm in its definition.
 */
aff = isl_aff_alloc_vec(ls, v);
aff = isl_aff_floor(aff);
if (is_set) {
isl_space_free(space);
ma = isl_multi_aff_from_aff(aff);
} else {
/* A -> [A -> x = floor(...)] : identity on A, extended with x. */
ma = isl_multi_aff_identity(isl_space_map_from_set(space));
ma = isl_multi_aff_range_product(ma,
isl_multi_aff_from_aff(aff));
}
insert = isl_map_from_multi_aff(isl_multi_aff_copy(ma));
map = isl_map_apply_domain(map, insert);
/* Equate output "d" to the newly introduced x (last input dim). */
map = isl_map_equate(map, isl_dim_in, n_in, isl_dim_out, d);
/* Recurse on the simplified map and substitute the mapping back in. */
pma = isl_pw_multi_aff_from_map(map);
pma = isl_pw_multi_aff_pullback_multi_aff(pma, ma);
return pma;
}
/* Is constraint "c" of the form
 *
 * e(...) + c1 - m x >= 0
 *
 * or
 *
 * -e(...) + c2 + m x >= 0
 *
 * where |m| > 1 and e only depends on parameters and input dimensions?
 *
 * "offset" is the offset of the output dimensions,
 * "d" the position of output dimension x and
 * "total" the total number of coefficients in the constraint.
 */
static int is_potential_div_constraint(isl_int *c, int offset, int d, int total)
{
	/* Coefficient of x must have absolute value greater than one. */
	if (isl_int_is_zero(c[offset + d]) ||
	    isl_int_is_one(c[offset + d]) ||
	    isl_int_is_negone(c[offset + d]))
		return 0;
	/* No other output dimension (before or after "d") may appear. */
	if (isl_seq_first_non_zero(c + offset, d) != -1)
		return 0;
	return isl_seq_first_non_zero(c + offset + d + 1,
					total - (offset + d + 1)) == -1;
}
/* Try and create an isl_pw_multi_aff that is equivalent to the given isl_map.
 *
 * As a special case, we first check if there is any pair of constraints,
 * shared by all the basic maps in "map" that force a given dimension
 * to be equal to the floor of some affine combination of the input dimensions.
 *
 * In particular, if we can find two constraints
 *
 * e(...) + c1 - m x >= 0 i.e., m x <= e(...) + c1
 *
 * and
 *
 * -e(...) + c2 + m x >= 0 i.e., m x >= e(...) - c2
 *
 * where m > 1 and e only depends on parameters and input dimemnsions,
 * and such that
 *
 * c1 + c2 < m i.e., -c2 >= c1 - (m - 1)
 *
 * then we know that we can take
 *
 * x = floor((e(...) + c1) / m)
 *
 * without having to perform any computation.
 *
 * Note that we know that
 *
 * c1 + c2 >= 1
 *
 * If c1 + c2 were 0, then we would have detected an equality during
 * simplification. If c1 + c2 were negative, then we would have detected
 * a contradiction.
 */
static __isl_give isl_pw_multi_aff *pw_multi_aff_from_map_check_div(
__isl_take isl_map *map)
{
int d, dim;
int i, j, n;
int offset, total;
isl_int sum;
isl_basic_map *hull;
/* Constraints shared by all basic maps in "map". */
hull = isl_map_unshifted_simple_hull(isl_map_copy(map));
if (!hull)
goto error;
/* "sum" is initialized only after the NULL check, so the error label
 * (reached only before this point) must not clear it.
 */
isl_int_init(sum);
dim = isl_map_dim(map, isl_dim_out);
offset = isl_basic_map_offset(hull, isl_dim_out);
total = 1 + isl_basic_map_total_dim(hull);
n = hull->n_ineq;
for (d = 0; d < dim; ++d) {
for (i = 0; i < n; ++i) {
if (!is_potential_div_constraint(hull->ineq[i],
offset, d, total))
continue;
/* Look for a matching constraint "j" with all coefficients
 * negated and with |c1 + c2| < |m|.
 */
for (j = i + 1; j < n; ++j) {
if (!isl_seq_is_neg(hull->ineq[i] + 1,
hull->ineq[j] + 1, total - 1))
continue;
isl_int_add(sum, hull->ineq[i][0],
hull->ineq[j][0]);
if (isl_int_abs_lt(sum,
hull->ineq[i][offset + d]))
break;
}
/* No partner found for "i"; try the next constraint. */
if (j >= n)
continue;
isl_int_clear(sum);
/* Pass the constraint with the negative coefficient of x
 * (the upper bound m x <= e(...) + c1) to the div extraction.
 */
if (isl_int_is_pos(hull->ineq[j][offset + d]))
j = i;
return pw_multi_aff_from_map_div(map, hull, d, j);
}
}
isl_int_clear(sum);
isl_basic_map_free(hull);
/* No floor pattern found; fall back to the generic conversion. */
return pw_multi_aff_from_map_base(map);
error:
isl_map_free(map);
isl_basic_map_free(hull);
return NULL;
}
/* Given an affine expression
 *
 * [A -> B] -> f(A,B)
 *
 * construct an isl_multi_aff
 *
 * [A -> B] -> B'
 *
 * such that dimension "d" in B' is set to "aff" and the remaining
 * dimensions are set equal to the corresponding dimensions in B.
 * "n_in" is the dimension of the space A.
 * "n_out" is the dimension of the space B.
 *
 * If "is_set" is set, then the affine expression is of the form
 *
 * [B] -> f(B)
 *
 * and we construct an isl_multi_aff
 *
 * B -> B'
 */
static __isl_give isl_multi_aff *range_map(__isl_take isl_aff *aff, int d,
unsigned n_in, unsigned n_out, int is_set)
{
int i;
isl_multi_aff *ma;
isl_space *space, *space2;
isl_local_space *ls;
space = isl_aff_get_domain_space(aff);
ls = isl_local_space_from_space(isl_space_copy(space));
/* Range space B' = B: unwrap [A -> B] unless the input is a set. */
space2 = isl_space_copy(space);
if (!is_set)
space2 = isl_space_range(isl_space_unwrap(space2));
space = isl_space_map_from_domain_and_range(space, space2);
ma = isl_multi_aff_alloc(space);
ma = isl_multi_aff_set_aff(ma, d, aff);
/* All other output dimensions copy the corresponding B dimension,
 * which starts at position n_in in the (wrapped) domain.
 */
for (i = 0; i < n_out; ++i) {
if (i == d)
continue;
aff = isl_aff_var_on_domain(isl_local_space_copy(ls),
isl_dim_set, n_in + i);
ma = isl_multi_aff_set_aff(ma, i, aff);
}
isl_local_space_free(ls);
return ma;
}
/* Try and create an isl_pw_multi_aff that is equivalent to the given isl_map,
 * taking into account that the dimension at position "d" can be written as
 *
 * x = m a + f(..) (1)
 *
 * where m is equal to "gcd".
 * "i" is the index of the equality in "hull" that defines f(..).
 * In particular, the equality is of the form
 *
 * f(..) - x + m g(existentials) = 0
 *
 * or
 *
 * -f(..) + x + m g(existentials) = 0
 *
 * We basically plug (1) into "map", resulting in a map with "a"
 * in the range instead of "x". The corresponding isl_pw_multi_aff
 * defining "a" is then plugged back into (1) to obtain a definition for "x".
 *
 * Specifically, given the input map
 *
 * A -> B
 *
 * We first wrap it into a set
 *
 * [A -> B]
 *
 * and define (1) on top of the corresponding space, resulting in "aff".
 * We use this to create an isl_multi_aff that maps the output position "d"
 * from "a" to "x", leaving all other (intput and output) dimensions unchanged.
 * We plug this into the wrapped map, unwrap the result and compute the
 * corresponding isl_pw_multi_aff.
 * The result is an expression
 *
 * A -> T(A)
 *
 * We adjust that to
 *
 * A -> [A -> T(A)]
 *
 * so that we can plug that into "aff", after extending the latter to
 * a mapping
 *
 * [A -> B] -> B'
 *
 *
 * If "map" is actually a set, then there is no "A" space, meaning
 * that we do not need to perform any wrapping, and that the result
 * of the recursive call is of the form
 *
 * [T]
 *
 * which is plugged into a mapping of the form
 *
 * B -> B'
 */
static __isl_give isl_pw_multi_aff *pw_multi_aff_from_map_stride(
__isl_take isl_map *map, __isl_take isl_basic_map *hull, int d, int i,
isl_int gcd)
{
isl_set *set;
isl_space *space;
isl_local_space *ls;
isl_aff *aff;
isl_multi_aff *ma;
isl_pw_multi_aff *pma, *id;
unsigned n_in;
unsigned o_out;
unsigned n_out;
int is_set;
is_set = isl_map_is_set(map);
n_in = isl_basic_map_dim(hull, isl_dim_in);
n_out = isl_basic_map_dim(hull, isl_dim_out);
o_out = isl_basic_map_offset(hull, isl_dim_out);
/* Wrap A -> B into [A -> B] (no-op for sets). */
if (is_set)
set = map;
else
set = isl_map_wrap(map);
space = isl_space_map_from_set(isl_set_get_space(set));
ma = isl_multi_aff_identity(space);
ls = isl_local_space_from_space(isl_set_get_space(set));
/* Build "aff" = m a + f(..) from equality "i": copy or negate the
 * coefficients so that f(..) appears with positive sign, then set
 * the coefficient of x (position o_out + d) to m = "gcd",
 * turning the x slot into the new variable "a".
 */
aff = isl_aff_alloc(ls);
if (aff) {
isl_int_set_si(aff->v->el[0], 1);
if (isl_int_is_one(hull->eq[i][o_out + d]))
isl_seq_neg(aff->v->el + 1, hull->eq[i],
aff->v->size - 1);
else
isl_seq_cpy(aff->v->el + 1, hull->eq[i],
aff->v->size - 1);
isl_int_set(aff->v->el[1 + o_out + d], gcd);
}
/* Substitute x = m a + f(..) in the wrapped map. */
ma = isl_multi_aff_set_aff(ma, n_in + d, isl_aff_copy(aff));
set = isl_set_preimage_multi_aff(set, ma);
/* Mapping [A -> B] -> B' (or B -> B') that restores x at position d. */
ma = range_map(aff, d, n_in, n_out, is_set);
if (is_set)
map = set;
else
map = isl_set_unwrap(set);
/* Recurse on the destrided map. */
pma = isl_pw_multi_aff_from_map(map);
if (!is_set) {
/* Turn A -> T(A) into A -> [A -> T(A)] for the pullback. */
space = isl_pw_multi_aff_get_domain_space(pma);
space = isl_space_map_from_set(space);
id = isl_pw_multi_aff_identity(space);
pma = isl_pw_multi_aff_range_product(id, pma);
}
id = isl_pw_multi_aff_from_multi_aff(ma);
pma = isl_pw_multi_aff_pullback_pw_multi_aff(id, pma);
isl_basic_map_free(hull);
return pma;
}
/* Try and create an isl_pw_multi_aff that is equivalent to the given isl_map.
 *
 * As a special case, we first check if all output dimensions are uniquely
 * defined in terms of the parameters and input dimensions over the entire
 * domain. If so, we extract the desired isl_pw_multi_aff directly
 * from the affine hull of "map" and its domain.
 *
 * Otherwise, we check if any of the output dimensions is "strided".
 * That is, we check if can be written as
 *
 * x = m a + f(..)
 *
 * with m greater than 1, a some combination of existentially quantified
 * variables and f an expression in the parameters and input dimensions.
 * If so, we remove the stride in pw_multi_aff_from_map_stride.
 *
 * Otherwise, we continue with pw_multi_aff_from_map_check_div for a further
 * special case.
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_from_map(__isl_take isl_map *map)
{
int i, j;
isl_bool sv;
isl_basic_map *hull;
unsigned n_out;
unsigned o_out;
unsigned n_div;
unsigned o_div;
isl_int gcd;
if (!map)
return NULL;
map = isl_map_detect_equalities(map);
hull = isl_map_unshifted_simple_hull(isl_map_copy(map));
/* Fast path: every output is defined by an equality in the hull. */
sv = isl_basic_map_plain_is_single_valued(hull);
if (sv >= 0 && sv)
return plain_pw_multi_aff_from_map(isl_map_domain(map), hull);
if (sv < 0)
hull = isl_basic_map_free(hull);
if (!hull)
goto error;
n_div = isl_basic_map_dim(hull, isl_dim_div);
o_div = isl_basic_map_offset(hull, isl_dim_div);
/* No existentials means no stride pattern to detect. */
if (n_div == 0) {
isl_basic_map_free(hull);
return pw_multi_aff_from_map_check_div(map);
}
isl_int_init(gcd);
n_out = isl_basic_map_dim(hull, isl_dim_out);
o_out = isl_basic_map_offset(hull, isl_dim_out);
/* Look for an equality defining output "i" with coefficient +-1,
 * no other output dimensions involved, and a common factor
 * gcd > 1 on all existentials: that is the stride m.
 */
for (i = 0; i < n_out; ++i) {
for (j = 0; j < hull->n_eq; ++j) {
isl_int *eq = hull->eq[j];
isl_pw_multi_aff *res;
if (!isl_int_is_one(eq[o_out + i]) &&
!isl_int_is_negone(eq[o_out + i]))
continue;
if (isl_seq_first_non_zero(eq + o_out, i) != -1)
continue;
if (isl_seq_first_non_zero(eq + o_out + i + 1,
n_out - (i + 1)) != -1)
continue;
isl_seq_gcd(eq + o_div, n_div, &gcd);
if (isl_int_is_zero(gcd))
continue;
if (isl_int_is_one(gcd))
continue;
res = pw_multi_aff_from_map_stride(map, hull,
i, j, gcd);
isl_int_clear(gcd);
return res;
}
}
isl_int_clear(gcd);
isl_basic_map_free(hull);
return pw_multi_aff_from_map_check_div(map);
error:
isl_map_free(map);
return NULL;
}
/* Try and create an isl_pw_multi_aff that is equivalent to the given
 * isl_set, treating the set as a map without input dimensions.
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_from_set(__isl_take isl_set *set)
{
return isl_pw_multi_aff_from_map(set);
}
/* Convert "map" into an isl_pw_multi_aff (if possible) and
 * add it to *user.
 *
 * Callback for isl_union_map_foreach_map; "user" points at the
 * isl_union_pw_multi_aff being accumulated.
 */
static isl_stat pw_multi_aff_from_map(__isl_take isl_map *map, void *user)
{
	isl_union_pw_multi_aff **upma = user;

	*upma = isl_union_pw_multi_aff_add_pw_multi_aff(*upma,
					isl_pw_multi_aff_from_map(map));
	if (!*upma)
		return isl_stat_error;
	return isl_stat_ok;
}
/* Create an isl_union_pw_multi_aff with the given isl_aff on a universe
 * domain.
 *
 * The isl_aff is first lifted to a one-dimensional isl_multi_aff,
 * then to a single-piece isl_pw_multi_aff, and finally wrapped
 * in a union over that one space.
 */
__isl_give isl_union_pw_multi_aff *isl_union_pw_multi_aff_from_aff(
__isl_take isl_aff *aff)
{
isl_multi_aff *ma;
isl_pw_multi_aff *pma;
ma = isl_multi_aff_from_aff(aff);
pma = isl_pw_multi_aff_from_multi_aff(ma);
return isl_union_pw_multi_aff_from_pw_multi_aff(pma);
}
/* Try and create an isl_union_pw_multi_aff that is equivalent
 * to the given isl_union_map.
 * The isl_union_map is required to be single-valued in each space.
 * Otherwise, an error is produced.
 *
 * Each member map is converted by the pw_multi_aff_from_map callback
 * and collected into an initially empty union.
 */
__isl_give isl_union_pw_multi_aff *isl_union_pw_multi_aff_from_union_map(
__isl_take isl_union_map *umap)
{
isl_space *space;
isl_union_pw_multi_aff *upma;
space = isl_union_map_get_space(umap);
upma = isl_union_pw_multi_aff_empty(space);
if (isl_union_map_foreach_map(umap, &pw_multi_aff_from_map, &upma) < 0)
upma = isl_union_pw_multi_aff_free(upma);
isl_union_map_free(umap);
return upma;
}
/* Try and create an isl_union_pw_multi_aff that is equivalent
 * to the given isl_union_set.
 * The isl_union_set is required to be a singleton in each space.
 * Otherwise, an error is produced.
 *
 * An isl_union_set is treated as an isl_union_map without inputs.
 */
__isl_give isl_union_pw_multi_aff *isl_union_pw_multi_aff_from_union_set(
__isl_take isl_union_set *uset)
{
return isl_union_pw_multi_aff_from_union_map(uset);
}
/* Return the piecewise affine expression "set ? 1 : 0":
 * the constant 1 on "set" and the constant 0 on its complement.
 * The two cells are disjoint by construction, so the pieces can
 * be combined with a disjoint addition.
 */
__isl_give isl_pw_aff *isl_set_indicator_function(__isl_take isl_set *set)
{
	isl_local_space *ls;
	isl_aff *zero, *one;
	isl_pw_aff *res;

	ls = isl_local_space_from_space(isl_set_get_space(set));
	zero = isl_aff_zero_on_domain(isl_local_space_copy(ls));
	one = isl_aff_add_constant_si(isl_aff_zero_on_domain(ls), 1);

	res = isl_pw_aff_alloc(isl_set_copy(set), one);
	set = isl_set_complement(set);
	return isl_pw_aff_add_disjoint(res, isl_pw_aff_alloc(set, zero));
}
/* Plug in "subs" for dimension "type", "pos" of "aff".
 *
 * Let i be the dimension to replace and let "subs" be of the form
 *
 * f/d
 *
 * and "aff" of the form
 *
 * (a i + g)/m
 *
 * The result is
 *
 * (a f + d g')/(m d)
 *
 * where g' is the result of plugging in "subs" in each of the integer
 * divisions in g.
 */
__isl_give isl_aff *isl_aff_substitute(__isl_take isl_aff *aff,
enum isl_dim_type type, unsigned pos, __isl_keep isl_aff *subs)
{
isl_ctx *ctx;
isl_int v;
/* The local space and coefficient vector are modified in place,
 * so make sure this object is not shared.
 */
aff = isl_aff_cow(aff);
if (!aff || !subs)
return isl_aff_free(aff);
ctx = isl_aff_get_ctx(aff);
if (!isl_space_is_equal(aff->ls->dim, subs->ls->dim))
isl_die(ctx, isl_error_invalid,
"spaces don't match", return isl_aff_free(aff));
/* Substitution into an expression with divs of its own ("subs")
 * is not implemented.
 */
if (isl_local_space_dim(subs->ls, isl_dim_div) != 0)
isl_die(ctx, isl_error_unsupported,
"cannot handle divs yet", return isl_aff_free(aff));
/* First substitute inside the integer divisions of the local space. */
aff->ls = isl_local_space_substitute(aff->ls, type, pos, subs);
if (!aff->ls)
return isl_aff_free(aff);
aff->v = isl_vec_cow(aff->v);
if (!aff->v)
return isl_aff_free(aff);
/* Then substitute in the coefficient vector itself; "pos" is
 * converted to an absolute position first.
 */
pos += isl_local_space_offset(aff->ls, type);
isl_int_init(v);
isl_seq_substitute(aff->v->el, pos, subs->v->el,
aff->v->size, subs->v->size, v);
isl_int_clear(v);
return aff;
}
/* Plug in "subs" for dimension "type", "pos" in each of the affine
 * expressions in "maff".
 *
 * The individual expressions view the multi-aff's input dimensions
 * as set dimensions, hence the isl_dim_in -> isl_dim_set translation.
 */
__isl_give isl_multi_aff *isl_multi_aff_substitute(
__isl_take isl_multi_aff *maff, enum isl_dim_type type, unsigned pos,
__isl_keep isl_aff *subs)
{
int i;
maff = isl_multi_aff_cow(maff);
if (!maff || !subs)
return isl_multi_aff_free(maff);
if (type == isl_dim_in)
type = isl_dim_set;
for (i = 0; i < maff->n; ++i) {
maff->p[i] = isl_aff_substitute(maff->p[i], type, pos, subs);
if (!maff->p[i])
return isl_multi_aff_free(maff);
}
return maff;
}
/* Plug in "subs" for dimension "type", "pos" of "pma".
 *
 * pma is of the form
 *
 * A_i(v) -> M_i(v)
 *
 * while subs is of the form
 *
 * v' = B_j(v) -> S_j
 *
 * Each pair i,j such that C_ij = A_i \cap B_i is non-empty
 * has a contribution in the result, in particular
 *
 * C_ij(S_j) -> M_i(S_j)
 *
 * Note that plugging in S_j in C_ij may also result in an empty set
 * and this contribution should simply be discarded.
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_substitute(
__isl_take isl_pw_multi_aff *pma, enum isl_dim_type type, unsigned pos,
__isl_keep isl_pw_aff *subs)
{
int i, j, n;
isl_pw_multi_aff *res;
if (!pma || !subs)
return isl_pw_multi_aff_free(pma);
/* Upper bound on the number of pieces: one per (i, j) pair. */
n = pma->n * subs->n;
res = isl_pw_multi_aff_alloc_size(isl_space_copy(pma->dim), n);
for (i = 0; i < pma->n; ++i) {
for (j = 0; j < subs->n; ++j) {
isl_set *common;
isl_multi_aff *res_ij;
int empty;
/* C_ij(S_j): intersect cells, then substitute. */
common = isl_set_intersect(
isl_set_copy(pma->p[i].set),
isl_set_copy(subs->p[j].set));
common = isl_set_substitute(common,
type, pos, subs->p[j].aff);
empty = isl_set_plain_is_empty(common);
if (empty < 0 || empty) {
isl_set_free(common);
if (empty < 0)
goto error;
/* Empty contribution: skip. */
continue;
}
res_ij = isl_multi_aff_substitute(
isl_multi_aff_copy(pma->p[i].maff),
type, pos, subs->p[j].aff);
res = isl_pw_multi_aff_add_piece(res, common, res_ij);
}
}
isl_pw_multi_aff_free(pma);
return res;
error:
isl_pw_multi_aff_free(pma);
isl_pw_multi_aff_free(res);
return NULL;
}
/* Compute the preimage of a range of dimensions in the affine expression "src"
 * under "ma" and put the result in "dst". The number of dimensions in "src"
 * that precede the range is given by "n_before". The number of dimensions
 * in the range is given by the number of output dimensions of "ma".
 * The number of dimensions that follow the range is given by "n_after".
 * If "has_denom" is set (to one),
 * then "src" and "dst" have an extra initial denominator.
 * "n_div_ma" is the number of existentials in "ma"
 * "n_div_bmap" is the number of existentials in "src"
 * The resulting "dst" (which is assumed to have been allocated by
 * the caller) contains coefficients for both sets of existentials,
 * first those in "ma" and then those in "src".
 * f, c1, c2 and g are temporary objects that have been initialized
 * by the caller.
 *
 * Let src represent the expression
 *
 *	(a(p) + f_u u + b v + f_w w + c(divs))/d
 *
 * and let ma represent the expressions
 *
 *	v_i = (r_i(p) + s_i(y) + t_i(divs'))/m_i
 *
 * We start out with the following expression for dst:
 *
 *	(a(p) + f_u u + 0 y + f_w w + 0 divs' + c(divs) + f \sum_i b_i v_i)/d
 *
 * with the multiplication factor f initially equal to 1
 * and f \sum_i b_i v_i kept separately.
 * For each v_i that we substitute, we multiply the numerator
 * (and denominator) of dst by c_1 = m_i and add the numerator
 * of the v_i expression multiplied by c_2 = f b_i,
 * after removing the common factors of c_1 and c_2.
 * The multiplication factor f also needs to be multiplied by c_1
 * for the next v_j, j > i.
 */
void isl_seq_preimage(isl_int *dst, isl_int *src,
	__isl_keep isl_multi_aff *ma, int n_before, int n_after,
	int n_div_ma, int n_div_bmap,
	isl_int f, isl_int c1, isl_int c2, isl_int g, int has_denom)
{
	int i;
	int n_param, n_in, n_out;
	int o_dst, o_src;
	n_param = isl_multi_aff_dim(ma, isl_dim_param);
	n_in = isl_multi_aff_dim(ma, isl_dim_in);
	n_out = isl_multi_aff_dim(ma, isl_dim_out);
	/* Copy the (optional) denominator, constant, parameter and
	 * "n_before" coefficients, which the substitution leaves alone. */
	isl_seq_cpy(dst, src, has_denom + 1 + n_param + n_before);
	o_dst = o_src = has_denom + 1 + n_param + n_before;
	/* Coefficients of the new input dimensions "y" start out as zero. */
	isl_seq_clr(dst + o_dst, n_in);
	o_dst += n_in;
	o_src += n_out;
	isl_seq_cpy(dst + o_dst, src + o_src, n_after);
	o_dst += n_after;
	o_src += n_after;
	/* Same for the divs of "ma"; the divs of "src" are copied over. */
	isl_seq_clr(dst + o_dst, n_div_ma);
	o_dst += n_div_ma;
	isl_seq_cpy(dst + o_dst, src + o_src, n_div_bmap);
	isl_int_set_si(f, 1);
	for (i = 0; i < n_out; ++i) {
		int offset = has_denom + 1 + n_param + n_before + i;
		if (isl_int_is_zero(src[offset]))
			continue;
		/* c1 = m_i (denominator of ma->p[i]); c2 = f * b_i. */
		isl_int_set(c1, ma->p[i]->v->el[0]);
		isl_int_mul(c2, f, src[offset]);
		isl_int_gcd(g, c1, c2);
		isl_int_divexact(c1, c1, g);
		isl_int_divexact(c2, c2, g);
		isl_int_mul(f, f, c1);
		o_dst = has_denom;
		o_src = 1;
		/* dst = c1 * dst + c2 * (numerator of v_i), per segment. */
		isl_seq_combine(dst + o_dst, c1, dst + o_dst,
				c2, ma->p[i]->v->el + o_src, 1 + n_param);
		o_dst += 1 + n_param;
		o_src += 1 + n_param;
		isl_seq_scale(dst + o_dst, dst + o_dst, c1, n_before);
		o_dst += n_before;
		isl_seq_combine(dst + o_dst, c1, dst + o_dst,
				c2, ma->p[i]->v->el + o_src, n_in);
		o_dst += n_in;
		o_src += n_in;
		isl_seq_scale(dst + o_dst, dst + o_dst, c1, n_after);
		o_dst += n_after;
		isl_seq_combine(dst + o_dst, c1, dst + o_dst,
				c2, ma->p[i]->v->el + o_src, n_div_ma);
		o_dst += n_div_ma;
		o_src += n_div_ma;
		isl_seq_scale(dst + o_dst, dst + o_dst, c1, n_div_bmap);
		if (has_denom)
			isl_int_mul(dst[0], dst[0], c1);
	}
}
/* Compute the pullback of "aff" by the function represented by "ma".
 * In other words, plug in "ma" in "aff". The result is an affine expression
 * defined over the domain space of "ma".
 *
 * If "aff" is represented by
 *
 *	(a(p) + b x + c(divs))/d
 *
 * and ma is represented by
 *
 *	x = D(p) + F(y) + G(divs')
 *
 * then the result is
 *
 *	(a(p) + b D(p) + b F(y) + b G(divs') + c(divs))/d
 *
 * The divs in the local space of the input are similarly adjusted
 * through a call to isl_local_space_preimage_multi_aff.
 */
__isl_give isl_aff *isl_aff_pullback_multi_aff(__isl_take isl_aff *aff,
	__isl_take isl_multi_aff *ma)
{
	isl_aff *res = NULL;
	isl_local_space *ls;
	int n_div_aff, n_div_ma;
	isl_int f, c1, c2, g;
	/* Make sure all elements of "ma" share the same list of divs. */
	ma = isl_multi_aff_align_divs(ma);
	if (!aff || !ma)
		goto error;
	n_div_aff = isl_aff_dim(aff, isl_dim_div);
	n_div_ma = ma->n ? isl_aff_dim(ma->p[0], isl_dim_div) : 0;
	/* The result lives on the preimage of the local space of "aff". */
	ls = isl_aff_get_domain_local_space(aff);
	ls = isl_local_space_preimage_multi_aff(ls, isl_multi_aff_copy(ma));
	res = isl_aff_alloc(ls);
	if (!res)
		goto error;
	/* f, c1, c2, g are scratch integers used by isl_seq_preimage. */
	isl_int_init(f);
	isl_int_init(c1);
	isl_int_init(c2);
	isl_int_init(g);
	/* has_denom is 1: both "aff" and "res" carry a denominator. */
	isl_seq_preimage(res->v->el, aff->v->el, ma, 0, 0, n_div_ma, n_div_aff,
			f, c1, c2, g, 1);
	isl_int_clear(f);
	isl_int_clear(c1);
	isl_int_clear(c2);
	isl_int_clear(g);
	isl_aff_free(aff);
	isl_multi_aff_free(ma);
	res = isl_aff_normalize(res);
	return res;
error:
	isl_aff_free(aff);
	isl_multi_aff_free(ma);
	isl_aff_free(res);
	return NULL;
}
/* Compute the pullback of "aff1" by the function represented by "aff2".
 * In other words, plug in "aff2" in "aff1". The result is an affine
 * expression defined over the domain space of "aff2".
 *
 * The domain of "aff1" should match the range of "aff2", which means
 * that it should be single-dimensional.
 */
__isl_give isl_aff *isl_aff_pullback_aff(__isl_take isl_aff *aff1,
	__isl_take isl_aff *aff2)
{
	/* Wrap "aff2" in a multi_aff and reuse the multi_aff pullback. */
	return isl_aff_pullback_multi_aff(aff1, isl_multi_aff_from_aff(aff2));
}
/* Compute the pullback of "ma1" by the function represented by "ma2".
 * In other words, plug in "ma2" in "ma1".
 *
 * The parameters of "ma1" and "ma2" are assumed to have been aligned.
 */
static __isl_give isl_multi_aff *isl_multi_aff_pullback_multi_aff_aligned(
	__isl_take isl_multi_aff *ma1, __isl_take isl_multi_aff *ma2)
{
	int i;
	isl_space *space = NULL;
	/* Make sure all elements of "ma2" share the same list of divs. */
	ma2 = isl_multi_aff_align_divs(ma2);
	ma1 = isl_multi_aff_cow(ma1);
	if (!ma1 || !ma2)
		goto error;
	/* The result lives on domain(ma2) -> range(ma1). */
	space = isl_space_join(isl_multi_aff_get_space(ma2),
				isl_multi_aff_get_space(ma1));
	/* Pull back each element individually. */
	for (i = 0; i < ma1->n; ++i) {
		ma1->p[i] = isl_aff_pullback_multi_aff(ma1->p[i],
						isl_multi_aff_copy(ma2));
		if (!ma1->p[i])
			goto error;
	}
	ma1 = isl_multi_aff_reset_space(ma1, space);
	isl_multi_aff_free(ma2);
	return ma1;
error:
	isl_space_free(space);
	isl_multi_aff_free(ma2);
	isl_multi_aff_free(ma1);
	return NULL;
}
/* Compute the pullback of "ma1" by the function represented by "ma2".
 * In other words, plug in "ma2" in "ma1".
 *
 * The parameters are aligned first; the actual work is then performed
 * by isl_multi_aff_pullback_multi_aff_aligned.
 */
__isl_give isl_multi_aff *isl_multi_aff_pullback_multi_aff(
	__isl_take isl_multi_aff *ma1, __isl_take isl_multi_aff *ma2)
{
	isl_multi_aff *res;

	res = isl_multi_aff_align_params_multi_multi_and(ma1, ma2,
				&isl_multi_aff_pullback_multi_aff_aligned);
	return res;
}
/* Extend the local space of "dst" to include the divs
 * in the local space of "src".
 */
__isl_give isl_aff *isl_aff_align_divs(__isl_take isl_aff *dst,
	__isl_keep isl_aff *src)
{
	isl_ctx *ctx;
	int *exp1 = NULL;
	int *exp2 = NULL;
	isl_mat *div;
	if (!src || !dst)
		return isl_aff_free(dst);
	ctx = isl_aff_get_ctx(src);
	if (!isl_space_is_equal(src->ls->dim, dst->ls->dim))
		isl_die(ctx, isl_error_invalid,
			"spaces don't match", goto error);
	/* Nothing to do if "src" does not involve any divs. */
	if (src->ls->div->n_row == 0)
		return dst;
	/* exp1/exp2 map the div positions of "src"/"dst" to their
	 * positions in the merged div list. */
	exp1 = isl_alloc_array(ctx, int, src->ls->div->n_row);
	exp2 = isl_alloc_array(ctx, int, dst->ls->div->n_row);
	if (!exp1 || (dst->ls->div->n_row && !exp2))
		goto error;
	div = isl_merge_divs(src->ls->div, dst->ls->div, exp1, exp2);
	dst = isl_aff_expand_divs(dst, div, exp2);
	free(exp1);
	free(exp2);
	return dst;
error:
	free(exp1);
	free(exp2);
	return isl_aff_free(dst);
}
/* Adjust the local spaces of the affine expressions in "maff"
 * such that they all have the same divs.
 */
__isl_give isl_multi_aff *isl_multi_aff_align_divs(
	__isl_take isl_multi_aff *maff)
{
	int i;
	if (!maff)
		return NULL;
	if (maff->n == 0)
		return maff;
	maff = isl_multi_aff_cow(maff);
	if (!maff)
		return NULL;
	/* First collect the divs of all elements into the first element... */
	for (i = 1; i < maff->n; ++i)
		maff->p[0] = isl_aff_align_divs(maff->p[0], maff->p[i]);
	/* ...and then expand every other element to this complete list. */
	for (i = 1; i < maff->n; ++i) {
		maff->p[i] = isl_aff_align_divs(maff->p[i], maff->p[0]);
		if (!maff->p[i])
			return isl_multi_aff_free(maff);
	}
	return maff;
}
/* Lift "aff" by replacing its local space by a lifted version,
 * turning its divs into regular dimensions.
 */
__isl_give isl_aff *isl_aff_lift(__isl_take isl_aff *aff)
{
	isl_local_space *ls;

	aff = isl_aff_cow(aff);
	if (!aff)
		return NULL;

	ls = isl_local_space_lift(aff->ls);
	aff->ls = ls;
	if (!ls)
		return isl_aff_free(aff);

	return aff;
}
/* Lift "maff" to a space with extra dimensions such that the result
 * has no more existentially quantified variables.
 * If "ls" is not NULL, then *ls is assigned the local space that lies
 * at the basis of the lifting applied to "maff".
 */
__isl_give isl_multi_aff *isl_multi_aff_lift(__isl_take isl_multi_aff *maff,
	__isl_give isl_local_space **ls)
{
	int i;
	isl_space *space;
	unsigned n_div;
	if (ls)
		*ls = NULL;
	if (!maff)
		return NULL;
	/* A zero-dimensional multi_aff involves no divs,
	 * so the lifting is trivial. */
	if (maff->n == 0) {
		if (ls) {
			isl_space *space = isl_multi_aff_get_domain_space(maff);
			*ls = isl_local_space_from_space(space);
			if (!*ls)
				return isl_multi_aff_free(maff);
		}
		return maff;
	}
	maff = isl_multi_aff_cow(maff);
	/* Make all elements share one div list, so that a single set
	 * of extra dimensions covers every element. */
	maff = isl_multi_aff_align_divs(maff);
	if (!maff)
		return NULL;
	n_div = isl_aff_dim(maff->p[0], isl_dim_div);
	space = isl_multi_aff_get_space(maff);
	/* Add one extra domain dimension per (shared) div. */
	space = isl_space_lift(isl_space_domain(space), n_div);
	space = isl_space_extend_domain_with_range(space,
						isl_multi_aff_get_space(maff));
	if (!space)
		return isl_multi_aff_free(maff);
	isl_space_free(maff->space);
	maff->space = space;
	if (ls) {
		*ls = isl_aff_get_domain_local_space(maff->p[0]);
		if (!*ls)
			return isl_multi_aff_free(maff);
	}
	/* Turn the divs of each element into the new dimensions. */
	for (i = 0; i < maff->n; ++i) {
		maff->p[i] = isl_aff_lift(maff->p[i]);
		if (!maff->p[i])
			goto error;
	}
	return maff;
error:
	if (ls)
		isl_local_space_free(*ls);
	return isl_multi_aff_free(maff);
}
/* Extract an isl_pw_aff corresponding to output dimension "pos" of "pma".
 */
__isl_give isl_pw_aff *isl_pw_multi_aff_get_pw_aff(
	__isl_keep isl_pw_multi_aff *pma, int pos)
{
	int i;
	int n_out;
	isl_space *space;
	isl_pw_aff *pa;
	if (!pma)
		return NULL;
	n_out = isl_pw_multi_aff_dim(pma, isl_dim_out);
	if (pos < 0 || pos >= n_out)
		isl_die(isl_pw_multi_aff_get_ctx(pma), isl_error_invalid,
			"index out of bounds", return NULL);
	/* Keep only output dimension "pos" in the result space. */
	space = isl_pw_multi_aff_get_space(pma);
	space = isl_space_drop_dims(space, isl_dim_out,
					pos + 1, n_out - pos - 1);
	space = isl_space_drop_dims(space, isl_dim_out, 0, pos);
	pa = isl_pw_aff_alloc_size(space, pma->n);
	/* Copy the selected affine expression of each piece
	 * together with the piece's domain. */
	for (i = 0; i < pma->n; ++i) {
		isl_aff *aff;
		aff = isl_multi_aff_get_aff(pma->p[i].maff, pos);
		pa = isl_pw_aff_add_piece(pa, isl_set_copy(pma->p[i].set), aff);
	}
	return pa;
}
/* Return an isl_pw_multi_aff with the given "set" as domain and
 * an unnamed zero-dimensional range.
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_from_domain(
	__isl_take isl_set *set)
{
	isl_multi_aff *zero_ma;

	/* Build a 0-D zero multi_aff on the space "domain(set) -> []". */
	zero_ma = isl_multi_aff_zero(
			isl_space_from_domain(isl_set_get_space(set)));
	return isl_pw_multi_aff_alloc(set, zero_ma);
}
/* Add an isl_pw_multi_aff with the given "set" as domain and
 * an unnamed zero-dimensional range to *user.
 */
static isl_stat add_pw_multi_aff_from_domain(__isl_take isl_set *set,
	void *user)
{
	isl_union_pw_multi_aff **upma = user;

	*upma = isl_union_pw_multi_aff_add_pw_multi_aff(*upma,
					isl_pw_multi_aff_from_domain(set));
	return isl_stat_ok;
}
/* Return an isl_union_pw_multi_aff with the given "uset" as domain and
 * an unnamed zero-dimensional range.
 */
__isl_give isl_union_pw_multi_aff *isl_union_pw_multi_aff_from_domain(
	__isl_take isl_union_set *uset)
{
	isl_space *space;
	isl_union_pw_multi_aff *upma;
	if (!uset)
		return NULL;
	/* Start from an empty union and add one entry per set in "uset". */
	space = isl_union_set_get_space(uset);
	upma = isl_union_pw_multi_aff_empty(space);
	if (isl_union_set_foreach_set(uset,
				    &add_pw_multi_aff_from_domain, &upma) < 0)
		goto error;
	isl_union_set_free(uset);
	return upma;
error:
	isl_union_set_free(uset);
	isl_union_pw_multi_aff_free(upma);
	return NULL;
}
/* Convert "pma" to an isl_map and add it to *user.
 */
static isl_stat map_from_pw_multi_aff(__isl_take isl_pw_multi_aff *pma,
	void *user)
{
	isl_union_map **umap = user;

	*umap = isl_union_map_add_map(*umap, isl_map_from_pw_multi_aff(pma));
	return isl_stat_ok;
}
/* Construct a union map mapping the domain of the union
 * piecewise multi-affine expression to its range, with each dimension
 * in the range equated to the corresponding affine expression on its cell.
 */
__isl_give isl_union_map *isl_union_map_from_union_pw_multi_aff(
	__isl_take isl_union_pw_multi_aff *upma)
{
	isl_space *space;
	isl_union_map *umap;
	if (!upma)
		return NULL;
	/* Convert each isl_pw_multi_aff entry to a map and collect them. */
	space = isl_union_pw_multi_aff_get_space(upma);
	umap = isl_union_map_empty(space);
	if (isl_union_pw_multi_aff_foreach_pw_multi_aff(upma,
					&map_from_pw_multi_aff, &umap) < 0)
		goto error;
	isl_union_pw_multi_aff_free(upma);
	return umap;
error:
	isl_union_pw_multi_aff_free(upma);
	isl_union_map_free(umap);
	return NULL;
}
/* Local data for bin_entry and the callback "fn".
 */
struct isl_union_pw_multi_aff_bin_data {
	isl_union_pw_multi_aff *upma2;	/* second operand, iterated by "fn" */
	isl_union_pw_multi_aff *res;	/* accumulated result */
	isl_pw_multi_aff *pma;		/* current entry of the first operand */
	isl_stat (*fn)(__isl_take isl_pw_multi_aff *pma, void *user);
};
/* Given an isl_pw_multi_aff from upma1, store it in data->pma
 * and call data->fn for each isl_pw_multi_aff in data->upma2.
 */
static isl_stat bin_entry(__isl_take isl_pw_multi_aff *pma, void *user)
{
	struct isl_union_pw_multi_aff_bin_data *data = user;
	isl_stat res;

	/* Expose the current entry of the first operand to the callback. */
	data->pma = pma;
	res = isl_union_pw_multi_aff_foreach_pw_multi_aff(data->upma2,
							data->fn, data);
	isl_pw_multi_aff_free(pma);

	return res;
}
/* Call "fn" on each pair of isl_pw_multi_affs in "upma1" and "upma2".
 * The isl_pw_multi_aff from upma1 is stored in data->pma (where data is
 * passed as user field) and the isl_pw_multi_aff from upma2 is available
 * as *entry. The callback should adjust data->res if desired.
 */
static __isl_give isl_union_pw_multi_aff *bin_op(
	__isl_take isl_union_pw_multi_aff *upma1,
	__isl_take isl_union_pw_multi_aff *upma2,
	isl_stat (*fn)(__isl_take isl_pw_multi_aff *pma, void *user))
{
	isl_space *space;
	struct isl_union_pw_multi_aff_bin_data data = { NULL, NULL, NULL, fn };
	/* Align the parameters of the two operands in both directions. */
	space = isl_union_pw_multi_aff_get_space(upma2);
	upma1 = isl_union_pw_multi_aff_align_params(upma1, space);
	space = isl_union_pw_multi_aff_get_space(upma1);
	upma2 = isl_union_pw_multi_aff_align_params(upma2, space);
	if (!upma1 || !upma2)
		goto error;
	data.upma2 = upma2;
	data.res = isl_union_pw_multi_aff_alloc_same_size(upma1);
	/* bin_entry iterates over upma2 for each entry of upma1. */
	if (isl_union_pw_multi_aff_foreach_pw_multi_aff(upma1,
							&bin_entry, &data) < 0)
		goto error;
	isl_union_pw_multi_aff_free(upma1);
	isl_union_pw_multi_aff_free(upma2);
	return data.res;
error:
	isl_union_pw_multi_aff_free(upma1);
	isl_union_pw_multi_aff_free(upma2);
	isl_union_pw_multi_aff_free(data.res);
	return NULL;
}
/* Given two aligned isl_pw_multi_affs A -> B and C -> D,
 * construct an isl_pw_multi_aff (A * C) -> [B -> D].
 */
static __isl_give isl_pw_multi_aff *pw_multi_aff_range_product(
	__isl_take isl_pw_multi_aff *pma1, __isl_take isl_pw_multi_aff *pma2)
{
	isl_space *space1, *space2, *space;

	space1 = isl_pw_multi_aff_get_space(pma1);
	space2 = isl_pw_multi_aff_get_space(pma2);
	space = isl_space_range_product(space1, space2);
	return isl_pw_multi_aff_on_shared_domain_in(pma1, pma2, space,
					&isl_multi_aff_range_product);
}
/* Given two isl_pw_multi_affs A -> B and C -> D,
 * construct an isl_pw_multi_aff (A * C) -> [B -> D].
 *
 * The parameters are aligned before the aligned variant is invoked.
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_range_product(
	__isl_take isl_pw_multi_aff *pma1, __isl_take isl_pw_multi_aff *pma2)
{
	isl_pw_multi_aff *res;

	res = isl_pw_multi_aff_align_params_pw_pw_and(pma1, pma2,
					&pw_multi_aff_range_product);
	return res;
}
/* Given two aligned isl_pw_multi_affs A -> B and C -> D,
 * construct an isl_pw_multi_aff (A * C) -> (B, D).
 */
static __isl_give isl_pw_multi_aff *pw_multi_aff_flat_range_product(
	__isl_take isl_pw_multi_aff *pma1, __isl_take isl_pw_multi_aff *pma2)
{
	isl_space *space;

	/* Compute the range product space and flatten its nested range. */
	space = isl_space_flatten_range(isl_space_range_product(
					isl_pw_multi_aff_get_space(pma1),
					isl_pw_multi_aff_get_space(pma2)));
	return isl_pw_multi_aff_on_shared_domain_in(pma1, pma2, space,
					&isl_multi_aff_flat_range_product);
}
/* Given two isl_pw_multi_affs A -> B and C -> D,
 * construct an isl_pw_multi_aff (A * C) -> (B, D).
 *
 * The parameters are aligned before the aligned variant is invoked.
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_flat_range_product(
	__isl_take isl_pw_multi_aff *pma1, __isl_take isl_pw_multi_aff *pma2)
{
	isl_pw_multi_aff *res;

	res = isl_pw_multi_aff_align_params_pw_pw_and(pma1, pma2,
					&pw_multi_aff_flat_range_product);
	return res;
}
/* If data->pma and "pma2" have the same domain space, then compute
 * their flat range product and add the result to data->res.
 */
static isl_stat flat_range_product_entry(__isl_take isl_pw_multi_aff *pma2,
	void *user)
{
	struct isl_union_pw_multi_aff_bin_data *data = user;
	isl_pw_multi_aff *prod;

	/* Skip pairs whose domain spaces do not match. */
	if (!isl_space_tuple_is_equal(data->pma->dim, isl_dim_in,
				 pma2->dim, isl_dim_in)) {
		isl_pw_multi_aff_free(pma2);
		return isl_stat_ok;
	}

	prod = isl_pw_multi_aff_flat_range_product(
			isl_pw_multi_aff_copy(data->pma), pma2);
	data->res = isl_union_pw_multi_aff_add_pw_multi_aff(data->res, prod);

	return isl_stat_ok;
}
/* Given two isl_union_pw_multi_affs A -> B and C -> D,
 * construct an isl_union_pw_multi_aff (A * C) -> (B, D).
 */
__isl_give isl_union_pw_multi_aff *isl_union_pw_multi_aff_flat_range_product(
	__isl_take isl_union_pw_multi_aff *upma1,
	__isl_take isl_union_pw_multi_aff *upma2)
{
	isl_union_pw_multi_aff *res;

	res = bin_op(upma1, upma2, &flat_range_product_entry);
	return res;
}
/* Replace the affine expressions at position "pos" in "pma" by "pa".
 * The parameters are assumed to have been aligned.
 *
 * The implementation essentially performs an isl_pw_*_on_shared_domain,
 * except that it works on two different isl_pw_* types.
 */
static __isl_give isl_pw_multi_aff *pw_multi_aff_set_pw_aff(
	__isl_take isl_pw_multi_aff *pma, unsigned pos,
	__isl_take isl_pw_aff *pa)
{
	int i, j, n;
	isl_pw_multi_aff *res = NULL;
	if (!pma || !pa)
		goto error;
	/* Both arguments need to be defined over the same domain space. */
	if (!isl_space_tuple_is_equal(pma->dim, isl_dim_in,
					pa->dim, isl_dim_in))
		isl_die(isl_pw_multi_aff_get_ctx(pma), isl_error_invalid,
			"domains don't match", goto error);
	if (pos >= isl_pw_multi_aff_dim(pma, isl_dim_out))
		isl_die(isl_pw_multi_aff_get_ctx(pma), isl_error_invalid,
			"index out of bounds", goto error);
	/* At most one result piece per pair of input pieces. */
	n = pma->n * pa->n;
	res = isl_pw_multi_aff_alloc_size(isl_pw_multi_aff_get_space(pma), n);
	for (i = 0; i < pma->n; ++i) {
		for (j = 0; j < pa->n; ++j) {
			isl_set *common;
			isl_multi_aff *res_ij;
			int empty;
			common = isl_set_intersect(isl_set_copy(pma->p[i].set),
						   isl_set_copy(pa->p[j].set));
			/* Skip pairs with an obviously empty shared domain;
			 * a negative value signals an error. */
			empty = isl_set_plain_is_empty(common);
			if (empty < 0 || empty) {
				isl_set_free(common);
				if (empty < 0)
					goto error;
				continue;
			}
			res_ij = isl_multi_aff_set_aff(
					isl_multi_aff_copy(pma->p[i].maff), pos,
					isl_aff_copy(pa->p[j].aff));
			/* Simplify with respect to the shared domain. */
			res_ij = isl_multi_aff_gist(res_ij,
					isl_set_copy(common));
			res = isl_pw_multi_aff_add_piece(res, common, res_ij);
		}
	}
	isl_pw_multi_aff_free(pma);
	isl_pw_aff_free(pa);
	return res;
error:
	isl_pw_multi_aff_free(pma);
	isl_pw_aff_free(pa);
	return isl_pw_multi_aff_free(res);
}
/* Replace the affine expressions at position "pos" in "pma" by "pa".
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_set_pw_aff(
	__isl_take isl_pw_multi_aff *pma, unsigned pos,
	__isl_take isl_pw_aff *pa)
{
	if (!pma || !pa)
		goto error;
	/* Fast path: the parameters are already aligned. */
	if (isl_space_match(pma->dim, isl_dim_param, pa->dim, isl_dim_param))
		return pw_multi_aff_set_pw_aff(pma, pos, pa);
	/* Unnamed parameters cannot be aligned automatically. */
	if (!isl_space_has_named_params(pma->dim) ||
	    !isl_space_has_named_params(pa->dim))
		isl_die(isl_pw_multi_aff_get_ctx(pma), isl_error_invalid,
			"unaligned unnamed parameters", goto error);
	pma = isl_pw_multi_aff_align_params(pma, isl_pw_aff_get_space(pa));
	pa = isl_pw_aff_align_params(pa, isl_pw_multi_aff_get_space(pma));
	return pw_multi_aff_set_pw_aff(pma, pos, pa);
error:
	isl_pw_multi_aff_free(pma);
	isl_pw_aff_free(pa);
	return NULL;
}
/* Do the parameters of "pa" match those of "space"?
 *
 * Return 1 if they do, 0 if they do not and -1 on error.
 */
int isl_pw_aff_matching_params(__isl_keep isl_pw_aff *pa,
	__isl_keep isl_space *space)
{
	int match;
	isl_space *pa_space;

	if (!pa || !space)
		return -1;

	pa_space = isl_pw_aff_get_space(pa);
	match = isl_space_match(space, isl_dim_param, pa_space, isl_dim_param);
	isl_space_free(pa_space);

	return match;
}
/* Check that the domain space of "pa" matches "space".
 *
 * Return 0 on success and -1 on error.
 */
int isl_pw_aff_check_match_domain_space(__isl_keep isl_pw_aff *pa,
	__isl_keep isl_space *space)
{
	isl_space *pa_space;
	int match;
	if (!pa || !space)
		return -1;
	pa_space = isl_pw_aff_get_space(pa);
	/* The parameters need to match... */
	match = isl_space_match(space, isl_dim_param, pa_space, isl_dim_param);
	if (match < 0)
		goto error;
	if (!match)
		isl_die(isl_pw_aff_get_ctx(pa), isl_error_invalid,
			"parameters don't match", goto error);
	/* ...as well as the input tuple. */
	match = isl_space_tuple_is_equal(space, isl_dim_in,
					pa_space, isl_dim_in);
	if (match < 0)
		goto error;
	if (!match)
		isl_die(isl_pw_aff_get_ctx(pa), isl_error_invalid,
			"domains don't match", goto error);
	isl_space_free(pa_space);
	return 0;
error:
	isl_space_free(pa_space);
	return -1;
}
#undef BASE
#define BASE pw_aff
#undef DOMBASE
#define DOMBASE set
/* Instantiate the generic isl_multi_* templates for isl_pw_aff,
 * providing the isl_multi_pw_aff type and its basic operations. */
#include <isl_multi_templ.c>
#include <isl_multi_apply_set.c>
#include <isl_multi_coalesce.c>
#include <isl_multi_gist.c>
#include <isl_multi_hash.c>
#include <isl_multi_intersect.c>
/* Scale the elements of "pma" by the corresponding elements of "mv".
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_scale_multi_val(
	__isl_take isl_pw_multi_aff *pma, __isl_take isl_multi_val *mv)
{
	int i;
	pma = isl_pw_multi_aff_cow(pma);
	if (!pma || !mv)
		goto error;
	/* The output tuple of "pma" must match the tuple of "mv". */
	if (!isl_space_tuple_is_equal(pma->dim, isl_dim_out,
					mv->space, isl_dim_set))
		isl_die(isl_pw_multi_aff_get_ctx(pma), isl_error_invalid,
			"spaces don't match", goto error);
	/* Align the parameters of the two arguments if needed. */
	if (!isl_space_match(pma->dim, isl_dim_param,
					mv->space, isl_dim_param)) {
		pma = isl_pw_multi_aff_align_params(pma,
					    isl_multi_val_get_space(mv));
		mv = isl_multi_val_align_params(mv,
					    isl_pw_multi_aff_get_space(pma));
		if (!pma || !mv)
			goto error;
	}
	/* Scale the multi_aff of each piece. */
	for (i = 0; i < pma->n; ++i) {
		pma->p[i].maff = isl_multi_aff_scale_multi_val(pma->p[i].maff,
						isl_multi_val_copy(mv));
		if (!pma->p[i].maff)
			goto error;
	}
	isl_multi_val_free(mv);
	return pma;
error:
	isl_multi_val_free(mv);
	isl_pw_multi_aff_free(pma);
	return NULL;
}
/* This function is called for each entry of an isl_union_pw_multi_aff.
 * If the space of the entry matches that of data->mv,
 * then apply isl_pw_multi_aff_scale_multi_val and return the result.
 * Otherwise, return an empty isl_pw_multi_aff.
 */
static __isl_give isl_pw_multi_aff *union_pw_multi_aff_scale_multi_val_entry(
	__isl_take isl_pw_multi_aff *pma, void *user)
{
	isl_multi_val *mv = user;
	int match;

	if (!pma)
		return NULL;
	match = isl_space_tuple_is_equal(pma->dim, isl_dim_out,
					mv->space, isl_dim_set);
	if (!match) {
		isl_space *space = isl_pw_multi_aff_get_space(pma);

		isl_pw_multi_aff_free(pma);
		return isl_pw_multi_aff_empty(space);
	}

	return isl_pw_multi_aff_scale_multi_val(pma, isl_multi_val_copy(mv));
}
/* Scale the elements of "upma" by the corresponding elements of "mv",
 * for those entries that match the space of "mv".
 */
__isl_give isl_union_pw_multi_aff *isl_union_pw_multi_aff_scale_multi_val(
	__isl_take isl_union_pw_multi_aff *upma, __isl_take isl_multi_val *mv)
{
	upma = isl_union_pw_multi_aff_align_params(upma,
						isl_multi_val_get_space(mv));
	mv = isl_multi_val_align_params(mv,
					isl_union_pw_multi_aff_get_space(upma));
	if (!upma || !mv)
		goto error;
	/* The callback only takes copies of "mv", so our reference needs
	 * to be released after the transformation.  The original code
	 * returned directly from the transform call, making the free
	 * below it unreachable and leaking "mv". */
	upma = isl_union_pw_multi_aff_transform(upma,
		       &union_pw_multi_aff_scale_multi_val_entry, mv);
	isl_multi_val_free(mv);
	return upma;
error:
	isl_multi_val_free(mv);
	isl_union_pw_multi_aff_free(upma);
	return NULL;
}
/* Construct and return a piecewise multi affine expression
 * in the given space with value zero in each of the output dimensions and
 * a universe domain.
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_zero(__isl_take isl_space *space)
{
	isl_multi_aff *zero_ma;

	zero_ma = isl_multi_aff_zero(space);
	return isl_pw_multi_aff_from_multi_aff(zero_ma);
}
/* Construct and return a piecewise multi affine expression
 * that is equal to the given piecewise affine expression.
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_from_pw_aff(
	__isl_take isl_pw_aff *pa)
{
	int i;
	isl_pw_multi_aff *pma;

	if (!pa)
		return NULL;

	pma = isl_pw_multi_aff_alloc_size(isl_pw_aff_get_space(pa), pa->n);

	/* Wrap the affine expression of each piece in a 1-D multi_aff. */
	for (i = 0; i < pa->n; ++i) {
		isl_multi_aff *ma;

		ma = isl_multi_aff_from_aff(isl_aff_copy(pa->p[i].aff));
		pma = isl_pw_multi_aff_add_piece(pma,
					isl_set_copy(pa->p[i].set), ma);
	}

	isl_pw_aff_free(pa);
	return pma;
}
/* Construct a set or map mapping the shared (parameter) domain
 * of the piecewise affine expressions to the range of "mpa"
 * with each dimension in the range equated to the
 * corresponding piecewise affine expression.
 */
static __isl_give isl_map *map_from_multi_pw_aff(
	__isl_take isl_multi_pw_aff *mpa)
{
	int i;
	isl_space *space;
	isl_map *map;
	if (!mpa)
		return NULL;
	if (isl_space_dim(mpa->space, isl_dim_out) != mpa->n)
		isl_die(isl_multi_pw_aff_get_ctx(mpa), isl_error_internal,
			"invalid space", goto error);
	/* Start from a universe over the domain and append
	 * one range dimension per element. */
	space = isl_multi_pw_aff_get_domain_space(mpa);
	map = isl_map_universe(isl_space_from_domain(space));
	for (i = 0; i < mpa->n; ++i) {
		isl_pw_aff *pa;
		isl_map *map_i;
		pa = isl_pw_aff_copy(mpa->p[i]);
		map_i = map_from_pw_aff(pa);
		map = isl_map_flat_range_product(map, map_i);
	}
	/* Restore the original tuple information. */
	map = isl_map_reset_space(map, isl_multi_pw_aff_get_space(mpa));
	isl_multi_pw_aff_free(mpa);
	return map;
error:
	isl_multi_pw_aff_free(mpa);
	return NULL;
}
/* Construct a map mapping the shared domain
 * of the piecewise affine expressions to the range of "mpa"
 * with each dimension in the range equated to the
 * corresponding piecewise affine expression.
 */
__isl_give isl_map *isl_map_from_multi_pw_aff(__isl_take isl_multi_pw_aff *mpa)
{
	if (!mpa)
		return NULL;

	/* Only inputs living in a map space can be converted. */
	if (!isl_space_is_set(mpa->space))
		return map_from_multi_pw_aff(mpa);

	isl_die(isl_multi_pw_aff_get_ctx(mpa), isl_error_internal,
		"space of input is not a map", goto error);
error:
	isl_multi_pw_aff_free(mpa);
	return NULL;
}
/* Construct a set mapping the shared parameter domain
 * of the piecewise affine expressions to the space of "mpa"
 * with each dimension in the range equated to the
 * corresponding piecewise affine expression.
 */
__isl_give isl_set *isl_set_from_multi_pw_aff(__isl_take isl_multi_pw_aff *mpa)
{
	if (!mpa)
		return NULL;

	/* Only inputs living in a set space can be converted. */
	if (isl_space_is_set(mpa->space))
		return map_from_multi_pw_aff(mpa);

	isl_die(isl_multi_pw_aff_get_ctx(mpa), isl_error_internal,
		"space of input is not a set", goto error);
error:
	isl_multi_pw_aff_free(mpa);
	return NULL;
}
/* Construct and return a piecewise multi affine expression
 * that is equal to the given multi piecewise affine expression
 * on the shared domain of the piecewise affine expressions.
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_from_multi_pw_aff(
	__isl_take isl_multi_pw_aff *mpa)
{
	int i;
	isl_space *space;
	isl_pw_aff *pa;
	isl_pw_multi_aff *pma;
	if (!mpa)
		return NULL;
	space = isl_multi_pw_aff_get_space(mpa);
	/* With no elements, the result is simply zero-dimensional. */
	if (mpa->n == 0) {
		isl_multi_pw_aff_free(mpa);
		return isl_pw_multi_aff_zero(space);
	}
	/* Combine the elements one by one through range products,
	 * intersecting their domains in the process. */
	pa = isl_multi_pw_aff_get_pw_aff(mpa, 0);
	pma = isl_pw_multi_aff_from_pw_aff(pa);
	for (i = 1; i < mpa->n; ++i) {
		isl_pw_multi_aff *pma_i;
		pa = isl_multi_pw_aff_get_pw_aff(mpa, i);
		pma_i = isl_pw_multi_aff_from_pw_aff(pa);
		pma = isl_pw_multi_aff_range_product(pma, pma_i);
	}
	/* The range products introduced nesting; restore the flat space. */
	pma = isl_pw_multi_aff_reset_space(pma, space);
	isl_multi_pw_aff_free(mpa);
	return pma;
}
/* Construct and return a multi piecewise affine expression
 * that is equal to the given multi affine expression.
 */
__isl_give isl_multi_pw_aff *isl_multi_pw_aff_from_multi_aff(
	__isl_take isl_multi_aff *ma)
{
	int i, n;
	isl_multi_pw_aff *mpa;

	if (!ma)
		return NULL;

	n = isl_multi_aff_dim(ma, isl_dim_out);
	mpa = isl_multi_pw_aff_alloc(isl_multi_aff_get_space(ma));

	/* Turn each affine element into a single-piece isl_pw_aff. */
	for (i = 0; i < n; ++i) {
		isl_aff *aff;

		aff = isl_multi_aff_get_aff(ma, i);
		mpa = isl_multi_pw_aff_set_pw_aff(mpa, i,
						isl_pw_aff_from_aff(aff));
	}

	isl_multi_aff_free(ma);
	return mpa;
}
/* Construct and return a multi piecewise affine expression
 * that is equal to the given piecewise multi affine expression.
 */
__isl_give isl_multi_pw_aff *isl_multi_pw_aff_from_pw_multi_aff(
	__isl_take isl_pw_multi_aff *pma)
{
	int i, n;
	isl_multi_pw_aff *mpa;

	if (!pma)
		return NULL;

	n = isl_pw_multi_aff_dim(pma, isl_dim_out);
	mpa = isl_multi_pw_aff_alloc(isl_pw_multi_aff_get_space(pma));

	/* Extract the isl_pw_aff for each output dimension in turn. */
	for (i = 0; i < n; ++i) {
		isl_pw_aff *pa;

		pa = isl_pw_multi_aff_get_pw_aff(pma, i);
		mpa = isl_multi_pw_aff_set_pw_aff(mpa, i, pa);
	}

	isl_pw_multi_aff_free(pma);
	return mpa;
}
/* Do "pa1" and "pa2" represent the same function?
 *
 * We first check if they are obviously equal.
 * If not, we convert them to maps and check if those are equal.
 *
 * If "pa1" or "pa2" contain any NaNs, then they are considered
 * not to be the same. A NaN is not equal to anything, not even
 * to another NaN.
 */
int isl_pw_aff_is_equal(__isl_keep isl_pw_aff *pa1, __isl_keep isl_pw_aff *pa2)
{
	int equal;
	isl_bool has_nan;
	isl_map *map1, *map2;
	if (!pa1 || !pa2)
		return -1;
	/* Cheap structural comparison first. */
	equal = isl_pw_aff_plain_is_equal(pa1, pa2);
	if (equal < 0 || equal)
		return equal;
	has_nan = isl_pw_aff_involves_nan(pa1);
	if (has_nan >= 0 && !has_nan)
		has_nan = isl_pw_aff_involves_nan(pa2);
	if (has_nan < 0)
		return -1;
	if (has_nan)
		return 0;
	/* Fall back to a full comparison as maps. */
	map1 = map_from_pw_aff(isl_pw_aff_copy(pa1));
	map2 = map_from_pw_aff(isl_pw_aff_copy(pa2));
	equal = isl_map_is_equal(map1, map2);
	isl_map_free(map1);
	isl_map_free(map2);
	return equal;
}
/* Do "mpa1" and "mpa2" represent the same function?
 *
 * Note that we cannot convert the entire isl_multi_pw_aff
 * to a map because the domains of the piecewise affine expressions
 * may not be the same.
 */
isl_bool isl_multi_pw_aff_is_equal(__isl_keep isl_multi_pw_aff *mpa1,
	__isl_keep isl_multi_pw_aff *mpa2)
{
	int i;
	isl_bool equal;
	if (!mpa1 || !mpa2)
		return isl_bool_error;
	/* If the parameters are not aligned, align copies and retry.
	 * Unnamed parameters cannot be aligned, so in that case the
	 * inputs are simply considered different. */
	if (!isl_space_match(mpa1->space, isl_dim_param,
				mpa2->space, isl_dim_param)) {
		if (!isl_space_has_named_params(mpa1->space))
			return isl_bool_false;
		if (!isl_space_has_named_params(mpa2->space))
			return isl_bool_false;
		mpa1 = isl_multi_pw_aff_copy(mpa1);
		mpa2 = isl_multi_pw_aff_copy(mpa2);
		mpa1 = isl_multi_pw_aff_align_params(mpa1,
					isl_multi_pw_aff_get_space(mpa2));
		mpa2 = isl_multi_pw_aff_align_params(mpa2,
					isl_multi_pw_aff_get_space(mpa1));
		equal = isl_multi_pw_aff_is_equal(mpa1, mpa2);
		isl_multi_pw_aff_free(mpa1);
		isl_multi_pw_aff_free(mpa2);
		return equal;
	}
	equal = isl_space_is_equal(mpa1->space, mpa2->space);
	if (equal < 0 || !equal)
		return equal;
	/* Compare the elements pairwise. */
	for (i = 0; i < mpa1->n; ++i) {
		equal = isl_pw_aff_is_equal(mpa1->p[i], mpa2->p[i]);
		if (equal < 0 || !equal)
			return equal;
	}
	return isl_bool_true;
}
/* Compute the pullback of "mpa" by the function represented by "ma".
 * In other words, plug in "ma" in "mpa".
 *
 * The parameters of "mpa" and "ma" are assumed to have been aligned.
 */
static __isl_give isl_multi_pw_aff *isl_multi_pw_aff_pullback_multi_aff_aligned(
	__isl_take isl_multi_pw_aff *mpa, __isl_take isl_multi_aff *ma)
{
	int i;
	isl_space *space = NULL;
	mpa = isl_multi_pw_aff_cow(mpa);
	if (!mpa || !ma)
		goto error;
	/* The result lives on domain(ma) -> range(mpa). */
	space = isl_space_join(isl_multi_aff_get_space(ma),
				isl_multi_pw_aff_get_space(mpa));
	if (!space)
		goto error;
	/* Pull back each element individually. */
	for (i = 0; i < mpa->n; ++i) {
		mpa->p[i] = isl_pw_aff_pullback_multi_aff(mpa->p[i],
						    isl_multi_aff_copy(ma));
		if (!mpa->p[i])
			goto error;
	}
	isl_multi_aff_free(ma);
	isl_space_free(mpa->space);
	mpa->space = space;
	return mpa;
error:
	isl_space_free(space);
	isl_multi_pw_aff_free(mpa);
	isl_multi_aff_free(ma);
	return NULL;
}
/* Compute the pullback of "mpa" by the function represented by "ma".
 * In other words, plug in "ma" in "mpa".
 *
 * The parameters are aligned first when necessary.
 */
__isl_give isl_multi_pw_aff *isl_multi_pw_aff_pullback_multi_aff(
	__isl_take isl_multi_pw_aff *mpa, __isl_take isl_multi_aff *ma)
{
	if (!mpa || !ma)
		goto error;

	if (!isl_space_match(mpa->space, isl_dim_param,
				ma->space, isl_dim_param)) {
		mpa = isl_multi_pw_aff_align_params(mpa,
					isl_multi_aff_get_space(ma));
		ma = isl_multi_aff_align_params(ma,
					isl_multi_pw_aff_get_space(mpa));
	}

	return isl_multi_pw_aff_pullback_multi_aff_aligned(mpa, ma);
error:
	isl_multi_pw_aff_free(mpa);
	isl_multi_aff_free(ma);
	return NULL;
}
/* Compute the pullback of "mpa" by the function represented by "pma".
* In other words, plug in "pma" in "mpa".
*
 * The parameters of "mpa" and "pma" are assumed to have been aligned.
*/
static __isl_give isl_multi_pw_aff *
isl_multi_pw_aff_pullback_pw_multi_aff_aligned(
__isl_take isl_multi_pw_aff *mpa, __isl_take isl_pw_multi_aff *pma)
{
int i;
isl_space *space = NULL;
mpa = isl_multi_pw_aff_cow(mpa);
if (!mpa || !pma)
goto error;
space = isl_space_join(isl_pw_multi_aff_get_space(pma),
isl_multi_pw_aff_get_space(mpa));
for (i = 0; i < mpa->n; ++i) {
mpa->p[i] = isl_pw_aff_pullback_pw_multi_aff_aligned(mpa->p[i],
isl_pw_multi_aff_copy(pma));
if (!mpa->p[i])
goto error;
}
isl_pw_multi_aff_free(pma);
isl_space_free(mpa->space);
mpa->space = space;
return mpa;
error:
isl_space_free(space);
isl_multi_pw_aff_free(mpa);
isl_pw_multi_aff_free(pma);
return NULL;
}
/* Compute the pullback of "mpa" by the function represented by "pma".
 * In other words, plug in "pma" in "mpa".
 *
 * If the parameters of the two arguments already match, the aligned
 * variant can be called directly; otherwise, align them first.
 */
__isl_give isl_multi_pw_aff *isl_multi_pw_aff_pullback_pw_multi_aff(
	__isl_take isl_multi_pw_aff *mpa, __isl_take isl_pw_multi_aff *pma)
{
	if (!mpa || !pma)
		goto error;
	if (isl_space_match(mpa->space, isl_dim_param, pma->dim, isl_dim_param))
		return isl_multi_pw_aff_pullback_pw_multi_aff_aligned(mpa, pma);
	/* Align "mpa" first so that "pma" is aligned against the result. */
	mpa = isl_multi_pw_aff_align_params(mpa,
					isl_pw_multi_aff_get_space(pma));
	pma = isl_pw_multi_aff_align_params(pma,
					isl_multi_pw_aff_get_space(mpa));
	return isl_multi_pw_aff_pullback_pw_multi_aff_aligned(mpa, pma);
error:
	isl_multi_pw_aff_free(mpa);
	isl_pw_multi_aff_free(pma);
	return NULL;
}
/* Apply "aff" to "mpa". The range of "mpa" needs to be compatible
 * with the domain of "aff". The domain of the result is the same
 * as that of "mpa".
 * "mpa" and "aff" are assumed to have been aligned.
 *
 * We first extract the parametric constant from "aff", defined
 * over the correct domain.
 * Then we add the appropriate combinations of the members of "mpa".
 * Finally, we add the integer divisions through recursive calls.
 */
static __isl_give isl_pw_aff *isl_multi_pw_aff_apply_aff_aligned(
	__isl_take isl_multi_pw_aff *mpa, __isl_take isl_aff *aff)
{
	int i, n_in, n_div;
	isl_space *space;
	isl_val *v;
	isl_pw_aff *pa;
	isl_aff *tmp;
	n_in = isl_aff_dim(aff, isl_dim_in);
	n_div = isl_aff_dim(aff, isl_dim_div);
	space = isl_space_domain(isl_multi_pw_aff_get_space(mpa));
	/* "tmp" becomes the parametric constant of "aff": drop all input
	 * and division dimensions, then move it to the domain of "mpa".
	 */
	tmp = isl_aff_copy(aff);
	tmp = isl_aff_drop_dims(tmp, isl_dim_div, 0, n_div);
	tmp = isl_aff_drop_dims(tmp, isl_dim_in, 0, n_in);
	tmp = isl_aff_add_dims(tmp, isl_dim_in,
				isl_space_dim(space, isl_dim_set));
	tmp = isl_aff_reset_domain_space(tmp, space);
	pa = isl_pw_aff_from_aff(tmp);
	/* Add c_i * mpa_i for each input dimension with coefficient c_i. */
	for (i = 0; i < n_in; ++i) {
		isl_pw_aff *pa_i;
		if (!isl_aff_involves_dims(aff, isl_dim_in, i, 1))
			continue;
		v = isl_aff_get_coefficient_val(aff, isl_dim_in, i);
		pa_i = isl_multi_pw_aff_get_pw_aff(mpa, i);
		pa_i = isl_pw_aff_scale_val(pa_i, v);
		pa = isl_pw_aff_add(pa, pa_i);
	}
	/* Add c_i * floor(div_i(mpa)) for each integer division,
	 * evaluating the division argument through a recursive call.
	 */
	for (i = 0; i < n_div; ++i) {
		isl_aff *div;
		isl_pw_aff *pa_i;
		if (!isl_aff_involves_dims(aff, isl_dim_div, i, 1))
			continue;
		div = isl_aff_get_div(aff, i);
		pa_i = isl_multi_pw_aff_apply_aff_aligned(
					isl_multi_pw_aff_copy(mpa), div);
		pa_i = isl_pw_aff_floor(pa_i);
		v = isl_aff_get_coefficient_val(aff, isl_dim_div, i);
		pa_i = isl_pw_aff_scale_val(pa_i, v);
		pa = isl_pw_aff_add(pa, pa_i);
	}
	isl_multi_pw_aff_free(mpa);
	isl_aff_free(aff);
	return pa;
}
/* Apply "aff" to "mpa". The range of "mpa" needs to be compatible
 * with the domain of "aff". The domain of the result is the same
 * as that of "mpa".
 *
 * Align the parameters if needed and call the aligned variant.
 */
__isl_give isl_pw_aff *isl_multi_pw_aff_apply_aff(
	__isl_take isl_multi_pw_aff *mpa, __isl_take isl_aff *aff)
{
	if (!aff || !mpa)
		goto error;
	if (isl_space_match(aff->ls->dim, isl_dim_param,
				mpa->space, isl_dim_param))
		return isl_multi_pw_aff_apply_aff_aligned(mpa, aff);
	aff = isl_aff_align_params(aff, isl_multi_pw_aff_get_space(mpa));
	mpa = isl_multi_pw_aff_align_params(mpa, isl_aff_get_space(aff));
	return isl_multi_pw_aff_apply_aff_aligned(mpa, aff);
error:
	isl_aff_free(aff);
	isl_multi_pw_aff_free(mpa);
	return NULL;
}
/* Apply "pa" to "mpa". The range of "mpa" needs to be compatible
 * with the domain of "pa". The domain of the result is the same
 * as that of "mpa".
 * "mpa" and "pa" are assumed to have been aligned.
 *
 * We consider each piece in turn. Note that the domains of the
 * pieces are assumed to be disjoint and they remain disjoint
 * after taking the preimage (over the same function).
 */
static __isl_give isl_pw_aff *isl_multi_pw_aff_apply_pw_aff_aligned(
	__isl_take isl_multi_pw_aff *mpa, __isl_take isl_pw_aff *pa)
{
	isl_space *space;
	isl_pw_aff *res;
	int i;
	if (!mpa || !pa)
		goto error;
	space = isl_space_join(isl_multi_pw_aff_get_space(mpa),
				isl_pw_aff_get_space(pa));
	res = isl_pw_aff_empty(space);
	for (i = 0; i < pa->n; ++i) {
		isl_pw_aff *pa_i;
		isl_set *domain;
		/* Apply the affine expression of this piece... */
		pa_i = isl_multi_pw_aff_apply_aff_aligned(
					isl_multi_pw_aff_copy(mpa),
					isl_aff_copy(pa->p[i].aff));
		/* ...and restrict it to the preimage of the piece's cell. */
		domain = isl_set_copy(pa->p[i].set);
		domain = isl_set_preimage_multi_pw_aff(domain,
					isl_multi_pw_aff_copy(mpa));
		pa_i = isl_pw_aff_intersect_domain(pa_i, domain);
		/* The preimages are pairwise disjoint (see above). */
		res = isl_pw_aff_add_disjoint(res, pa_i);
	}
	isl_pw_aff_free(pa);
	isl_multi_pw_aff_free(mpa);
	return res;
error:
	isl_pw_aff_free(pa);
	isl_multi_pw_aff_free(mpa);
	return NULL;
}
/* Apply "pa" to "mpa". The range of "mpa" needs to be compatible
 * with the domain of "pa". The domain of the result is the same
 * as that of "mpa".
 *
 * Align the parameters if needed and call the aligned variant.
 */
__isl_give isl_pw_aff *isl_multi_pw_aff_apply_pw_aff(
	__isl_take isl_multi_pw_aff *mpa, __isl_take isl_pw_aff *pa)
{
	if (!pa || !mpa)
		goto error;
	if (isl_space_match(pa->dim, isl_dim_param, mpa->space, isl_dim_param))
		return isl_multi_pw_aff_apply_pw_aff_aligned(mpa, pa);
	pa = isl_pw_aff_align_params(pa, isl_multi_pw_aff_get_space(mpa));
	mpa = isl_multi_pw_aff_align_params(mpa, isl_pw_aff_get_space(pa));
	return isl_multi_pw_aff_apply_pw_aff_aligned(mpa, pa);
error:
	isl_pw_aff_free(pa);
	isl_multi_pw_aff_free(mpa);
	return NULL;
}
/* Compute the pullback of "pa" by the function represented by "mpa".
 * In other words, plug in "mpa" in "pa".
 * "pa" and "mpa" are assumed to have been aligned.
 *
 * The pullback is computed by applying "pa" to "mpa";
 * only the argument order differs between the two operations.
 */
static __isl_give isl_pw_aff *isl_pw_aff_pullback_multi_pw_aff_aligned(
	__isl_take isl_pw_aff *pa, __isl_take isl_multi_pw_aff *mpa)
{
	return isl_multi_pw_aff_apply_pw_aff_aligned(mpa, pa);
}
/* Compute the pullback of "pa" by the function represented by "mpa".
 * In other words, plug in "mpa" in "pa".
 *
 * The pullback is computed by applying "pa" to "mpa";
 * parameter alignment is handled by the callee.
 */
__isl_give isl_pw_aff *isl_pw_aff_pullback_multi_pw_aff(
	__isl_take isl_pw_aff *pa, __isl_take isl_multi_pw_aff *mpa)
{
	return isl_multi_pw_aff_apply_pw_aff(mpa, pa);
}
/* Compute the pullback of "mpa1" by the function represented by "mpa2".
 * In other words, plug in "mpa2" in "mpa1".
 *
 * The parameters of "mpa1" and "mpa2" are assumed to have been aligned.
 *
 * We pullback each member of "mpa1" in turn.
 */
static __isl_give isl_multi_pw_aff *
isl_multi_pw_aff_pullback_multi_pw_aff_aligned(
	__isl_take isl_multi_pw_aff *mpa1, __isl_take isl_multi_pw_aff *mpa2)
{
	int i;
	isl_space *space = NULL;
	/* Obtain a private copy since the elements are modified in place. */
	mpa1 = isl_multi_pw_aff_cow(mpa1);
	if (!mpa1 || !mpa2)
		goto error;
	/* The result lives in the space dom(mpa2) -> range(mpa1). */
	space = isl_space_join(isl_multi_pw_aff_get_space(mpa2),
				isl_multi_pw_aff_get_space(mpa1));
	for (i = 0; i < mpa1->n; ++i) {
		mpa1->p[i] = isl_pw_aff_pullback_multi_pw_aff_aligned(
				mpa1->p[i], isl_multi_pw_aff_copy(mpa2));
		if (!mpa1->p[i])
			goto error;
	}
	/* reset_space consumes "space" (even on failure). */
	mpa1 = isl_multi_pw_aff_reset_space(mpa1, space);
	isl_multi_pw_aff_free(mpa2);
	return mpa1;
error:
	isl_space_free(space);
	isl_multi_pw_aff_free(mpa1);
	isl_multi_pw_aff_free(mpa2);
	return NULL;
}
/* Compute the pullback of "mpa1" by the function represented by "mpa2".
 * In other words, plug in "mpa2" in "mpa1".
 *
 * Parameter alignment is delegated to the generic helper.
 */
__isl_give isl_multi_pw_aff *isl_multi_pw_aff_pullback_multi_pw_aff(
	__isl_take isl_multi_pw_aff *mpa1, __isl_take isl_multi_pw_aff *mpa2)
{
	return isl_multi_pw_aff_align_params_multi_multi_and(mpa1, mpa2,
			&isl_multi_pw_aff_pullback_multi_pw_aff_aligned);
}
/* Align the parameters of "mpa1" and "mpa2", check that the ranges
 * of "mpa1" and "mpa2" live in the same space, construct map space
 * between the domain spaces of "mpa1" and "mpa2" and call "order"
 * with this map space as extra argument.
 */
static __isl_give isl_map *isl_multi_pw_aff_order_map(
	__isl_take isl_multi_pw_aff *mpa1, __isl_take isl_multi_pw_aff *mpa2,
	__isl_give isl_map *(*order)(__isl_keep isl_multi_pw_aff *mpa1,
		__isl_keep isl_multi_pw_aff *mpa2, __isl_take isl_space *space))
{
	int match;
	isl_space *space1, *space2;
	isl_map *res;
	/* Align each argument against the (possibly updated) other. */
	mpa1 = isl_multi_pw_aff_align_params(mpa1,
					isl_multi_pw_aff_get_space(mpa2));
	mpa2 = isl_multi_pw_aff_align_params(mpa2,
					isl_multi_pw_aff_get_space(mpa1));
	if (!mpa1 || !mpa2)
		goto error;
	match = isl_space_tuple_is_equal(mpa1->space, isl_dim_out,
					mpa2->space, isl_dim_out);
	if (match < 0)
		goto error;
	if (!match)
		isl_die(isl_multi_pw_aff_get_ctx(mpa1), isl_error_invalid,
			"range spaces don't match", goto error);
	/* The result maps dom(mpa1) to dom(mpa2). */
	space1 = isl_space_domain(isl_multi_pw_aff_get_space(mpa1));
	space2 = isl_space_domain(isl_multi_pw_aff_get_space(mpa2));
	space1 = isl_space_map_from_domain_and_range(space1, space2);
	res = order(mpa1, mpa2, space1);
	isl_multi_pw_aff_free(mpa1);
	isl_multi_pw_aff_free(mpa2);
	return res;
error:
	isl_multi_pw_aff_free(mpa1);
	isl_multi_pw_aff_free(mpa2);
	return NULL;
}
/* Return a map containing pairs of elements in the domains of "mpa1" and "mpa2"
 * where the function values are equal. "space" is the space of the result.
 * The parameters of "mpa1" and "mpa2" are assumed to have been aligned.
 *
 * "mpa1" and "mpa2" are equal exactly when every pair of corresponding
 * members is equal, so intersect the universe map with the equality map
 * of each pair of members.
 */
static __isl_give isl_map *isl_multi_pw_aff_eq_map_on_space(
	__isl_keep isl_multi_pw_aff *mpa1, __isl_keep isl_multi_pw_aff *mpa2,
	__isl_take isl_space *space)
{
	int pos, n_out;
	isl_map *res;
	res = isl_map_universe(space);
	n_out = isl_multi_pw_aff_dim(mpa1, isl_dim_out);
	for (pos = 0; pos < n_out; ++pos) {
		isl_pw_aff *elt1, *elt2;
		elt1 = isl_multi_pw_aff_get_pw_aff(mpa1, pos);
		elt2 = isl_multi_pw_aff_get_pw_aff(mpa2, pos);
		res = isl_map_intersect(res, isl_pw_aff_eq_map(elt1, elt2));
	}
	return res;
}
/* Return a map containing pairs of elements in the domains of "mpa1" and "mpa2"
 * where the function values are equal.
 *
 * Parameter alignment and range-space checking are handled by
 * isl_multi_pw_aff_order_map.
 */
__isl_give isl_map *isl_multi_pw_aff_eq_map(__isl_take isl_multi_pw_aff *mpa1,
	__isl_take isl_multi_pw_aff *mpa2)
{
	return isl_multi_pw_aff_order_map(mpa1, mpa2,
					&isl_multi_pw_aff_eq_map_on_space);
}
/* Return a map containing pairs of elements in the domains of "mpa1" and "mpa2"
 * where the function values of "mpa1" is lexicographically satisfies "base"
 * compared to that of "mpa2". "space" is the space of the result.
 * The parameters of "mpa1" and "mpa2" are assumed to have been aligned.
 *
 * "mpa1" lexicographically satisfies "base" compared to "mpa2"
 * if its i-th element satisfies "base" when compared to
 * the i-th element of "mpa2" while all previous elements are
 * pairwise equal.
 */
static __isl_give isl_map *isl_multi_pw_aff_lex_map_on_space(
	__isl_take isl_multi_pw_aff *mpa1, __isl_take isl_multi_pw_aff *mpa2,
	__isl_give isl_map *(*base)(__isl_take isl_pw_aff *pa1,
		__isl_take isl_pw_aff *pa2),
	__isl_take isl_space *space)
{
	int i, n;
	isl_map *res, *rest;
	/* "res" accumulates the result; "rest" tracks the pairs whose
	 * first i elements are pairwise equal.
	 */
	res = isl_map_empty(isl_space_copy(space));
	rest = isl_map_universe(space);
	n = isl_multi_pw_aff_dim(mpa1, isl_dim_out);
	for (i = 0; i < n; ++i) {
		isl_pw_aff *pa1, *pa2;
		isl_map *map;
		pa1 = isl_multi_pw_aff_get_pw_aff(mpa1, i);
		pa2 = isl_multi_pw_aff_get_pw_aff(mpa2, i);
		map = base(pa1, pa2);
		map = isl_map_intersect(map, isl_map_copy(rest));
		res = isl_map_union(res, map);
		/* No need to update "rest" after the final element. */
		if (i == n - 1)
			continue;
		pa1 = isl_multi_pw_aff_get_pw_aff(mpa1, i);
		pa2 = isl_multi_pw_aff_get_pw_aff(mpa2, i);
		map = isl_pw_aff_eq_map(pa1, pa2);
		rest = isl_map_intersect(rest, map);
	}
	isl_map_free(rest);
	return res;
}
/* Return a map containing pairs of elements in the domains of "mpa1" and "mpa2"
 * where the function value of "mpa1" is lexicographically less than that
 * of "mpa2". "space" is the space of the result.
 * The parameters of "mpa1" and "mpa2" are assumed to have been aligned.
 *
 * "mpa1" is less than "mpa2" if its i-th element is smaller
 * than the i-th element of "mpa2" while all previous elements are
 * pairwise equal.
 */
__isl_give isl_map *isl_multi_pw_aff_lex_lt_map_on_space(
	__isl_take isl_multi_pw_aff *mpa1, __isl_take isl_multi_pw_aff *mpa2,
	__isl_take isl_space *space)
{
	return isl_multi_pw_aff_lex_map_on_space(mpa1, mpa2,
						&isl_pw_aff_lt_map, space);
}
/* Return a map containing pairs of elements in the domains of "mpa1" and "mpa2"
 * where the function value of "mpa1" is lexicographically less than that
 * of "mpa2".
 */
__isl_give isl_map *isl_multi_pw_aff_lex_lt_map(
	__isl_take isl_multi_pw_aff *mpa1, __isl_take isl_multi_pw_aff *mpa2)
{
	return isl_multi_pw_aff_order_map(mpa1, mpa2,
					&isl_multi_pw_aff_lex_lt_map_on_space);
}
/* Return a map containing pairs of elements in the domains of "mpa1" and "mpa2"
 * where the function value of "mpa1" is lexicographically greater than that
 * of "mpa2". "space" is the space of the result.
 * The parameters of "mpa1" and "mpa2" are assumed to have been aligned.
 *
 * "mpa1" is greater than "mpa2" if its i-th element is greater
 * than the i-th element of "mpa2" while all previous elements are
 * pairwise equal.
 */
__isl_give isl_map *isl_multi_pw_aff_lex_gt_map_on_space(
	__isl_take isl_multi_pw_aff *mpa1, __isl_take isl_multi_pw_aff *mpa2,
	__isl_take isl_space *space)
{
	return isl_multi_pw_aff_lex_map_on_space(mpa1, mpa2,
						&isl_pw_aff_gt_map, space);
}
/* Return a map containing pairs of elements in the domains of "mpa1" and "mpa2"
 * where the function value of "mpa1" is lexicographically greater than that
 * of "mpa2".
 */
__isl_give isl_map *isl_multi_pw_aff_lex_gt_map(
	__isl_take isl_multi_pw_aff *mpa1, __isl_take isl_multi_pw_aff *mpa2)
{
	return isl_multi_pw_aff_order_map(mpa1, mpa2,
					&isl_multi_pw_aff_lex_gt_map_on_space);
}
/* Compare two isl_affs.
 *
 * Return -1 if "aff1" is "smaller" than "aff2", 1 if "aff1" is "greater"
 * than "aff2" and 0 if they are equal.
 *
 * The order is fairly arbitrary. We do consider expressions that only involve
 * earlier dimensions as "smaller".
 */
int isl_aff_plain_cmp(__isl_keep isl_aff *aff1, __isl_keep isl_aff *aff2)
{
	int cmp;
	int last1, last2;
	if (aff1 == aff2)
		return 0;
	/* A NULL expression sorts before any non-NULL expression. */
	if (!aff1)
		return -1;
	if (!aff2)
		return 1;
	cmp = isl_local_space_cmp(aff1->ls, aff2->ls);
	if (cmp != 0)
		return cmp;
	/* The local spaces are equal at this point, so the coefficient
	 * vectors have the same size.  Skip the denominator (element 0)
	 * and compare the positions of the last non-zero coefficients.
	 * Use each expression's own vector size here; the original code
	 * mistakenly reused aff1's size for aff2's sequence.
	 */
	last1 = isl_seq_last_non_zero(aff1->v->el + 1, aff1->v->size - 1);
	last2 = isl_seq_last_non_zero(aff2->v->el + 1, aff2->v->size - 1);
	if (last1 != last2)
		return last1 - last2;
	return isl_seq_cmp(aff1->v->el, aff2->v->el, aff1->v->size);
}
/* Compare two isl_pw_affs.
 *
 * Return -1 if "pa1" is "smaller" than "pa2", 1 if "pa1" is "greater"
 * than "pa2" and 0 if they are equal.
 *
 * The order is fairly arbitrary. We do consider expressions that only involve
 * earlier dimensions as "smaller".
 *
 * Compare spaces first, then the number of pieces and finally
 * the individual pieces (cell, then expression).
 */
int isl_pw_aff_plain_cmp(__isl_keep isl_pw_aff *pa1,
	__isl_keep isl_pw_aff *pa2)
{
	int i;
	int cmp;
	if (pa1 == pa2)
		return 0;
	/* A NULL expression sorts before any non-NULL expression. */
	if (!pa1)
		return -1;
	if (!pa2)
		return 1;
	cmp = isl_space_cmp(pa1->dim, pa2->dim);
	if (cmp != 0)
		return cmp;
	if (pa1->n != pa2->n)
		return pa1->n - pa2->n;
	for (i = 0; i < pa1->n; ++i) {
		cmp = isl_set_plain_cmp(pa1->p[i].set, pa2->p[i].set);
		if (cmp != 0)
			return cmp;
		cmp = isl_aff_plain_cmp(pa1->p[i].aff, pa2->p[i].aff);
		if (cmp != 0)
			return cmp;
	}
	return 0;
}
/* Return a piecewise affine expression that is equal to "v" on "domain".
 *
 * Build the constant affine expression over the space of "domain" and
 * wrap it in a single-piece isl_pw_aff defined on "domain".
 */
__isl_give isl_pw_aff *isl_pw_aff_val_on_domain(__isl_take isl_set *domain,
	__isl_take isl_val *v)
{
	isl_local_space *ls;
	ls = isl_local_space_from_space(isl_set_get_space(domain));
	return isl_pw_aff_alloc(domain, isl_aff_val_on_domain(ls, v));
}
/* Return a multi affine expression that is equal to "mv" on domain
 * space "space".
 *
 * Construct the map space "space" -> space of "mv" (after aligning
 * the parameters of the two spaces against each other) and
 * set each output to the corresponding constant value.
 */
__isl_give isl_multi_aff *isl_multi_aff_multi_val_on_space(
	__isl_take isl_space *space, __isl_take isl_multi_val *mv)
{
	int i, n;
	isl_space *space2;
	isl_local_space *ls;
	isl_multi_aff *ma;
	if (!space || !mv)
		goto error;
	n = isl_multi_val_dim(mv, isl_dim_set);
	space2 = isl_multi_val_get_space(mv);
	space2 = isl_space_align_params(space2, isl_space_copy(space));
	space = isl_space_align_params(space, isl_space_copy(space2));
	space = isl_space_map_from_domain_and_range(space, space2);
	ma = isl_multi_aff_alloc(isl_space_copy(space));
	ls = isl_local_space_from_space(isl_space_domain(space));
	for (i = 0; i < n; ++i) {
		isl_val *v;
		isl_aff *aff;
		v = isl_multi_val_get_val(mv, i);
		aff = isl_aff_val_on_domain(isl_local_space_copy(ls), v);
		ma = isl_multi_aff_set_aff(ma, i, aff);
	}
	isl_local_space_free(ls);
	isl_multi_val_free(mv);
	return ma;
error:
	isl_space_free(space);
	isl_multi_val_free(mv);
	return NULL;
}
/* Return a piecewise multi-affine expression
 * that is equal to "mv" on "domain".
 *
 * Build the constant isl_multi_aff over the space of "domain" and
 * wrap it in a single-piece isl_pw_multi_aff defined on "domain".
 */
__isl_give isl_pw_multi_aff *isl_pw_multi_aff_multi_val_on_domain(
	__isl_take isl_set *domain, __isl_take isl_multi_val *mv)
{
	isl_multi_aff *ma;
	ma = isl_multi_aff_multi_val_on_space(isl_set_get_space(domain), mv);
	return isl_pw_multi_aff_alloc(domain, ma);
}
/* Internal data structure for isl_union_pw_multi_aff_multi_val_on_domain.
 * mv is the value that should be attained on each domain set
 * res collects the results
 */
struct isl_union_pw_multi_aff_multi_val_on_domain_data {
	isl_multi_val *mv;	/* shared value; copied per domain set */
	isl_union_pw_multi_aff *res;	/* accumulated result */
};
/* Create an isl_pw_multi_aff equal to data->mv on "domain"
 * and add it to data->res.
 *
 * Callback for isl_union_set_foreach_set; consumes "domain".
 */
static isl_stat pw_multi_aff_multi_val_on_domain(__isl_take isl_set *domain,
	void *user)
{
	struct isl_union_pw_multi_aff_multi_val_on_domain_data *data = user;
	isl_pw_multi_aff *pma;
	isl_multi_val *mv;
	mv = isl_multi_val_copy(data->mv);
	pma = isl_pw_multi_aff_multi_val_on_domain(domain, mv);
	data->res = isl_union_pw_multi_aff_add_pw_multi_aff(data->res, pma);
	return data->res ? isl_stat_ok : isl_stat_error;
}
/* Return a union piecewise multi-affine expression
 * that is equal to "mv" on "domain".
 *
 * Construct an isl_pw_multi_aff on each of the sets in "domain" and
 * collect the results.
 */
__isl_give isl_union_pw_multi_aff *isl_union_pw_multi_aff_multi_val_on_domain(
	__isl_take isl_union_set *domain, __isl_take isl_multi_val *mv)
{
	struct isl_union_pw_multi_aff_multi_val_on_domain_data data;
	isl_space *space;
	space = isl_union_set_get_space(domain);
	data.res = isl_union_pw_multi_aff_empty(space);
	data.mv = mv;
	if (isl_union_set_foreach_set(domain,
			&pw_multi_aff_multi_val_on_domain, &data) < 0)
		data.res = isl_union_pw_multi_aff_free(data.res);
	isl_union_set_free(domain);
	isl_multi_val_free(mv);
	return data.res;
}
/* Compute the pullback of data->pma by the function represented by "pma2",
 * provided the spaces match, and add the results to data->res.
 *
 * Callback for bin_op; pairs with ranges that do not match
 * the domain of data->pma are silently skipped.
 */
static isl_stat pullback_entry(__isl_take isl_pw_multi_aff *pma2, void *user)
{
	struct isl_union_pw_multi_aff_bin_data *data = user;
	if (!isl_space_tuple_is_equal(data->pma->dim, isl_dim_in,
				 pma2->dim, isl_dim_out)) {
		isl_pw_multi_aff_free(pma2);
		return isl_stat_ok;
	}
	pma2 = isl_pw_multi_aff_pullback_pw_multi_aff(
					isl_pw_multi_aff_copy(data->pma), pma2);
	data->res = isl_union_pw_multi_aff_add_pw_multi_aff(data->res, pma2);
	if (!data->res)
		return isl_stat_error;
	return isl_stat_ok;
}
/* Compute the pullback of "upma1" by the function represented by "upma2".
 *
 * Run pullback_entry over all pairs of elements of "upma1" and "upma2".
 */
__isl_give isl_union_pw_multi_aff *
isl_union_pw_multi_aff_pullback_union_pw_multi_aff(
	__isl_take isl_union_pw_multi_aff *upma1,
	__isl_take isl_union_pw_multi_aff *upma2)
{
	return bin_op(upma1, upma2, &pullback_entry);
}
/* Check that the domain space of "upa" matches "space".
 *
 * Return 0 on success and -1 on error.
 *
 * This function is called from isl_multi_union_pw_aff_set_union_pw_aff and
 * can in principle never fail since the space "space" is that
 * of the isl_multi_union_pw_aff and is a set space such that
 * there is no domain space to match.
 *
 * We check the parameters and double-check that "space" is
 * indeed that of a set.
 */
static int isl_union_pw_aff_check_match_domain_space(
	__isl_keep isl_union_pw_aff *upa, __isl_keep isl_space *space)
{
	isl_space *upa_space;
	int match;
	if (!upa || !space)
		return -1;
	match = isl_space_is_set(space);
	if (match < 0)
		return -1;
	if (!match)
		isl_die(isl_space_get_ctx(space), isl_error_invalid,
			"expecting set space", return -1);
	upa_space = isl_union_pw_aff_get_space(upa);
	match = isl_space_match(space, isl_dim_param, upa_space, isl_dim_param);
	if (match < 0)
		goto error;
	if (!match)
		isl_die(isl_space_get_ctx(space), isl_error_invalid,
			"parameters don't match", goto error);
	isl_space_free(upa_space);
	return 0;
error:
	isl_space_free(upa_space);
	return -1;
}
/* Do the parameters of "upa" match those of "space"?
 *
 * Return 1 if they match, 0 if they do not and -1 on error.
 */
static int isl_union_pw_aff_matching_params(__isl_keep isl_union_pw_aff *upa,
	__isl_keep isl_space *space)
{
	isl_space *params;
	int equal;
	if (!upa || !space)
		return -1;
	params = isl_union_pw_aff_get_space(upa);
	equal = isl_space_match(space, isl_dim_param, params, isl_dim_param);
	isl_space_free(params);
	return equal;
}
/* Internal data structure for isl_union_pw_aff_reset_domain_space.
 * space represents the new parameters.
 * res collects the results.
 */
struct isl_union_pw_aff_reset_params_data {
	isl_space *space;	/* borrowed; owned by the caller */
	isl_union_pw_aff *res;	/* accumulated result */
};
/* Replace the parameters of "pa" by data->space and
 * add the result to data->res.
 *
 * Callback for isl_union_pw_aff_foreach_pw_aff; consumes "pa".
 */
static isl_stat reset_params(__isl_take isl_pw_aff *pa, void *user)
{
	struct isl_union_pw_aff_reset_params_data *data = user;
	isl_space *space;
	space = isl_pw_aff_get_space(pa);
	space = isl_space_replace(space, isl_dim_param, data->space);
	pa = isl_pw_aff_reset_space(pa, space);
	data->res = isl_union_pw_aff_add_pw_aff(data->res, pa);
	return data->res ? isl_stat_ok : isl_stat_error;
}
/* Replace the domain space of "upa" by "space".
 * Since a union expression does not have a (single) domain space,
 * "space" is necessarily a parameter space.
 *
 * Since the order and the names of the parameters determine
 * the hash value, we need to create a new hash table.
 */
static __isl_give isl_union_pw_aff *isl_union_pw_aff_reset_domain_space(
	__isl_take isl_union_pw_aff *upa, __isl_take isl_space *space)
{
	struct isl_union_pw_aff_reset_params_data data = { space };
	int match;
	match = isl_union_pw_aff_matching_params(upa, space);
	if (match < 0)
		upa = isl_union_pw_aff_free(upa);
	else if (match) {
		/* Nothing to do: the parameters already match. */
		isl_space_free(space);
		return upa;
	}
	/* Rebuild "upa" element by element into a fresh hash table. */
	data.res = isl_union_pw_aff_empty(isl_space_copy(space));
	if (isl_union_pw_aff_foreach_pw_aff(upa, &reset_params, &data) < 0)
		data.res = isl_union_pw_aff_free(data.res);
	isl_union_pw_aff_free(upa);
	isl_space_free(space);
	return data.res;
}
/* Return the floor of "pa".
 *
 * The unused "user" argument is required by the
 * isl_union_pw_aff_transform_inplace callback signature.
 */
static __isl_give isl_pw_aff *floor_entry(__isl_take isl_pw_aff *pa, void *user)
{
	return isl_pw_aff_floor(pa);
}
/* Given f, return floor(f).
 *
 * Apply isl_pw_aff_floor to each element of "upa" in place.
 */
__isl_give isl_union_pw_aff *isl_union_pw_aff_floor(
	__isl_take isl_union_pw_aff *upa)
{
	return isl_union_pw_aff_transform_inplace(upa, &floor_entry, NULL);
}
/* Compute
 *
 *	upa mod m = upa - m * floor(upa/m)
 *
 * with m an integer value.  "m" must be a positive integer;
 * otherwise an error is reported.
 */
__isl_give isl_union_pw_aff *isl_union_pw_aff_mod_val(
	__isl_take isl_union_pw_aff *upa, __isl_take isl_val *m)
{
	isl_union_pw_aff *res;
	if (!upa || !m)
		goto error;
	if (!isl_val_is_int(m))
		isl_die(isl_val_get_ctx(m), isl_error_invalid,
			"expecting integer modulo", goto error);
	if (!isl_val_is_pos(m))
		isl_die(isl_val_get_ctx(m), isl_error_invalid,
			"expecting positive modulo", goto error);
	/* res = upa; upa = m * floor(upa / m); result = res - upa */
	res = isl_union_pw_aff_copy(upa);
	upa = isl_union_pw_aff_scale_down_val(upa, isl_val_copy(m));
	upa = isl_union_pw_aff_floor(upa);
	upa = isl_union_pw_aff_scale_val(upa, m);
	res = isl_union_pw_aff_sub(res, upa);
	return res;
error:
	isl_val_free(m);
	isl_union_pw_aff_free(upa);
	return NULL;
}
/* Internal data structure for isl_union_pw_aff_aff_on_domain.
 * "aff" is the symbolic value that the resulting isl_union_pw_aff
 * needs to attain.
 * "res" collects the results.
 */
struct isl_union_pw_aff_aff_on_domain_data {
	isl_aff *aff;	/* shared parametric expression; copied per set */
	isl_union_pw_aff *res;	/* accumulated result */
};
/* Construct a piecewise affine expression that is equal to data->aff
 * on "domain" and add the result to data->res.
 *
 * Callback for isl_union_set_foreach_set; consumes "domain".
 * data->aff is parametric, so it is first extended with the
 * set dimensions of "domain" before being moved onto its space.
 */
static isl_stat pw_aff_aff_on_domain(__isl_take isl_set *domain, void *user)
{
	struct isl_union_pw_aff_aff_on_domain_data *data = user;
	isl_pw_aff *pa;
	isl_aff *aff;
	int dim;
	aff = isl_aff_copy(data->aff);
	dim = isl_set_dim(domain, isl_dim_set);
	aff = isl_aff_add_dims(aff, isl_dim_in, dim);
	aff = isl_aff_reset_domain_space(aff, isl_set_get_space(domain));
	pa = isl_pw_aff_alloc(domain, aff);
	data->res = isl_union_pw_aff_add_pw_aff(data->res, pa);
	return data->res ? isl_stat_ok : isl_stat_error;
}
/* Internal data structure for isl_union_pw_multi_aff_get_union_pw_aff.
 * pos is the output position that needs to be extracted.
 * res collects the results.
 */
struct isl_union_pw_multi_aff_get_union_pw_aff_data {
	int pos;	/* output dimension to extract */
	isl_union_pw_aff *res;	/* accumulated result */
};
/* Extract an isl_pw_aff corresponding to output dimension "pos" of "pma"
 * (assuming it has such a dimension) and add it to data->res.
 *
 * Callback for isl_union_pw_multi_aff_foreach_pw_multi_aff.
 * Elements without a dimension at position data->pos are skipped.
 */
static isl_stat get_union_pw_aff(__isl_take isl_pw_multi_aff *pma, void *user)
{
	struct isl_union_pw_multi_aff_get_union_pw_aff_data *data = user;
	int n_out;
	isl_pw_aff *pa;
	if (!pma)
		return isl_stat_error;
	n_out = isl_pw_multi_aff_dim(pma, isl_dim_out);
	if (data->pos >= n_out) {
		isl_pw_multi_aff_free(pma);
		return isl_stat_ok;
	}
	pa = isl_pw_multi_aff_get_pw_aff(pma, data->pos);
	isl_pw_multi_aff_free(pma);
	data->res = isl_union_pw_aff_add_pw_aff(data->res, pa);
	return data->res ? isl_stat_ok : isl_stat_error;
}
/* Extract an isl_union_pw_aff corresponding to
 * output dimension "pos" of "upma".
 *
 * "pos" must be non-negative; elements of "upma" that have
 * fewer than pos + 1 output dimensions are ignored.
 */
__isl_give isl_union_pw_aff *isl_union_pw_multi_aff_get_union_pw_aff(
	__isl_keep isl_union_pw_multi_aff *upma, int pos)
{
	struct isl_union_pw_multi_aff_get_union_pw_aff_data data;
	isl_space *space;
	if (!upma)
		return NULL;
	if (pos < 0)
		isl_die(isl_union_pw_multi_aff_get_ctx(upma), isl_error_invalid,
			"cannot extract at negative position", return NULL);
	space = isl_union_pw_multi_aff_get_space(upma);
	data.res = isl_union_pw_aff_empty(space);
	data.pos = pos;
	if (isl_union_pw_multi_aff_foreach_pw_multi_aff(upma,
						&get_union_pw_aff, &data) < 0)
		data.res = isl_union_pw_aff_free(data.res);
	return data.res;
}
/* Return a union piecewise affine expression
 * that is equal to "aff" on "domain".
 *
 * Construct an isl_pw_aff on each of the sets in "domain" and
 * collect the results.
 *
 * "aff" must be a purely parametric expression;
 * otherwise an error is reported.
 */
__isl_give isl_union_pw_aff *isl_union_pw_aff_aff_on_domain(
	__isl_take isl_union_set *domain, __isl_take isl_aff *aff)
{
	struct isl_union_pw_aff_aff_on_domain_data data;
	isl_space *space;
	if (!domain || !aff)
		goto error;
	if (!isl_local_space_is_params(aff->ls))
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"expecting parametric expression", goto error);
	space = isl_union_set_get_space(domain);
	data.res = isl_union_pw_aff_empty(space);
	data.aff = aff;
	if (isl_union_set_foreach_set(domain, &pw_aff_aff_on_domain, &data) < 0)
		data.res = isl_union_pw_aff_free(data.res);
	isl_union_set_free(domain);
	isl_aff_free(aff);
	return data.res;
error:
	isl_union_set_free(domain);
	isl_aff_free(aff);
	return NULL;
}
/* Internal data structure for isl_union_pw_aff_val_on_domain.
 * "v" is the value that the resulting isl_union_pw_aff needs to attain.
 * "res" collects the results.
 */
struct isl_union_pw_aff_val_on_domain_data {
	isl_val *v;	/* shared value; copied per domain set */
	isl_union_pw_aff *res;	/* accumulated result */
};
/* Construct a piecewise affine expression that is equal to data->v
 * on "domain" and add the result to data->res.
 *
 * Callback for isl_union_set_foreach_set; consumes "domain".
 */
static isl_stat pw_aff_val_on_domain(__isl_take isl_set *domain, void *user)
{
	struct isl_union_pw_aff_val_on_domain_data *data = user;
	isl_pw_aff *pa;
	isl_val *v;
	v = isl_val_copy(data->v);
	pa = isl_pw_aff_val_on_domain(domain, v);
	data->res = isl_union_pw_aff_add_pw_aff(data->res, pa);
	return data->res ? isl_stat_ok : isl_stat_error;
}
/* Return a union piecewise affine expression
 * that is equal to "v" on "domain".
 *
 * Construct an isl_pw_aff on each of the sets in "domain" and
 * collect the results.
 */
__isl_give isl_union_pw_aff *isl_union_pw_aff_val_on_domain(
	__isl_take isl_union_set *domain, __isl_take isl_val *v)
{
	struct isl_union_pw_aff_val_on_domain_data data;
	isl_space *space;
	space = isl_union_set_get_space(domain);
	data.res = isl_union_pw_aff_empty(space);
	data.v = v;
	if (isl_union_set_foreach_set(domain, &pw_aff_val_on_domain, &data) < 0)
		data.res = isl_union_pw_aff_free(data.res);
	isl_union_set_free(domain);
	isl_val_free(v);
	return data.res;
}
/* Construct a piecewise multi affine expression
 * that is equal to "pa" and add it to upma.
 *
 * Callback for isl_union_pw_aff_foreach_pw_aff; consumes "pa".
 */
static isl_stat pw_multi_aff_from_pw_aff_entry(__isl_take isl_pw_aff *pa,
	void *user)
{
	isl_union_pw_multi_aff **res = user;
	*res = isl_union_pw_multi_aff_add_pw_multi_aff(*res,
					isl_pw_multi_aff_from_pw_aff(pa));
	if (!*res)
		return isl_stat_error;
	return isl_stat_ok;
}
/* Construct and return a union piecewise multi affine expression
 * that is equal to the given union piecewise affine expression.
 *
 * Convert each element of "upa" to a (1-dimensional) isl_pw_multi_aff
 * and collect the results.
 */
__isl_give isl_union_pw_multi_aff *isl_union_pw_multi_aff_from_union_pw_aff(
	__isl_take isl_union_pw_aff *upa)
{
	isl_space *space;
	isl_union_pw_multi_aff *upma;
	if (!upa)
		return NULL;
	space = isl_union_pw_aff_get_space(upa);
	upma = isl_union_pw_multi_aff_empty(space);
	if (isl_union_pw_aff_foreach_pw_aff(upa,
				&pw_multi_aff_from_pw_aff_entry, &upma) < 0)
		upma = isl_union_pw_multi_aff_free(upma);
	isl_union_pw_aff_free(upa);
	return upma;
}
/* Compute the set of elements in the domain of "pa" where it is zero and
 * add this set to "uset".
 *
 * Callback for isl_union_pw_aff_foreach_pw_aff; consumes "pa".
 */
static isl_stat zero_union_set(__isl_take isl_pw_aff *pa, void *user)
{
	isl_union_set **uset = user;
	isl_set *zero;
	zero = isl_pw_aff_zero_set(pa);
	*uset = isl_union_set_add_set(*uset, zero);
	if (!*uset)
		return isl_stat_error;
	return isl_stat_ok;
}
/* Return a union set containing those elements in the domain
 * of "upa" where it is zero.
 *
 * Collect the zero set of each element of "upa".
 */
__isl_give isl_union_set *isl_union_pw_aff_zero_union_set(
	__isl_take isl_union_pw_aff *upa)
{
	isl_union_set *zero;
	zero = isl_union_set_empty(isl_union_pw_aff_get_space(upa));
	if (isl_union_pw_aff_foreach_pw_aff(upa, &zero_union_set, &zero) < 0)
		zero = isl_union_set_free(zero);
	isl_union_pw_aff_free(upa);
	return zero;
}
/* Convert "pa" to an isl_map and add it to *umap.
 *
 * Callback for isl_union_pw_aff_foreach_pw_aff; consumes "pa".
 */
static isl_stat map_from_pw_aff_entry(__isl_take isl_pw_aff *pa, void *user)
{
	isl_union_map **umap = user;
	*umap = isl_union_map_add_map(*umap, isl_map_from_pw_aff(pa));
	if (!*umap)
		return isl_stat_error;
	return isl_stat_ok;
}
/* Construct a union map mapping the domain of the union
 * piecewise affine expression to its range, with the single output dimension
 * equated to the corresponding affine expressions on their cells.
 */
__isl_give isl_union_map *isl_union_map_from_union_pw_aff(
	__isl_take isl_union_pw_aff *upa)
{
	isl_space *space;
	isl_union_map *umap;
	if (!upa)
		return NULL;
	space = isl_union_pw_aff_get_space(upa);
	umap = isl_union_map_empty(space);
	if (isl_union_pw_aff_foreach_pw_aff(upa, &map_from_pw_aff_entry,
						&umap) < 0)
		umap = isl_union_map_free(umap);
	isl_union_pw_aff_free(upa);
	return umap;
}
/* Internal data structure for isl_union_pw_aff_pullback_union_pw_multi_aff.
 * upma is the function that is plugged in.
 * pa is the current part of the function in which upma is plugged in.
 * res collects the results.
 */
struct isl_union_pw_aff_pullback_upma_data {
	isl_union_pw_multi_aff *upma;	/* borrowed */
	isl_pw_aff *pa;	/* borrowed; set per iteration by upa_pb_upma */
	isl_union_pw_aff *res;	/* accumulated result */
};
/* Check if "pma" can be plugged into data->pa.
 * If so, perform the pullback and add the result to data->res.
 *
 * "pma" can be plugged in when its range space matches
 * the domain space of data->pa; otherwise it is skipped.
 */
static isl_stat pa_pb_pma(__isl_take isl_pw_multi_aff *pma, void *user)
{
	struct isl_union_pw_aff_pullback_upma_data *data = user;
	isl_pw_aff *pa;
	if (!isl_space_tuple_is_equal(data->pa->dim, isl_dim_in,
				 pma->dim, isl_dim_out)) {
		isl_pw_multi_aff_free(pma);
		return isl_stat_ok;
	}
	pa = isl_pw_aff_copy(data->pa);
	pa = isl_pw_aff_pullback_pw_multi_aff(pa, pma);
	data->res = isl_union_pw_aff_add_pw_aff(data->res, pa);
	return data->res ? isl_stat_ok : isl_stat_error;
}
/* Check if any of the elements of data->upma can be plugged into pa,
 * add if so add the result to data->res.
 *
 * data->pa only borrows "pa" for the duration of the inner loop;
 * "pa" itself is freed here.
 */
static isl_stat upa_pb_upma(__isl_take isl_pw_aff *pa, void *user)
{
	struct isl_union_pw_aff_pullback_upma_data *data = user;
	isl_stat r;
	data->pa = pa;
	r = isl_union_pw_multi_aff_foreach_pw_multi_aff(data->upma,
				 &pa_pb_pma, data);
	isl_pw_aff_free(pa);
	return r;
}
/* Compute the pullback of "upa" by the function represented by "upma".
 * In other words, plug in "upma" in "upa". The result contains
 * expressions defined over the domain space of "upma".
 *
 * Run over all pairs of elements in "upa" and "upma", perform
 * the pullback when appropriate and collect the results.
 * If the hash value were based on the domain space rather than
 * the function space, then we could run through all elements
 * of "upma" and directly pick out the corresponding element of "upa".
 */
__isl_give isl_union_pw_aff *isl_union_pw_aff_pullback_union_pw_multi_aff(
	__isl_take isl_union_pw_aff *upa,
	__isl_take isl_union_pw_multi_aff *upma)
{
	/* The remaining field (pa) is filled in per element by upa_pb_upma. */
	struct isl_union_pw_aff_pullback_upma_data data = { NULL, NULL };
	isl_space *space;

	/* Align the parameters of both arguments with each other. */
	space = isl_union_pw_multi_aff_get_space(upma);
	upa = isl_union_pw_aff_align_params(upa, space);
	space = isl_union_pw_aff_get_space(upa);
	upma = isl_union_pw_multi_aff_align_params(upma, space);
	if (!upa || !upma)
		goto error;

	data.upma = upma;
	data.res = isl_union_pw_aff_alloc_same_size(upa);
	if (isl_union_pw_aff_foreach_pw_aff(upa, &upa_pb_upma, &data) < 0)
		data.res = isl_union_pw_aff_free(data.res);

	isl_union_pw_aff_free(upa);
	isl_union_pw_multi_aff_free(upma);
	return data.res;
error:
	isl_union_pw_aff_free(upa);
	isl_union_pw_multi_aff_free(upma);
	return NULL;
}
/* Instantiate the generic isl_multi_* templates for elements of type
 * isl_union_pw_aff over an isl_union_set domain, producing the
 * isl_multi_union_pw_aff type.  The NO_* macros suppress template
 * operations that are not meaningful for this type or that are
 * implemented by hand below (e.g. zero and gist).
 */
#undef BASE
#define BASE union_pw_aff
#undef DOMBASE
#define DOMBASE union_set

#define NO_MOVE_DIMS
#define NO_DIMS
#define NO_DOMAIN
#define NO_PRODUCT
#define NO_SPLICE
#define NO_ZERO
#define NO_IDENTITY
#define NO_GIST

#include <isl_multi_templ.c>
#include <isl_multi_apply_set.c>
#include <isl_multi_apply_union_set.c>
#include <isl_multi_coalesce.c>
#include <isl_multi_floor.c>
#include <isl_multi_gist.c>
#include <isl_multi_intersect.c>
/* Construct a multiple union piecewise affine expression
 * in the given space with value zero in each of the output dimensions.
 *
 * Since there is no canonical zero value for
 * a union piecewise affine expression, we can only construct
 * a zero-dimensional "zero" value.
 */
__isl_give isl_multi_union_pw_aff *isl_multi_union_pw_aff_zero(
	__isl_take isl_space *space)
{
	if (!space)
		return NULL;

	/* The input must be a set space with no set dimensions. */
	if (!isl_space_is_set(space))
		isl_die(isl_space_get_ctx(space), isl_error_invalid,
			"expecting set space", goto error);
	if (isl_space_dim(space , isl_dim_out) != 0)
		isl_die(isl_space_get_ctx(space), isl_error_invalid,
			"expecting 0D space", goto error);

	return isl_multi_union_pw_aff_alloc(space);
error:
	isl_space_free(space);
	return NULL;
}
/* Add "mupa1" and "mupa2" over the union of their domains,
 * taking the actual sum where both are defined and the defined
 * expression on the symmetric difference of the domains.
 * The two arguments are assumed to have aligned parameters.
 *
 * This is a straightforward element-wise application of
 * isl_union_pw_aff_union_add.
 */
static __isl_give isl_multi_union_pw_aff *
isl_multi_union_pw_aff_union_add_aligned(
	__isl_take isl_multi_union_pw_aff *mupa1,
	__isl_take isl_multi_union_pw_aff *mupa2)
{
	isl_multi_union_pw_aff *sum;

	sum = isl_multi_union_pw_aff_bin_op(mupa1, mupa2,
						&isl_union_pw_aff_union_add);
	return sum;
}
/* Add "mupa1" and "mupa2" over the union of their domains,
 * taking the actual sum where both are defined and the defined
 * expression on the symmetric difference of the domains.
 *
 * Parameter alignment is handled by the generic helper before
 * delegating to the aligned variant.
 */
__isl_give isl_multi_union_pw_aff *isl_multi_union_pw_aff_union_add(
	__isl_take isl_multi_union_pw_aff *mupa1,
	__isl_take isl_multi_union_pw_aff *mupa2)
{
	isl_multi_union_pw_aff *sum;

	sum = isl_multi_union_pw_aff_align_params_multi_multi_and(mupa1, mupa2,
				&isl_multi_union_pw_aff_union_add_aligned);
	return sum;
}
/* Construct and return a multi union piecewise affine expression
 * that is equal to the given multi affine expression.
 *
 * Convert via the intermediate isl_multi_pw_aff representation.
 */
__isl_give isl_multi_union_pw_aff *isl_multi_union_pw_aff_from_multi_aff(
	__isl_take isl_multi_aff *ma)
{
	return isl_multi_union_pw_aff_from_multi_pw_aff(
			isl_multi_pw_aff_from_multi_aff(ma));
}
/* Construct and return a multi union piecewise affine expression
 * that is equal to the given multi piecewise affine expression.
 *
 * The result lives in the range space of "mpa"; each output
 * dimension is converted separately.
 */
__isl_give isl_multi_union_pw_aff *isl_multi_union_pw_aff_from_multi_pw_aff(
	__isl_take isl_multi_pw_aff *mpa)
{
	int i, n;
	isl_space *space;
	isl_multi_union_pw_aff *mupa;

	if (!mpa)
		return NULL;

	space = isl_multi_pw_aff_get_space(mpa);
	space = isl_space_range(space);
	mupa = isl_multi_union_pw_aff_alloc(space);

	/* Convert each isl_pw_aff element to an isl_union_pw_aff. */
	n = isl_multi_pw_aff_dim(mpa, isl_dim_out);
	for (i = 0; i < n; ++i) {
		isl_pw_aff *pa;
		isl_union_pw_aff *upa;

		pa = isl_multi_pw_aff_get_pw_aff(mpa, i);
		upa = isl_union_pw_aff_from_pw_aff(pa);
		mupa = isl_multi_union_pw_aff_set_union_pw_aff(mupa, i, upa);
	}

	isl_multi_pw_aff_free(mpa);
	return mupa;
}
/* Extract the range space of "pma" and assign it to *space.
 * If *space has already been set (through a previous call to this function),
 * then check that the range space is the same.
 */
static isl_stat extract_space(__isl_take isl_pw_multi_aff *pma, void *user)
{
	isl_space **space = user;
	isl_space *pma_space;
	isl_bool equal;

	pma_space = isl_space_range(isl_pw_multi_aff_get_space(pma));
	isl_pw_multi_aff_free(pma);

	if (!pma_space)
		return isl_stat_error;
	/* First element seen: record its range space. */
	if (!*space) {
		*space = pma_space;
		return isl_stat_ok;
	}

	/* Subsequent elements: the range space must agree. */
	equal = isl_space_is_equal(pma_space, *space);
	isl_space_free(pma_space);

	if (equal < 0)
		return isl_stat_error;
	if (!equal)
		isl_die(isl_space_get_ctx(*space), isl_error_invalid,
			"range spaces not the same", return isl_stat_error);
	return isl_stat_ok;
}
/* Construct and return a multi union piecewise affine expression
 * that is equal to the given union piecewise multi affine expression.
 *
 * In order to be able to perform the conversion, the input
 * needs to be non-empty and may only involve a single range space
 * (checked by extract_space).
 */
__isl_give isl_multi_union_pw_aff *
isl_multi_union_pw_aff_from_union_pw_multi_aff(
	__isl_take isl_union_pw_multi_aff *upma)
{
	isl_space *space = NULL;
	isl_multi_union_pw_aff *mupa;
	int i, n;

	if (!upma)
		return NULL;

	if (isl_union_pw_multi_aff_n_pw_multi_aff(upma) == 0)
		isl_die(isl_union_pw_multi_aff_get_ctx(upma), isl_error_invalid,
			"cannot extract range space from empty input",
			goto error);
	/* Determine the (unique) range space of the elements of "upma". */
	if (isl_union_pw_multi_aff_foreach_pw_multi_aff(upma, &extract_space,
							&space) < 0)
		goto error;

	if (!space)
		goto error;

	/* Extract one isl_union_pw_aff per output dimension. */
	n = isl_space_dim(space, isl_dim_set);
	mupa = isl_multi_union_pw_aff_alloc(space);

	for (i = 0; i < n; ++i) {
		isl_union_pw_aff *upa;

		upa = isl_union_pw_multi_aff_get_union_pw_aff(upma, i);
		mupa = isl_multi_union_pw_aff_set_union_pw_aff(mupa, i, upa);
	}

	isl_union_pw_multi_aff_free(upma);
	return mupa;
error:
	isl_space_free(space);
	isl_union_pw_multi_aff_free(upma);
	return NULL;
}
/* Try and create an isl_multi_union_pw_aff that is equivalent
 * to the given isl_union_map.
 * The isl_union_map is required to be single-valued in each space.
 * Moreover, it cannot be empty and all range spaces need to be the same.
 * Otherwise, an error is produced.
 *
 * Convert via an intermediate isl_union_pw_multi_aff.
 */
__isl_give isl_multi_union_pw_aff *isl_multi_union_pw_aff_from_union_map(
	__isl_take isl_union_map *umap)
{
	return isl_multi_union_pw_aff_from_union_pw_multi_aff(
			isl_union_pw_multi_aff_from_union_map(umap));
}
/* Return a multiple union piecewise affine expression
 * that is equal to "mv" on "domain", assuming "domain" and "mv"
 * have been aligned.
 */
static __isl_give isl_multi_union_pw_aff *
isl_multi_union_pw_aff_multi_val_on_domain_aligned(
	__isl_take isl_union_set *domain, __isl_take isl_multi_val *mv)
{
	int i, n;
	isl_space *space;
	isl_multi_union_pw_aff *mupa;

	if (!domain || !mv)
		goto error;

	n = isl_multi_val_dim(mv, isl_dim_set);
	space = isl_multi_val_get_space(mv);
	mupa = isl_multi_union_pw_aff_alloc(space);
	/* Build one constant isl_union_pw_aff per value in "mv",
	 * each defined over a copy of "domain".
	 */
	for (i = 0; i < n; ++i) {
		isl_val *v;
		isl_union_pw_aff *upa;

		v = isl_multi_val_get_val(mv, i);
		upa = isl_union_pw_aff_val_on_domain(isl_union_set_copy(domain),
						    v);
		mupa = isl_multi_union_pw_aff_set_union_pw_aff(mupa, i, upa);
	}

	isl_union_set_free(domain);
	isl_multi_val_free(mv);
	return mupa;
error:
	isl_union_set_free(domain);
	isl_multi_val_free(mv);
	return NULL;
}
/* Return a multiple union piecewise affine expression
 * that is equal to "mv" on "domain".
 *
 * Align the parameters of the two arguments if needed and
 * delegate to the aligned variant.
 */
__isl_give isl_multi_union_pw_aff *isl_multi_union_pw_aff_multi_val_on_domain(
	__isl_take isl_union_set *domain, __isl_take isl_multi_val *mv)
{
	if (!domain || !mv)
		goto error;
	/* Fast path: parameters already aligned. */
	if (isl_space_match(domain->dim, isl_dim_param,
			    mv->space, isl_dim_param))
		return isl_multi_union_pw_aff_multi_val_on_domain_aligned(
								domain, mv);
	domain = isl_union_set_align_params(domain,
						isl_multi_val_get_space(mv));
	mv = isl_multi_val_align_params(mv, isl_union_set_get_space(domain));
	return isl_multi_union_pw_aff_multi_val_on_domain_aligned(domain, mv);
error:
	isl_union_set_free(domain);
	isl_multi_val_free(mv);
	return NULL;
}
/* Return a multiple union piecewise affine expression
 * that is equal to "ma" on "domain", assuming "domain" and "ma"
 * have been aligned.
 */
static __isl_give isl_multi_union_pw_aff *
isl_multi_union_pw_aff_multi_aff_on_domain_aligned(
	__isl_take isl_union_set *domain, __isl_take isl_multi_aff *ma)
{
	int i, n;
	isl_space *space;
	isl_multi_union_pw_aff *mupa;

	if (!domain || !ma)
		goto error;

	n = isl_multi_aff_dim(ma, isl_dim_set);
	space = isl_multi_aff_get_space(ma);
	mupa = isl_multi_union_pw_aff_alloc(space);
	/* Build one isl_union_pw_aff per affine expression in "ma",
	 * each defined over a copy of "domain".
	 */
	for (i = 0; i < n; ++i) {
		isl_aff *aff;
		isl_union_pw_aff *upa;

		aff = isl_multi_aff_get_aff(ma, i);
		upa = isl_union_pw_aff_aff_on_domain(isl_union_set_copy(domain),
						    aff);
		mupa = isl_multi_union_pw_aff_set_union_pw_aff(mupa, i, upa);
	}

	isl_union_set_free(domain);
	isl_multi_aff_free(ma);
	return mupa;
error:
	isl_union_set_free(domain);
	isl_multi_aff_free(ma);
	return NULL;
}
/* Return a multiple union piecewise affine expression
 * that is equal to "ma" on "domain".
 *
 * Align the parameters of the two arguments if needed and
 * delegate to the aligned variant.
 */
__isl_give isl_multi_union_pw_aff *isl_multi_union_pw_aff_multi_aff_on_domain(
	__isl_take isl_union_set *domain, __isl_take isl_multi_aff *ma)
{
	if (!domain || !ma)
		goto error;
	/* Fast path: parameters already aligned. */
	if (isl_space_match(domain->dim, isl_dim_param,
			    ma->space, isl_dim_param))
		return isl_multi_union_pw_aff_multi_aff_on_domain_aligned(
								domain, ma);
	domain = isl_union_set_align_params(domain,
						isl_multi_aff_get_space(ma));
	ma = isl_multi_aff_align_params(ma, isl_union_set_get_space(domain));
	return isl_multi_union_pw_aff_multi_aff_on_domain_aligned(domain, ma);
error:
	isl_union_set_free(domain);
	isl_multi_aff_free(ma);
	return NULL;
}
/* Return a union set containing those elements in the domains
 * of the elements of "mupa" where they are all zero.
 *
 * Intersect the zero sets of the individual elements.
 */
__isl_give isl_union_set *isl_multi_union_pw_aff_zero_union_set(
	__isl_take isl_multi_union_pw_aff *mupa)
{
	int i, n;
	isl_union_pw_aff *upa;
	isl_union_set *zero;

	if (!mupa)
		return NULL;

	n = isl_multi_union_pw_aff_dim(mupa, isl_dim_set);
	/* A 0D expression has no elements to take zero sets of. */
	if (n == 0)
		isl_die(isl_multi_union_pw_aff_get_ctx(mupa), isl_error_invalid,
			"cannot determine zero set "
			"of zero-dimensional function", goto error);

	upa = isl_multi_union_pw_aff_get_union_pw_aff(mupa, 0);
	zero = isl_union_pw_aff_zero_union_set(upa);

	for (i = 1; i < n; ++i) {
		isl_union_set *zero_i;

		upa = isl_multi_union_pw_aff_get_union_pw_aff(mupa, i);
		zero_i = isl_union_pw_aff_zero_union_set(upa);

		zero = isl_union_set_intersect(zero, zero_i);
	}

	isl_multi_union_pw_aff_free(mupa);
	return zero;
error:
	isl_multi_union_pw_aff_free(mupa);
	return NULL;
}
/* Construct a union map mapping the shared domain
 * of the union piecewise affine expressions to the range of "mupa"
 * with each dimension in the range equated to the
 * corresponding union piecewise affine expression.
 *
 * The input cannot be zero-dimensional as there is
 * no way to extract a domain from a zero-dimensional isl_multi_union_pw_aff.
 */
__isl_give isl_union_map *isl_union_map_from_multi_union_pw_aff(
	__isl_take isl_multi_union_pw_aff *mupa)
{
	int i, n;
	isl_space *space;
	isl_union_map *umap;
	isl_union_pw_aff *upa;

	if (!mupa)
		return NULL;

	n = isl_multi_union_pw_aff_dim(mupa, isl_dim_set);
	if (n == 0)
		isl_die(isl_multi_union_pw_aff_get_ctx(mupa), isl_error_invalid,
			"cannot determine domain of zero-dimensional "
			"isl_multi_union_pw_aff", goto error);

	/* Combine the per-dimension maps with flat range products ... */
	upa = isl_multi_union_pw_aff_get_union_pw_aff(mupa, 0);
	umap = isl_union_map_from_union_pw_aff(upa);

	for (i = 1; i < n; ++i) {
		isl_union_map *umap_i;

		upa = isl_multi_union_pw_aff_get_union_pw_aff(mupa, i);
		umap_i = isl_union_map_from_union_pw_aff(upa);
		umap = isl_union_map_flat_range_product(umap, umap_i);
	}

	/* ... and then reinstate the range space of "mupa". */
	space = isl_multi_union_pw_aff_get_space(mupa);
	umap = isl_union_map_reset_range_space(umap, space);

	isl_multi_union_pw_aff_free(mupa);
	return umap;
error:
	isl_multi_union_pw_aff_free(mupa);
	return NULL;
}
/* Internal data structure for isl_union_pw_multi_aff_reset_range_space.
 * "range" is the space from which to set the range space.
 * "res" collects the results.
 */
struct isl_union_pw_multi_aff_reset_range_space_data {
	isl_space *range;		/* space supplying the new range */
	isl_union_pw_multi_aff *res;	/* accumulated result */
};
/* Replace the range space of "pma" by the range space of data->range and
 * add the result to data->res.
 */
static isl_stat reset_range_space(__isl_take isl_pw_multi_aff *pma, void *user)
{
	struct isl_union_pw_multi_aff_reset_range_space_data *data = user;
	isl_space *space;

	/* Keep the domain of "pma" and graft on the new range. */
	space = isl_pw_multi_aff_get_space(pma);
	space = isl_space_domain(space);
	space = isl_space_extend_domain_with_range(space,
						isl_space_copy(data->range));
	pma = isl_pw_multi_aff_reset_space(pma, space);
	data->res = isl_union_pw_multi_aff_add_pw_multi_aff(data->res, pma);

	return data->res ? isl_stat_ok : isl_stat_error;
}
/* Replace the range space of all the piecewise affine expressions in "upma" by
 * the range space of "space".
 *
 * This assumes that all these expressions have the same output dimension.
 *
 * Since the spaces of the expressions change, so do their hash values.
 * We therefore need to create a new isl_union_pw_multi_aff.
 * Note that the hash value is currently computed based on the entire
 * space even though there can only be a single expression with a given
 * domain space.
 */
static __isl_give isl_union_pw_multi_aff *
isl_union_pw_multi_aff_reset_range_space(
	__isl_take isl_union_pw_multi_aff *upma, __isl_take isl_space *space)
{
	/* data.res is initialized below; only "range" is set here. */
	struct isl_union_pw_multi_aff_reset_range_space_data data = { space };
	isl_space *space_upma;

	space_upma = isl_union_pw_multi_aff_get_space(upma);
	data.res = isl_union_pw_multi_aff_empty(space_upma);
	if (isl_union_pw_multi_aff_foreach_pw_multi_aff(upma,
					&reset_range_space, &data) < 0)
		data.res = isl_union_pw_multi_aff_free(data.res);

	isl_space_free(space);
	isl_union_pw_multi_aff_free(upma);
	return data.res;
}
/* Construct and return a union piecewise multi affine expression
 * that is equal to the given multi union piecewise affine expression.
 *
 * In order to be able to perform the conversion, the input
 * needs to have at least one output dimension.
 */
__isl_give isl_union_pw_multi_aff *
isl_union_pw_multi_aff_from_multi_union_pw_aff(
	__isl_take isl_multi_union_pw_aff *mupa)
{
	int i, n;
	isl_space *space;
	isl_union_pw_multi_aff *upma;
	isl_union_pw_aff *upa;

	if (!mupa)
		return NULL;

	n = isl_multi_union_pw_aff_dim(mupa, isl_dim_set);
	if (n == 0)
		isl_die(isl_multi_union_pw_aff_get_ctx(mupa), isl_error_invalid,
			"cannot determine domain of zero-dimensional "
			"isl_multi_union_pw_aff", goto error);

	/* Combine the elements with flat range products and then
	 * reinstate the range space of "mupa".
	 */
	space = isl_multi_union_pw_aff_get_space(mupa);
	upa = isl_multi_union_pw_aff_get_union_pw_aff(mupa, 0);
	upma = isl_union_pw_multi_aff_from_union_pw_aff(upa);

	for (i = 1; i < n; ++i) {
		isl_union_pw_multi_aff *upma_i;

		upa = isl_multi_union_pw_aff_get_union_pw_aff(mupa, i);
		upma_i = isl_union_pw_multi_aff_from_union_pw_aff(upa);
		upma = isl_union_pw_multi_aff_flat_range_product(upma, upma_i);
	}

	upma = isl_union_pw_multi_aff_reset_range_space(upma, space);

	isl_multi_union_pw_aff_free(mupa);
	return upma;
error:
	isl_multi_union_pw_aff_free(mupa);
	return NULL;
}
/* Intersect the range of "mupa" with "range".
 * That is, keep only those domain elements that have a function value
 * in "range".
 *
 * Compute the preimage of "range" under (the upma view of) "mupa" and
 * intersect the domain of "mupa" with it.
 */
__isl_give isl_multi_union_pw_aff *isl_multi_union_pw_aff_intersect_range(
	__isl_take isl_multi_union_pw_aff *mupa, __isl_take isl_set *range)
{
	isl_union_pw_multi_aff *upma;
	isl_union_set *domain;
	isl_space *space;
	int n;
	int match;

	if (!mupa || !range)
		goto error;

	/* The set space of "range" must match the range space of "mupa". */
	space = isl_set_get_space(range);
	match = isl_space_tuple_is_equal(mupa->space, isl_dim_set,
					space, isl_dim_set);
	isl_space_free(space);
	if (match < 0)
		goto error;
	if (!match)
		isl_die(isl_multi_union_pw_aff_get_ctx(mupa), isl_error_invalid,
			"space don't match", goto error);
	n = isl_multi_union_pw_aff_dim(mupa, isl_dim_set);
	if (n == 0)
		isl_die(isl_multi_union_pw_aff_get_ctx(mupa), isl_error_invalid,
			"cannot intersect range of zero-dimensional "
			"isl_multi_union_pw_aff", goto error);

	upma = isl_union_pw_multi_aff_from_multi_union_pw_aff(
					isl_multi_union_pw_aff_copy(mupa));
	domain = isl_union_set_from_set(range);
	domain = isl_union_set_preimage_union_pw_multi_aff(domain, upma);
	mupa = isl_multi_union_pw_aff_intersect_domain(mupa, domain);

	return mupa;
error:
	isl_multi_union_pw_aff_free(mupa);
	isl_set_free(range);
	return NULL;
}
/* Return the shared domain of the elements of "mupa".
 *
 * Intersect the domains of all the elements.
 */
__isl_give isl_union_set *isl_multi_union_pw_aff_domain(
	__isl_take isl_multi_union_pw_aff *mupa)
{
	int i, n;
	isl_union_pw_aff *upa;
	isl_union_set *dom;

	if (!mupa)
		return NULL;

	n = isl_multi_union_pw_aff_dim(mupa, isl_dim_set);
	/* A 0D expression has no elements from which to derive a domain. */
	if (n == 0)
		isl_die(isl_multi_union_pw_aff_get_ctx(mupa), isl_error_invalid,
			"cannot determine domain", goto error);

	upa = isl_multi_union_pw_aff_get_union_pw_aff(mupa, 0);
	dom = isl_union_pw_aff_domain(upa);
	for (i = 1; i < n; ++i) {
		isl_union_set *dom_i;

		upa = isl_multi_union_pw_aff_get_union_pw_aff(mupa, i);
		dom_i = isl_union_pw_aff_domain(upa);
		dom = isl_union_set_intersect(dom, dom_i);
	}

	isl_multi_union_pw_aff_free(mupa);
	return dom;
error:
	isl_multi_union_pw_aff_free(mupa);
	return NULL;
}
/* Apply "aff" to "mupa". The space of "mupa" is equal to the domain of "aff".
 * In particular, the spaces have been aligned.
 * The result is defined over the shared domain of the elements of "mupa"
 *
 * We first extract the parametric constant part of "aff" and
 * define that over the shared domain.
 * Then we iterate over all input dimensions of "aff" and add the corresponding
 * multiples of the elements of "mupa".
 * Finally, we consider the integer divisions, calling the function
 * recursively to obtain an isl_union_pw_aff corresponding to the
 * integer division argument.
 */
static __isl_give isl_union_pw_aff *multi_union_pw_aff_apply_aff(
	__isl_take isl_multi_union_pw_aff *mupa, __isl_take isl_aff *aff)
{
	int i, n_in, n_div;
	isl_union_pw_aff *upa;
	isl_union_set *uset;
	isl_val *v;
	isl_aff *cst;

	n_in = isl_aff_dim(aff, isl_dim_in);
	n_div = isl_aff_dim(aff, isl_dim_div);

	/* Parametric constant part of "aff" on the shared domain. */
	uset = isl_multi_union_pw_aff_domain(isl_multi_union_pw_aff_copy(mupa));
	cst = isl_aff_copy(aff);
	cst = isl_aff_drop_dims(cst, isl_dim_div, 0, n_div);
	cst = isl_aff_drop_dims(cst, isl_dim_in, 0, n_in);
	cst = isl_aff_project_domain_on_params(cst);
	upa = isl_union_pw_aff_aff_on_domain(uset, cst);

	/* Add coefficient * element for each input dimension that occurs. */
	for (i = 0; i < n_in; ++i) {
		isl_union_pw_aff *upa_i;

		if (!isl_aff_involves_dims(aff, isl_dim_in, i, 1))
			continue;
		v = isl_aff_get_coefficient_val(aff, isl_dim_in, i);
		upa_i = isl_multi_union_pw_aff_get_union_pw_aff(mupa, i);
		upa_i = isl_union_pw_aff_scale_val(upa_i, v);
		upa = isl_union_pw_aff_add(upa, upa_i);
	}

	/* Handle integer divisions by recursing on their arguments and
	 * taking the floor of the result.
	 */
	for (i = 0; i < n_div; ++i) {
		isl_aff *div;
		isl_union_pw_aff *upa_i;

		if (!isl_aff_involves_dims(aff, isl_dim_div, i, 1))
			continue;
		div = isl_aff_get_div(aff, i);
		upa_i = multi_union_pw_aff_apply_aff(
					isl_multi_union_pw_aff_copy(mupa), div);
		upa_i = isl_union_pw_aff_floor(upa_i);
		v = isl_aff_get_coefficient_val(aff, isl_dim_div, i);
		upa_i = isl_union_pw_aff_scale_val(upa_i, v);
		upa = isl_union_pw_aff_add(upa, upa_i);
	}

	isl_multi_union_pw_aff_free(mupa);
	isl_aff_free(aff);

	return upa;
}
/* Apply "aff" to "mupa". The space of "mupa" needs to be compatible
 * with the domain of "aff".
 * Furthermore, the dimension of this space needs to be greater than zero.
 * The result is defined over the shared domain of the elements of "mupa"
 *
 * We perform these checks and then hand over control to
 * multi_union_pw_aff_apply_aff.
 */
__isl_give isl_union_pw_aff *isl_multi_union_pw_aff_apply_aff(
	__isl_take isl_multi_union_pw_aff *mupa, __isl_take isl_aff *aff)
{
	isl_space *space1, *space2;
	int equal;

	/* Align the parameters of the two arguments with each other. */
	mupa = isl_multi_union_pw_aff_align_params(mupa,
						isl_aff_get_space(aff));
	aff = isl_aff_align_params(aff, isl_multi_union_pw_aff_get_space(mupa));
	if (!mupa || !aff)
		goto error;

	space1 = isl_multi_union_pw_aff_get_space(mupa);
	space2 = isl_aff_get_domain_space(aff);
	equal = isl_space_is_equal(space1, space2);
	isl_space_free(space1);
	isl_space_free(space2);
	if (equal < 0)
		goto error;
	if (!equal)
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"spaces don't match", goto error);
	if (isl_aff_dim(aff, isl_dim_in) == 0)
		isl_die(isl_aff_get_ctx(aff), isl_error_invalid,
			"cannot determine domains", goto error);

	return multi_union_pw_aff_apply_aff(mupa, aff);
error:
	isl_multi_union_pw_aff_free(mupa);
	isl_aff_free(aff);
	return NULL;
}
/* Apply "ma" to "mupa". The space of "mupa" needs to be compatible
 * with the domain of "ma".
 * Furthermore, the dimension of this space needs to be greater than zero,
 * unless the dimension of the target space of "ma" is also zero.
 * The result is defined over the shared domain of the elements of "mupa"
 *
 * Apply each affine expression of "ma" separately and collect
 * the results in a new isl_multi_union_pw_aff.
 */
__isl_give isl_multi_union_pw_aff *isl_multi_union_pw_aff_apply_multi_aff(
	__isl_take isl_multi_union_pw_aff *mupa, __isl_take isl_multi_aff *ma)
{
	isl_space *space1, *space2;
	isl_multi_union_pw_aff *res;
	int equal;
	int i, n_out;

	/* Align the parameters of the two arguments with each other. */
	mupa = isl_multi_union_pw_aff_align_params(mupa,
					isl_multi_aff_get_space(ma));
	ma = isl_multi_aff_align_params(ma,
					isl_multi_union_pw_aff_get_space(mupa));
	if (!mupa || !ma)
		goto error;

	space1 = isl_multi_union_pw_aff_get_space(mupa);
	space2 = isl_multi_aff_get_domain_space(ma);
	equal = isl_space_is_equal(space1, space2);
	isl_space_free(space1);
	isl_space_free(space2);
	if (equal < 0)
		goto error;
	if (!equal)
		isl_die(isl_multi_aff_get_ctx(ma), isl_error_invalid,
			"spaces don't match", goto error);
	n_out = isl_multi_aff_dim(ma, isl_dim_out);
	if (isl_multi_aff_dim(ma, isl_dim_in) == 0 && n_out != 0)
		isl_die(isl_multi_aff_get_ctx(ma), isl_error_invalid,
			"cannot determine domains", goto error);

	space1 = isl_space_range(isl_multi_aff_get_space(ma));
	res = isl_multi_union_pw_aff_alloc(space1);

	for (i = 0; i < n_out; ++i) {
		isl_aff *aff;
		isl_union_pw_aff *upa;

		aff = isl_multi_aff_get_aff(ma, i);
		upa = multi_union_pw_aff_apply_aff(
					isl_multi_union_pw_aff_copy(mupa), aff);
		res = isl_multi_union_pw_aff_set_union_pw_aff(res, i, upa);
	}

	isl_multi_aff_free(ma);
	isl_multi_union_pw_aff_free(mupa);
	return res;
error:
	isl_multi_union_pw_aff_free(mupa);
	isl_multi_aff_free(ma);
	return NULL;
}
/* Apply "pa" to "mupa". The space of "mupa" needs to be compatible
 * with the domain of "pa".
 * Furthermore, the dimension of this space needs to be greater than zero.
 * The result is defined over the shared domain of the elements of "mupa"
 *
 * For each piece of "pa", restrict "mupa" to the cell of that piece,
 * apply the affine expression of the piece and combine the results
 * with isl_union_pw_aff_union_add.
 */
__isl_give isl_union_pw_aff *isl_multi_union_pw_aff_apply_pw_aff(
	__isl_take isl_multi_union_pw_aff *mupa, __isl_take isl_pw_aff *pa)
{
	int i;
	int equal;
	isl_space *space, *space2;
	isl_union_pw_aff *upa;

	/* Align the parameters of the two arguments with each other. */
	mupa = isl_multi_union_pw_aff_align_params(mupa,
						isl_pw_aff_get_space(pa));
	pa = isl_pw_aff_align_params(pa,
				isl_multi_union_pw_aff_get_space(mupa));
	if (!mupa || !pa)
		goto error;

	space = isl_multi_union_pw_aff_get_space(mupa);
	space2 = isl_pw_aff_get_domain_space(pa);
	equal = isl_space_is_equal(space, space2);
	isl_space_free(space);
	isl_space_free(space2);
	if (equal < 0)
		goto error;
	if (!equal)
		isl_die(isl_pw_aff_get_ctx(pa), isl_error_invalid,
			"spaces don't match", goto error);
	if (isl_pw_aff_dim(pa, isl_dim_in) == 0)
		isl_die(isl_pw_aff_get_ctx(pa), isl_error_invalid,
			"cannot determine domains", goto error);

	space = isl_space_params(isl_multi_union_pw_aff_get_space(mupa));
	upa = isl_union_pw_aff_empty(space);

	for (i = 0; i < pa->n; ++i) {
		isl_aff *aff;
		isl_set *domain;
		isl_multi_union_pw_aff *mupa_i;
		isl_union_pw_aff *upa_i;

		mupa_i = isl_multi_union_pw_aff_copy(mupa);
		domain = isl_set_copy(pa->p[i].set);
		mupa_i = isl_multi_union_pw_aff_intersect_range(mupa_i, domain);
		aff = isl_aff_copy(pa->p[i].aff);
		upa_i = multi_union_pw_aff_apply_aff(mupa_i, aff);
		upa = isl_union_pw_aff_union_add(upa, upa_i);
	}

	isl_multi_union_pw_aff_free(mupa);
	isl_pw_aff_free(pa);
	return upa;
error:
	isl_multi_union_pw_aff_free(mupa);
	isl_pw_aff_free(pa);
	return NULL;
}
/* Apply "pma" to "mupa". The space of "mupa" needs to be compatible
 * with the domain of "pma".
 * Furthermore, the dimension of this space needs to be greater than zero,
 * unless the dimension of the target space of "pma" is also zero.
 * The result is defined over the shared domain of the elements of "mupa"
 *
 * Apply each piecewise affine expression of "pma" separately and
 * collect the results in a new isl_multi_union_pw_aff.
 */
__isl_give isl_multi_union_pw_aff *isl_multi_union_pw_aff_apply_pw_multi_aff(
	__isl_take isl_multi_union_pw_aff *mupa,
	__isl_take isl_pw_multi_aff *pma)
{
	isl_space *space1, *space2;
	isl_multi_union_pw_aff *res;
	int equal;
	int i, n_out;

	/* Align the parameters of the two arguments with each other. */
	mupa = isl_multi_union_pw_aff_align_params(mupa,
					isl_pw_multi_aff_get_space(pma));
	pma = isl_pw_multi_aff_align_params(pma,
					isl_multi_union_pw_aff_get_space(mupa));
	if (!mupa || !pma)
		goto error;

	space1 = isl_multi_union_pw_aff_get_space(mupa);
	space2 = isl_pw_multi_aff_get_domain_space(pma);
	equal = isl_space_is_equal(space1, space2);
	isl_space_free(space1);
	isl_space_free(space2);
	if (equal < 0)
		goto error;
	if (!equal)
		isl_die(isl_pw_multi_aff_get_ctx(pma), isl_error_invalid,
			"spaces don't match", goto error);
	n_out = isl_pw_multi_aff_dim(pma, isl_dim_out);
	if (isl_pw_multi_aff_dim(pma, isl_dim_in) == 0 && n_out != 0)
		isl_die(isl_pw_multi_aff_get_ctx(pma), isl_error_invalid,
			"cannot determine domains", goto error);

	space1 = isl_space_range(isl_pw_multi_aff_get_space(pma));
	res = isl_multi_union_pw_aff_alloc(space1);

	for (i = 0; i < n_out; ++i) {
		isl_pw_aff *pa;
		isl_union_pw_aff *upa;

		pa = isl_pw_multi_aff_get_pw_aff(pma, i);
		upa = isl_multi_union_pw_aff_apply_pw_aff(
					isl_multi_union_pw_aff_copy(mupa), pa);
		res = isl_multi_union_pw_aff_set_union_pw_aff(res, i, upa);
	}

	isl_pw_multi_aff_free(pma);
	isl_multi_union_pw_aff_free(mupa);
	return res;
error:
	isl_multi_union_pw_aff_free(mupa);
	isl_pw_multi_aff_free(pma);
	return NULL;
}
/* Compute the pullback of "mupa" by the function represented by "upma".
 * In other words, plug in "upma" in "mupa". The result contains
 * expressions defined over the domain space of "upma".
 *
 * Run over all elements of "mupa" and plug in "upma" in each of them.
 */
__isl_give isl_multi_union_pw_aff *
isl_multi_union_pw_aff_pullback_union_pw_multi_aff(
	__isl_take isl_multi_union_pw_aff *mupa,
	__isl_take isl_union_pw_multi_aff *upma)
{
	int i, n;

	/* Align the parameters of the two arguments with each other. */
	mupa = isl_multi_union_pw_aff_align_params(mupa,
				isl_union_pw_multi_aff_get_space(upma));
	upma = isl_union_pw_multi_aff_align_params(upma,
				isl_multi_union_pw_aff_get_space(mupa));
	if (!mupa || !upma)
		goto error;

	/* Pull back each element separately; "upma" is copied per element. */
	n = isl_multi_union_pw_aff_dim(mupa, isl_dim_set);
	for (i = 0; i < n; ++i) {
		isl_union_pw_aff *upa;

		upa = isl_multi_union_pw_aff_get_union_pw_aff(mupa, i);
		upa = isl_union_pw_aff_pullback_union_pw_multi_aff(upa,
					isl_union_pw_multi_aff_copy(upma));
		mupa = isl_multi_union_pw_aff_set_union_pw_aff(mupa, i, upa);
	}

	isl_union_pw_multi_aff_free(upma);
	return mupa;
error:
	isl_multi_union_pw_aff_free(mupa);
	isl_union_pw_multi_aff_free(upma);
	return NULL;
}
/* Extract the sequence of elements in "mupa" with domain space "space"
 * (ignoring parameters).
 *
 * For the elements of "mupa" that are not defined on the specified space,
 * the corresponding element in the result is empty.
 */
__isl_give isl_multi_pw_aff *isl_multi_union_pw_aff_extract_multi_pw_aff(
	__isl_keep isl_multi_union_pw_aff *mupa, __isl_take isl_space *space)
{
	int i, n;
	isl_space *space_mpa = NULL;
	isl_multi_pw_aff *mpa;

	if (!mupa || !space)
		goto error;

	/* Replace the parameters of "space" by those of "mupa" if needed. */
	space_mpa = isl_multi_union_pw_aff_get_space(mupa);
	if (!isl_space_match(space_mpa, isl_dim_param, space, isl_dim_param)) {
		space = isl_space_drop_dims(space, isl_dim_param,
					0, isl_space_dim(space, isl_dim_param));
		space = isl_space_align_params(space,
					isl_space_copy(space_mpa));
		if (!space)
			goto error;
	}
	space_mpa = isl_space_map_from_domain_and_range(isl_space_copy(space),
							space_mpa);
	mpa = isl_multi_pw_aff_alloc(space_mpa);

	/* Space of the individual pieces: "space" -> single output dim. */
	space = isl_space_from_domain(space);
	space = isl_space_add_dims(space, isl_dim_out, 1);
	n = isl_multi_union_pw_aff_dim(mupa, isl_dim_set);
	for (i = 0; i < n; ++i) {
		isl_union_pw_aff *upa;
		isl_pw_aff *pa;

		upa = isl_multi_union_pw_aff_get_union_pw_aff(mupa, i);
		pa = isl_union_pw_aff_extract_pw_aff(upa,
						isl_space_copy(space));
		mpa = isl_multi_pw_aff_set_pw_aff(mpa, i, pa);
		isl_union_pw_aff_free(upa);
	}

	isl_space_free(space);
	return mpa;
error:
	isl_space_free(space_mpa);
	isl_space_free(space);
	return NULL;
}
|
920033.c | /*
Copyright (c) 2008 Sascha Steinbiss <[email protected]>
Copyright (c) 2008 Center for Bioinformatics, University of Hamburg
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <math.h>
#include <string.h>
#include "core/bittab.h"
#include "core/class_alloc_lock.h"
#include "core/ensure.h"
#include "core/ma.h"
#include "core/minmax.h"
#include "core/range.h"
#include "core/unused_api.h"
#include "annotationsketch/canvas.h"
#include "annotationsketch/canvas_members.h"
#include "annotationsketch/canvas_cairo.h"
#include "annotationsketch/canvas_cairo_file.h"
#include "annotationsketch/canvas_rep.h"
#include "annotationsketch/default_formats.h"
#include "annotationsketch/cliptype.h"
#include "annotationsketch/graphics_cairo_api.h"
#include "annotationsketch/style.h"
/* Canvas subclass that renders to a Cairo-backed image file.
 * The GtCanvas base part must come first so the object can be
 * used through GtCanvas* pointers.
 */
struct GtCanvasCairoFile {
  const GtCanvas parent_instance;  /* base class part */
  GtGraphicsOutType type;          /* selected graphics output format */
};
/* Write the rendered image of "canvas" to the file "filename".
 * Returns 0 on success, a negative value on error (with "err" set).
 */
int gt_canvas_cairo_file_to_file(GtCanvasCairoFile *canvas,
                                 const char *filename, GtError *err)
{
  GtCanvas *c = (GtCanvas*) canvas;
  gt_error_check(err);
  gt_assert(canvas && filename);
  /* nothing to write if no graphics object has been created */
  if (!c->pvt->g)
  {
    /* XXX: shouldn't this be an assertion? */
    gt_error_set(err, "No graphics has been created yet!");
    return -1;
  }
  /* write out result file */
  return gt_graphics_save_to_file(c->pvt->g, filename, err);
}
/* Append the rendered image of "canvas" to the string "stream".
 * Always returns 0; if no graphics object exists yet, nothing is written.
 */
int gt_canvas_cairo_file_to_stream(GtCanvasCairoFile *canvas, GtStr *stream)
{
  GtCanvas *c = (GtCanvas*) canvas;
  gt_assert(canvas && stream);
  /* write out result, if a graphics object has been created */
  if (c->pvt->g)
    gt_graphics_save_to_stream(c->pvt->g, stream);
  return 0;
}
/* Return the (lazily created, process-wide) class object for
 * GtCanvasCairoFile, wiring up the canvas visitor callbacks.
 * Creation is protected by the class allocation lock.
 */
const GtCanvasClass* gt_canvas_cairo_file_class(void)
{
  static const GtCanvasClass *canvas_class = NULL;
  gt_class_alloc_lock_enter();
  if (!canvas_class) {
    canvas_class = gt_canvas_class_new(sizeof (GtCanvasCairoFile),
                                       gt_canvas_cairo_visit_layout_pre,
                                       gt_canvas_cairo_visit_layout_post,
                                       gt_canvas_cairo_visit_track_pre,
                                       gt_canvas_cairo_visit_track_post,
                                       gt_canvas_cairo_visit_line_pre,
                                       gt_canvas_cairo_visit_line_post,
                                       gt_canvas_cairo_visit_block,
                                       gt_canvas_cairo_visit_element,
                                       gt_canvas_cairo_visit_custom_track,
                                       gt_canvas_cairo_draw_ruler,
                                       NULL);
  }
  gt_class_alloc_lock_leave();
  return canvas_class;
}
/* Create a new file-based Cairo canvas of the given pixel dimensions.
 * Background color and margins are read from "style" (with defaults of
 * white and 10.0 respectively); "image_info" may be NULL.
 * Returns NULL on style-query error (with "err" set).
 */
GtCanvas* gt_canvas_cairo_file_new(GtStyle *style,
                                   GtGraphicsOutType output_type,
                                   GtUword width, GtUword height,
                                   GtImageInfo *image_info,
                                   GtError *err)
{
  GtCanvas *canvas;
  GtColor bgcolor = {1.0, 1.0, 1.0, 1.0};
  GtCanvasCairoFile *ccf;
  GtStyleQueryStatus status;
  double margins = 10.0;
  gt_assert(style && width > 0 && height > 0);
  /* determine background color from style, falling back to white */
  status = gt_style_get_color(style, "format", "background_color", &bgcolor,
                              NULL, err);
  switch (status) {
    case GT_STYLE_QUERY_ERROR:
      return NULL;
    case GT_STYLE_QUERY_NOT_SET:
      bgcolor.red = bgcolor.green = bgcolor.blue = bgcolor.alpha = 1.0;
      break;
    default:
      break;
  }
  /* determine margins from style, keeping the default on "not set" */
  if (gt_style_get_num(style,
                       "format", "margins", &margins,
                       NULL, err) == GT_STYLE_QUERY_ERROR) {
    return NULL;
  }
  canvas = gt_canvas_create(gt_canvas_cairo_file_class());
  canvas->pvt->g = gt_graphics_cairo_new(output_type, width, height);
  (void) gt_graphics_set_background_color(canvas->pvt->g, bgcolor);
  (void) gt_graphics_set_margins(canvas->pvt->g, margins, 0);
  canvas->pvt->margins = margins;
  if (image_info)
    gt_image_info_set_height(image_info, height);
  canvas->pvt->sty = style;
  /* NOTE(review): half-pixel offset — presumably for crisp Cairo line
   * rendering; confirm against the drawing code */
  canvas->pvt->y += 0.5;
  canvas->pvt->ii = image_info;
  canvas->pvt->width = width;
  canvas->pvt->height = height;
  canvas->pvt->bt = NULL;
  ccf = canvas_cairo_file_cast(canvas);
  ccf->type = output_type;
  return canvas;
}
|
633791.c | #include "utils.h"
void printErrorLn(char* s) {
printf(ANSI_COLOR_RED "%s" ANSI_COLOR_RESET "\n", s);
return;
}
void printLn(char* s) {
printf(ANSI_COLOR_GREEN "%s" ANSI_COLOR_RESET "\n", s);
return;
}
/* Return 1 if fileName exists (access(2) succeeds), 0 otherwise. */
char fileExists(char* fileName) {
    return (access(fileName, F_OK) != -1) ? 1 : 0;
}
char dirExists(char* dirName) {
char returnValue = 0;
DIR* dir = opendir(dirName);
if (dir) {
// directory exists
returnValue = 1;
closedir(dir);
}
// directory does not exist
return returnValue;
}
/* Recursively create directory 'dir' (like `mkdir -p`): each path
 * component is created in turn; components that already exist are
 * harmless (mkdir just fails with EEXIST and the errors are ignored,
 * matching the original behavior of never checking mkdir's result).
 * Paths longer than sizeof(tmp)-1 bytes are silently truncated.
 * Fix: the original indexed tmp[len - 1] unconditionally, which reads
 * and writes tmp[-1] when 'dir' is the empty string (len == 0). */
void _mkdir(const char* dir) {
    char tmp[256];
    char* p = NULL;
    size_t len;
    snprintf(tmp, sizeof(tmp), "%s", dir);
    len = strlen(tmp);
    if (len == 0) {
        /* empty path: nothing to create (avoids tmp[-1] underflow) */
        return;
    }
    /* strip a single trailing slash so the loop below sees clean components */
    if (tmp[len - 1] == '/')
        tmp[len - 1] = 0;
    for (p = tmp + 1; *p; p++) {
        if (*p == '/') {
            *p = 0;
            mkdir(tmp, S_IRWXU); /* EEXIST expected for present components */
            *p = '/';
        }
    }
    mkdir(tmp, S_IRWXU);
}
/* Truncate 'path' in place at its final '/', leaving only the directory
 * part. A path with no '/' is left unchanged. */
void removeFileName(char* path) {
    char* slash = strrchr(path, '/');
    if (slash)
        *slash = '\0';
}
/* Resolve this host's private IPv4 address:
 * 1) scan /proc/net/route for the default-route ("00000000") interface,
 * 2) walk getifaddrs() looking for that interface's AF_INET address.
 * Returns 0.0.0.0 if no matching address is found.
 * Fixes vs. original: the FILE* is now closed (was leaked) and checked
 * for NULL; the address is copied out BEFORE freeifaddrs() (the old code
 * dereferenced ifa->ifa_addr after the list was freed -- use-after-free,
 * and dereferenced NULL when the loop found no match); strcmp is guarded
 * against a NULL interface name. */
struct in_addr getPrivateIp() {
    FILE* f;
    char line[100], *p = NULL, *c;
    struct in_addr result;
    memset(&result, 0, sizeof(result));
    f = fopen("/proc/net/route", "r");
    if (f == NULL) {
        perror("fopen /proc/net/route error");
        exit(EXIT_FAILURE);
    }
    while (fgets(line, 100, f)) {
        p = strtok(line, "\t");
        c = strtok(NULL, "\t");
        if (p != NULL && c != NULL) {
            if (strcmp(c, "00000000") == 0) {
                printf("Default interface is: %s\n", p);
                break;
            }
        }
    }
    fclose(f);
    int fm = AF_INET; // IPv4
    struct ifaddrs *ifaddr, *ifa;
    int family;
    if (getifaddrs(&ifaddr) == -1) {
        perror("getifaddrs error");
        exit(EXIT_FAILURE);
    }
    // iterate through linked list, maintaining head pointer so we can free list later
    for (ifa = ifaddr; ifa != NULL; ifa = ifa->ifa_next) {
        if (ifa->ifa_addr == NULL) {
            continue;
        }
        family = ifa->ifa_addr->sa_family;
        if (p != NULL && strcmp(ifa->ifa_name, p) == 0) {
            if (family == fm) {
                printf("Private IP address: %s\n", inet_ntoa(((struct sockaddr_in*)ifa->ifa_addr)->sin_addr));
                /* copy the address out while the list is still alive */
                result = ((struct sockaddr_in*)ifa->ifa_addr)->sin_addr;
                break;
            }
        }
    }
    freeifaddrs(ifaddr);
    return result;
}
/* Open a TCP socket and connect it to peerAddr, retrying up to
 * MAX_CONNECT_ATTEMPTS times with a one-second pause between attempts.
 * Returns 0 on success; 1 (socket closed) when all attempts fail. */
int connectToPeer(int* socketFd, struct sockaddr_in* peerAddr) {
    int attempt;
    if (((*socketFd) = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
        perror("Socket creation error");
        return 1;
    }
    for (attempt = 1; attempt <= (MAX_CONNECT_ATTEMPTS); attempt++) {
        if (connect((*socketFd), (struct sockaddr*)peerAddr, sizeof(*peerAddr)) >= 0) {
            return 0; /* connected */
        }
        perror("Socket connection failed");
        printf("Attempt: %d, retrying...\n", attempt);
        if (attempt < (MAX_CONNECT_ATTEMPTS)) {
            sleep(1); /* wait a second before the next attempt */
        }
    }
    close(*socketFd);
    return 1;
}
// Create a TCP listening socket bound to this host's private IP and portNum,
// with a backlog of maxConnectionsNum. Fills *socketFd and *socketAddr.
// Returns 0 on success, 1 on any failure (socket/bind/listen).
int createServer(int* socketFd, struct sockaddr_in* socketAddr, int portNum, int maxConnectionsNum) {
    // create socket
    if (((*socketFd) = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
        perror("Socket creation error");
        return 1;
    }
    socketAddr->sin_family = AF_INET; // IPv4
    socketAddr->sin_addr = getPrivateIp(); // get private ip
    // NOTE(review): no htons() on the port -- callers apparently pass it
    // already in network byte order (connectToPeer uses sin_port raw too);
    // confirm before "fixing".
    socketAddr->sin_port = portNum;
    // bind address to socket
    if (bind(*socketFd, (struct sockaddr*)socketAddr, sizeof(*socketAddr)) < 0) {
        perror("Bind error");
        return 1;
    }
    // start listenning for incoming connections
    if (listen(*socketFd, maxConnectionsNum) < 0) {
        perror("Listen error");
        return 1;
    }
    return 0;
}
907775.c | /* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE127_Buffer_Underread__char_alloca_cpy_52b.c
Label Definition File: CWE127_Buffer_Underread.stack.label.xml
Template File: sources-sink-52b.tmpl.c
*/
/*
* @description
* CWE: 127 Buffer Under-read
* BadSource: Set data pointer to before the allocated memory buffer
* GoodSource: Set data pointer to the allocated memory buffer
* Sink: cpy
* BadSink : Copy data to string using strcpy
* Flow Variant: 52 Data flow: data passed as an argument from one function to another to another in three different source files
*
* */
#include "std_testcase.h"
#include <wchar.h>
/* all the sinks are the same, we just want to know where the hit originated if a tool flags one */
#ifndef OMITBAD
/* bad function declaration */
void CWE127_Buffer_Underread__char_alloca_cpy_52c_badSink(char * data);
/* Middle link of the flow-variant-52 chain: forwards the (bad-source)
 * pointer unchanged to the sink in the 52c source file. */
void CWE127_Buffer_Underread__char_alloca_cpy_52b_badSink(char * data)
{
    CWE127_Buffer_Underread__char_alloca_cpy_52c_badSink(data);
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* good function declaration */
void CWE127_Buffer_Underread__char_alloca_cpy_52c_goodG2BSink(char * data);
/* goodG2B uses the GoodSource with the BadSink */
/* Middle link of the flow-variant-52 chain for the good (G2B) variant:
 * forwards the pointer unchanged to the sink in the 52c source file. */
void CWE127_Buffer_Underread__char_alloca_cpy_52b_goodG2BSink(char * data)
{
    CWE127_Buffer_Underread__char_alloca_cpy_52c_goodG2BSink(data);
}
#endif /* OMITGOOD */
|
829046.c | /* BFD back-end for SH PE IMAGE COFF files.
Copyright 1995, 2000, 2001 Free Software Foundation, Inc.
This file is part of BFD, the Binary File Descriptor library.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
#include "bfd.h"
#include "sysdep.h"
#define TARGET_SHL_SYM shlpei_vec
#define TARGET_SHL_NAME "pei-shl"
#define IMAGE_BASE NT_IMAGE_BASE
#define COFF_IMAGE_WITH_PE
#define COFF_WITH_PE
#define PCRELOFFSET true
#define TARGET_UNDERSCORE '_'
#define COFF_LONG_SECTION_NAMES
#include "coff-sh.c"
|
886804.c |
/**
********************************************************************************
* @file stm8s_tim1_SetIC3Prescaler.c
* @author MCD Application Team
* @version V2.2.0
* @date 30-September-2014
* @brief This file contains all the functions for the UART1 peripheral.
******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT 2014 STMicroelectronics</center></h2>
*
* Licensed under MCD-ST Liberty SW License Agreement V2, (the "License");
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.st.com/software_license_agreement_liberty_v2
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************
*/
/* Includes ------------------------------------------------------------------*/
#include "stm8s_tim1.h"
#include "private/stm8s_tim1_private.h"
/** @addtogroup STM8S_StdPeriph_Driver
* @{
*/
/** @}
* @addtogroup TIM1_Public_Functions
* @{
*/
/**
* @brief Sets the TIM1 Input Capture 3 prescaler.
* @param TIM1_IC3Prescaler specifies the Input Capture prescaler new value
* This parameter can be one of the following values:
* - TIM1_ICPSC_DIV1: no prescaler
* - TIM1_ICPSC_DIV2: capture is done once every 2 events
* - TIM1_ICPSC_DIV4: capture is done once every 4 events
* - TIM1_ICPSC_DIV8: capture is done once every 8 events
* @retval None
*/
void TIM1_SetIC3Prescaler(TIM1_ICPSC_TypeDef TIM1_IC3Prescaler)
{
  /* Check the parameters */
  assert_param(IS_TIM1_IC_PRESCALER_OK(TIM1_IC3Prescaler));
  /* Reset the IC3PSC bits & set the IC3PSC value -- read-modify-write on
     CCMR3 so the channel's other configuration bits are preserved
     (the original comment said "IC1PSC", but this is channel 3 / CCMR3) */
  TIM1->CCMR3 = (uint8_t)((uint8_t)(TIM1->CCMR3 & (uint8_t)(~TIM1_CCMR_ICxPSC)) |
                          (uint8_t)TIM1_IC3Prescaler);
}
/**
* @}
*/
/**
* @}
*/
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
294473.c | /* Software floating-point emulation.
Return (double)(*a)
Copyright (C) 1997-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Richard Henderson ([email protected]) and
Jakub Jelinek ([email protected]).
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include "soft-fp.h"
#include "double.h"
#include "quad.h"
/* Software-FP conversion: return (double)(*a) for an IEEE quad argument.
 * Unpacks *a in semi-raw form, truncates the significand to double
 * precision (the #if picks the right limb count for the word size),
 * and repacks. QP_HANDLE_EXCEPTIONS presumably re-runs the conversion
 * in hardware (fqtod) when an FP exception must be delivered -- soft-fp
 * convention; confirm against the surrounding sparc64 port. */
double _Qp_qtod(const long double *a)
{
  FP_DECL_EX;
  FP_DECL_Q(A);
  FP_DECL_D(R);
  double r;
  FP_INIT_ROUNDMODE;
  FP_UNPACK_SEMIRAW_QP(A, a);
#if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q
  FP_TRUNC(D,Q,2,4,R,A);
#else
  FP_TRUNC(D,Q,1,2,R,A);
#endif
  FP_PACK_SEMIRAW_D(r, R);
  QP_HANDLE_EXCEPTIONS(__asm (
" ldd [%1], %%f52\n"
" ldd [%1+8], %%f54\n"
" fqtod %%f52, %0\n"
" " : "=&e" (r) : "r" (a) : QP_CLOBBER));
  return r;
}
|
774968.c | /*
** kern.c for kos in /nfs/export/work/kos
**
** Made by jeremy cochoy
** Login <[email protected]>
**
** Started on Sun Dec 21 01:04:39 2008 jeremy cochoy
** Last update Sun Apr 19 21:47:40 2009 jeremy cochoy
*/
#include "klib.h"
#include "sysio.h"
int main(void);
/* Freestanding kernel entry point: control arrives here from the boot
 * code and is handed straight to main(), which never returns (it ends
 * in an infinite loop). */
void _start(void)
{
  main();
}
/* Kernel bring-up sequence: clear the screen, load the GDT, reset the
 * stack to the new data segment, print progress, then halt in an
 * infinite loop. Never returns. */
int main(void)
{
  tty_y = 2;
  //Clear screen
  clear();
  //Load GDT
  setColor(RED, FALSE, BLACK, 0);
  print("Kernel : loading gdt\t\n");
  gdt_reload();
  /* Refresh Stack Ptr: reload SS with GDT selector 0x18 and point ESP at
     0x1FFF0 -- must happen immediately after the GDT switch */
  asm("movw $0x18, %ax \n \
       movw %ax, %ss \n \
       movl $0x1FFF0, %esp \n");
  print("Kernel : gdt loaded\n");
  //Load IDT
  //NOTE(review): "loading idt" is printed below but no IDT-load call is
  //visible in this function -- confirm whether the IDT setup is missing.
  setColor(YELLOW, FALSE, BLACK, 0);
  print("Kernel : loading idt\t\n");
  //Kernel OK
  setColor(PURPLE, FALSE, BLACK, 0);
  print("\n\nKernel : kernel loaded\n");
  while(42); /* park the CPU forever */
}
|
105518.c | /*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <[email protected]>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#include "i40e_prototype.h"
/**
* i40e_init_nvm_ops - Initialize NVM function pointers
* @hw: pointer to the HW structure
*
* Setup the function pointers and the NVM info structure. Should be called
* once per NVM initialization, e.g. inside the i40e_init_shared_code().
* Please notice that the NVM term is used here (& in all methods covered
* in this file) as an equivalent of the FLASH part mapped into the SR.
* We are accessing FLASH always thru the Shadow RAM.
**/
i40e_status i40e_init_nvm(struct i40e_hw *hw)
{
	struct i40e_nvm_info *nvm = &hw->nvm;
	i40e_status ret_code = 0;
	u32 fla, gens;
	u8 sr_size;
	/* The SR size is stored regardless of the nvm programming mode
	 * as the blank mode may be used in the factory line.
	 */
	gens = rd32(hw, I40E_GLNVM_GENS);
	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
		   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
	/* Switching to words (sr_size contains power of 2KB) */
	nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, I40E_GLNVM_FLA);
	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
		/* Max NVM timeout */
		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
		nvm->blank_nvm_mode = false;
	} else { /* Blank programming mode: no ownership arbitration; most
		  * NVM operations are unsupported, so report it as an error */
		nvm->blank_nvm_mode = true;
		ret_code = I40E_ERR_NVM_BLANK_MODE;
		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
	}
	return ret_code;
}
/**
* i40e_acquire_nvm - Generic request for acquiring the NVM ownership
* @hw: pointer to the HW structure
* @access: NVM access type (read or write)
*
* This function will request NVM ownership for reading
* via the proper Admin Command.
**/
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
				       enum i40e_aq_resource_access_type access)
{
	i40e_status ret_code = 0;
	u64 gtime, timeout;
	u64 time_left = 0;
	/* blank programming mode has no ownership arbitration */
	if (hw->nvm.blank_nvm_mode)
		goto i40e_i40e_acquire_nvm_exit;
	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
					    0, &time_left, NULL);
	/* Reading the Global Device Timer */
	gtime = rd32(hw, I40E_GLVFGEN_TIMER);
	/* Store the timeout */
	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
			   access, time_left, ret_code, hw->aq.asq_last_status);
	if (ret_code && time_left) {
		/* Poll until the current NVM owner timeouts; time_left is the
		 * owner's remaining hold time as reported by firmware */
		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
		while ((gtime < timeout) && time_left) {
			usleep_range(10000, 20000);
			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
			ret_code = i40e_aq_request_resource(hw,
							    I40E_NVM_RESOURCE_ID,
							    access, 0, &time_left,
							    NULL);
			if (!ret_code) {
				/* acquired: record our own hold deadline */
				hw->nvm.hw_semaphore_timeout =
					I40E_MS_TO_GTIME(time_left) + gtime;
				break;
			}
		}
		if (ret_code) {
			hw->nvm.hw_semaphore_timeout = 0;
			i40e_debug(hw, I40E_DEBUG_NVM,
				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
				   time_left, ret_code, hw->aq.asq_last_status);
		}
	}
i40e_i40e_acquire_nvm_exit:
	return ret_code;
}
/**
* i40e_release_nvm - Generic request for releasing the NVM ownership
* @hw: pointer to the HW structure
*
* This function will release NVM resource via the proper Admin Command.
**/
/* Release NVM ownership via the admin command; in blank programming mode
 * nothing was ever acquired, so this is a no-op. */
void i40e_release_nvm(struct i40e_hw *hw)
{
	if (hw->nvm.blank_nvm_mode)
		return;
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
}
/**
* i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
* @hw: pointer to the HW structure
*
* Polls the SRCTL Shadow RAM register done bit.
**/
/* Poll the GLNVM_SRCTL done bit, returning 0 as soon as it is set or
 * I40E_ERR_TIMEOUT after I40E_SRRD_SRCTL_ATTEMPTS tries (5us apart). */
static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
	u32 reg_val, attempt;
	for (attempt = 0; attempt < I40E_SRRD_SRCTL_ATTEMPTS; attempt++) {
		reg_val = rd32(hw, I40E_GLNVM_SRCTL);
		if (reg_val & I40E_GLNVM_SRCTL_DONE_MASK)
			return 0;
		udelay(5);
	}
	i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
	return I40E_ERR_TIMEOUT;
}
/**
* i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
* Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
					    u16 *data)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;
	u32 sr_reg;
	/* reject offsets past the end of the Shadow RAM */
	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		ret_code = I40E_ERR_PARAM;
		goto read_nvm_exit;
	}
	/* Poll the done bit first (a previous operation may still be pending) */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (!ret_code) {
		/* Write the address and start reading */
		sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 (1 << I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (!ret_code) {
			/* extract the 16-bit result word from SRDATA */
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				      >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
		}
	}
	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
			   offset);
read_nvm_exit:
	return ret_code;
}
/**
* i40e_read_nvm_word - Reads Shadow RAM
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
* Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
			       u16 *data)
{
	/* thin public wrapper: only the SRCTL-register read path exists here */
	return i40e_read_nvm_word_srctl(hw, offset, data);
}
/**
* i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
* @words: (in) number of words to read; (out) number of words actually read
* @data: words read from the Shadow RAM
*
* Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
/* Read *words consecutive Shadow RAM words starting at 'offset' via the
 * SRCTL register, one word at a time. On return, *words holds the number
 * of words actually read (it stops short on the first failed read). */
static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
					      u16 *words, u16 *data)
{
	i40e_status status = 0;
	u16 count = 0;
	while (count < *words) {
		status = i40e_read_nvm_word_srctl(hw, offset + count,
						  &data[count]);
		if (status)
			break;
		count++;
	}
	/* Update the number of words read from the Shadow RAM */
	*words = count;
	return status;
}
/**
* i40e_read_nvm_buffer - Reads Shadow RAM buffer
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
* @words: (in) number of words to read; (out) number of words actually read
* @data: words read from the Shadow RAM
*
* Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
				 u16 *words, u16 *data)
{
	/* thin public wrapper: only the SRCTL-register read path exists here */
	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
/**
* i40e_write_nvm_aq - Writes Shadow RAM.
* @hw: pointer to the HW structure.
* @module_pointer: module pointer location in words from the NVM beginning
* @offset: offset in words from module start
* @words: number of words to write
* @data: buffer with words to write to the Shadow RAM
* @last_command: tells the AdminQ that this is the last command
*
* Writes a 16 bit words buffer to the Shadow RAM using the admin command.
**/
static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				     u32 offset, u16 words, void *data,
				     bool last_command)
{
	/* default to failure: only the validated path below overwrites this */
	i40e_status ret_code = I40E_ERR_NVM;
	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to 4KB (one sector), in one AQ write */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write fail error: tried to write %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
			   offset, words);
	else
		/* all checks passed: issue the AQ write (sizes in bytes) */
		ret_code = i40e_aq_update_nvm(hw, module_pointer,
					      2 * offset, /*bytes*/
					      2 * words,  /*bytes*/
					      data, last_command, NULL);
	return ret_code;
}
/**
* i40e_calc_nvm_checksum - Calculates and returns the checksum
* @hw: pointer to hardware structure
* @checksum: pointer to the checksum
*
* This function calculates SW Checksum that covers the whole 64kB shadow RAM
* except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
* is customer specific and unknown. Therefore, this function skips all maximum
* possible size of VPD (1kB).
**/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
						    u16 *checksum)
{
	i40e_status ret_code = 0;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;
	/* sector-sized scratch buffer so the SR is read one sector at a time */
	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;
	/* read pointer to VPD area */
	ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}
	/* read pointer to PCIe Alt Auto-load module */
	ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
				      &pcie_alt_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}
	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page (refill the buffer at each sector boundary) */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
			ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}
		/* Skip Checksum word (it must not checksum itself) */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}
	/* stored form: BASE minus the running sum (u16 wraparound intended) */
	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}
/**
* i40e_update_nvm_checksum - Updates the NVM checksum
* @hw: pointer to hardware structure
*
* NVM ownership must be acquired before calling this function and released
* on ARQ completion event reception by caller.
* This function will commit SR to NVM.
**/
/* Recompute the SW checksum over the Shadow RAM and write it to the
 * checksum word via the admin queue (committing SR to NVM). The caller
 * must already hold NVM ownership. */
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
{
	u16 checksum;
	i40e_status status;
	status = i40e_calc_nvm_checksum(hw, &checksum);
	if (status)
		return status;
	return i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
				 1, &checksum, true);
}
/**
* i40e_validate_nvm_checksum - Validate EEPROM checksum
* @hw: pointer to hardware structure
* @checksum: calculated checksum
*
* Performs checksum calculation and validates the NVM SW checksum. If the
* caller does not need checksum, the value can be NULL.
**/
/* Recompute the SW checksum and compare it with the one stored in the
 * Shadow RAM. Returns I40E_ERR_NVM_CHECKSUM on mismatch (or the
 * calculation error). If 'checksum' is non-NULL it receives the freshly
 * calculated value. The stored word is read with i40e_read_nvm_word()
 * directly so the synchronization semaphore is not taken a second time. */
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
						 u16 *checksum)
{
	u16 checksum_local = 0;
	u16 checksum_sr = 0;
	i40e_status status;
	status = i40e_calc_nvm_checksum(hw, &checksum_local);
	if (status)
		return status;
	i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
	if (checksum_local != checksum_sr)
		status = I40E_ERR_NVM_CHECKSUM;
	if (checksum)
		*checksum = checksum_local;
	return status;
}
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno);
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno);
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
int *errno);
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
int *errno);
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno);
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno);
/* Extract the module-pointer field from an NVM access config word. */
static inline u8 i40e_nvmupd_get_module(u32 val)
{
	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}
/* Extract the transaction-type field from an NVM access config word. */
static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
/* Printable names for NVM update commands, indexed by enum i40e_nvmupd_cmd
 * and used only in debug messages.
 * NOTE(review): entry order must mirror the enum declaration -- confirm
 * when adding commands. */
static char *i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
};
/**
* i40e_nvmupd_command - Process an NVM update command
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command
* @bytes: pointer to the data buffer
* @errno: pointer to return error code
*
* Dispatches command depending on what update state is current
**/
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
				struct i40e_nvm_access *cmd,
				u8 *bytes, int *errno)
{
	i40e_status status;
	/* assume success */
	*errno = 0;
	/* dispatch on the update state machine's current state */
	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, errno);
		break;
	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, errno);
		break;
	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, errno);
		break;
	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*errno = -ESRCH;
		break;
	}
	return status;
}
/**
* i40e_nvmupd_state_init - Handle NVM update state Init
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
* @errno: pointer to return error code
*
* Process legitimate commands of the Init state and conditionally set next
* state. Reject all other commands.
**/
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *errno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		/* stand-alone read: acquire, read, release in one step */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*errno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
			i40e_release_nvm(hw);
		}
		break;
	case I40E_NVMUPD_READ_SNT:
		/* start of a multi-part read: keep ownership, move to READING */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*errno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;
	case I40E_NVMUPD_WRITE_ERA:
		/* erase: release deferred to the ARQ completion event */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*errno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, errno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->aq.nvm_release_on_done = true;
		}
		break;
	case I40E_NVMUPD_WRITE_SA:
		/* stand-alone write: release deferred to ARQ completion */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*errno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->aq.nvm_release_on_done = true;
		}
		break;
	case I40E_NVMUPD_WRITE_SNT:
		/* start of a multi-part write: keep ownership, move to WRITING */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*errno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
		}
		break;
	case I40E_NVMUPD_CSUM_SA:
		/* stand-alone checksum update; -EIO when the AQ gave no code */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*errno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*errno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->aq.nvm_release_on_done = true;
			}
		}
		break;
	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*errno = -ESRCH;
		break;
	}
	return status;
}
/**
* i40e_nvmupd_state_reading - Handle NVM update state Reading
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
* @errno: pointer to return error code
*
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands.
**/
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *errno)
{
	i40e_status status;
	enum i40e_nvmupd_cmd upd_cmd;
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
	case I40E_NVMUPD_READ_CON:
		/* continue the multi-part read; ownership stays held */
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
		break;
	case I40E_NVMUPD_READ_LCB:
		/* last chunk: read, then release ownership and go back to INIT */
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
		i40e_release_nvm(hw);
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;
	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in reading state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*errno = -ESRCH;
		break;
	}
	return status;
}
/**
* i40e_nvmupd_state_writing - Handle NVM update state Writing
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
* @errno: pointer to return error code
*
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands
**/
/* Handle the Writing state of the NVM update state machine.  NVM write
 * ownership is already held; dispatch write/checksum commands, arm the
 * deferred NVM release on "last command in batch" variants, and retry
 * once if the write semaphore timed out (EBUSY from the admin queue).
 */
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *errno)
{
	i40e_status status;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = false;	/* at most one EBUSY retry */

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		/* continued write: remain in the Writing state */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
		break;

	case I40E_NVMUPD_WRITE_LCB:
		/* last write in batch: on success the NVM is released later
		 * by the admin queue completion (nvm_release_on_done); either
		 * way the state machine returns to Init
		 */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
		if (!status)
			hw->aq.nvm_release_on_done = true;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	case I40E_NVMUPD_CSUM_CON:
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			/* translate the AQ error when there is one; -EIO if
			 * the AQ reported success but the call still failed
			 */
			*errno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		/* checksum + last command: arm deferred release on success,
		 * return to Init regardless
		 */
		status = i40e_update_nvm_checksum(hw);
		if (status)
			*errno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
		else
			hw->aq.nvm_release_on_done = true;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*errno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore. If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		i40e_status old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				/* reacquire failed: restore the original
				 * error so the caller sees the real cause
				 */
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = true;
				goto retry;
			}
		}
	}

	return status;
}
/**
* i40e_nvmupd_validate_command - Validate given command
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @errno: pointer to return error code
*
* Return one of the valid command types or I40E_NVMUPD_INVALID
**/
/* Map the user-supplied (command, transaction) pair onto an internal
 * i40e_nvmupd_cmd value.  Returns I40E_NVMUPD_INVALID and sets *errno
 * to -EFAULT for an out-of-range data size or an unrecognized combination.
 *
 * NOTE(review): the parameter named "errno" would shadow the C library
 * errno macro in a userspace build; harmless in-kernel — confirm if this
 * file is ever compiled against libc headers.
 */
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
							 struct i40e_nvm_access *cmd,
							 int *errno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	u8 transaction;

	/* anything that doesn't match a recognized case is an error */
	upd_cmd = I40E_NVMUPD_INVALID;
	transaction = i40e_nvmupd_get_transaction(cmd->config);

	/* limits on data size */
	if ((cmd->data_size < 1) ||
	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command data_size %d\n",
			   cmd->data_size);
		*errno = -EFAULT;
		return I40E_NVMUPD_INVALID;
	}

	switch (cmd->command) {
	case I40E_NVM_READ:
		/* read variants differ only by batch position */
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_READ_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_READ_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_READ_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_READ_SA;
			break;
		}
		break;

	case I40E_NVM_WRITE:
		/* writes also cover erase and checksum requests */
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_WRITE_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_WRITE_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_WRITE_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_WRITE_SA;
			break;
		case I40E_NVM_ERA:
			upd_cmd = I40E_NVMUPD_WRITE_ERA;
			break;
		case I40E_NVM_CSUM:
			upd_cmd = I40E_NVMUPD_CSUM_CON;
			break;
		case (I40E_NVM_CSUM|I40E_NVM_SA):
			upd_cmd = I40E_NVMUPD_CSUM_SA;
			break;
		case (I40E_NVM_CSUM|I40E_NVM_LCB):
			upd_cmd = I40E_NVMUPD_CSUM_LCB;
			break;
		}
		break;
	}

	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->aq.nvm_release_on_done);

	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*errno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *errno);
	}

	return upd_cmd;
}
/**
* i40e_nvmupd_nvm_read - Read NVM
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
* @errno: pointer to return error code
*
* cmd structure contains identifiers and data buffer
**/
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
					struct i40e_nvm_access *cmd,
					u8 *bytes, int *errno)
{
	u8 txn = i40e_nvmupd_get_transaction(cmd->config);
	u8 mod = i40e_nvmupd_get_module(cmd->config);
	/* LCB and SA transactions both terminate the AQ batch */
	bool is_last = (txn == I40E_NVM_LCB) || (txn == I40E_NVM_SA);
	i40e_status ret;

	ret = i40e_aq_read_nvm(hw, mod, cmd->offset, (u16)cmd->data_size,
			       bytes, is_last, NULL);
	if (!ret)
		return 0;

	/* log the failing request and translate the AQ error for userspace */
	i40e_debug(hw, I40E_DEBUG_NVM,
		   "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
		   mod, cmd->offset, cmd->data_size);
	i40e_debug(hw, I40E_DEBUG_NVM,
		   "i40e_nvmupd_nvm_read status %d aq %d\n",
		   ret, hw->aq.asq_last_status);
	*errno = i40e_aq_rc_to_posix(ret, hw->aq.asq_last_status);
	return ret;
}
/**
* i40e_nvmupd_nvm_erase - Erase an NVM module
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @errno: pointer to return error code
*
* module, offset, data_size and data are in cmd structure
**/
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 int *errno)
{
	u8 txn = i40e_nvmupd_get_transaction(cmd->config);
	u8 mod = i40e_nvmupd_get_module(cmd->config);
	/* only the LCB bit marks the end of the batch here */
	bool is_last = (txn & I40E_NVM_LCB);
	i40e_status ret;

	ret = i40e_aq_erase_nvm(hw, mod, cmd->offset, (u16)cmd->data_size,
				is_last, NULL);
	if (!ret)
		return 0;

	/* log the failing request and translate the AQ error for userspace */
	i40e_debug(hw, I40E_DEBUG_NVM,
		   "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
		   mod, cmd->offset, cmd->data_size);
	i40e_debug(hw, I40E_DEBUG_NVM,
		   "i40e_nvmupd_nvm_erase status %d aq %d\n",
		   ret, hw->aq.asq_last_status);
	*errno = i40e_aq_rc_to_posix(ret, hw->aq.asq_last_status);
	return ret;
}
/**
* i40e_nvmupd_nvm_write - Write NVM
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
* @errno: pointer to return error code
*
* module, offset, data_size and data are in cmd structure
**/
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 u8 *bytes, int *errno)
{
	u8 txn = i40e_nvmupd_get_transaction(cmd->config);
	u8 mod = i40e_nvmupd_get_module(cmd->config);
	/* only the LCB bit marks the end of the batch here */
	bool is_last = (txn & I40E_NVM_LCB);
	i40e_status ret;

	ret = i40e_aq_update_nvm(hw, mod, cmd->offset,
				 (u16)cmd->data_size, bytes, is_last, NULL);
	if (!ret)
		return 0;

	/* log the failing request and translate the AQ error for userspace */
	i40e_debug(hw, I40E_DEBUG_NVM,
		   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
		   mod, cmd->offset, cmd->data_size);
	i40e_debug(hw, I40E_DEBUG_NVM,
		   "i40e_nvmupd_nvm_write status %d aq %d\n",
		   ret, hw->aq.asq_last_status);
	*errno = i40e_aq_rc_to_posix(ret, hw->aq.asq_last_status);
	return ret;
}
|
34044.c | /*
* Status monitor for Canon CAPT Printer.
* Copyright CANON INC. 2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "uimain.h"
#include "widgets.h"
#include "interface.h"
#include "devdlg.h"
#include "data_process.h"
#define SET_FLASH_DATA_SIZE 7
void UpdateDevDlgWidgets(UIStatusWnd *wnd);
/* Allocate the device-settings dialog object and attach its GTK window. */
UIDevDlg* CreateDevDlg(UIDialog *parent)
{
	UIDevDlg *dlg = (UIDevDlg *)CreateDialog(sizeof(UIDevDlg), parent);

	UI_DIALOG(dlg)->window = create_DevS_dialog();
	return dlg;
}
/* Read the LBP3600 device-settings response one byte at a time into the
 * dialog state.  The field order below mirrors the order of bytes in the
 * printer's response and must not be changed.  A failed read leaves the
 * corresponding field (and `value`) at its previous content; the function
 * always reports success.
 */
static int GetDevDlgLBP3600(UIStatusWnd *wnd)
{
	char value = 0;
	if(cnsktGetResData(wnd->pCnskt, &value, READ_TYPE_BYTE, -1) == 0)
		wnd->dev_dlg->printer_flag = (int)value;
	if(cnsktGetResData(wnd->pCnskt, &value, READ_TYPE_BYTE, -1) == 0)
		wnd->dev_dlg->num_cassette = (int)value;
	if(cnsktGetResData(wnd->pCnskt, &value, READ_TYPE_BYTE, -1) == 0)
		wnd->dev_dlg->size_tray = (int)value;
	if(cnsktGetResData(wnd->pCnskt, &value, READ_TYPE_BYTE, -1) == 0)
		wnd->dev_dlg->size_cas1 = (int)value;
	if(cnsktGetResData(wnd->pCnskt, &value, READ_TYPE_BYTE, -1) == 0)
		wnd->dev_dlg->size_cas2 = (int)value;
	if(cnsktGetResData(wnd->pCnskt, &value, READ_TYPE_BYTE, -1) == 0)
		wnd->dev_dlg->size_cas3 = (int)value;
	if(cnsktGetResData(wnd->pCnskt, &value, READ_TYPE_BYTE, -1) == 0)
		wnd->dev_dlg->size_cas4 = (int)value;
	if(cnsktGetResData(wnd->pCnskt, &value, READ_TYPE_BYTE, -1) == 0)
		wnd->dev_dlg->user_flag = (int)value;
	if(cnsktGetResData(wnd->pCnskt, &value, READ_TYPE_BYTE, -1) == 0)
		wnd->dev_dlg->mask_tray = (int)value;
	if(cnsktGetResData(wnd->pCnskt, &value, READ_TYPE_BYTE, -1) == 0)
		wnd->dev_dlg->mask_cas1 = (int)value;
	if(cnsktGetResData(wnd->pCnskt, &value, READ_TYPE_BYTE, -1) == 0)
		wnd->dev_dlg->mask_cas2 = (int)value;
	if(cnsktGetResData(wnd->pCnskt, &value, READ_TYPE_BYTE, -1) == 0)
		wnd->dev_dlg->mask_cas3 = (int)value;
	if(cnsktGetResData(wnd->pCnskt, &value, READ_TYPE_BYTE, -1) == 0)
		wnd->dev_dlg->mask_cas4 = (int)value;
	if(cnsktGetResData(wnd->pCnskt, &value, READ_TYPE_BYTE, -1) == 0)
		wnd->dev_dlg->mask_dplx = (int)value;
	/* always reports success; callers cannot distinguish partial reads */
	return 0;
}
/* Fetch the current device settings from the printer, populate the dialog
 * widgets, and show the dialog.  Widget updates run between SigDisable()
 * and SigEnable() so programmatic changes do not fire the dialog's own
 * signal handlers. */
void ShowDevDlg(UIStatusWnd *wnd)
{
	if(GetDevDlgLBP3600(wnd))	/* currently always returns 0 */
		return;
	SigDisable();
	UpdateDevDlgWidgets(wnd);
	SigEnable();
	gtk_widget_show(UI_DIALOG(wnd->dev_dlg)->window);
}
/* Hide the device dialog window and clear the dialog-visible flag. */
void HideDevDlg(UIStatusWnd *wnd)
{
	GtkWidget *window = UI_DIALOG(wnd->dev_dlg)->window;

	gtk_widget_hide(window);
	wnd->dev = FALSE;
}
/* "OK" button handler: collect the user-mode checkboxes and the spin-button
 * mask values from the dialog, then push them to the printer as a
 * fixed-size flash-write request (length byte followed by 7 data bytes —
 * see SET_FLASH_DATA_SIZE).  Any send failure silently abandons the
 * request; the dialog is hidden in every case. */
void DevDlgOK(UIStatusWnd *wnd)
{
	GtkWidget *window = UI_DIALOG(wnd->dev_dlg)->window;
	int active = 0;
	int user_flag = 0;
	/* assemble the user-mode flag byte from the three checkboxes */
	active = GetToggleButtonActive(window, "DevDlg_2Mode_checkbutton");
	if(active)
		user_flag |= USER_MODE_FLAGS_DUPLEX;
	active = GetToggleButtonActive(window, "DevDlg_GraMode_checkbutton");
	if(active)
		user_flag |= USER_MODE_FLAGS_GRAPHIC;
	active = GetToggleButtonActive(window, "DevDlg_FlkMode_checkbutton");
	if(active)
		user_flag |= USER_MODE_FLAGS_FLICKER;
	wnd->dev_dlg->user_flag = user_flag;
	/* spin buttons are scaled by 10 (UI shows tenths — see
	 * UpdateDevDlgWidgets, which divides by 10.0 on display) */
	wnd->dev_dlg->mask_tray
		= GetSpinButtonValue(window, "DevDlg_MltT_spinbutton", 10);
	wnd->dev_dlg->mask_cas1
		= GetSpinButtonValue(window, "DevDlg_Cas1_spinbutton", 10);
	wnd->dev_dlg->mask_cas2
		= GetSpinButtonValue(window, "DevDlg_Cas2_spinbutton", 10);
	wnd->dev_dlg->mask_cas3
		= GetSpinButtonValue(window, "DevDlg_Cas3_spinbutton", 10);
	wnd->dev_dlg->mask_cas4
		= GetSpinButtonValue(window, "DevDlg_Cas4_spinbutton", 10);
	wnd->dev_dlg->mask_dplx
		= GetSpinButtonValue(window, "DevDlg_Dplx_spinbutton", 10);
	/* send length header then the 7 payload bytes; abort on first error */
	if(cnsktSetReqLong(wnd->pCnskt, SET_FLASH_DATA_SIZE) < 0)
		goto err;
	if(cnsktSetReqByte(wnd->pCnskt, user_flag) < 0)
		goto err;
	if(cnsktSetReqByte(wnd->pCnskt, wnd->dev_dlg->mask_tray) < 0)
		goto err;
	if(cnsktSetReqByte(wnd->pCnskt, wnd->dev_dlg->mask_cas1) < 0)
		goto err;
	if(cnsktSetReqByte(wnd->pCnskt, wnd->dev_dlg->mask_cas2) < 0)
		goto err;
	if(cnsktSetReqByte(wnd->pCnskt, wnd->dev_dlg->mask_cas3) < 0)
		goto err;
	if(cnsktSetReqByte(wnd->pCnskt, wnd->dev_dlg->mask_cas4) < 0)
		goto err;
	if(cnsktSetReqByte(wnd->pCnskt, wnd->dev_dlg->mask_dplx) < 0)
		goto err;
	UpdateJob(CCPD_REQ_SET_FLASH);
err:
	HideDevDlg(wnd);
	return;
}
/* "Cancel" button handler: discard any edits and close the dialog. */
void DevDlgCancel(UIStatusWnd *wnd)
{
	HideDevDlg(wnd);
}
/* Cassette rows of the device dialog, indexed by cassette number and
 * NULL-terminated.  UpdateDevDlgWidgets() desensitizes the rows beyond
 * the printer's reported cassette count. */
const char *hbox_widget_table[] = {
	"DevDlg_Cas1_hbox",
	"DevDlg_Cas2_hbox",
	"DevDlg_Cas3_hbox",
	"DevDlg_Cas4_hbox",
	NULL,
};
/* Push the cached device settings into the dialog widgets: enable only the
 * installed cassette rows, load the mask values into the spin buttons
 * (stored as tenths, hence the /10.0), enable the duplex row only when the
 * printer reports duplex hardware, and reflect the user-mode flag bits in
 * the three checkboxes.  Callers are expected to have signals disabled. */
void UpdateDevDlgWidgets(UIStatusWnd *wnd)
{
	GtkWidget *window = UI_DIALOG(wnd->dev_dlg)->window;
	int dplx, cas, i = 0, sensi = 0;
	int usr_flag;
	int check;
	float value;
	/* only the first num_cassette rows are editable */
	cas = wnd->dev_dlg->num_cassette;
	while(hbox_widget_table[i] != NULL){
		sensi = (i < cas) ? TRUE : FALSE;
		SetWidgetSensitive(window, (char *)hbox_widget_table[i], sensi);
		i++;
	}
	/* mask values are stored as integer tenths; display as floats */
	value = (float)wnd->dev_dlg->mask_tray / 10.0;
	SetSpinButtonFloat(window, "DevDlg_MltT_spinbutton", value);
	value = (float)wnd->dev_dlg->mask_cas1 / 10.0;
	SetSpinButtonFloat(window, "DevDlg_Cas1_spinbutton", value);
	value = (float)wnd->dev_dlg->mask_cas2 / 10.0;
	SetSpinButtonFloat(window, "DevDlg_Cas2_spinbutton", value);
	value = (float)wnd->dev_dlg->mask_cas3 / 10.0;
	SetSpinButtonFloat(window, "DevDlg_Cas3_spinbutton", value);
	value = (float)wnd->dev_dlg->mask_cas4 / 10.0;
	SetSpinButtonFloat(window, "DevDlg_Cas4_spinbutton", value);
	value = (float)wnd->dev_dlg->mask_dplx / 10.0;
	SetSpinButtonFloat(window, "DevDlg_Dplx_spinbutton", value);
	/* duplex row is meaningful only when the hardware supports it */
	dplx = wnd->dev_dlg->printer_flag;
	sensi = (dplx & PRINTER_FLAGS_SB_DUPLEX) ? TRUE : FALSE;
	SetWidgetSensitive(window, "DevDlg_Dplx_hbox", sensi);
	/* reflect the user-mode flag bits in the checkboxes */
	usr_flag = wnd->dev_dlg->user_flag;
	check = (usr_flag & USER_MODE_FLAGS_DUPLEX) ? TRUE : FALSE;
	SetActiveCheckButton(window, "DevDlg_2Mode_checkbutton", check);
	check = (usr_flag & USER_MODE_FLAGS_GRAPHIC) ? TRUE : FALSE;
	SetActiveCheckButton(window, "DevDlg_GraMode_checkbutton", check);
	check = (usr_flag & USER_MODE_FLAGS_FLICKER) ? TRUE : FALSE;
	SetActiveCheckButton(window, "DevDlg_FlkMode_checkbutton", check);
}
|
543034.c | /******************************************************************************/
/* */
/* Copyright (c) International Business Machines Corp., 2006 */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See */
/* the GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/* */
/******************************************************************************/
/*
* File:
* ns-igmp_querier.c
*
* Description:
* This utiltity sends IGMP queries.
* (General Query, Multicast Address Specific Query
* or Multicast Address and Source Specific Query)
*
* Author:
* Mitsuru Chinen <[email protected]>
*
* History:
* Apr 24 2006 - Created (Mitsuru Chinen)
*---------------------------------------------------------------------------*/
/*
* Header Files
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <netdb.h>
#include <signal.h>
#include <time.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <netinet/igmp.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include "ns-mcast.h"
#include "ns-traffic.h"
/*
* Structure Definitions
*/
/* Everything needed to send one stream of IGMP queries. */
struct igmp_info {
	uint32_t ifindex;		/* outgoing interface index */
	struct igmpv3_query *query;	/* heap-allocated query to send */
	double timeout;			/* stop after this many seconds;
					 * negative means "send only once" */
	struct timespec interval;	/* pause between two queries */
};
/*
* Gloval variables
*/
char *program_name;		/* program name (strdup of argv[0]) */
struct sigaction handler;	/* Behavior for a signal */
int catch_sighup;		/* When catch the SIGHUP, set to non-zero */
/*
* Function: usage()
*
* Descripton:
* Print the usage of this program. Then, terminate this program with
* the specified exit value.
*
* Argument:
* exit_value: exit value
*
* Return value:
* This function does not return.
*/
/* Print usage on stdout (or stderr for a failure exit) and terminate the
 * process with exit_value.  Never returns. */
void usage(char *program_name, int exit_value)
{
	FILE *stream = (exit_value == EXIT_FAILURE) ? stderr : stdout;

	fprintf(stream, "%s [OPTION]\n"
		"\t-I ifname\tname of listening interface\n"
		"\t-m addr\tmulticast address\n"
		"\t-s addrs\tcomma separated array of Source Addresses\n"
		"\t-r value\tMax Resp Code\n"
		"\t-i value\tinterval [nanosec]\n"
		"\t-t value\ttimeout [sec]\n"
		"\t-o\t\tsend only one query\n"
		"\t-b\t\twork in the background\n"
		"\t-d\t\tdisplay debug informations\n"
		"\t-h\t\tdisplay this usage\n", program_name);
	exit(exit_value);
}
/*
* Function: set_signal_flag()
*
* Description:
* This function sets global variables accordig to signal
*
* Argument:
* type: type of signal
*
* Return value:
* None
*/
/* Signal handler: record SIGHUP in the global catch_sighup flag and switch
 * the disposition to SIG_IGN so further SIGHUPs are ignored; any other
 * signal is treated as a programming error and aborts the process. */
void set_signal_flag(int type)
{
	if (debug)
		fprintf(stderr, "Catch signal. type is %d\n", type);
	switch (type) {
	case SIGHUP:
		catch_sighup = 1;
		/* ignore subsequent SIGHUPs; the main loop polls the flag */
		handler.sa_handler = SIG_IGN;
		if (sigaction(type, &handler, NULL) < 0)
			fatal_error("sigaction()");
		break;
	default:
		fprintf(stderr, "Unexpected signal (%d) is caught\n", type);
		exit(EXIT_FAILURE);
	}
}
/*
* Function: create_query()
*
* Description:
* This function create a igmpv3 query information.
* This function allocates memory to store the information.
*
* Argument:
* code: Max Resp Code
* maddr: multicast address
* saddrs: comma separated array of the source addresses
*
* Return value:
* pointer to allocated igmpv3_query structure
*/
/*
 * Build an IGMPv3 query from a Max Resp Code, an optional multicast
 * address, and an optional comma-separated list of source addresses.
 * The returned structure is heap-allocated; the caller owns it.
 * Returns NULL (after printing a diagnostic) on malformed addresses.
 * NOTE: saddrs is modified in place (commas are replaced with NULs).
 */
struct igmpv3_query *create_query(uint8_t code, char *maddr, char *saddrs)
{
	struct igmpv3_query *query; /* pointer to igmpv3_query structure */
	uint16_t numsrc;	    /* number of source address */
	size_t query_size;	    /* size of igmpv3_query */
	struct in_addr ip;
	uint32_t idx;
	char *sp, *ep;

	/* calculate the number of source address */
	if (saddrs == NULL) {
		numsrc = 0;
	} else {
		numsrc = 1;
		for (sp = saddrs; *sp != '\0'; sp++)
			if (*sp == ',')
				numsrc++;
	}
	if (debug)
		fprintf(stderr, "number of source address is %u\n", numsrc);

	/* allocate memory for igmpv3_query structure */
	query_size = MY_IGMPV3_QUERY_SIZE(numsrc);
	query = (struct igmpv3_query *)calloc(1, query_size);
	if (query == NULL)
		fatal_error("calloc()");

	/* substitute paramaters */
	query->type = IGMP_HOST_MEMBERSHIP_QUERY;
	query->code = code;
	query->csum = 0;	/* Calculate later */
	query->resv = 0;
	query->suppress = 0;
	query->qrv = 0;
	query->qqic = 0;
	query->nsrcs = htons(numsrc);

	/* substitute multicast address */
	if (maddr == NULL) {
		query->group = htonl(INADDR_ANY);
	} else {
		if (inet_pton(AF_INET, maddr, &ip) <= 0) {
			fprintf(stderr,
				"multicast address is something wrong\n");
			free(query);	/* BUGFIX: query leaked here */
			return NULL;
		}
		query->group = ip.s_addr;
	}

	/* substitute source addresses */
	sp = saddrs;
	for (idx = 0; idx < numsrc; idx++) {
		ep = strchr(sp, ',');
		if (ep != NULL)
			*ep = '\0';
		if (debug)
			fprintf(stderr, "source address[%u]: %s\n", idx, sp);
		if (inet_pton(AF_INET, sp, &ip) <= 0) {
			fprintf(stderr,
				"source address list is something wrong\n");
			free(query);	/* BUGFIX: query leaked here */
			return NULL;
		}
		query->srcs[idx] = ip.s_addr;
		sp = ep + 1;
	}

	/* Calculate checksum */
	query->csum = calc_checksum((u_int16_t *) query, query_size);
	return query;
}
/*
* Function: parse_options()
*
* Description:
* This function parse the options
*
* Argument:
* argc: the number of argument
* argv: arguments
* info_p: pointer to data of querier information
* bg_p: pointer to the flag of working in backgrond
*
* Return value:
* None
*/
/* Parse command-line options into *info_p and *bg_p, then build the query
 * structure.  Exits via usage() on any invalid option; the -I interface is
 * mandatory.  maddr/saddrs are temporary copies, freed after create_query()
 * has converted them to binary form. */
void parse_options(int argc, char *argv[], struct igmp_info *info_p, int *bg_p)
{
	int optc;		/* option */
	unsigned long opt_ul;	/* option value in unsigned long */
	double opt_d;		/* option value in double */
	uint8_t max_resp;	/* Max Resp Code */
	char *maddr;		/* multicast address */
	char *saddrs;		/* comma separated array of source addresses */
	max_resp = IGMP_MAX_HOST_REPORT_DELAY;
	maddr = NULL;
	saddrs = NULL;
	while ((optc = getopt(argc, argv, "I:m:s:r:t:i:obdh")) != EOF) {
		switch (optc) {
		case 'I':
			info_p->ifindex = if_nametoindex(optarg);
			if (info_p->ifindex == 0) {
				fprintf(stderr,
					"specified interface is incorrect\n");
				usage(program_name, EXIT_FAILURE);
			}
			break;
		case 'm':
			maddr = strdup(optarg);
			if (maddr == NULL)
				fatal_error("strdup()");
			break;
		case 's':
			saddrs = strdup(optarg);
			if (saddrs == NULL)
				fatal_error("strdup()");
			break;
		case 'r':
			/* Max Resp Code must fit a single byte */
			opt_ul = strtoul(optarg, NULL, 0);
			if (opt_ul > 255) {
				fprintf(stderr,
					"Max Resp Code should be less then 256\n");
				usage(program_name, EXIT_FAILURE);
			}
			max_resp = opt_ul;
			break;
		case 't':
			opt_d = strtod(optarg, NULL);
			if (opt_d < 0.0) {
				fprintf(stderr,
					"Timeout should be positive value\n");
				usage(program_name, EXIT_FAILURE);
			}
			info_p->timeout = opt_d;
			break;
		case 'i':
			if (strtotimespec(optarg, &info_p->interval)) {
				fprintf(stderr,
					"Interval is something wrong\n");
				usage(program_name, EXIT_FAILURE);
			}
			break;
		case 'o':
			/* negative timeout means "send only one query" */
			info_p->timeout = -1.0;
			break;
		case 'b':
			*bg_p = 1;
			break;
		case 'd':
			debug = 1;
			break;
		case 'h':
			usage(program_name, EXIT_SUCCESS);
			break;
		default:
			usage(program_name, EXIT_FAILURE);
		}
	}
	/* -I is mandatory: without it there is no outgoing interface */
	if (info_p->ifindex == 0) {
		fprintf(stderr, "specified interface seems incorrect\n");
		usage(program_name, EXIT_FAILURE);
	}
	if ((info_p->query = create_query(max_resp, maddr, saddrs)) == NULL)
		usage(program_name, EXIT_FAILURE);
	free(maddr);
	free(saddrs);
}
/*
* Function: create_socket()
*
* Description:
* This function creates a socket to send
*
* Argument:
* info_p: pointer to data of igmp query information
*
* Return value:
* file descriptor referencing the socket
*/
/* Open a raw IGMP socket configured for sending: SO_REUSEADDR set, the
 * IP Router Alert option attached (required for IGMP queries), and the
 * outgoing interface pinned via IP_MULTICAST_IF.  Aborts the process on
 * any failure; returns the socket descriptor.  Requires CAP_NET_RAW. */
int create_socket(struct igmp_info *info_p)
{
	int sd;			/* socket file descriptor */
	int on;
	/* IP Router Alert option, RFC 2113: 0x94 0x04 0x00 0x00 */
	unsigned char opt[4] = { 0x94, 0x04, 0x00, 0x00 };	/* Router Alert */
	struct ip_mreqn mcast_req, *req_p = &mcast_req;
	/* Create a socket */
	sd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
	if (sd < 0)
		fatal_error("socket()");
	/* Enable to reuse the socket */
	on = 1;
	if (setsockopt(sd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(int)))
		fatal_error("setsockopt(): enable to reuse the socket");
	/* Add router alert option */
	if (setsockopt(sd, IPPROTO_IP, IP_OPTIONS, opt, sizeof(opt)))
		fatal_error("setsockopt(): socket options");
	/* Specify the interface for outgoing datagrams */
	req_p->imr_multiaddr.s_addr = info_p->query->group;
	req_p->imr_address.s_addr = htonl(INADDR_ANY);
	req_p->imr_ifindex = info_p->ifindex;
	if (setsockopt(sd, IPPROTO_IP, IP_MULTICAST_IF,
		       req_p, sizeof(struct ip_mreqn))) {
		fatal_error("setsockopt(): specify the interface");
	}
	return sd;
}
/*
* Function: send_query()
*
* Description:
* This function sends IGMP query
*
* Argument:
* info_p: pointer to data of igmp query information
*
* Return value:
* None
*/
/*
 * Send IGMP queries in a loop until the timeout expires or SIGHUP is
 * caught.  A negative timeout sends exactly one query; zero means no
 * timeout.  ENOBUFS from sendto() is retried after a one-second pause.
 */
void send_query(struct igmp_info *info_p)
{
	int sd;
	ssize_t retval;
	double start_time;
	struct sockaddr_in to;
	size_t query_size;

	/* Set singal hander for SIGHUP */
	handler.sa_handler = set_signal_flag;
	handler.sa_flags = 0;
	if (sigfillset(&handler.sa_mask) < 0)
		fatal_error("sigfillset()");
	if (sigaction(SIGHUP, &handler, NULL) < 0)
		fatal_error("sigaction()");

	/* Specify multicast address to send.
	 * BUGFIX: the structure was previously passed to sendto() with
	 * uninitialized padding (sin_zero); zero it first. */
	memset(&to, '\0', sizeof(to));
	to.sin_family = AF_INET;
	to.sin_port = IPPROTO_IGMP;	/* ignored on a raw socket */
	if (info_p->query->group == htonl(INADDR_ANY))
		to.sin_addr.s_addr = IGMP_ALL_HOSTS;
	else
		to.sin_addr.s_addr = info_p->query->group;

	/* Create a socket */
	sd = create_socket(info_p);

	/* loop for sending queries */
	start_time = time(NULL);
	query_size = MY_IGMPV3_QUERY_SIZE(ntohs(info_p->query->nsrcs));
	if (debug)
		fprintf(stderr, "query size is %zu\n", query_size);

	for (;;) {
		retval = sendto(sd, info_p->query, query_size, 0,
				(struct sockaddr *)&to,
				sizeof(struct sockaddr_in));
		/* BUGFIX: compare signed result explicitly instead of
		 * relying on -1 wrapping around in an unsigned compare */
		if (retval < 0 || (size_t)retval != query_size) {
			if (errno == ENOBUFS) {
				sleep(1);
				continue;
			}
			if (catch_sighup)
				break;
			else
				fatal_error("sendto()");
		}

		/* Check timeout:
		   If timeout value is negative only send one datagram */
		if (info_p->timeout)
			if (info_p->timeout < difftime(time(NULL), start_time))
				break;

		/* Wait in specified interval */
		nanosleep(&info_p->interval, NULL);

		/* catch SIGHUP */
		if (catch_sighup)
			break;
	}

	close(sd);
}
/*
*
* Function: main()
*
*/
/* Program entry: parse options, optionally daemonize, then send queries
 * until timeout or SIGHUP. */
int main(int argc, char *argv[])
{
	struct igmp_info mcast_rcv;
	int background = 0;
	debug = 0;
	program_name = strdup(argv[0]);
	/* zero-init so unset options (ifindex, timeout, interval) are 0 */
	memset(&mcast_rcv, '\0', sizeof(struct igmp_info));
	parse_options(argc, argv, &mcast_rcv, &background);
	if (background)		/* Work in the background */
		if (daemon(0, 0) < 0)
			fatal_error("daemon()");
	send_query(&mcast_rcv);
	exit(EXIT_SUCCESS);
}
|
564772.c | #include<stdio.h>
/* McCarthy 91 function: yields n - 10 for n > 100, and 91 for any
 * n <= 100.  Implemented iteratively with a counter that tracks the
 * number of outstanding recursive applications of the classic
 * definition f(n) = n > 100 ? n - 10 : f(f(n + 11)). */
int func(int n)
{
	int pending = 1;	/* outstanding applications of f */

	while (pending > 0) {
		if (n > 100) {
			n -= 10;
			pending--;
		} else {
			n += 11;
			pending++;
		}
	}
	return n;
}
/* Print func(999); since 999 > 100 the McCarthy 91 function returns 989. */
int main(void)
{
	int x = func(999);

	printf("%d\n", x);
	return 0;	/* BUGFIX: main fell off the end without a return value */
}
|
13702.c | /**
* FreeRDP: A Remote Desktop Protocol Implementation
* Dynamic Virtual Channel
*
* Copyright 2010-2011 Vic Lee
* Copyright 2015 Thincast Technologies GmbH
* Copyright 2015 DI (FH) Martin Haimberger <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <winpr/crt.h>
#include <winpr/stream.h>
#include "drdynvc_main.h"
#define TAG CHANNELS_TAG("drdynvc.client")
static void dvcman_free(drdynvcPlugin* drdynvc, IWTSVirtualChannelManager* pChannelMgr);
static void dvcman_channel_free(void* channel);
static UINT drdynvc_write_data(drdynvcPlugin* drdynvc, UINT32 ChannelId, const BYTE* data,
UINT32 dataSize);
static UINT drdynvc_send(drdynvcPlugin* drdynvc, wStream* s);
/* Release a listener and the channel-name copy it owns; NULL is a no-op. */
static void dvcman_wtslistener_free(DVCMAN_LISTENER* listener)
{
	if (!listener)
		return;

	free(listener->channel_name);
	free(listener);
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Function description
 * Stub: per-listener property bags are not implemented, so the out
 * parameter is cleared and an error is returned unconditionally.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT dvcman_get_configuration(IWTSListener* pListener, void** ppPropertyBag)
{
	WINPR_UNUSED(pListener);
	*ppPropertyBag = NULL;
	return ERROR_INTERNAL_ERROR;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Function description
 * Allocate a listener for the named dynamic channel and register it with
 * the channel manager.  On success the manager's listener list owns the
 * listener (freed via the list's fnObjectFree); *ppListener, if requested,
 * receives a borrowed pointer.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT dvcman_create_listener(IWTSVirtualChannelManager* pChannelMgr,
                                   const char* pszChannelName, ULONG ulFlags,
                                   IWTSListenerCallback* pListenerCallback,
                                   IWTSListener** ppListener)
{
	DVCMAN* dvcman = (DVCMAN*)pChannelMgr;
	DVCMAN_LISTENER* listener;
	WLog_DBG(TAG, "create_listener: %d.%s.", ArrayList_Count(dvcman->listeners) + 1,
	         pszChannelName);
	listener = (DVCMAN_LISTENER*)calloc(1, sizeof(DVCMAN_LISTENER));

	if (!listener)
	{
		WLog_ERR(TAG, "calloc failed!");
		return CHANNEL_RC_NO_MEMORY;
	}

	listener->iface.GetConfiguration = dvcman_get_configuration;
	listener->iface.pInterface = NULL;
	listener->dvcman = dvcman;
	listener->channel_name = _strdup(pszChannelName);

	if (!listener->channel_name)
	{
		WLog_ERR(TAG, "_strdup failed!");
		dvcman_wtslistener_free(listener);
		return CHANNEL_RC_NO_MEMORY;
	}

	listener->flags = ulFlags;
	listener->listener_callback = pListenerCallback;

	/* BUGFIX: register before handing the pointer out, and free the
	 * listener if registration fails (it used to leak, and the caller
	 * was left holding an unregistered listener). */
	if (ArrayList_Add(dvcman->listeners, listener) < 0)
	{
		dvcman_wtslistener_free(listener);
		return ERROR_INTERNAL_ERROR;
	}

	if (ppListener)
		*ppListener = (IWTSListener*)listener;

	return CHANNEL_RC_OK;
}
/* Unregister a listener from its manager; removal from the list frees it
 * through the list's fnObjectFree hook.  NULL input is tolerated. */
static UINT dvcman_destroy_listener(IWTSVirtualChannelManager* pChannelMgr, IWTSListener* pListener)
{
	DVCMAN_LISTENER* listener = (DVCMAN_LISTENER*)pListener;

	WINPR_UNUSED(pChannelMgr);

	if (listener && listener->dvcman)
		ArrayList_Remove(listener->dvcman->listeners, listener);

	return CHANNEL_RC_OK;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Function description
 * Register a plugin under the given name; plugin_names and plugins are
 * parallel lists and must stay the same length.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT dvcman_register_plugin(IDRDYNVC_ENTRY_POINTS* pEntryPoints, const char* name,
                                   IWTSPlugin* pPlugin)
{
	DVCMAN* dvcman = ((DVCMAN_ENTRY_POINTS*)pEntryPoints)->dvcman;
	char* name_copy = _strdup(name);

	/* BUGFIX: _strdup was unchecked; a NULL entry in plugin_names would
	 * later crash the strcmp in dvcman_get_plugin */
	if (!name_copy)
		return CHANNEL_RC_NO_MEMORY;

	if (ArrayList_Add(dvcman->plugin_names, name_copy) < 0)
	{
		free(name_copy);
		return ERROR_INTERNAL_ERROR;
	}

	if (ArrayList_Add(dvcman->plugins, pPlugin) < 0)
		return ERROR_INTERNAL_ERROR;

	WLog_DBG(TAG, "register_plugin: num_plugins %d", ArrayList_Count(dvcman->plugins));
	return CHANNEL_RC_OK;
}
/* Look up a registered plugin by name.  Returns NULL when arguments are
 * invalid, the parallel name/plugin lists are out of sync, or no plugin
 * with that name is registered. */
static IWTSPlugin* dvcman_get_plugin(IDRDYNVC_ENTRY_POINTS* pEntryPoints, const char* name)
{
	IWTSPlugin* plugin = NULL;
	size_t i, nc, pc;
	DVCMAN* dvcman;

	/* BUGFIX: pEntryPoints was dereferenced before its NULL check */
	if (!pEntryPoints || !name)
		return NULL;

	dvcman = ((DVCMAN_ENTRY_POINTS*)pEntryPoints)->dvcman;

	if (!dvcman)
		return NULL;

	nc = ArrayList_Count(dvcman->plugin_names);
	pc = ArrayList_Count(dvcman->plugins);

	/* the two lists are parallel; a size mismatch means corruption */
	if (nc != pc)
		return NULL;

	ArrayList_Lock(dvcman->plugin_names);
	ArrayList_Lock(dvcman->plugins);

	for (i = 0; i < pc; i++)
	{
		const char* cur = ArrayList_GetItem(dvcman->plugin_names, i);

		if (strcmp(cur, name) == 0)
		{
			plugin = ArrayList_GetItem(dvcman->plugins, i);
			break;
		}
	}

	ArrayList_Unlock(dvcman->plugin_names);
	ArrayList_Unlock(dvcman->plugins);
	return plugin;
}
/* Return the addin arguments stored in the entry-points wrapper. */
static ADDIN_ARGV* dvcman_get_plugin_data(IDRDYNVC_ENTRY_POINTS* pEntryPoints)
{
	return ((DVCMAN_ENTRY_POINTS*)pEntryPoints)->args;
}
/* Return the rdpSettings stored in the entry-points wrapper (as void*). */
static void* dvcman_get_rdp_settings(IDRDYNVC_ENTRY_POINTS* pEntryPoints)
{
	return (void*)((DVCMAN_ENTRY_POINTS*)pEntryPoints)->settings;
}
/* Return the numeric channel id of a dynamic virtual channel. */
static UINT32 dvcman_get_channel_id(IWTSVirtualChannel* channel)
{
	DVCMAN_CHANNEL* dvc = (DVCMAN_CHANNEL*)channel;
	return dvc->channel_id;
}
/* Return the channel's name; the string is owned by the channel. */
static const char* dvcman_get_channel_name(IWTSVirtualChannel* channel)
{
	DVCMAN_CHANNEL* dvc = (DVCMAN_CHANNEL*)channel;
	return dvc->channel_name;
}
/* Linear search of the manager's channel list for the given id; returns a
 * borrowed IWTSVirtualChannel pointer or NULL.  The list is locked for the
 * duration of the scan. */
static IWTSVirtualChannel* dvcman_find_channel_by_id(IWTSVirtualChannelManager* pChannelMgr,
                                                     UINT32 ChannelId)
{
	DVCMAN* dvcman = (DVCMAN*)pChannelMgr;
	IWTSVirtualChannel* found = NULL;
	int i, count;

	ArrayList_Lock(dvcman->channels);
	count = ArrayList_Count(dvcman->channels);

	for (i = 0; i < count && !found; i++)
	{
		DVCMAN_CHANNEL* candidate = (DVCMAN_CHANNEL*)ArrayList_GetItem(dvcman->channels, i);

		if (candidate->channel_id == ChannelId)
			found = &candidate->iface;
	}

	ArrayList_Unlock(dvcman->channels);
	return found;
}
/* wObject fnObjectFree adapter: invoke a plugin's optional Terminated
 * callback when it is removed from the plugins list. */
static void dvcman_plugin_terminate(void* plugin)
{
	IWTSPlugin* pPlugin = plugin;
	UINT error = IFCALLRESULT(CHANNEL_RC_OK, pPlugin->Terminated, pPlugin);
	if (error != CHANNEL_RC_OK)
		WLog_ERR(TAG, "Terminated failed with error %" PRIu32 "!", error);
}
/* wObject fnObjectFree adapter for DVCMAN_LISTENER list entries. */
static void wts_listener_free(void* arg)
{
	dvcman_wtslistener_free((DVCMAN_LISTENER*)arg);
}
/* Allocate and wire up the channel manager: interface function pointers,
 * the four synchronized lists (channels, listeners, plugin names, plugins)
 * with their element destructors, and a stream pool.  On any allocation
 * failure everything built so far is torn down via dvcman_free and NULL is
 * returned. */
static IWTSVirtualChannelManager* dvcman_new(drdynvcPlugin* plugin)
{
	wObject* obj;
	DVCMAN* dvcman;
	dvcman = (DVCMAN*)calloc(1, sizeof(DVCMAN));
	if (!dvcman)
		return NULL;
	/* public interface */
	dvcman->iface.CreateListener = dvcman_create_listener;
	dvcman->iface.DestroyListener = dvcman_destroy_listener;
	dvcman->iface.FindChannelById = dvcman_find_channel_by_id;
	dvcman->iface.GetChannelId = dvcman_get_channel_id;
	dvcman->iface.GetChannelName = dvcman_get_channel_name;
	dvcman->drdynvc = plugin;
	/* channels list owns its entries (freed by dvcman_channel_free) */
	dvcman->channels = ArrayList_New(TRUE);
	if (!dvcman->channels)
		goto fail;
	obj = ArrayList_Object(dvcman->channels);
	obj->fnObjectFree = dvcman_channel_free;
	dvcman->pool = StreamPool_New(TRUE, 10);
	if (!dvcman->pool)
		goto fail;
	/* listeners list owns its entries */
	dvcman->listeners = ArrayList_New(TRUE);
	if (!dvcman->listeners)
		goto fail;
	obj = ArrayList_Object(dvcman->listeners);
	obj->fnObjectFree = wts_listener_free;
	/* plugin_names owns the strdup'd names */
	dvcman->plugin_names = ArrayList_New(TRUE);
	if (!dvcman->plugin_names)
		goto fail;
	obj = ArrayList_Object(dvcman->plugin_names);
	obj->fnObjectFree = free;
	/* plugins are not freed on removal, only Terminated */
	dvcman->plugins = ArrayList_New(TRUE);
	if (!dvcman->plugins)
		goto fail;
	obj = ArrayList_Object(dvcman->plugins);
	obj->fnObjectFree = dvcman_plugin_terminate;
	return &dvcman->iface;
fail:
	dvcman_free(plugin, &dvcman->iface);
	return NULL;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Function description
 * Load the dynamic-channel addin named in args->argv[0], resolve its entry
 * point, and invoke it with an entry-points structure that lets the addin
 * register itself via dvcman_register_plugin.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT dvcman_load_addin(drdynvcPlugin* drdynvc, IWTSVirtualChannelManager* pChannelMgr,
                              ADDIN_ARGV* args, rdpSettings* settings)
{
	DVCMAN_ENTRY_POINTS entryPoints;
	PDVC_PLUGIN_ENTRY pDVCPluginEntry = NULL;
	WLog_Print(drdynvc->log, WLOG_INFO, "Loading Dynamic Virtual Channel %s", args->argv[0]);
	pDVCPluginEntry = (PDVC_PLUGIN_ENTRY)freerdp_load_channel_addin_entry(
	    args->argv[0], NULL, NULL, FREERDP_ADDIN_CHANNEL_DYNAMIC);

	if (pDVCPluginEntry)
	{
		/* entryPoints lives on the stack: the addin must copy what it
		 * needs during the entry call */
		entryPoints.iface.RegisterPlugin = dvcman_register_plugin;
		entryPoints.iface.GetPlugin = dvcman_get_plugin;
		entryPoints.iface.GetPluginData = dvcman_get_plugin_data;
		entryPoints.iface.GetRdpSettings = dvcman_get_rdp_settings;
		entryPoints.dvcman = (DVCMAN*)pChannelMgr;
		entryPoints.args = args;
		entryPoints.settings = settings;
		return pDVCPluginEntry(&entryPoints.iface);
	}

	return ERROR_INVALID_FUNCTION;
}
/* Allocate a DVCMAN_CHANNEL for the given id/name. Returns NULL on duplicate
 * id or allocation failure. Ownership of the returned channel passes to the
 * caller (normally the dvcman->channels list, whose fnObjectFree is
 * dvcman_channel_free). */
static DVCMAN_CHANNEL* dvcman_channel_new(drdynvcPlugin* drdynvc,
                                          IWTSVirtualChannelManager* pChannelMgr, UINT32 ChannelId,
                                          const char* ChannelName)
{
	DVCMAN_CHANNEL* channel;

	/* A live channel with the same id is a protocol violation by the server. */
	if (dvcman_find_channel_by_id(pChannelMgr, ChannelId))
	{
		WLog_Print(drdynvc->log, WLOG_ERROR,
		           "Protocol error: Duplicated ChannelId %" PRIu32 " (%s)!", ChannelId,
		           ChannelName);
		return NULL;
	}

	channel = (DVCMAN_CHANNEL*)calloc(1, sizeof(DVCMAN_CHANNEL));

	if (!channel)
		goto fail;

	channel->dvcman = (DVCMAN*)pChannelMgr;
	channel->channel_id = ChannelId;
	channel->channel_name = _strdup(ChannelName);

	if (!channel->channel_name)
		goto fail;

	if (!InitializeCriticalSectionEx(&(channel->lock), 0, 0))
		goto fail;

	return channel;
fail:
	/* NOTE(review): dvcman_channel_free calls DeleteCriticalSection
	 * unconditionally; on the failure paths above the critical section was
	 * never initialized (only zeroed by calloc) -- confirm the WinPR
	 * implementation tolerates deleting a zeroed, uninitialized section. */
	dvcman_channel_free(channel);
	return NULL;
}
/* Destructor for DVCMAN_CHANNEL elements (installed as fnObjectFree on
 * dvcman->channels): fires OnClose on the per-channel callback, notifies the
 * client context of the disconnect for fully opened channels, releases any
 * partially reassembled data and frees the channel. Accepts NULL. */
static void dvcman_channel_free(void* arg)
{
	DVCMAN_CHANNEL* channel = (DVCMAN_CHANNEL*)arg;
	UINT error = CHANNEL_RC_OK;

	if (channel)
	{
		/* Tell the listener's per-channel callback the channel is going away. */
		if (channel->channel_callback)
		{
			IFCALL(channel->channel_callback->OnClose, channel->channel_callback);
			channel->channel_callback = NULL;
		}

		/* Only channels a listener accepted (status == CHANNEL_RC_OK) get the
		 * OnChannelDisconnected notification and an explicit Close call. */
		if (channel->status == CHANNEL_RC_OK)
		{
			IWTSVirtualChannel* ichannel = (IWTSVirtualChannel*)channel;

			if (channel->dvcman && channel->dvcman->drdynvc)
			{
				DrdynvcClientContext* context = channel->dvcman->drdynvc->context;

				if (context)
				{
					IFCALLRET(context->OnChannelDisconnected, error, context, channel->channel_name,
					          channel->pInterface);
				}
			}

			error = IFCALLRESULT(CHANNEL_RC_OK, ichannel->Close, ichannel);

			if (error != CHANNEL_RC_OK)
				WLog_ERR(TAG, "Close failed with error %" PRIu32 "!", error);
		}

		/* Drop any partially reassembled DATA_FIRST payload. */
		if (channel->dvc_data)
			Stream_Release(channel->dvc_data);

		DeleteCriticalSection(&(channel->lock));
		free(channel->channel_name);
	}

	free(channel);
}
/* Empty all channel-manager lists; the element destructors registered via
 * fnObjectFree run for each removed item. The lists themselves stay alive. */
static void dvcman_clear(drdynvcPlugin* drdynvc, IWTSVirtualChannelManager* pChannelMgr)
{
	DVCMAN* manager = (DVCMAN*)pChannelMgr;

	WINPR_UNUSED(drdynvc);
	ArrayList_Clear(manager->plugins);
	ArrayList_Clear(manager->channels);
	ArrayList_Clear(manager->plugin_names);
	ArrayList_Clear(manager->listeners);
}
/* Destroy the channel manager: free all tracking lists (running each list's
 * element destructor), the stream pool, and the manager itself. */
static void dvcman_free(drdynvcPlugin* drdynvc, IWTSVirtualChannelManager* pChannelMgr)
{
	DVCMAN* manager = (DVCMAN*)pChannelMgr;

	WINPR_UNUSED(drdynvc);
	ArrayList_Free(manager->plugins);
	ArrayList_Free(manager->channels);
	ArrayList_Free(manager->plugin_names);
	ArrayList_Free(manager->listeners);
	StreamPool_Free(manager->pool);
	free(manager);
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Initialize every registered DVC plugin, stopping at the first failure.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT dvcman_init(drdynvcPlugin* drdynvc, IWTSVirtualChannelManager* pChannelMgr)
{
	DVCMAN* manager = (DVCMAN*)pChannelMgr;
	UINT rc = CHANNEL_RC_OK;
	size_t index;

	ArrayList_Lock(manager->plugins);

	for (index = 0; index < ArrayList_Count(manager->plugins); index++)
	{
		IWTSPlugin* plugin = ArrayList_GetItem(manager->plugins, index);

		/* A plugin without an Initialize callback counts as success. */
		rc = IFCALLRESULT(CHANNEL_RC_OK, plugin->Initialize, plugin, pChannelMgr);

		if (rc != CHANNEL_RC_OK)
		{
			WLog_Print(drdynvc->log, WLOG_ERROR, "Initialize failed with error %" PRIu32 "!", rc);
			break;
		}
	}

	ArrayList_Unlock(manager->plugins);
	return rc;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * IWTSVirtualChannel::Write implementation: send cbSize bytes on the channel.
 * Writes on the same channel are serialized via the channel lock.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT dvcman_write_channel(IWTSVirtualChannel* pChannel, ULONG cbSize, const BYTE* pBuffer,
                                 void* pReserved)
{
	DVCMAN_CHANNEL* dvc = (DVCMAN_CHANNEL*)pChannel;
	UINT rc;

	WINPR_UNUSED(pReserved);

	if (!dvc || !dvc->dvcman)
		return CHANNEL_RC_BAD_CHANNEL;

	EnterCriticalSection(&(dvc->lock));
	rc = drdynvc_write_data(dvc->dvcman->drdynvc, dvc->channel_id, pBuffer, cbSize);
	LeaveCriticalSection(&(dvc->lock));
	return rc;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * IWTSVirtualChannel::Close implementation. Only logs the close; the actual
 * teardown is driven by dvcman_close_channel / dvcman_channel_free.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT dvcman_close_channel_iface(IWTSVirtualChannel* pChannel)
{
	DVCMAN_CHANNEL* dvc = (DVCMAN_CHANNEL*)pChannel;

	if (!dvc)
		return CHANNEL_RC_BAD_CHANNEL;

	WLog_DBG(TAG, "close_channel_iface: id=%" PRIu32 "", dvc->channel_id);
	return CHANNEL_RC_OK;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Create a channel instance for ChannelId and offer it to the listener
 * registered under ChannelName. On acceptance the channel becomes operational
 * (status = CHANNEL_RC_OK) and the context's OnChannelConnected callback
 * fires; otherwise the channel stays in ERROR_NOT_CONNECTED state.
 *
 * Fix: the channel was leaked when ArrayList_Add failed — the list only owns
 * (and later frees) elements it actually accepted.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT dvcman_create_channel(drdynvcPlugin* drdynvc, IWTSVirtualChannelManager* pChannelMgr,
                                  UINT32 ChannelId, const char* ChannelName)
{
	size_t i;
	BOOL bAccept;
	DVCMAN_CHANNEL* channel;
	DrdynvcClientContext* context;
	DVCMAN* dvcman = (DVCMAN*)pChannelMgr;
	UINT error;

	if (!(channel = dvcman_channel_new(drdynvc, pChannelMgr, ChannelId, ChannelName)))
	{
		WLog_Print(drdynvc->log, WLOG_ERROR, "dvcman_channel_new failed!");
		return CHANNEL_RC_NO_MEMORY;
	}

	channel->status = ERROR_NOT_CONNECTED;

	if (ArrayList_Add(dvcman->channels, channel) < 0)
	{
		/* The list did not take ownership; free here to avoid leaking the channel. */
		dvcman_channel_free(channel);
		return ERROR_INTERNAL_ERROR;
	}

	ArrayList_Lock(dvcman->listeners);

	/* Find the listener registered for this channel name, if any. */
	for (i = 0; i < ArrayList_Count(dvcman->listeners); i++)
	{
		DVCMAN_LISTENER* listener = (DVCMAN_LISTENER*)ArrayList_GetItem(dvcman->listeners, i);

		if (strcmp(listener->channel_name, ChannelName) == 0)
		{
			IWTSVirtualChannelCallback* pCallback = NULL;
			channel->iface.Write = dvcman_write_channel;
			channel->iface.Close = dvcman_close_channel_iface;
			bAccept = TRUE;

			if ((error = listener->listener_callback->OnNewChannelConnection(
			         listener->listener_callback, &channel->iface, NULL, &bAccept, &pCallback)) ==
			        CHANNEL_RC_OK &&
			    bAccept)
			{
				WLog_Print(drdynvc->log, WLOG_DEBUG, "listener %s created new channel %" PRIu32 "",
				           listener->channel_name, channel->channel_id);
				channel->status = CHANNEL_RC_OK;
				channel->channel_callback = pCallback;
				channel->pInterface = listener->iface.pInterface;
				context = dvcman->drdynvc->context;
				/* Notify the client context; its error (if any) is returned. */
				IFCALLRET(context->OnChannelConnected, error, context, ChannelName,
				          listener->iface.pInterface);

				if (error)
					WLog_Print(drdynvc->log, WLOG_ERROR,
					           "context.OnChannelConnected failed with error %" PRIu32 "", error);

				goto fail;
			}
			else
			{
				if (error)
				{
					WLog_Print(drdynvc->log, WLOG_ERROR,
					           "OnNewChannelConnection failed with error %" PRIu32 "!", error);
					goto fail;
				}
				else
				{
					WLog_Print(drdynvc->log, WLOG_ERROR,
					           "OnNewChannelConnection returned with bAccept FALSE!");
					error = ERROR_INTERNAL_ERROR;
					goto fail;
				}
			}
		}
	}

	/* No listener registered for this channel name. */
	error = ERROR_INTERNAL_ERROR;
fail:
	ArrayList_Unlock(dvcman->listeners);
	return error;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Notify an accepted channel's callback that the channel is now open.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT dvcman_open_channel(drdynvcPlugin* drdynvc, IWTSVirtualChannelManager* pChannelMgr,
                                UINT32 ChannelId)
{
	DVCMAN_CHANNEL* channel;
	IWTSVirtualChannelCallback* pCallback;
	UINT error;
	channel = (DVCMAN_CHANNEL*)dvcman_find_channel_by_id(pChannelMgr, ChannelId);

	if (!channel)
	{
		WLog_Print(drdynvc->log, WLOG_ERROR, "ChannelId %" PRIu32 " not found!", ChannelId);
		return ERROR_INTERNAL_ERROR;
	}

	/* Only channels a listener accepted (status == CHANNEL_RC_OK) have a
	 * channel_callback; others are silently skipped. */
	if (channel->status == CHANNEL_RC_OK)
	{
		pCallback = channel->channel_callback;

		/* OnOpen is optional; a non-zero return aborts the open. */
		if ((pCallback->OnOpen) && (error = pCallback->OnOpen(pCallback)))
		{
			WLog_Print(drdynvc->log, WLOG_ERROR, "OnOpen failed with error %" PRIu32 "!", error);
			return error;
		}

		WLog_Print(drdynvc->log, WLOG_DEBUG, "open_channel: ChannelId %" PRIu32 "", ChannelId);
	}

	return CHANNEL_RC_OK;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Close a channel: optionally send a CLOSE_REQUEST PDU to the server, then
 * remove the channel from the manager's list (which frees it via the list's
 * fnObjectFree destructor).
 *
 * @param bSendClosePDU when TRUE, tell the server the channel is closed
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT dvcman_close_channel(IWTSVirtualChannelManager* pChannelMgr, UINT32 ChannelId,
                                 BOOL bSendClosePDU)
{
	DVCMAN_CHANNEL* channel;
	UINT error = CHANNEL_RC_OK;
	DVCMAN* dvcman = (DVCMAN*)pChannelMgr;
	drdynvcPlugin* drdynvc = dvcman->drdynvc;
	channel = (DVCMAN_CHANNEL*)dvcman_find_channel_by_id(pChannelMgr, ChannelId);

	if (!channel)
	{
		// WLog_Print(drdynvc->log, WLOG_ERROR, "ChannelId %"PRIu32" not found!", ChannelId);
		/**
		 * Windows 8 / Windows Server 2012 send close requests for channels that failed to be
		 * created. Do not warn, simply return success here.
		 */
		return CHANNEL_RC_OK;
	}

	if (drdynvc && bSendClosePDU)
	{
		/* CLOSE_REQUEST PDU: 1 header byte + 4-byte channel id = 5 bytes. */
		wStream* s = StreamPool_Take(dvcman->pool, 5);

		if (!s)
		{
			WLog_Print(drdynvc->log, WLOG_ERROR, "StreamPool_Take failed!");
			error = CHANNEL_RC_NO_MEMORY;
		}
		else
		{
			/* cbChId = 0x02 selects the 4-byte channel-id encoding. */
			Stream_Write_UINT8(s, (CLOSE_REQUEST_PDU << 4) | 0x02);
			Stream_Write_UINT32(s, ChannelId);
			error = drdynvc_send(drdynvc, s);
		}
	}

	/* Removing from the list triggers dvcman_channel_free via fnObjectFree. */
	ArrayList_Remove(dvcman->channels, channel);
	return error;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Handle a DATA_FIRST PDU: allocate a reassembly stream of the declared total
 * length so subsequent DATA fragments can be accumulated.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT dvcman_receive_channel_data_first(drdynvcPlugin* drdynvc,
                                              IWTSVirtualChannelManager* pChannelMgr,
                                              UINT32 ChannelId, UINT32 length)
{
	DVCMAN_CHANNEL* channel;
	channel = (DVCMAN_CHANNEL*)dvcman_find_channel_by_id(pChannelMgr, ChannelId);

	if (!channel)
	{
		/**
		 * Windows Server 2012 R2 can send some messages over
		 * Microsoft::Windows::RDS::Geometry::v08.01 even if the dynamic virtual channel wasn't
		 * registered on our side. Ignoring it works.
		 */
		WLog_Print(drdynvc->log, WLOG_ERROR, "ChannelId %" PRIu32 " not found!", ChannelId);
		return CHANNEL_RC_OK;
	}

	/* A new DATA_FIRST discards any previous, incomplete reassembly. */
	if (channel->dvc_data)
		Stream_Release(channel->dvc_data);

	channel->dvc_data = StreamPool_Take(channel->dvcman->pool, length);

	if (!channel->dvc_data)
	{
		WLog_Print(drdynvc->log, WLOG_ERROR, "StreamPool_Take failed!");
		return CHANNEL_RC_NO_MEMORY;
	}

	channel->dvc_data_length = length;
	return CHANNEL_RC_OK;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Deliver incoming data to a channel's callback. If a DATA_FIRST started a
 * reassembly (channel->dvc_data set), fragments are accumulated and delivered
 * once the declared total length is reached; otherwise the data is delivered
 * directly.
 *
 * Fix: guard against a NULL channel_callback — data can arrive for a channel
 * that exists in the list but was never accepted by a listener (status stays
 * ERROR_NOT_CONNECTED and channel_callback is never assigned), which
 * previously crashed on the OnDataReceived dereference.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT dvcman_receive_channel_data(drdynvcPlugin* drdynvc,
                                        IWTSVirtualChannelManager* pChannelMgr, UINT32 ChannelId,
                                        wStream* data)
{
	UINT status = CHANNEL_RC_OK;
	DVCMAN_CHANNEL* channel;
	size_t dataSize = Stream_GetRemainingLength(data);
	channel = (DVCMAN_CHANNEL*)dvcman_find_channel_by_id(pChannelMgr, ChannelId);

	if (!channel)
	{
		/* Windows 8.1 tries to open channels not created.
		 * Ignore cases like this. */
		WLog_Print(drdynvc->log, WLOG_ERROR, "ChannelId %" PRIu32 " not found!", ChannelId);
		return CHANNEL_RC_OK;
	}

	if (!channel->channel_callback)
	{
		/* The channel was never successfully opened (no listener accepted it);
		 * drop the data instead of dereferencing a NULL callback. */
		WLog_Print(drdynvc->log, WLOG_ERROR, "ChannelId %" PRIu32 " has no callback!", ChannelId);
		return CHANNEL_RC_OK;
	}

	if (channel->dvc_data)
	{
		/* Fragmented data */
		if (Stream_GetPosition(channel->dvc_data) + dataSize > channel->dvc_data_length)
		{
			WLog_Print(drdynvc->log, WLOG_ERROR, "data exceeding declared length!");
			Stream_Release(channel->dvc_data);
			channel->dvc_data = NULL;
			return ERROR_INVALID_DATA;
		}

		Stream_Copy(data, channel->dvc_data, dataSize);

		/* Deliver once the full declared payload has been reassembled. */
		if (Stream_GetPosition(channel->dvc_data) >= channel->dvc_data_length)
		{
			Stream_SealLength(channel->dvc_data);
			Stream_SetPosition(channel->dvc_data, 0);
			status = channel->channel_callback->OnDataReceived(channel->channel_callback,
			                                                   channel->dvc_data);
			Stream_Release(channel->dvc_data);
			channel->dvc_data = NULL;
		}
	}
	else
	{
		status = channel->channel_callback->OnDataReceived(channel->channel_callback, data);
	}

	return status;
}
/* Write val in the smallest of the three DVC integer encodings (1, 2 or 4
 * bytes) and return the corresponding 2-bit cb code (0, 1 or 2) for the
 * PDU header. */
static UINT8 drdynvc_write_variable_uint(wStream* s, UINT32 val)
{
	if (val <= 0xFF)
	{
		Stream_Write_UINT8(s, (UINT8)val);
		return 0;
	}

	if (val <= 0xFFFF)
	{
		Stream_Write_UINT16(s, (UINT16)val);
		return 1;
	}

	Stream_Write_UINT32(s, val);
	return 2;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Send a fully built PDU stream over the static channel.
 *
 * Stream ownership: on CHANNEL_RC_OK the stream is handed to the transport and
 * released later by the CHANNEL_EVENT_WRITE_COMPLETE/CANCELLED handler; on any
 * other status it is released here.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drdynvc_send(drdynvcPlugin* drdynvc, wStream* s)
{
	UINT status;

	if (!drdynvc)
		status = CHANNEL_RC_BAD_CHANNEL_HANDLE;
	else
	{
		/* The stream pointer itself is passed as the write's user data so the
		 * completion event can release it. */
		status = drdynvc->channelEntryPoints.pVirtualChannelWriteEx(
		    drdynvc->InitHandle, drdynvc->OpenHandle, Stream_Buffer(s),
		    (UINT32)Stream_GetPosition(s), s);
	}

	switch (status)
	{
		case CHANNEL_RC_OK:
			return CHANNEL_RC_OK;

		case CHANNEL_RC_NOT_CONNECTED:
			/* Not connected is treated as a benign no-op. */
			Stream_Release(s);
			return CHANNEL_RC_OK;

		case CHANNEL_RC_BAD_CHANNEL_HANDLE:
			Stream_Release(s);
			WLog_ERR(TAG, "VirtualChannelWriteEx failed with CHANNEL_RC_BAD_CHANNEL_HANDLE");
			return status;

		default:
			Stream_Release(s);
			WLog_Print(drdynvc->log, WLOG_ERROR,
			           "VirtualChannelWriteEx failed with %s [%08" PRIX32 "]",
			           WTSErrorToString(status), status);
			return status;
	}
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Send application data on a dynamic channel, fragmenting into
 * CHANNEL_CHUNK_LENGTH-sized DATA_FIRST/DATA PDUs when needed. A zero-size
 * write closes the channel (with a CLOSE_REQUEST PDU).
 *
 * Fix: on the dataSize == 0 path `status` was never assigned and kept its
 * CHANNEL_RC_BAD_INIT_HANDLE initializer, so a successful close was reported
 * (and logged) as an error. The close result is now captured.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drdynvc_write_data(drdynvcPlugin* drdynvc, UINT32 ChannelId, const BYTE* data,
                               UINT32 dataSize)
{
	wStream* data_out;
	size_t pos;
	UINT8 cbChId;
	UINT8 cbLen;
	unsigned long chunkLength;
	UINT status = CHANNEL_RC_BAD_INIT_HANDLE;
	DVCMAN* dvcman;

	if (!drdynvc)
		return CHANNEL_RC_BAD_CHANNEL_HANDLE;

	dvcman = (DVCMAN*)drdynvc->channel_mgr;
	WLog_Print(drdynvc->log, WLOG_DEBUG, "write_data: ChannelId=%" PRIu32 " size=%" PRIu32 "",
	           ChannelId, dataSize);
	data_out = StreamPool_Take(dvcman->pool, CHANNEL_CHUNK_LENGTH);

	if (!data_out)
	{
		WLog_Print(drdynvc->log, WLOG_ERROR, "StreamPool_Take failed!");
		return CHANNEL_RC_NO_MEMORY;
	}

	/* Reserve byte 0 for the Cmd/Sp/cbChId header, then encode the channel id. */
	Stream_SetPosition(data_out, 1);
	cbChId = drdynvc_write_variable_uint(data_out, ChannelId);
	pos = Stream_GetPosition(data_out);

	if (dataSize == 0)
	{
		/* Zero-size write means "close the channel". */
		status = dvcman_close_channel(drdynvc->channel_mgr, ChannelId, TRUE);
		Stream_Release(data_out);
	}
	else if (dataSize <= CHANNEL_CHUNK_LENGTH - pos)
	{
		/* Fits in a single DATA PDU. */
		Stream_SetPosition(data_out, 0);
		Stream_Write_UINT8(data_out, (DATA_PDU << 4) | cbChId);
		Stream_SetPosition(data_out, pos);
		Stream_Write(data_out, data, dataSize);
		status = drdynvc_send(drdynvc, data_out);
	}
	else
	{
		/* Fragment the data: DATA_FIRST carries the total length, followed by
		 * as many DATA PDUs as needed. */
		cbLen = drdynvc_write_variable_uint(data_out, dataSize);
		pos = Stream_GetPosition(data_out);
		Stream_SetPosition(data_out, 0);
		Stream_Write_UINT8(data_out, (DATA_FIRST_PDU << 4) | cbChId | (cbLen << 2));
		Stream_SetPosition(data_out, pos);
		chunkLength = CHANNEL_CHUNK_LENGTH - pos;
		Stream_Write(data_out, data, chunkLength);
		data += chunkLength;
		dataSize -= chunkLength;
		status = drdynvc_send(drdynvc, data_out);

		while (status == CHANNEL_RC_OK && dataSize > 0)
		{
			data_out = StreamPool_Take(dvcman->pool, CHANNEL_CHUNK_LENGTH);

			if (!data_out)
			{
				WLog_Print(drdynvc->log, WLOG_ERROR, "StreamPool_Take failed!");
				return CHANNEL_RC_NO_MEMORY;
			}

			Stream_SetPosition(data_out, 1);
			cbChId = drdynvc_write_variable_uint(data_out, ChannelId);
			pos = Stream_GetPosition(data_out);
			Stream_SetPosition(data_out, 0);
			Stream_Write_UINT8(data_out, (DATA_PDU << 4) | cbChId);
			Stream_SetPosition(data_out, pos);
			chunkLength = dataSize;

			if (chunkLength > CHANNEL_CHUNK_LENGTH - pos)
				chunkLength = CHANNEL_CHUNK_LENGTH - pos;

			Stream_Write(data_out, data, chunkLength);
			data += chunkLength;
			dataSize -= chunkLength;
			status = drdynvc_send(drdynvc, data_out);
		}
	}

	if (status != CHANNEL_RC_OK)
	{
		WLog_Print(drdynvc->log, WLOG_ERROR, "VirtualChannelWriteEx failed with %s [%08" PRIX32 "]",
		           WTSErrorToString(status), status);
		return status;
	}

	return CHANNEL_RC_OK;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Send the DYNVC_CAPS response PDU advertising the version negotiated in
 * drdynvc->version.
 *
 * Fix: the allocation-failure log message was garbled by a bad rename
 * ("Stream_Ndrdynvc_write_variable_uintew failed!"); it now matches the
 * StreamPool_Take wording used everywhere else in this file.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drdynvc_send_capability_response(drdynvcPlugin* drdynvc)
{
	UINT status;
	wStream* s;
	DVCMAN* dvcman;

	if (!drdynvc)
		return CHANNEL_RC_BAD_CHANNEL_HANDLE;

	dvcman = (DVCMAN*)drdynvc->channel_mgr;
	WLog_Print(drdynvc->log, WLOG_TRACE, "capability_response");
	s = StreamPool_Take(dvcman->pool, 4);

	if (!s)
	{
		WLog_Print(drdynvc->log, WLOG_ERROR, "StreamPool_Take failed!");
		return CHANNEL_RC_NO_MEMORY;
	}

	Stream_Write_UINT16(s, 0x0050); /* Cmd+Sp+cbChId+Pad. Note: MSTSC sends 0x005c */
	Stream_Write_UINT16(s, drdynvc->version);
	status = drdynvc_send(drdynvc, s);

	if (status != CHANNEL_RC_OK)
	{
		WLog_Print(drdynvc->log, WLOG_ERROR, "VirtualChannelWriteEx failed with %s [%08" PRIX32 "]",
		           WTSErrorToString(status), status);
	}

	return status;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Handle the server's DYNVC_CAPS request: record the offered version (and, for
 * versions 2/3, the priority charges), reply with our capability response and
 * move the plugin to the READY state.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drdynvc_process_capability_request(drdynvcPlugin* drdynvc, int Sp, int cbChId,
                                               wStream* s)
{
	UINT status;

	if (!drdynvc)
		return CHANNEL_RC_BAD_INIT_HANDLE;

	/* Need 1 pad byte + 2-byte version. */
	if (Stream_GetRemainingLength(s) < 3)
		return ERROR_INVALID_DATA;

	WLog_Print(drdynvc->log, WLOG_TRACE, "capability_request Sp=%d cbChId=%d", Sp, cbChId);
	Stream_Seek(s, 1); /* pad */
	Stream_Read_UINT16(s, drdynvc->version);

	/* RDP8 servers offer version 3, though Microsoft forgot to document it
	 * in their early documents. It behaves the same as version 2.
	 */
	if ((drdynvc->version == 2) || (drdynvc->version == 3))
	{
		if (Stream_GetRemainingLength(s) < 8)
			return ERROR_INVALID_DATA;

		Stream_Read_UINT16(s, drdynvc->PriorityCharge0);
		Stream_Read_UINT16(s, drdynvc->PriorityCharge1);
		Stream_Read_UINT16(s, drdynvc->PriorityCharge2);
		Stream_Read_UINT16(s, drdynvc->PriorityCharge3);
	}

	status = drdynvc_send_capability_response(drdynvc);
	drdynvc->state = DRDYNVC_STATE_READY;
	return status;
}
/* Map a 2-bit cbLen/cbChId header field to the width in bytes of the encoded
 * integer that follows (0 -> 1 byte, 1 -> 2 bytes, anything else -> 4). */
static UINT32 drdynvc_cblen_to_bytes(int cbLen)
{
	if (cbLen == 0)
		return 1;

	if (cbLen == 1)
		return 2;

	return 4;
}
/* Read a variable-width unsigned integer whose size is selected by the 2-bit
 * cbLen header field (0 -> 1 byte, 1 -> 2 bytes, anything else -> 4 bytes).
 * The caller must have verified the stream holds enough bytes. */
static UINT32 drdynvc_read_variable_uint(wStream* s, int cbLen)
{
	UINT32 val;

	if (cbLen == 0)
		Stream_Read_UINT8(s, val);
	else if (cbLen == 1)
		Stream_Read_UINT16(s, val);
	else
		Stream_Read_UINT32(s, val);

	return val;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Handle a DYNVC_CREATE_REQ PDU: create the local channel, echo the request
 * header back with a 4-byte creation status appended, then either open the
 * channel locally (on success) or remove it (on failure).
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drdynvc_process_create_request(drdynvcPlugin* drdynvc, int Sp, int cbChId, wStream* s)
{
	size_t pos;
	UINT status;
	UINT32 ChannelId;
	wStream* data_out;
	UINT channel_status;
	char* name;
	size_t length;
	DVCMAN* dvcman;
	WINPR_UNUSED(Sp);

	if (!drdynvc)
		return CHANNEL_RC_BAD_CHANNEL_HANDLE;

	dvcman = (DVCMAN*)drdynvc->channel_mgr;

	if (drdynvc->state == DRDYNVC_STATE_CAPABILITIES)
	{
		/**
		 * For some reason the server does not always send the
		 * capabilities pdu as it should. When this happens,
		 * send a capabilities response.
		 */
		drdynvc->version = 3;

		if ((status = drdynvc_send_capability_response(drdynvc)))
		{
			WLog_Print(drdynvc->log, WLOG_ERROR, "drdynvc_send_capability_response failed!");
			return status;
		}

		drdynvc->state = DRDYNVC_STATE_READY;
	}

	if (Stream_GetRemainingLength(s) < drdynvc_cblen_to_bytes(cbChId))
		return ERROR_INVALID_DATA;

	ChannelId = drdynvc_read_variable_uint(s, cbChId);
	/* `pos` marks the end of the header (1 byte Cmd + channel id); the rest of
	 * the PDU is the channel name. */
	pos = Stream_GetPosition(s);
	name = (char*)Stream_Pointer(s);
	length = Stream_GetRemainingLength(s);

	/* The name must be NUL-terminated within the remaining PDU bytes. */
	if (strnlen(name, length) >= length)
		return ERROR_INVALID_DATA;

	WLog_Print(drdynvc->log, WLOG_DEBUG,
	           "process_create_request: ChannelId=%" PRIu32 " ChannelName=%s", ChannelId, name);
	channel_status = dvcman_create_channel(drdynvc, drdynvc->channel_mgr, ChannelId, name);
	/* Response = header + channel id (pos bytes, copied from the request) plus
	 * a 4-byte creation status. */
	data_out = StreamPool_Take(dvcman->pool, pos + 4);

	if (!data_out)
	{
		WLog_Print(drdynvc->log, WLOG_ERROR, "StreamPool_Take failed!");
		return CHANNEL_RC_NO_MEMORY;
	}

	Stream_Write_UINT8(data_out, (CREATE_REQUEST_PDU << 4) | cbChId);
	/* Re-copy the encoded channel id (bytes 1..pos-1) from the request. */
	Stream_SetPosition(s, 1);
	Stream_Copy(s, data_out, pos - 1);

	if (channel_status == CHANNEL_RC_OK)
	{
		WLog_Print(drdynvc->log, WLOG_DEBUG, "channel created");
		Stream_Write_UINT32(data_out, 0);
	}
	else
	{
		WLog_Print(drdynvc->log, WLOG_DEBUG, "no listener");
		Stream_Write_UINT32(data_out, (UINT32)0xC0000001); /* same code used by mstsc */
	}

	status = drdynvc_send(drdynvc, data_out);

	if (status != CHANNEL_RC_OK)
	{
		WLog_Print(drdynvc->log, WLOG_ERROR, "VirtualChannelWriteEx failed with %s [%08" PRIX32 "]",
		           WTSErrorToString(status), status);
		return status;
	}

	if (channel_status == CHANNEL_RC_OK)
	{
		if ((status = dvcman_open_channel(drdynvc, drdynvc->channel_mgr, ChannelId)))
		{
			WLog_Print(drdynvc->log, WLOG_ERROR,
			           "dvcman_open_channel failed with error %" PRIu32 "!", status);
			return status;
		}
	}
	else
	{
		/* Creation failed: remove the channel locally without notifying the
		 * server again (it already got the failure status above). */
		if ((status = dvcman_close_channel(drdynvc->channel_mgr, ChannelId, FALSE)))
			WLog_Print(drdynvc->log, WLOG_ERROR,
			           "dvcman_close_channel failed with error %" PRIu32 "!", status);
	}

	return status;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Handle a DATA_FIRST PDU: start reassembly of a fragmented payload of the
 * declared total Length, then feed this PDU's data portion in. On failure the
 * channel is closed.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drdynvc_process_data_first(drdynvcPlugin* drdynvc, int Sp, int cbChId, wStream* s)
{
	UINT status;
	UINT32 Length;
	UINT32 ChannelId;

	/* Must hold both the encoded channel id (width from cbChId) and the
	 * encoded total length (width from Sp). */
	if (Stream_GetRemainingLength(s) < drdynvc_cblen_to_bytes(cbChId) + drdynvc_cblen_to_bytes(Sp))
		return ERROR_INVALID_DATA;

	ChannelId = drdynvc_read_variable_uint(s, cbChId);
	Length = drdynvc_read_variable_uint(s, Sp);
	WLog_Print(drdynvc->log, WLOG_DEBUG,
	           "process_data_first: Sp=%d cbChId=%d, ChannelId=%" PRIu32 " Length=%" PRIu32 "", Sp,
	           cbChId, ChannelId, Length);
	status = dvcman_receive_channel_data_first(drdynvc, drdynvc->channel_mgr, ChannelId, Length);

	if (status == CHANNEL_RC_OK)
		status = dvcman_receive_channel_data(drdynvc, drdynvc->channel_mgr, ChannelId, s);

	/* Any failure closes the channel (and notifies the server). */
	if (status != CHANNEL_RC_OK)
		status = dvcman_close_channel(drdynvc->channel_mgr, ChannelId, TRUE);

	return status;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Handle a DATA PDU: deliver the payload to the channel (possibly continuing
 * a DATA_FIRST reassembly). Any failure closes the channel.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drdynvc_process_data(drdynvcPlugin* drdynvc, int Sp, int cbChId, wStream* s)
{
	UINT rc;
	UINT32 channelId;

	if (Stream_GetRemainingLength(s) < drdynvc_cblen_to_bytes(cbChId))
		return ERROR_INVALID_DATA;

	channelId = drdynvc_read_variable_uint(s, cbChId);
	WLog_Print(drdynvc->log, WLOG_TRACE, "process_data: Sp=%d cbChId=%d, ChannelId=%" PRIu32 "", Sp,
	           cbChId, channelId);
	rc = dvcman_receive_channel_data(drdynvc, drdynvc->channel_mgr, channelId, s);

	if (rc != CHANNEL_RC_OK)
		rc = dvcman_close_channel(drdynvc->channel_mgr, channelId, TRUE);

	return rc;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Handle a CLOSE_REQUEST PDU from the server: close the channel locally and
 * acknowledge with our own close PDU.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drdynvc_process_close_request(drdynvcPlugin* drdynvc, int Sp, int cbChId, wStream* s)
{
	UINT rc;
	UINT32 channelId;

	if (Stream_GetRemainingLength(s) < drdynvc_cblen_to_bytes(cbChId))
		return ERROR_INVALID_DATA;

	channelId = drdynvc_read_variable_uint(s, cbChId);
	WLog_Print(drdynvc->log, WLOG_DEBUG,
	           "process_close_request: Sp=%d cbChId=%d, ChannelId=%" PRIu32 "", Sp, cbChId,
	           channelId);
	rc = dvcman_close_channel(drdynvc->channel_mgr, channelId, TRUE);

	if (rc)
		WLog_Print(drdynvc->log, WLOG_ERROR, "dvcman_close_channel failed with error %" PRIu32 "!",
		           rc);

	return rc;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Parse one incoming DVC PDU and dispatch on the command nibble of the header
 * byte (Cmd:4 | Sp:2 | cbChId:2).
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drdynvc_order_recv(drdynvcPlugin* drdynvc, wStream* s)
{
	int header;
	int Cmd;
	int Sp;
	int cbChId;

	if (Stream_GetRemainingLength(s) < 1)
		return ERROR_INVALID_DATA;

	Stream_Read_UINT8(s, header);
	Cmd = (header >> 4) & 0x0F;
	Sp = (header >> 2) & 0x03;
	cbChId = header & 0x03;
	WLog_Print(drdynvc->log, WLOG_DEBUG, "order_recv: Cmd=0x%x, Sp=%d cbChId=%d", Cmd, Sp, cbChId);

	switch (Cmd)
	{
		case CAPABILITY_REQUEST_PDU:
			return drdynvc_process_capability_request(drdynvc, Sp, cbChId, s);

		case CREATE_REQUEST_PDU:
			return drdynvc_process_create_request(drdynvc, Sp, cbChId, s);

		case DATA_FIRST_PDU:
			return drdynvc_process_data_first(drdynvc, Sp, cbChId, s);

		case DATA_PDU:
			return drdynvc_process_data(drdynvc, Sp, cbChId, s);

		case CLOSE_REQUEST_PDU:
			return drdynvc_process_close_request(drdynvc, Sp, cbChId, s);

		default:
			WLog_Print(drdynvc->log, WLOG_ERROR, "unknown drdynvc cmd 0x%x", Cmd);
			return ERROR_INTERNAL_ERROR;
	}
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * Reassemble static-channel chunks (FIRST/.../LAST) into drdynvc->data_in and,
 * once complete, post the sealed stream to the worker thread's message queue
 * (which takes ownership of it).
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drdynvc_virtual_channel_event_data_received(drdynvcPlugin* drdynvc, void* pData,
                                                        UINT32 dataLength, UINT32 totalLength,
                                                        UINT32 dataFlags)
{
	wStream* data_in;

	/* Suspend/resume notifications carry no payload. */
	if ((dataFlags & CHANNEL_FLAG_SUSPEND) || (dataFlags & CHANNEL_FLAG_RESUME))
	{
		return CHANNEL_RC_OK;
	}

	if (dataFlags & CHANNEL_FLAG_FIRST)
	{
		DVCMAN* mgr = (DVCMAN*)drdynvc->channel_mgr;

		/* A new FIRST chunk discards any incomplete previous assembly. */
		if (drdynvc->data_in)
			Stream_Release(drdynvc->data_in);

		drdynvc->data_in = StreamPool_Take(mgr->pool, totalLength);
	}

	if (!(data_in = drdynvc->data_in))
	{
		WLog_Print(drdynvc->log, WLOG_ERROR, "StreamPool_Take failed!");
		return CHANNEL_RC_NO_MEMORY;
	}

	if (!Stream_EnsureRemainingCapacity(data_in, dataLength))
	{
		WLog_Print(drdynvc->log, WLOG_ERROR, "Stream_EnsureRemainingCapacity failed!");
		Stream_Release(drdynvc->data_in);
		drdynvc->data_in = NULL;
		return ERROR_INTERNAL_ERROR;
	}

	Stream_Write(data_in, pData, dataLength);

	if (dataFlags & CHANNEL_FLAG_LAST)
	{
		/* Sanity check: the write position must not exceed the capacity. */
		const size_t cap = Stream_Capacity(data_in);
		const size_t pos = Stream_GetPosition(data_in);

		if (cap < pos)
		{
			WLog_Print(drdynvc->log, WLOG_ERROR, "drdynvc_plugin_process_received: read error");
			return ERROR_INVALID_DATA;
		}

		/* Ownership of the sealed stream moves to the message queue; the
		 * worker thread releases it after processing. */
		drdynvc->data_in = NULL;
		Stream_SealLength(data_in);
		Stream_SetPosition(data_in, 0);

		if (!MessageQueue_Post(drdynvc->queue, NULL, 0, (void*)data_in, NULL))
		{
			WLog_Print(drdynvc->log, WLOG_ERROR, "MessageQueue_Post failed!");
			return ERROR_INTERNAL_ERROR;
		}
	}

	return CHANNEL_RC_OK;
}
/* Open-handle event callback invoked by the channel runtime: routes incoming
 * data chunks to reassembly and releases streams whose writes completed or
 * were cancelled (the stream was passed as the write's user data). */
static void VCAPITYPE drdynvc_virtual_channel_open_event_ex(LPVOID lpUserParam, DWORD openHandle,
                                                            UINT event, LPVOID pData,
                                                            UINT32 dataLength, UINT32 totalLength,
                                                            UINT32 dataFlags)
{
	UINT error = CHANNEL_RC_OK;
	drdynvcPlugin* drdynvc = (drdynvcPlugin*)lpUserParam;

	switch (event)
	{
		case CHANNEL_EVENT_DATA_RECEIVED:
			/* Guard against events for a different/stale open handle. */
			if (!drdynvc || (drdynvc->OpenHandle != openHandle))
			{
				WLog_ERR(TAG, "drdynvc_virtual_channel_open_event: error no match");
				return;
			}

			if ((error = drdynvc_virtual_channel_event_data_received(drdynvc, pData, dataLength,
			                                                         totalLength, dataFlags)))
				WLog_Print(drdynvc->log, WLOG_ERROR,
				           "drdynvc_virtual_channel_event_data_received failed with error %" PRIu32
				           "",
				           error);

			break;

		case CHANNEL_EVENT_WRITE_CANCELLED:
		case CHANNEL_EVENT_WRITE_COMPLETE:
		{
			/* pData is the wStream drdynvc_send handed to VirtualChannelWriteEx;
			 * release it now that the transport is done with it. */
			wStream* s = (wStream*)pData;
			Stream_Release(s);
		}
		break;

		case CHANNEL_EVENT_USER:
			break;
	}

	if (error && drdynvc && drdynvc->rdpcontext)
		setChannelError(drdynvc->rdpcontext, error,
		                "drdynvc_virtual_channel_open_event reported an error");
}
/* Worker thread: drains the message queue, processing each posted PDU stream
 * via drdynvc_order_recv, until WMQ_QUIT. On exit it force-closes any channels
 * the server left open so their callbacks fire. Returns the last error as the
 * thread exit code. */
static DWORD WINAPI drdynvc_virtual_channel_client_thread(LPVOID arg)
{
	wStream* data;
	wMessage message;
	UINT error = CHANNEL_RC_OK;
	drdynvcPlugin* drdynvc = (drdynvcPlugin*)arg;

	if (!drdynvc)
	{
		ExitThread((DWORD)CHANNEL_RC_BAD_CHANNEL_HANDLE);
		return CHANNEL_RC_BAD_CHANNEL_HANDLE;
	}

	while (1)
	{
		if (!MessageQueue_Wait(drdynvc->queue))
		{
			WLog_Print(drdynvc->log, WLOG_ERROR, "MessageQueue_Wait failed!");
			error = ERROR_INTERNAL_ERROR;
			break;
		}

		if (!MessageQueue_Peek(drdynvc->queue, &message, TRUE))
		{
			WLog_Print(drdynvc->log, WLOG_ERROR, "MessageQueue_Peek failed!");
			error = ERROR_INTERNAL_ERROR;
			break;
		}

		if (message.id == WMQ_QUIT)
			break;

		/* id 0 messages carry a PDU stream posted by the data-received handler. */
		if (message.id == 0)
		{
			data = (wStream*)message.wParam;

			/* Processing errors are logged but do not stop the thread. */
			if ((error = drdynvc_order_recv(drdynvc, data)))
			{
				WLog_Print(drdynvc->log, WLOG_WARN,
				           "drdynvc_order_recv failed with error %" PRIu32 "!", error);
			}

			Stream_Release(data);
		}
	}

	{
		/* Disconnect remaining dynamic channels that the server did not.
		 * This is required to properly shut down channels by calling the appropriate
		 * event handlers. */
		DVCMAN* drdynvcMgr = (DVCMAN*)drdynvc->channel_mgr;

		/* dvcman_close_channel removes the entry, so always take item 0. */
		while (ArrayList_Count(drdynvcMgr->channels) > 0)
		{
			IWTSVirtualChannel* channel =
			    (IWTSVirtualChannel*)ArrayList_GetItem(drdynvcMgr->channels, 0);
			const UINT32 ChannelId = drdynvc->channel_mgr->GetChannelId(channel);
			dvcman_close_channel(drdynvc->channel_mgr, ChannelId, FALSE);
		}
	}

	if (error && drdynvc->rdpcontext)
		setChannelError(drdynvc->rdpcontext, error,
		                "drdynvc_virtual_channel_client_thread reported an error");

	ExitThread((DWORD)error);
	return error;
}
/* Message-queue element destructor: releases the stream payload carried by
 * id 0 messages; other message ids carry no owned resources. */
static void drdynvc_queue_object_free(void* obj)
{
	wMessage* msg = (wMessage*)obj;

	if (msg && (msg->id == 0))
	{
		wStream* payload = (wStream*)msg->wParam;

		if (payload)
			Stream_Release(payload);
	}
}
/* CHANNEL_EVENT_INITIALIZED handler: create the worker message queue and the
 * channel manager. Returns CHANNEL_RC_OK on success, ERROR_INTERNAL_ERROR on
 * any failure (both resources are torn down later in the terminated event). */
static UINT drdynvc_virtual_channel_event_initialized(drdynvcPlugin* drdynvc, LPVOID pData,
                                                      UINT32 dataLength)
{
	WINPR_UNUSED(pData);
	WINPR_UNUSED(dataLength);

	if (!drdynvc)
		return ERROR_INTERNAL_ERROR;

	drdynvc->queue = MessageQueue_New(NULL);

	if (!drdynvc->queue)
	{
		WLog_Print(drdynvc->log, WLOG_ERROR, "MessageQueue_New failed!");
		return ERROR_INTERNAL_ERROR;
	}

	/* Pending messages dropped at shutdown must release their streams. */
	drdynvc->queue->object.fnObjectFree = drdynvc_queue_object_free;
	drdynvc->channel_mgr = dvcman_new(drdynvc);

	if (!drdynvc->channel_mgr)
	{
		WLog_Print(drdynvc->log, WLOG_ERROR, "dvcman_new failed!");
		return ERROR_INTERNAL_ERROR;
	}

	return CHANNEL_RC_OK;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * CHANNEL_EVENT_CONNECTED handler: open the static channel, load and
 * initialize all configured DVC addins, then start the worker thread.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drdynvc_virtual_channel_event_connected(drdynvcPlugin* drdynvc, LPVOID pData,
                                                    UINT32 dataLength)
{
	UINT error;
	UINT32 status;
	UINT32 index;
	ADDIN_ARGV* args;
	rdpSettings* settings;
	WINPR_UNUSED(pData);
	WINPR_UNUSED(dataLength);

	if (!drdynvc)
		return CHANNEL_RC_BAD_CHANNEL_HANDLE;

	status = drdynvc->channelEntryPoints.pVirtualChannelOpenEx(
	    drdynvc->InitHandle, &drdynvc->OpenHandle, drdynvc->channelDef.name,
	    drdynvc_virtual_channel_open_event_ex);

	if (status != CHANNEL_RC_OK)
	{
		WLog_Print(drdynvc->log, WLOG_ERROR, "pVirtualChannelOpen failed with %s [%08" PRIX32 "]",
		           WTSErrorToString(status), status);
		return status;
	}

	/* Load each addin configured in the settings' dynamic channel list. */
	settings = (rdpSettings*)drdynvc->channelEntryPoints.pExtendedData;

	for (index = 0; index < settings->DynamicChannelCount; index++)
	{
		args = settings->DynamicChannelArray[index];
		error = dvcman_load_addin(drdynvc, drdynvc->channel_mgr, args, settings);

		if (CHANNEL_RC_OK != error)
			goto error;
	}

	if ((error = dvcman_init(drdynvc, drdynvc->channel_mgr)))
	{
		WLog_Print(drdynvc->log, WLOG_ERROR, "dvcman_init failed with error %" PRIu32 "!", error);
		goto error;
	}

	drdynvc->state = DRDYNVC_STATE_CAPABILITIES;

	if (!(drdynvc->thread = CreateThread(NULL, 0, drdynvc_virtual_channel_client_thread,
	                                     (void*)drdynvc, 0, NULL)))
	{
		error = ERROR_INTERNAL_ERROR;
		WLog_Print(drdynvc->log, WLOG_ERROR, "CreateThread failed!");
		goto error;
	}

	/* On the success path execution falls through here with
	 * error == CHANNEL_RC_OK (set by dvcman_init above). */
error:
	return error;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * CHANNEL_EVENT_DISCONNECTED handler: stop the worker thread, close the static
 * channel, clear the channel manager's lists and drop any partial input.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drdynvc_virtual_channel_event_disconnected(drdynvcPlugin* drdynvc)
{
	UINT status;

	if (!drdynvc)
		return CHANNEL_RC_BAD_CHANNEL_HANDLE;

	/* Never opened (or already closed): nothing to tear down. */
	if (drdynvc->OpenHandle == 0)
		return CHANNEL_RC_OK;

	if (!MessageQueue_PostQuit(drdynvc->queue, 0))
	{
		status = GetLastError();
		WLog_Print(drdynvc->log, WLOG_ERROR, "MessageQueue_PostQuit failed with error %" PRIu32 "",
		           status);
		return status;
	}

	/* Wait for the worker thread to drain the queue and exit. */
	if (WaitForSingleObject(drdynvc->thread, INFINITE) != WAIT_OBJECT_0)
	{
		status = GetLastError();
		WLog_Print(drdynvc->log, WLOG_ERROR, "WaitForSingleObject failed with error %" PRIu32 "",
		           status);
		return status;
	}

	CloseHandle(drdynvc->thread);
	drdynvc->thread = NULL;
	status = drdynvc->channelEntryPoints.pVirtualChannelCloseEx(drdynvc->InitHandle,
	                                                            drdynvc->OpenHandle);

	if (status != CHANNEL_RC_OK)
	{
		WLog_Print(drdynvc->log, WLOG_ERROR, "pVirtualChannelClose failed with %s [%08" PRIX32 "]",
		           WTSErrorToString(status), status);
	}

	dvcman_clear(drdynvc, drdynvc->channel_mgr);
	MessageQueue_Clear(drdynvc->queue);
	drdynvc->OpenHandle = 0;

	/* Release any partially reassembled static-channel input. */
	if (drdynvc->data_in)
	{
		Stream_Release(drdynvc->data_in);
		drdynvc->data_in = NULL;
	}

	return status;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
/**
 * CHANNEL_EVENT_TERMINATED handler: final teardown — frees the channel
 * manager, the message queue, the client context and the plugin itself.
 * The drdynvc pointer is invalid after this call.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT drdynvc_virtual_channel_event_terminated(drdynvcPlugin* drdynvc)
{
	if (!drdynvc)
		return CHANNEL_RC_BAD_CHANNEL_HANDLE;

	if (drdynvc->channel_mgr)
	{
		dvcman_free(drdynvc, drdynvc->channel_mgr);
		drdynvc->channel_mgr = NULL;
	}

	MessageQueue_Free(drdynvc->queue);
	drdynvc->queue = NULL;
	drdynvc->InitHandle = 0;
	free(drdynvc->context);
	free(drdynvc);
	return CHANNEL_RC_OK;
}
/* CHANNEL_EVENT_ATTACHED handler: forward the Attached notification to every
 * registered DVC plugin, stopping at the first failure. Returns 0 on success,
 * otherwise a Win32 error code. */
static UINT drdynvc_virtual_channel_event_attached(drdynvcPlugin* drdynvc)
{
	UINT rc = CHANNEL_RC_OK;
	DVCMAN* manager;
	size_t index;

	if (!drdynvc)
		return CHANNEL_RC_BAD_CHANNEL_HANDLE;

	manager = (DVCMAN*)drdynvc->channel_mgr;

	if (!manager)
		return CHANNEL_RC_BAD_CHANNEL_HANDLE;

	ArrayList_Lock(manager->plugins);

	for (index = 0; index < ArrayList_Count(manager->plugins); index++)
	{
		IWTSPlugin* plugin = ArrayList_GetItem(manager->plugins, index);

		/* A plugin without an Attached callback counts as success. */
		rc = IFCALLRESULT(CHANNEL_RC_OK, plugin->Attached, plugin);

		if (rc != CHANNEL_RC_OK)
		{
			WLog_Print(drdynvc->log, WLOG_ERROR, "Attach failed with error %" PRIu32 "!", rc);
			break;
		}
	}

	ArrayList_Unlock(manager->plugins);
	return rc;
}
/* CHANNEL_EVENT_DETACHED handler: forward the Detached notification to every
 * registered DVC plugin, stopping at the first failure. Returns 0 on success,
 * otherwise a Win32 error code. */
static UINT drdynvc_virtual_channel_event_detached(drdynvcPlugin* drdynvc)
{
	UINT rc = CHANNEL_RC_OK;
	DVCMAN* manager;
	size_t index;

	if (!drdynvc)
		return CHANNEL_RC_BAD_CHANNEL_HANDLE;

	manager = (DVCMAN*)drdynvc->channel_mgr;

	if (!manager)
		return CHANNEL_RC_BAD_CHANNEL_HANDLE;

	ArrayList_Lock(manager->plugins);

	for (index = 0; index < ArrayList_Count(manager->plugins); index++)
	{
		IWTSPlugin* plugin = ArrayList_GetItem(manager->plugins, index);

		/* A plugin without a Detached callback counts as success. */
		rc = IFCALLRESULT(CHANNEL_RC_OK, plugin->Detached, plugin);

		if (rc != CHANNEL_RC_OK)
		{
			WLog_Print(drdynvc->log, WLOG_ERROR, "Detach failed with error %" PRIu32 "!", rc);
			break;
		}
	}

	ArrayList_Unlock(manager->plugins);
	return rc;
}
/* Channel-system callback: dispatches virtual-channel lifecycle events
 * (init / connect / disconnect / terminate / attach / detach) to the
 * corresponding drdynvc handler. Any handler error is forwarded to the
 * rdpContext via setChannelError. */
static VOID VCAPITYPE drdynvc_virtual_channel_init_event_ex(LPVOID lpUserParam, LPVOID pInitHandle,
                                                            UINT event, LPVOID pData,
                                                            UINT dataLength)
{
	UINT error = CHANNEL_RC_OK;
	drdynvcPlugin* drdynvc = (drdynvcPlugin*)lpUserParam;
	/* the init handle must match the one registered in VirtualChannelEntryEx */
	if (!drdynvc || (drdynvc->InitHandle != pInitHandle))
	{
		WLog_ERR(TAG, "drdynvc_virtual_channel_init_event: error no match");
		return;
	}
	switch (event)
	{
		case CHANNEL_EVENT_INITIALIZED:
			error = drdynvc_virtual_channel_event_initialized(drdynvc, pData, dataLength);
			break;
		case CHANNEL_EVENT_CONNECTED:
			if ((error = drdynvc_virtual_channel_event_connected(drdynvc, pData, dataLength)))
				WLog_Print(drdynvc->log, WLOG_ERROR,
				           "drdynvc_virtual_channel_event_connected failed with error %" PRIu32 "",
				           error);
			break;
		case CHANNEL_EVENT_DISCONNECTED:
			if ((error = drdynvc_virtual_channel_event_disconnected(drdynvc)))
				WLog_Print(drdynvc->log, WLOG_ERROR,
				           "drdynvc_virtual_channel_event_disconnected failed with error %" PRIu32
				           "",
				           error);
			break;
		case CHANNEL_EVENT_TERMINATED:
			/* NOTE: this handler frees drdynvc itself; do not touch it afterwards
			 * except for the error forwarding below, which is why TERMINATED
			 * returning an error with a freed rdpcontext would be unsafe — the
			 * handler only fails when drdynvc is NULL, in which case rdpcontext
			 * is not dereferenced. */
			if ((error = drdynvc_virtual_channel_event_terminated(drdynvc)))
				WLog_Print(drdynvc->log, WLOG_ERROR,
				           "drdynvc_virtual_channel_event_terminated failed with error %" PRIu32 "",
				           error);
			break;
		case CHANNEL_EVENT_ATTACHED:
			if ((error = drdynvc_virtual_channel_event_attached(drdynvc)))
				WLog_Print(drdynvc->log, WLOG_ERROR,
				           "drdynvc_virtual_channel_event_attached failed with error %" PRIu32 "",
				           error);
			break;
		case CHANNEL_EVENT_DETACHED:
			if ((error = drdynvc_virtual_channel_event_detached(drdynvc)))
				WLog_Print(drdynvc->log, WLOG_ERROR,
				           "drdynvc_virtual_channel_event_detached failed with error %" PRIu32 "",
				           error);
			break;
		default:
			break;
	}
	if (error && drdynvc->rdpcontext)
		setChannelError(drdynvc->rdpcontext, error,
		                "drdynvc_virtual_channel_init_event_ex reported an error");
}
/**
* Channel Client Interface
*/
/* DrdynvcClientContext callback: report the negotiated DVC protocol version. */
static int drdynvc_get_version(DrdynvcClientContext* context)
{
	drdynvcPlugin* plugin = (drdynvcPlugin*)context->handle;
	return plugin->version;
}
/* drdynvc is always built-in */
#define VirtualChannelEntryEx drdynvc_VirtualChannelEntryEx
/* Channel entry point: allocates the drdynvc plugin instance, optionally
 * creates the DrdynvcClientContext (when the entry points carry the FreeRDP
 * extension), and registers the channel with the channel subsystem.
 *
 * Fix: drdynvc->log is now initialized immediately after the plugin is
 * allocated. Previously the context-allocation failure path called
 * WLog_Print(drdynvc->log, ...) while drdynvc->log was still NULL (it was
 * only assigned after the context block). */
BOOL VCAPITYPE VirtualChannelEntryEx(PCHANNEL_ENTRY_POINTS_EX pEntryPoints, PVOID pInitHandle)
{
	UINT rc;
	drdynvcPlugin* drdynvc;
	DrdynvcClientContext* context = NULL;
	CHANNEL_ENTRY_POINTS_FREERDP_EX* pEntryPointsEx;
	drdynvc = (drdynvcPlugin*)calloc(1, sizeof(drdynvcPlugin));

	if (!drdynvc)
	{
		WLog_ERR(TAG, "calloc failed!");
		return FALSE;
	}

	/* initialize the logger before any WLog_Print below may run */
	drdynvc->log = WLog_Get(TAG);
	drdynvc->channelDef.options =
	    CHANNEL_OPTION_INITIALIZED | CHANNEL_OPTION_ENCRYPT_RDP | CHANNEL_OPTION_COMPRESS_RDP;
	sprintf_s(drdynvc->channelDef.name, ARRAYSIZE(drdynvc->channelDef.name), "drdynvc");
	drdynvc->state = DRDYNVC_STATE_INITIAL;
	pEntryPointsEx = (CHANNEL_ENTRY_POINTS_FREERDP_EX*)pEntryPoints;

	/* the extended entry points (with context/magic) are optional */
	if ((pEntryPointsEx->cbSize >= sizeof(CHANNEL_ENTRY_POINTS_FREERDP_EX)) &&
	    (pEntryPointsEx->MagicNumber == FREERDP_CHANNEL_MAGIC_NUMBER))
	{
		context = (DrdynvcClientContext*)calloc(1, sizeof(DrdynvcClientContext));

		if (!context)
		{
			WLog_Print(drdynvc->log, WLOG_ERROR, "calloc failed!");
			free(drdynvc);
			return FALSE;
		}

		context->handle = (void*)drdynvc;
		context->custom = NULL;
		drdynvc->context = context;
		context->GetVersion = drdynvc_get_version;
		drdynvc->rdpcontext = pEntryPointsEx->context;
	}

	WLog_Print(drdynvc->log, WLOG_DEBUG, "VirtualChannelEntryEx");
	CopyMemory(&(drdynvc->channelEntryPoints), pEntryPoints,
	           sizeof(CHANNEL_ENTRY_POINTS_FREERDP_EX));
	drdynvc->InitHandle = pInitHandle;
	rc = drdynvc->channelEntryPoints.pVirtualChannelInitEx(
	    drdynvc, context, pInitHandle, &drdynvc->channelDef, 1, VIRTUAL_CHANNEL_VERSION_WIN2000,
	    drdynvc_virtual_channel_init_event_ex);

	if (CHANNEL_RC_OK != rc)
	{
		WLog_Print(drdynvc->log, WLOG_ERROR, "pVirtualChannelInit failed with %s [%08" PRIX32 "]",
		           WTSErrorToString(rc), rc);
		free(drdynvc->context);
		free(drdynvc);
		return FALSE;
	}

	drdynvc->channelEntryPoints.pInterface = context;
	return TRUE;
}
|
724520.c | #include <mongoc/mongoc-cursor-private.h>
#include "mongoc/mongoc.h"
#include "mongoc/mongoc-util-private.h"
#include "mongoc/mongoc-change-stream-private.h"
#include "mongoc/mongoc-collection-private.h"
#include "mongoc/utlist.h"
#include "TestSuite.h"
#include "test-conveniences.h"
#include "test-libmongoc.h"
#include "mock_server/mock-server.h"
#include "mock_server/future-functions.h"
#include "json-test.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "session-test"
static void
test_session_opts_clone (void)
{
   mongoc_session_opt_t *original;
   mongoc_session_opt_t *copied;

   original = mongoc_session_opts_new ();
   copied = mongoc_session_opts_clone (original);
   /* causalConsistency is enabled by default if snapshot is not enabled */
   BSON_ASSERT (mongoc_session_opts_get_causal_consistency (copied));
   mongoc_session_opts_destroy (copied);

   /* an explicitly disabled option must survive cloning */
   mongoc_session_opts_set_causal_consistency (original, false);
   copied = mongoc_session_opts_clone (original);
   BSON_ASSERT (!mongoc_session_opts_get_causal_consistency (copied));
   mongoc_session_opts_destroy (copied);
   mongoc_session_opts_destroy (original);
}
static void
test_session_opts_causal_consistency_and_snapshot (void)
{
   mongoc_session_opt_t *session_opts = mongoc_session_opts_new ();

   /* causalConsistency is enabled by default if snapshot is not enabled */
   BSON_ASSERT (mongoc_session_opts_get_causal_consistency (session_opts));
   BSON_ASSERT (!mongoc_session_opts_get_snapshot (session_opts));

   /* causalConsistency is disabled by default if snapshot is enabled */
   mongoc_session_opts_set_snapshot (session_opts, true);
   BSON_ASSERT (!mongoc_session_opts_get_causal_consistency (session_opts));
   BSON_ASSERT (mongoc_session_opts_get_snapshot (session_opts));

   /* causalConsistency and snapshot can both be enabled, although this will
    * result in an error when starting the session. */
   mongoc_session_opts_set_causal_consistency (session_opts, true);
   BSON_ASSERT (mongoc_session_opts_get_causal_consistency (session_opts));
   BSON_ASSERT (mongoc_session_opts_get_snapshot (session_opts));

   mongoc_session_opts_destroy (session_opts);
}
static void
test_session_no_crypto (void *ctx)
{
   bson_error_t error;
   mongoc_client_t *client = test_framework_new_default_client ();

   /* without a crypto library the driver refuses to start a session */
   BSON_ASSERT (!mongoc_client_start_session (client, NULL, &error));
   ASSERT_ERROR_CONTAINS (error,
                          MONGOC_ERROR_CLIENT,
                          MONGOC_ERROR_CLIENT_SESSION_FAILURE,
                          "need a cryptography library");

   mongoc_client_destroy (client);
}
/* assert that two lsid documents match (same server session) */
#define ASSERT_SESSIONS_MATCH(_lsid_a, _lsid_b) \
   do {                                         \
      assert_match_bson ((_lsid_a), (_lsid_b), false); \
   } while (0)
/* assert that two lsid documents differ (distinct server sessions) */
#define ASSERT_SESSIONS_DIFFER(_lsid_a, _lsid_b) \
   do {                                          \
      BSON_ASSERT (!match_bson ((_lsid_a), (_lsid_b), false)); \
   } while (0)
/* "Pool is LIFO" test from Driver Sessions Spec */
static void
_test_session_pool_lifo (bool pooled)
{
   mongoc_client_pool_t *pool = NULL;
   mongoc_client_t *client;
   mongoc_client_session_t *a, *b, *c, *d;
   bson_t lsid_a, lsid_b;
   bson_error_t error;
   if (pooled) {
      pool = test_framework_new_default_client_pool ();
      client = mongoc_client_pool_pop (pool);
   } else {
      client = test_framework_new_default_client ();
   }
   a = mongoc_client_start_session (client, NULL, &error);
   ASSERT_OR_PRINT (a, error);
   /* mark the server session as used so it is returned to the pool on
    * destroy (unused sessions are discarded) */
   a->server_session->last_used_usec = bson_get_monotonic_time ();
   bson_copy_to (mongoc_client_session_get_lsid (a), &lsid_a);
   b = mongoc_client_start_session (client, NULL, &error);
   ASSERT_OR_PRINT (b, error);
   b->server_session->last_used_usec = bson_get_monotonic_time ();
   bson_copy_to (mongoc_client_session_get_lsid (b), &lsid_b);
   /* return server sessions to pool: first "a", then "b" */
   mongoc_client_session_destroy (a);
   mongoc_client_session_destroy (b);
   /* first pop returns last push */
   c = mongoc_client_start_session (client, NULL, &error);
   ASSERT_OR_PRINT (c, error);
   ASSERT_SESSIONS_MATCH (&lsid_b, mongoc_client_session_get_lsid (c));
   /* second pop returns previous push */
   d = mongoc_client_start_session (client, NULL, &error);
   ASSERT_OR_PRINT (d, error);
   ASSERT_SESSIONS_MATCH (&lsid_a, mongoc_client_session_get_lsid (d));
   mongoc_client_session_destroy (c);
   mongoc_client_session_destroy (d);
   if (pooled) {
      /* the pooled client never needed to connect, so it warns that
       * it isn't connecting in order to send endSessions */
      capture_logs (true);
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy (client);
   }
   bson_destroy (&lsid_a);
   bson_destroy (&lsid_b);
}
/* run the LIFO session-pool test with a single client */
static void
test_session_pool_lifo_single (void *ctx)
{
   _test_session_pool_lifo (false);
}
/* run the LIFO session-pool test with a client from a client pool */
static void
test_session_pool_lifo_pooled (void *ctx)
{
   _test_session_pool_lifo (true);
}
/* test that a session that is timed out is not added to the pool,
* and a session that times out while it's in the pool is destroyed
*/
static void
_test_session_pool_timeout (bool pooled)
{
   mongoc_client_pool_t *pool = NULL;
   mongoc_client_t *client;
   uint32_t server_id;
   mongoc_client_session_t *s;
   bson_error_t error;
   bson_t lsid;
   int64_t almost_timeout_usec;
   /* one minute short of the server's logicalSessionTimeoutMinutes */
   almost_timeout_usec =
      (test_framework_session_timeout_minutes () - 1) * 60 * 1000 * 1000;
   if (pooled) {
      pool = test_framework_new_default_client_pool ();
      client = mongoc_client_pool_pop (pool);
   } else {
      client = test_framework_new_default_client ();
   }
   /*
    * trigger discovery
    */
   server_id = mongoc_topology_select_server_id (
      client->topology, MONGOC_SS_READ, NULL, &error);
   ASSERT_OR_PRINT (server_id, error);
   /*
    * get a session, set last_used_date more than 29 minutes ago and return to
    * the pool. it's timed out & freed.
    */
   BSON_ASSERT (
      mongoc_server_session_pool_is_empty (client->topology->session_pool));
   s = mongoc_client_start_session (client, NULL, &error);
   ASSERT_OR_PRINT (s, error);
   bson_copy_to (mongoc_client_session_get_lsid (s), &lsid);
   s->server_session->last_used_usec =
      (bson_get_monotonic_time () - almost_timeout_usec - 100);
   mongoc_client_session_destroy (s);
   BSON_ASSERT (
      mongoc_server_session_pool_is_empty (client->topology->session_pool));
   /*
    * get a new session, set last_used_date so it has one second left to live,
    * return to the pool, wait 1.5 seconds. it's timed out & freed.
    */
   s = mongoc_client_start_session (client, NULL, &error);
   ASSERT_SESSIONS_DIFFER (&lsid, mongoc_client_session_get_lsid (s));
   bson_destroy (&lsid);
   bson_copy_to (mongoc_client_session_get_lsid (s), &lsid);
   s->server_session->last_used_usec =
      (bson_get_monotonic_time () + 1000 * 1000 - almost_timeout_usec);
   mongoc_client_session_destroy (s);
   BSON_ASSERT (
      !mongoc_server_session_pool_is_empty (client->topology->session_pool));
   _mongoc_usleep (1500 * 1000);
   /* getting a new client session must start a new server session */
   s = mongoc_client_start_session (client, NULL, &error);
   ASSERT_SESSIONS_DIFFER (&lsid, mongoc_client_session_get_lsid (s));
   BSON_ASSERT (
      mongoc_server_session_pool_is_empty (client->topology->session_pool));
   mongoc_client_session_destroy (s);
   if (pooled) {
      /* the pooled client never needed to connect, so it warns that
       * it isn't connecting in order to send endSessions */
      capture_logs (true);
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy (client);
   }
   bson_destroy (&lsid);
}
/* run the session-pool timeout test with a single client */
static void
test_session_pool_timeout_single (void *ctx)
{
   _test_session_pool_timeout (false);
}
/* run the session-pool timeout test with a client from a client pool */
static void
test_session_pool_timeout_pooled (void *ctx)
{
   _test_session_pool_timeout (true);
}
/* test that a session that times out while it's in the pool is reaped when
* another session is added
*/
static void
_test_session_pool_reap (bool pooled)
{
   mongoc_client_pool_t *pool = NULL;
   mongoc_client_t *client;
   mongoc_client_session_t *a, *b;
   bool r;
   bson_error_t error;
   bson_t lsid_a, lsid_b;
   int64_t almost_timeout_usec;
   mongoc_server_session_t *ss;
   /* one minute short of the server's logicalSessionTimeoutMinutes */
   almost_timeout_usec =
      (test_framework_session_timeout_minutes () - 1) * 60 * 1000 * 1000;
   if (pooled) {
      pool = test_framework_new_default_client_pool ();
      client = mongoc_client_pool_pop (pool);
   } else {
      client = test_framework_new_default_client ();
   }
   /*
    * trigger discovery
    */
   r = mongoc_client_command_simple (
      client, "admin", tmp_bson ("{'ping': 1}"), NULL, NULL, &error);
   ASSERT_OR_PRINT (r, error);
   /*
    * get a new session, set last_used_date so it has one second left to live,
    * return to the pool, wait 1.5 seconds.
    */
   a = mongoc_client_start_session (client, NULL, &error);
   b = mongoc_client_start_session (client, NULL, &error);
   bson_copy_to (mongoc_client_session_get_lsid (a), &lsid_a);
   bson_copy_to (mongoc_client_session_get_lsid (b), &lsid_b);
   a->server_session->last_used_usec =
      (bson_get_monotonic_time () + 1000 * 1000 - almost_timeout_usec);
   mongoc_client_session_destroy (a);
   BSON_ASSERT (!mongoc_server_session_pool_is_empty (
      client->topology->session_pool)); /* session is pooled */
   _mongoc_usleep (1500 * 1000);
   /*
    * returning session B causes session A to be reaped
    */
   b->server_session->last_used_usec = bson_get_monotonic_time ();
   mongoc_client_session_destroy (b);
   BSON_ASSERT (
      !mongoc_server_session_pool_is_empty (client->topology->session_pool));
   ss =
      mongoc_server_session_pool_get_existing (client->topology->session_pool);
   BSON_ASSERT (ss);
   /* only B's server session survives in the pool */
   ASSERT_SESSIONS_MATCH (&ss->lsid, &lsid_b);
   mongoc_server_session_pool_return (ss);
   if (pooled) {
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy (client);
   }
   bson_destroy (&lsid_a);
   bson_destroy (&lsid_b);
}
/* run the session-pool reap test with a single client */
static void
test_session_pool_reap_single (void *ctx)
{
   _test_session_pool_reap (false);
}
/* run the session-pool reap test with a client from a client pool */
static void
test_session_pool_reap_pooled (void *ctx)
{
   _test_session_pool_reap (true);
}
/* each malformed "sessionId" option must be rejected with
 * MONGOC_ERROR_COMMAND_INVALID_ARG before any command is sent */
static void
test_session_id_bad (void *ctx)
{
   const char *bad_opts[] = {
      "{'sessionId': null}",
      "{'sessionId': 'foo'}",
      "{'sessionId': {'$numberInt': '1'}}",
      "{'sessionId': {'$numberDouble': '1'}}",
      /* doesn't fit in uint32 */
      "{'sessionId': {'$numberLong': '5000000000'}}",
      /* doesn't match existing mongoc_client_session_t */
      "{'sessionId': {'$numberLong': '123'}}",
      NULL,
   };
   const char **bad_opt;
   mongoc_client_t *client;
   bson_error_t error;
   bool r;
   client = test_framework_new_default_client ();
   for (bad_opt = bad_opts; *bad_opt; bad_opt++) {
      r = mongoc_client_read_command_with_opts (client,
                                                "admin",
                                                tmp_bson ("{'ping': 1}"),
                                                NULL,
                                                tmp_bson (*bad_opt),
                                                NULL,
                                                &error);
      BSON_ASSERT (!r);
      ASSERT_ERROR_CONTAINS (error,
                             MONGOC_ERROR_COMMAND,
                             MONGOC_ERROR_COMMAND_INVALID_ARG,
                             "Invalid sessionId");
      /* clear the error so a stale value can't satisfy the next iteration */
      memset (&error, 0, sizeof (bson_error_t));
   }
   mongoc_client_destroy (client);
}
/* starting a session succeeds iff the server advertises
 * logicalSessionTimeoutMinutes (-1 means sessions are unsupported) */
static void
_test_session_supported (bool pooled)
{
   mongoc_client_pool_t *pool = NULL;
   mongoc_client_t *client;
   bson_error_t error;
   mongoc_client_session_t *session;
   if (pooled) {
      pool = test_framework_new_default_client_pool ();
      client = mongoc_client_pool_pop (pool);
   } else {
      client = test_framework_new_default_client ();
   }
   if (test_framework_session_timeout_minutes () == -1) {
      BSON_ASSERT (!mongoc_client_start_session (client, NULL, &error));
      ASSERT_ERROR_CONTAINS (error,
                             MONGOC_ERROR_CLIENT,
                             MONGOC_ERROR_CLIENT_SESSION_FAILURE,
                             "Server does not support sessions");
   } else {
      session = mongoc_client_start_session (client, NULL, &error);
      ASSERT_OR_PRINT (session, error);
      mongoc_client_session_destroy (session);
   }
   if (pooled) {
      /* the pooled client never needed to connect, so it warns that
       * it isn't connecting in order to send endSessions */
      capture_logs (true);
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy (client);
   }
}
/* run the session-supported test with a single client */
static void
test_session_supported_single (void *ctx)
{
   _test_session_supported (false);
}
/* run the session-supported test with a client from a client pool */
static void
test_session_supported_pooled (void *ctx)
{
   _test_session_supported (true);
}
/* verify against a mock mongos that destroying the client sends a single
 * endSessions command containing the session's lsid */
static void
_test_mock_end_sessions (bool pooled)
{
   mock_server_t *server;
   mongoc_client_pool_t *pool = NULL;
   mongoc_client_t *client;
   bson_error_t error;
   mongoc_client_session_t *session;
   bson_t lsid;
   bson_t opts = BSON_INITIALIZER;
   bson_t *expected_cmd;
   future_t *future;
   request_t *request;
   bool r;
   server = mock_mongos_new (WIRE_VERSION_OP_MSG);
   mock_server_run (server);
   if (pooled) {
      pool = test_framework_client_pool_new_from_uri (
         mock_server_get_uri (server), NULL);
      client = mongoc_client_pool_pop (pool);
   } else {
      client = test_framework_client_new_from_uri (mock_server_get_uri (server),
                                                   NULL);
   }
   session = mongoc_client_start_session (client, NULL, &error);
   ASSERT_OR_PRINT (session, error);
   bson_copy_to (mongoc_client_session_get_lsid (session), &lsid);
   r = mongoc_client_session_append (session, &opts, &error);
   ASSERT_OR_PRINT (r, error);
   /* run one command with the session so its lsid is actually used */
   future = future_client_command_with_opts (
      client, "admin", tmp_bson ("{'ping': 1}"), NULL, &opts, NULL, &error);
   request = mock_server_receives_msg (
      server, 0, tmp_bson ("{'ping': 1, 'lsid': {'$exists': true}}"));
   mock_server_replies_ok_and_destroys (request);
   BSON_ASSERT (future_get_bool (future));
   future_destroy (future);
   /* before destroying the session, construct the expected endSessions cmd */
   expected_cmd =
      BCON_NEW ("endSessions",
                "[",
                BCON_DOCUMENT (mongoc_client_session_get_lsid (session)),
                "]");
   mongoc_client_session_destroy (session);
   if (pooled) {
      mongoc_client_pool_push (pool, client);
      future = future_client_pool_destroy (pool);
   } else {
      future = future_client_destroy (client);
   }
   /* check that we got the expected endSessions cmd */
   request = mock_server_receives_msg (server, 0, expected_cmd);
   mock_server_replies_ok_and_destroys (request);
   future_wait (future);
   future_destroy (future);
   mock_server_destroy (server);
   bson_destroy (expected_cmd);
   bson_destroy (&lsid);
   bson_destroy (&opts);
}
/* run the mock endSessions test with a single client */
static void
test_mock_end_sessions_single (void)
{
   _test_mock_end_sessions (false);
}
/* run the mock endSessions test with a client from a client pool */
static void
test_mock_end_sessions_pooled (void)
{
   _test_mock_end_sessions (true);
}
/* Test for CDRIVER-3587 - Do not reuse server stream that becomes invalid on
* failure to end session */
static void
test_mock_end_sessions_server_disconnect (void)
{
mock_server_t *server;
mongoc_client_t *client;
bson_error_t error;
mongoc_client_session_t *session[12000];
future_t *future;
uint16_t i;
server = mock_mongos_new (WIRE_VERSION_OP_MSG);
mock_server_run (server);
client =
test_framework_client_new_from_uri (mock_server_get_uri (server), NULL);
for (i = 0; i < 12000; i++) {
session[i] = mongoc_client_start_session (client, NULL, &error);
ASSERT_OR_PRINT (session[i], error);
}
/* Simulate server failure or network failure. Destroy the mock server here
* rather than at the end of the test so that the 'endSessions' commands fail
* to reach the mock server. */
mock_server_destroy (server);
/* The below calls to mongoc_client_session_destroy () will produce a warning
* regarding the inability to send the 'endSessions' command. */
capture_logs (true);
for (i = 0; i < 12000; i++) {
mongoc_client_session_destroy (session[i]);
}
/* The above loop will add each session back to the session pool. If
* CDRIVER-3587 has not been fixed, the mongoc_client_destroy () call below
* will create 'endSessions' commands which will be sent but fail to reach
* the server; the associated server stream will not be correctly
* invalidated. Subsequent reuse of the stream, as in the attempt to send
* the second batch of 10,000 during the attempt to destroy the client, will
* trigger a segfault. */
future = future_client_destroy (client);
future_wait (future);
future_destroy (future);
}
/* shared state for the endSessions APM-callback tests */
typedef struct {
   int started_calls;          /* number of endSessions commandStarted events */
   int succeeded_calls;        /* number of endSessions commandSucceeded events */
   mongoc_array_t cmds;        /* copies (bson_t *) of each endSessions command */
   mongoc_client_pool_t *pool; /* NULL when testing a single client */
   mongoc_client_t *client;
} endsessions_test_t;
/* commandStarted APM callback: record every endSessions command that the
 * client sends, ignoring all other commands. */
static void
endsessions_started_cb (const mongoc_apm_command_started_t *event)
{
   endsessions_test_t *test;
   bson_t *cmd_copy;
   const char *name = mongoc_apm_command_started_get_command_name (event);

   if (0 != strcmp (name, "endSessions")) {
      return;
   }

   test = (endsessions_test_t *) mongoc_apm_command_started_get_context (event);
   test->started_calls++;
   /* keep a copy; the event's command is owned by the driver */
   cmd_copy = bson_copy (mongoc_apm_command_started_get_command (event));
   _mongoc_array_append_vals (&test->cmds, &cmd_copy, 1);
}
/* commandSucceeded APM callback: count successful endSessions commands. */
static void
endsessions_succeeded_cb (const mongoc_apm_command_succeeded_t *event)
{
   endsessions_test_t *test;
   const char *name = mongoc_apm_command_succeeded_get_command_name (event);

   if (0 != strcmp (name, "endSessions")) {
      return;
   }

   test =
      (endsessions_test_t *) mongoc_apm_command_succeeded_get_context (event);
   test->succeeded_calls++;
}
/* initialize the endSessions test fixture: zero the counters, create the
 * client (pooled or single) and install the APM callbacks above */
static void
endsessions_test_init (endsessions_test_t *test, bool pooled)
{
   mongoc_apm_callbacks_t *callbacks;
   test->started_calls = test->succeeded_calls = 0;
   _mongoc_array_init (&test->cmds, sizeof (bson_t *));
   callbacks = mongoc_apm_callbacks_new ();
   mongoc_apm_set_command_started_cb (callbacks, endsessions_started_cb);
   mongoc_apm_set_command_succeeded_cb (callbacks, endsessions_succeeded_cb);
   if (pooled) {
      /* pool callbacks must be set before the first client is popped */
      test->pool = test_framework_new_default_client_pool ();
      ASSERT (
         mongoc_client_pool_set_apm_callbacks (test->pool, callbacks, test));
      test->client = mongoc_client_pool_pop (test->pool);
   } else {
      test->pool = NULL;
      test->client = test_framework_new_default_client ();
      ASSERT (mongoc_client_set_apm_callbacks (test->client, callbacks, test));
   }
   mongoc_apm_callbacks_destroy (callbacks);
}
/* destroy the fixture's client (and pool, when one was created); this is
 * what triggers the endSessions command under test */
static void
endsessions_test_destroy_client (endsessions_test_t *test)
{
   if (!test->pool) {
      mongoc_client_destroy (test->client);
   } else {
      mongoc_client_pool_push (test->pool, test->client);
      mongoc_client_pool_destroy (test->pool);
   }
}
/* extract the lsid array from the index-th recorded endSessions command */
static void
endsessions_test_get_ended_lsids (endsessions_test_t *test,
                                  size_t index,
                                  bson_t *ended_lsids)
{
   bson_t *recorded_cmd;
   bson_iter_t iter;

   ASSERT_CMPINT (test->started_calls, >, (int) index);
   recorded_cmd = _mongoc_array_index (&test->cmds, bson_t *, index);
   BSON_ASSERT (bson_iter_init_find (&iter, recorded_cmd, "endSessions"));
   BSON_ASSERT (BSON_ITER_HOLDS_ARRAY (&iter));
   bson_iter_bson (&iter, ended_lsids);
}
/* free the recorded command copies and the array that holds them */
static void
endsessions_test_cleanup (endsessions_test_t *test)
{
   size_t idx;

   for (idx = 0; idx < test->cmds.len; idx++) {
      bson_t *recorded_cmd = _mongoc_array_index (&test->cmds, bson_t *, idx);
      bson_destroy (recorded_cmd);
   }

   _mongoc_array_destroy (&test->cmds);
}
/* use two sessions, destroy the client, and verify both lsids appear in a
 * single endSessions command observed via the APM callbacks */
static void
_test_end_sessions (bool pooled)
{
   endsessions_test_t test;
   mongoc_client_t *client;
   bson_error_t error;
   mongoc_client_session_t *cs1;
   mongoc_client_session_t *cs2;
   bson_t lsid1;
   bson_t lsid2;
   bson_t opts1 = BSON_INITIALIZER;
   bson_t opts2 = BSON_INITIALIZER;
   bool lsid1_ended = false;
   bool lsid2_ended = false;
   bson_t ended_lsids;
   bson_iter_t iter;
   bson_t ended_lsid;
   match_ctx_t ctx = {{0}};
   bool r;
   endsessions_test_init (&test, pooled);
   client = test.client;
   /*
    * create and use sessions 1 and 2
    */
   cs1 = mongoc_client_start_session (client, NULL, &error);
   ASSERT_OR_PRINT (cs1, error);
   bson_copy_to (mongoc_client_session_get_lsid (cs1), &lsid1);
   r = mongoc_client_session_append (cs1, &opts1, &error);
   ASSERT_OR_PRINT (r, error);
   r = mongoc_client_command_with_opts (
      client, "admin", tmp_bson ("{'count': 'c'}"), NULL, &opts1, NULL, &error);
   ASSERT_OR_PRINT (r, error);
   cs2 = mongoc_client_start_session (client, NULL, &error);
   ASSERT_OR_PRINT (cs2, error);
   bson_copy_to (mongoc_client_session_get_lsid (cs2), &lsid2);
   r = mongoc_client_session_append (cs2, &opts2, &error);
   ASSERT_OR_PRINT (r, error);
   r = mongoc_client_command_with_opts (
      client, "admin", tmp_bson ("{'count': 'c'}"), NULL, &opts2, NULL, &error);
   ASSERT_OR_PRINT (r, error);
   /*
    * return server sessions to the pool
    */
   mongoc_client_session_destroy (cs1);
   mongoc_client_session_destroy (cs2);
   endsessions_test_destroy_client (&test);
   /*
    * sessions were ended on server
    */
   ASSERT_CMPINT (test.started_calls, ==, 1);
   ASSERT_CMPINT (test.succeeded_calls, ==, 1);
   endsessions_test_get_ended_lsids (&test, 0, &ended_lsids);
   /* the endSessions array may list the lsids in either order */
   BSON_ASSERT (bson_iter_init (&iter, &ended_lsids));
   while (bson_iter_next (&iter)) {
      BSON_ASSERT (BSON_ITER_HOLDS_DOCUMENT (&iter));
      bson_iter_bson (&iter, &ended_lsid);
      if (match_bson_with_ctx (&ended_lsid, &lsid1, &ctx)) {
         lsid1_ended = true;
      } else if (match_bson_with_ctx (&ended_lsid, &lsid2, &ctx)) {
         lsid2_ended = true;
      }
   }
   BSON_ASSERT (lsid1_ended);
   BSON_ASSERT (lsid2_ended);
   bson_destroy (&lsid1);
   bson_destroy (&opts1);
   bson_destroy (&lsid2);
   bson_destroy (&opts2);
   endsessions_test_cleanup (&test);
}
/* run the endSessions test with a single client */
static void
test_end_sessions_single (void *ctx)
{
   _test_end_sessions (false);
}
/* run the endSessions test with a client from a client pool */
static void
test_end_sessions_pooled (void *ctx)
{
   _test_end_sessions (true);
}
/* Sends ping to server via client_session. useful for marking
* server_sessions as used so that they are pushed back to the session pool */
/* Sends ping to server via client_session. useful for marking
 * server_sessions as used so that they are pushed back to the session pool */
static void
send_ping (mongoc_client_t *client, mongoc_client_session_t *client_session)
{
   bson_t cmd = BSON_INITIALIZER;
   bson_t cmd_opts = BSON_INITIALIZER;
   bson_error_t error;
   bool ok;

   BCON_APPEND (&cmd, "ping", BCON_INT32 (1));
   ok = mongoc_client_session_append (client_session, &cmd_opts, &error);
   ASSERT_OR_PRINT (ok, error);

   ok = mongoc_client_command_with_opts (
      client, "admin", &cmd, NULL, &cmd_opts, NULL, &error);
   ASSERT_OR_PRINT (ok, error);

   bson_destroy (&cmd_opts);
   bson_destroy (&cmd);
}
/* End 10,001 sessions and verify the driver batches them into two
 * endSessions commands of 10,000 and 1 lsids.
 *
 * Fix: the loop index was `int` while the bound is a `size_t` expression
 * (`sizeof sessions / sizeof ...`), a signed/unsigned comparison; the index
 * is now size_t and the count is computed once. */
static void
_test_end_sessions_many (bool pooled)
{
   endsessions_test_t test;
   mongoc_client_t *client;
   size_t i;
   mongoc_client_session_t *sessions[10001];
   const size_t n_sessions = sizeof sessions / sizeof sessions[0];
   bson_error_t error;
   bson_t ended_lsids;
   endsessions_test_init (&test, pooled);
   client = test.client;
   /* connect */
   ASSERT_OR_PRINT (
      mongoc_client_command_simple (
         client, "admin", tmp_bson ("{'ping': 1}"), NULL, NULL, &error),
      error);
   /*
    * create and destroy 10,001 sessions
    */
   for (i = 0; i < n_sessions; i++) {
      sessions[i] = mongoc_client_start_session (client, NULL, &error);
      ASSERT_OR_PRINT (sessions[i], error);
      /* mark the server session as used so it returns to the pool */
      send_ping (client, sessions[i]);
   }
   for (i = 0; i < n_sessions; i++) {
      mongoc_client_session_destroy (sessions[i]);
   }
   endsessions_test_destroy_client (&test);
   /*
    * sessions were ended on the server, ten thousand at a time
    */
   ASSERT_CMPINT (test.started_calls, ==, 2);
   ASSERT_CMPINT (test.succeeded_calls, ==, 2);
   endsessions_test_get_ended_lsids (&test, 0, &ended_lsids);
   ASSERT_CMPINT (bson_count_keys (&ended_lsids), ==, 10000);
   endsessions_test_get_ended_lsids (&test, 1, &ended_lsids);
   ASSERT_CMPINT (bson_count_keys (&ended_lsids), ==, 1);
   endsessions_test_cleanup (&test);
}
/* run the many-endSessions batching test with a single client */
static void
test_end_sessions_many_single (void *ctx)
{
   _test_end_sessions_many (false);
}
/* run the many-endSessions batching test with a client from a client pool */
static void
test_end_sessions_many_pooled (void *ctx)
{
   _test_end_sessions_many (true);
}
/* advance the session's $clusterTime to (new_timestamp, new_increment) and
 * assert it either advanced or was kept, per should_advance */
static void
_test_advance_cluster_time (mongoc_client_session_t *cs,
                            int new_timestamp,
                            int new_increment,
                            bool should_advance)
{
   bson_t *old_cluster_time;
   bson_t *new_cluster_time;
   old_cluster_time = bson_copy (mongoc_client_session_get_cluster_time (cs));
   /* tmp_bson memory is managed by the test framework; no destroy needed */
   new_cluster_time =
      tmp_bson ("{'clusterTime': {'$timestamp': {'t': %d, 'i': %d}}}",
                new_timestamp,
                new_increment);
   mongoc_client_session_advance_cluster_time (cs, new_cluster_time);
   if (should_advance) {
      assert_match_bson (
         mongoc_client_session_get_cluster_time (cs), new_cluster_time, false);
   } else {
      /* an older or equal cluster time must not move the session backwards */
      assert_match_bson (
         mongoc_client_session_get_cluster_time (cs), old_cluster_time, false);
   }
   bson_destroy (old_cluster_time);
}
/* malformed cluster-time documents are rejected with a logged error, and a
 * valid cluster time only advances monotonically */
static void
test_session_advance_cluster_time (void *ctx)
{
   mongoc_client_t *client;
   bson_error_t error;
   mongoc_client_session_t *cs;
   client = test_framework_new_default_client ();
   cs = mongoc_client_start_session (client, NULL, &error);
   ASSERT_OR_PRINT (cs, error);
   /* a fresh session has no cluster time yet */
   BSON_ASSERT (!mongoc_client_session_get_cluster_time (cs));
   capture_logs (true);
   mongoc_client_session_advance_cluster_time (cs, tmp_bson ("{'foo': 1}"));
   ASSERT_CAPTURED_LOG ("mongoc_client_session_advance_cluster_time",
                        MONGOC_LOG_LEVEL_ERROR,
                        "Cannot parse cluster time");
   capture_logs (true);
   mongoc_client_session_advance_cluster_time (cs,
                                               tmp_bson ("{'clusterTime': 1}"));
   ASSERT_CAPTURED_LOG ("mongoc_client_session_advance_cluster_time",
                        MONGOC_LOG_LEVEL_ERROR,
                        "Cannot parse cluster time");
   /* seed a valid cluster time, then test monotonic advancement */
   mongoc_client_session_advance_cluster_time (
      cs, tmp_bson ("{'clusterTime': {'$timestamp': {'t': 1, 'i': 1}}}"));
   _test_advance_cluster_time (cs, 1, 0, false);
   _test_advance_cluster_time (cs, 2, 2, true);
   _test_advance_cluster_time (cs, 2, 1, false);
   _test_advance_cluster_time (cs, 3, 1, true);
   mongoc_client_session_destroy (cs);
   mongoc_client_destroy (client);
}
/* advance the session's operationTime to (t, i) and assert it either
 * advanced or stayed put, per should_advance */
static void
_test_advance_operation_time (mongoc_client_session_t *cs,
                              uint32_t t,
                              uint32_t i,
                              bool should_advance)
{
   uint32_t old_t, old_i;
   uint32_t new_t, new_i;
   mongoc_client_session_get_operation_time (cs, &old_t, &old_i);
   mongoc_client_session_advance_operation_time (cs, t, i);
   mongoc_client_session_get_operation_time (cs, &new_t, &new_i);
   if (should_advance) {
      ASSERT_CMPUINT32 (new_t, ==, t);
      ASSERT_CMPUINT32 (new_i, ==, i);
   } else if (new_t == t && new_i == i) {
      /* advancing to an older (t, i) must be a no-op */
      fprintf (stderr,
               "Shouldn't have advanced from operationTime %" PRIu32
               ", %" PRIu32 " to %" PRIu32 ", %" PRIu32 "\n",
               old_t,
               old_i,
               t,
               i);
      abort ();
   }
}
/* a new session's operationTime is (0, 0) and advances only monotonically.
 *
 * Fix: the second initial assertion checked "t" twice and never verified the
 * increment; it now checks "i". */
static void
test_session_advance_operation_time (void *ctx)
{
   mongoc_client_t *client;
   bson_error_t error;
   mongoc_client_session_t *cs;
   uint32_t t, i;
   client = test_framework_new_default_client ();
   cs = mongoc_client_start_session (client, NULL, &error);
   ASSERT_OR_PRINT (cs, error);
   mongoc_client_session_get_operation_time (cs, &t, &i);
   ASSERT_CMPUINT32 (t, ==, 0);
   ASSERT_CMPUINT32 (i, ==, 0);
   mongoc_client_session_advance_operation_time (cs, 1, 1);
   _test_advance_operation_time (cs, 1, 0, false);
   _test_advance_operation_time (cs, 2, 2, true);
   _test_advance_operation_time (cs, 2, 1, false);
   _test_advance_operation_time (cs, 3, 1, true);
   mongoc_client_session_destroy (cs);
   mongoc_client_destroy (client);
}
/* whether an operation is run on the client that owns the session or on a
 * different client — presumably to test lsid/client mismatch rejection;
 * usage is outside this view, confirm against session_test_new callers */
typedef enum {
   CORRECT_CLIENT,
   INCORRECT_CLIENT,
} session_test_correct_t;
/* whether the session under test is causally consistent */
typedef enum {
   CAUSAL,
   NOT_CAUSAL,
} session_test_causal_t;
/* fixture shared by the session tests and the APM callbacks below */
typedef struct {
   bool verbose;               /* dump commands/replies to stdout */
   mongoc_client_t *session_client, *client;
   mongoc_database_t *session_db, *db;
   mongoc_collection_t *session_collection, *collection;
   mongoc_client_session_t *cs;       /* the session under test */
   mongoc_client_session_t *wrong_cs; /* a session from another client */
   bson_t opts;
   bson_error_t error;
   int n_started;   /* commandStarted events seen (excluding endSessions) */
   int n_succeeded; /* commandSucceeded events seen (excluding endSessions) */
   bool expect_explicit_lsid; /* commands must carry cs's lsid (vs. implicit) */
   bool acknowledged;         /* acknowledged writes: lsid/$clusterTime required */
   bool succeeded;
   mongoc_array_t cmds;    /* bson_t* copies of started commands */
   mongoc_array_t replies; /* bson_t* copies of succeeded replies */
   bson_t sent_lsid;       /* lsid observed on the wire, same for all cmds */
   bson_t sent_cluster_time;
   bson_t received_cluster_time;
} session_test_t;
/* commandStarted APM callback: verify every (non-endSessions) command sends
 * the expected lsid — the client session's own lsid when an explicit session
 * is used, some other (implicit) lsid otherwise, and the same lsid across
 * all commands — and record the command and its $clusterTime */
static void
started (const mongoc_apm_command_started_t *event)
{
   match_ctx_t ctx = {{0}};
   bson_iter_t iter;
   bool has_cluster_time;
   bson_t cluster_time;
   bson_t lsid;
   const bson_t *client_session_lsid;
   bson_t *cmd = bson_copy (mongoc_apm_command_started_get_command (event));
   const char *cmd_name = mongoc_apm_command_started_get_command_name (event);
   session_test_t *test =
      (session_test_t *) mongoc_apm_command_started_get_context (event);
   ctx.strict_numeric_types = false;
   if (test->verbose) {
      char *s = bson_as_json (cmd, NULL);
      printf ("%s\n", s);
      bson_free (s);
   }
   /* endSessions is sent sessionless by design; don't record it */
   if (!strcmp (cmd_name, "endSessions")) {
      BSON_ASSERT (!bson_has_field (cmd, "lsid"));
      bson_destroy (cmd);
      return;
   }
   if (test->acknowledged) {
      if (!bson_iter_init_find (&iter, cmd, "lsid")) {
         fprintf (stderr, "no lsid sent with command %s\n", cmd_name);
         abort ();
      }
      bson_iter_bson (&iter, &lsid);
      client_session_lsid = &test->cs->server_session->lsid;
      if (test->expect_explicit_lsid) {
         if (!match_bson_with_ctx (&lsid, client_session_lsid, &ctx)) {
            fprintf (stderr,
                     "command %s should have used client session's lsid\n",
                     cmd_name);
            abort ();
         }
      } else {
         if (match_bson_with_ctx (&lsid, client_session_lsid, &ctx)) {
            fprintf (stderr,
                     "command %s should not have used client session's lsid\n",
                     cmd_name);
            abort ();
         }
      }
      /* first command seen: remember its lsid; later commands must reuse it */
      if (bson_empty (&test->sent_lsid)) {
         bson_destroy (&test->sent_lsid);
         bson_copy_to (&lsid, &test->sent_lsid);
      } else {
         if (!match_bson_with_ctx (&lsid, &test->sent_lsid, &ctx)) {
            fprintf (stderr,
                     "command %s used different lsid than previous command\n",
                     cmd_name);
            abort ();
         }
      }
   } else {
      /* unacknowledged commands should never include lsid */
      BSON_ASSERT (!bson_has_field (cmd, "lsid"));
   }
   has_cluster_time = bson_iter_init_find (&iter, cmd, "$clusterTime");
   if (test->acknowledged && !has_cluster_time) {
      fprintf (stderr, "no $clusterTime sent with command %s\n", cmd_name);
      abort ();
   }
   if (has_cluster_time) {
      /* like $clusterTime: {clusterTime: <timestamp>} */
      bson_iter_bson (&iter, &cluster_time);
      bson_destroy (&test->sent_cluster_time);
      bson_copy_to (&cluster_time, &test->sent_cluster_time);
   }
   _mongoc_array_append_vals (&test->cmds, &cmd, 1);
   test->n_started++;
}
/* commandSucceeded APM callback: check that acknowledged replies carry
 * $clusterTime, record the reply and the latest received cluster time */
static void
succeeded (const mongoc_apm_command_succeeded_t *event)
{
   bson_iter_t iter;
   bool has_cluster_time;
   bson_t cluster_time;
   bson_t *reply = bson_copy (mongoc_apm_command_succeeded_get_reply (event));
   const char *cmd_name = mongoc_apm_command_succeeded_get_command_name (event);
   session_test_t *test =
      (session_test_t *) mongoc_apm_command_succeeded_get_context (event);
   if (test->verbose) {
      char *s = bson_as_json (reply, NULL);
      printf ("<-- %s\n", s);
      bson_free (s);
   }
   has_cluster_time = bson_iter_init_find (&iter, reply, "$clusterTime");
   if (test->acknowledged && !has_cluster_time) {
      fprintf (stderr, "no $clusterTime in reply to command %s\n", cmd_name);
      abort ();
   }
   /* endSessions replies are checked for $clusterTime above but not recorded */
   if (strcmp (cmd_name, "endSessions") == 0) {
      bson_destroy (reply);
      return;
   }
   if (has_cluster_time) {
      /* like $clusterTime: {clusterTime: <timestamp>} */
      bson_iter_bson (&iter, &cluster_time);
      bson_destroy (&test->received_cluster_time);
      bson_copy_to (&cluster_time, &test->received_cluster_time);
   }
   _mongoc_array_append_vals (&test->replies, &reply, 1);
   test->n_succeeded++;
}
static void
failed (const mongoc_apm_command_failed_t *event)
{
const char *cmd_name;
bson_error_t error;
session_test_t *test =
(session_test_t *) mongoc_apm_command_failed_get_context (event);
if (!test->verbose) {
return;
}
cmd_name = mongoc_apm_command_failed_get_command_name (event);
mongoc_apm_command_failed_get_error (event, &error);
printf ("<-- %s: %s\n", cmd_name, error.message);
}
/* Install the started/succeeded/failed APM monitors on test->client, passing
 * `test` itself as the callback context. */
static void
set_session_test_callbacks (session_test_t *test)
{
   mongoc_apm_callbacks_t *cbs = mongoc_apm_callbacks_new ();

   mongoc_apm_set_command_started_cb (cbs, started);
   mongoc_apm_set_command_succeeded_cb (cbs, succeeded);
   mongoc_apm_set_command_failed_cb (cbs, failed);

   /* the client copies what it needs; callbacks struct can be freed now */
   mongoc_client_set_apm_callbacks (test->client, cbs, test);
   mongoc_apm_callbacks_destroy (cbs);
}
/* Allocate and initialize a session_test_t.
 *
 * correct_client: CORRECT_CLIENT uses the same client for the session and the
 * operation under test; INCORRECT_CLIENT creates a second client (and a
 * session on it, wrong_cs) so tests can verify cross-client sessions are
 * rejected.
 * causal: whether the client session is created with causal consistency.
 *
 * Caller frees with session_test_destroy. */
static session_test_t *
session_test_new (session_test_correct_t correct_client,
                  session_test_causal_t causal)
{
   session_test_t *test;
   mongoc_session_opt_t *cs_opts;
   bson_error_t error;
   test = bson_malloc0 (sizeof (session_test_t));
   test->verbose = test_framework_getenv_bool ("MONGOC_TEST_SESSION_VERBOSE");
   test->n_started = 0;
   test->expect_explicit_lsid = true;
   test->acknowledged = true;
   test->succeeded = false;
   /* arrays hold bson_t* owned by the test; freed in session_test_destroy */
   _mongoc_array_init (&test->cmds, sizeof (bson_t *));
   _mongoc_array_init (&test->replies, sizeof (bson_t *));
   bson_init (&test->sent_cluster_time);
   bson_init (&test->received_cluster_time);
   bson_init (&test->sent_lsid);
   test->session_client = test_framework_new_default_client ();
   mongoc_client_set_error_api (test->session_client, 2);
   test->session_db = mongoc_client_get_database (test->session_client, "db");
   test->session_collection =
      mongoc_database_get_collection (test->session_db, "collection");
   bson_init (&test->opts);
   if (correct_client == CORRECT_CLIENT) {
      test->client = test->session_client;
      test->db = test->session_db;
      test->collection = test->session_collection;
   } else {
      /* test each function with a session from the correct client and a session
       * from the wrong client */
      test->client = test_framework_new_default_client ();
      mongoc_client_set_error_api (test->client, 2);
      test->wrong_cs = mongoc_client_start_session (test->client, NULL, &error);
      ASSERT_OR_PRINT (test->wrong_cs, error);
      test->db = mongoc_client_get_database (test->client, "db");
      test->collection =
         mongoc_database_get_collection (test->db, "collection");
   }
   set_session_test_callbacks (test);
   cs_opts = mongoc_session_opts_new ();
   mongoc_session_opts_set_causal_consistency (cs_opts, causal == CAUSAL);
   /* the session under test always belongs to session_client */
   test->cs =
      mongoc_client_start_session (test->session_client, cs_opts, &error);
   ASSERT_OR_PRINT (test->cs, error);
   mongoc_session_opts_destroy (cs_opts);
   return test;
}
/* visitor state for scanning the server-session pool: expect_lsid is the lsid
 * we expect to find in the pool; found is set once a pooled session matches */
struct check_session_returned_t {
   const bson_t *expect_lsid;
   bool found;
};
/* Pool visitor: marks check_state->found when a pooled server session's lsid
 * matches the expected one, and asserts no never-used session was pooled.
 * Always returns 0 (continue visiting). */
static int
check_session_returned_visit (mongoc_server_session_t *ss,
                              mongoc_topology_t *unused,
                              void *check_state_)
{
   match_ctx_t ctx = {{0}};
   struct check_session_returned_t *check_state = check_state_;
   /* lsid timestamps may be int32 or int64 depending on server; relax types */
   ctx.strict_numeric_types = false;
   if (!check_state->found) {
      check_state->found =
         match_bson_with_ctx (&ss->lsid, check_state->expect_lsid, &ctx);
   }
   /* No session will ever be returned to the pool if it has never been used */
   ASSERT_CMPINT64 (ss->last_used_usec, !=, SESSION_NEVER_USED);
   return 0;
}
/* Assert that the server session with `lsid` was returned to
 * session_client's session pool; aborts the test otherwise. */
static void
check_session_returned (session_test_t *test, const bson_t *lsid)
{
   struct check_session_returned_t check_state;
   check_state.expect_lsid = lsid;
   check_state.found = false;
   mongoc_server_session_pool_visit_each (
      test->session_client->topology->session_pool,
      &check_state,
      check_session_returned_visit);
   /* Server session will only be returned to the pool if it has
    * been used. It is expected behavior for found to be false if
    * ss->last_used_usec == SESSION_NEVER_USED */
   if (!check_state.found) {
      fprintf (stderr,
               "server session %s not returned to pool\n",
               bson_as_json (lsid, NULL));
      abort ();
   }
}
/* Return the first command recorded by the APM callbacks; asserts at least
 * one command was captured. Returned bson_t remains owned by test->cmds. */
static const bson_t *
first_cmd (session_test_t *test)
{
   ASSERT_CMPSIZE_T (test->cmds.len, >, (size_t) 0);
   return _mongoc_array_index (&test->cmds, bson_t *, 0);
}
/* Return the most recent recorded command that is not a "getMore"; aborts if
 * only getMore commands (or none) were recorded. Returned bson_t remains
 * owned by test->cmds.
 *
 * Fix: the loop previously started at test->replies.len - 1 while indexing
 * test->cmds. The two arrays can differ in length (a command that failed or
 * has not yet completed is in cmds but not replies), so the newest commands
 * could be skipped. Iterate from the end of cmds itself. */
static const bson_t *
last_non_getmore_cmd (session_test_t *test)
{
   ssize_t i;
   const bson_t *cmd;
   ASSERT_CMPSIZE_T (test->cmds.len, >, (size_t) 0);
   for (i = (ssize_t) test->cmds.len - 1; i >= 0; i--) {
      cmd = _mongoc_array_index (&test->cmds, bson_t *, i);
      if (strcmp (_mongoc_get_command_name (cmd), "getMore") != 0) {
         return cmd;
      }
   }
   fprintf (stderr, "No commands besides getMore were recorded\n");
   abort ();
}
/* Return the most recent reply recorded by the APM callbacks; asserts at
 * least one reply was captured. Returned bson_t remains owned by
 * test->replies. */
static const bson_t *
last_reply (session_test_t *test)
{
   ASSERT_CMPSIZE_T (test->replies.len, >, (size_t) 0);
   return _mongoc_array_index (&test->replies, bson_t *, test->replies.len - 1);
}
/* Free every recorded command and reply, then reset both arrays to empty
 * (capacity is retained; only len is cleared). */
static void
clear_history (session_test_t *test)
{
   size_t idx;

   for (idx = 0; idx < test->cmds.len; idx++) {
      bson_t *doc = _mongoc_array_index (&test->cmds, bson_t *, idx);
      bson_destroy (doc);
   }

   for (idx = 0; idx < test->replies.len; idx++) {
      bson_t *doc = _mongoc_array_index (&test->replies, bson_t *, idx);
      bson_destroy (doc);
   }

   test->cmds.len = 0;
   test->replies.len = 0;
}
/* Tear down a session_test_t: destroy the client session, verify used
 * sessions (explicit and implicit) were returned to the pool, then release
 * all clients, handles, recorded commands/replies, and the struct itself.
 * Order matters: the session must be destroyed before the pool is checked. */
static void
session_test_destroy (session_test_t *test)
{
   bson_t session_lsid;
   size_t i;
   /* capture "was used" before destroying the session invalidates test->cs */
   bool ss_was_used =
      test->cs->server_session->last_used_usec != SESSION_NEVER_USED;
   bson_copy_to (mongoc_client_session_get_lsid (test->cs), &session_lsid);
   mongoc_client_session_destroy (test->cs);
   if (ss_was_used) {
      /* If the session was used, assert that it was returned to the pool: */
      check_session_returned (test, &session_lsid);
   }
   bson_destroy (&session_lsid);
   if (!bson_empty (&test->sent_lsid)) {
      /* for implicit sessions, ensure the implicit session was returned */
      check_session_returned (test, &test->sent_lsid);
   }
   /* the wrong-client resources exist only in the INCORRECT_CLIENT case */
   if (test->client != test->session_client) {
      mongoc_client_session_destroy (test->wrong_cs);
      mongoc_collection_destroy (test->collection);
      mongoc_database_destroy (test->db);
      mongoc_client_destroy (test->client);
   }
   mongoc_collection_destroy (test->session_collection);
   mongoc_database_destroy (test->session_db);
   mongoc_client_destroy (test->session_client);
   bson_destroy (&test->opts);
   bson_destroy (&test->sent_cluster_time);
   bson_destroy (&test->received_cluster_time);
   bson_destroy (&test->sent_lsid);
   for (i = 0; i < test->cmds.len; i++) {
      bson_destroy (_mongoc_array_index (&test->cmds, bson_t *, i));
   }
   _mongoc_array_destroy (&test->cmds);
   for (i = 0; i < test->replies.len; i++) {
      bson_destroy (_mongoc_array_index (&test->replies, bson_t *, i));
   }
   _mongoc_array_destroy (&test->replies);
   bson_free (test);
}
/* When the operation's client differs from the session's client, assert the
 * operation failed with "Invalid sessionId". No-op otherwise. */
static void
check_sessions_from_same_client_enforced (session_test_t *test)
{
   if (test->session_client != test->client) {
      BSON_ASSERT (!test->succeeded);
      ASSERT_ERROR_CONTAINS (test->error,
                             MONGOC_ERROR_COMMAND,
                             MONGOC_ERROR_COMMAND_INVALID_ARG,
                             "Invalid sessionId");
   }
}
/* An explicit session combined with unacknowledged (w:0) writes is
 * prohibited; assert the operation failed with the expected error.
 * No-op for implicit sessions or acknowledged writes. */
static void
check_sessions_with_w0_prohibited (session_test_t *test)
{
   if (test->expect_explicit_lsid && !test->acknowledged) {
      BSON_ASSERT (!test->succeeded);
      ASSERT_ERROR_CONTAINS (test->error,
                             MONGOC_ERROR_COMMAND,
                             MONGOC_ERROR_COMMAND_INVALID_ARG,
                             "session with unacknowledged");
   }
}
/* Verify a test operation's outcome matches the configuration: failures are
 * required for wrong-client or explicit-session-with-w0 combinations, success
 * is required in the legitimate cases, and any success must have produced at
 * least one APM started and succeeded event. */
static void
check_success (session_test_t *test)
{
   check_sessions_from_same_client_enforced (test);
   check_sessions_with_w0_prohibited (test);
   /* success is expected only for same-client ops where lsid-explicitness
    * matches acknowledgement (explicit+acked, or implicit+unacked) */
   if (test->session_client == test->client &&
       test->expect_explicit_lsid == test->acknowledged) {
      ASSERT_OR_PRINT (test->succeeded, test->error);
   }
   if (test->succeeded) {
      ASSERT_CMPINT (test->n_started, >, 0);
      ASSERT_CMPINT (test->n_succeeded, >, 0);
   }
}
/* Assert the client session's cached $clusterTime is at least as new as the
 * last cluster time received from the server, i.e. the session kept up. */
static void
check_cluster_time (session_test_t *test)
{
   const bson_t *session_time;
   session_time = mongoc_client_session_get_cluster_time (test->cs);
   BSON_ASSERT (session_time); /* should be set during handshake */
   /* fail if cluster_time_greater logs an error */
   capture_logs (true);
   if (_mongoc_cluster_time_greater (&test->received_cluster_time,
                                     session_time)) {
      fprintf (stderr, "client session's cluster time is outdated\n");
      abort ();
   }
   ASSERT_NO_CAPTURED_LOGS ("_mongoc_cluster_time_greater");
   capture_logs (false);
}
typedef void (*session_test_fn_t) (session_test_t *);
/*
* the following tests check session logic for a variety of operations. most of
* the asserts are in the APM started/succeeded/failed callbacks above
*/
/* use the same client for the session and the operation, expect success */
/* Run `test_fn` with an explicit session appended to the opts and verify:
 * the op succeeds, events fired, the session's cluster time is current, and
 * the server session's last-used timestamp advanced past the start time. */
static void
_test_explicit_session_lsid (session_test_fn_t test_fn)
{
   session_test_t *test;
   bson_error_t error;
   int64_t start;
   test = session_test_new (CORRECT_CLIENT, NOT_CAUSAL);
   /* -1 is SESSION_NEVER_USED: fresh session has no last-used time yet */
   ASSERT_CMPINT64 (test->cs->server_session->last_used_usec, ==, (int64_t) -1);
   ASSERT_OR_PRINT (
      mongoc_client_session_append (test->cs, &test->opts, &error), error);
   start = bson_get_monotonic_time ();
   test_fn (test);
   check_success (test);
   ASSERT_CMPINT (test->n_started, >, 0);
   ASSERT_CMPINT (test->n_succeeded, >, 0);
   check_cluster_time (test);
   ASSERT_CMPINT64 (test->cs->server_session->last_used_usec, >=, start);
   session_test_destroy (test);
}
/* use a session from the wrong client, expect failure. this is the
* "session argument is for right client" test from Driver Sessions Spec */
static void
_test_session_from_wrong_client (session_test_fn_t test_fn)
{
   session_test_t *test;
   bson_error_t error;
   /* INCORRECT_CLIENT: the session belongs to session_client, but the op
    * runs on a different client; check_success asserts the op failed */
   test = session_test_new (INCORRECT_CLIENT, NOT_CAUSAL);
   ASSERT_OR_PRINT (
      mongoc_client_session_append (test->cs, &test->opts, &error), error);
   test_fn (test);
   check_success (test);
   /* best-effort cleanup of any data the test_fn wrote; result ignored */
   mongoc_collection_drop_with_opts (test->session_collection, NULL, NULL);
   session_test_destroy (test);
}
/* implicit session - all commands should use an internally-acquired lsid */
static void
_test_implicit_session_lsid (session_test_fn_t test_fn)
{
   session_test_t *test;
   int64_t start;
   mongoc_server_session_t *ss;
   test = session_test_new (CORRECT_CLIENT, NOT_CAUSAL);
   /* no session is appended to test->opts; the driver acquires one itself */
   test->expect_explicit_lsid = false;
   start = bson_get_monotonic_time ();
   test_fn (test);
   check_success (test);
   mongoc_collection_drop_with_opts (test->session_collection, NULL, NULL);
   /* the implicit session must be back in the pool and marked as used */
   ss = mongoc_server_session_pool_get_existing (
      test->client->topology->session_pool);
   BSON_ASSERT (ss);
   ASSERT_CMPINT64 (ss->last_used_usec, >=, start);
   mongoc_server_session_pool_return (ss);
   session_test_destroy (test);
}
/* a MongoDB operation time: BSON timestamp seconds (t) and increment (i) */
typedef struct {
   uint32_t t;
   uint32_t i;
} op_time_t;
/* Extract readConcern.afterClusterTime (a BSON timestamp) from a command
 * document into op_time; asserts all fields are present and well-typed. */
static void
parse_read_concern_time (const bson_t *cmd, op_time_t *op_time)
{
   bson_iter_t iter;
   bson_iter_t rc;
   BSON_ASSERT (bson_iter_init_find (&iter, cmd, "readConcern"));
   BSON_ASSERT (bson_iter_recurse (&iter, &rc));
   BSON_ASSERT (bson_iter_find (&rc, "afterClusterTime"));
   BSON_ASSERT (BSON_ITER_HOLDS_TIMESTAMP (&rc));
   bson_iter_timestamp (&rc, &op_time->t, &op_time->i);
}
/* Extract the top-level operationTime (a BSON timestamp) from a server reply
 * into op_time; asserts it is present and well-typed. */
static void
parse_reply_time (const bson_t *reply, op_time_t *op_time)
{
   bson_iter_t iter;
   BSON_ASSERT (bson_iter_init_find (&iter, reply, "operationTime"));
   BSON_ASSERT (BSON_ITER_HOLDS_TIMESTAMP (&iter));
   bson_iter_timestamp (&iter, &op_time->t, &op_time->i);
}
/* Abort with a diagnostic unless two op_time_t values are equal.
 *
 * Fixes: the fields are uint32_t, but they were printed with %d (format /
 * argument type mismatch, undefined behavior per the C standard); print with
 * %u via an explicit cast instead. Also wrapped in do { } while (0) so the
 * macro composes safely with if/else. */
#define ASSERT_OP_TIMES_EQUAL(_a, _b)                                \
   do {                                                              \
      if ((_a).t != (_b).t || (_a).i != (_b).i) {                    \
         fprintf (stderr,                                            \
                  #_a " (%u, %u) does not match " #_b " (%u, %u)\n", \
                  (unsigned) (_a).t,                                 \
                  (unsigned) (_a).i,                                 \
                  (unsigned) (_b).t,                                 \
                  (unsigned) (_b).i);                                \
         abort ();                                                   \
      }                                                              \
   } while (0)
/* Run `test_fn` twice in a causally-consistent session. The first run must
 * not send readConcern but must record the reply's operationTime into the
 * session. The second run must send that operationTime as afterClusterTime —
 * but only if the operation supports readConcern (allow_read_concern);
 * otherwise no command may contain readConcern at all. */
static void
_test_causal_consistency (session_test_fn_t test_fn, bool allow_read_concern)
{
   session_test_t *test;
   op_time_t session_time, read_concern_time, reply_time;
   bson_error_t error;
   const bson_t *cmd;
   size_t i;
   /*
    * first causal exchange: don't send readConcern, receive opTime
    */
   test = session_test_new (CORRECT_CLIENT, CAUSAL);
   ASSERT_OR_PRINT (
      mongoc_client_session_append (test->cs, &test->opts, &error), error);
   test_fn (test);
   check_success (test);
   BSON_ASSERT (!bson_has_field (first_cmd (test), "readConcern"));
   mongoc_client_session_get_operation_time (
      test->cs, &session_time.t, &session_time.i);
   BSON_ASSERT (session_time.t != 0);
   parse_reply_time (last_reply (test), &reply_time);
   ASSERT_OP_TIMES_EQUAL (session_time, reply_time);
   /*
    * second exchange: send previous opTime and receive an opTime.
    * send readConcern if this function supports readConcern, like
    * mongoc_collection_find_with_opts or mongoc_client_read_command_with_opts.
    * don't send readConcern for generic command helpers like
    * mongoc_client_command_with_opts or mongoc_client_command.
    */
   clear_history (test);
   test_fn (test);
   check_success (test);
   if (allow_read_concern) {
      parse_read_concern_time (first_cmd (test), &read_concern_time);
      ASSERT_OP_TIMES_EQUAL (reply_time, read_concern_time);
      mongoc_client_session_get_operation_time (
         test->cs, &session_time.t, &session_time.i);
      BSON_ASSERT (session_time.t != 0);
      parse_reply_time (last_reply (test), &reply_time);
      ASSERT_OP_TIMES_EQUAL (session_time, reply_time);
   } else {
      /* readConcern prohibited */
      for (i = 0; i < test->cmds.len; i++) {
         cmd = _mongoc_array_index (&test->cmds, bson_t *, i);
         if (bson_has_field (cmd, "readConcern")) {
            fprintf (stderr,
                     "Command should not have included readConcern: %s\n",
                     bson_as_json (cmd, NULL));
            abort ();
         }
      }
   }
   session_test_destroy (test);
}
/* Exercise `test_fn` under every session configuration: explicit session,
 * session from the wrong client, implicit session, and causal consistency
 * (with or without readConcern support). */
static void
_run_session_test (session_test_fn_t test_fn, bool allow_read_concern)
{
   _test_explicit_session_lsid (test_fn);
   _test_session_from_wrong_client (test_fn);
   _test_implicit_session_lsid (test_fn);
   _test_causal_consistency (test_fn, allow_read_concern);
}
/* TestSuite entry point: run the full session-test matrix for an operation
 * that supports readConcern. ctx carries the operation as a TestFnCtx. */
static void
run_session_test (void *ctx)
{
   _run_session_test ((session_test_fn_t) ((TestFnCtx *) ctx)->test_fn, true);
}
/* test a command that doesn't allow readConcern, and therefore isn't causal */
static void
run_session_test_no_rc (void *ctx)
{
   /* allow_read_concern=false: assert no command ever carries readConcern */
   _run_session_test ((session_test_fn_t) ((TestFnCtx *) ctx)->test_fn, false);
}
/* skip _test_session_from_wrong_client, which would abort with bulk op */
static void
run_session_test_bulk_operation (void *ctx)
{
   session_test_fn_t test_fn = (session_test_fn_t) ((TestFnCtx *) ctx)->test_fn;
   /* wrong-client case is intentionally omitted (see comment above) */
   _test_explicit_session_lsid (test_fn);
   _test_implicit_session_lsid (test_fn);
   _test_causal_consistency (test_fn, false /* read concern */);
}
static void
run_count_test (void *ctx)
{
   /* CDRIVER-3612: mongoc_collection_estimated_document_count does not support
    * explicit sessions */
   _test_implicit_session_lsid (
      (session_test_fn_t) ((TestFnCtx *) ctx)->test_fn);
}
/* Seed the session collection with 10 empty documents via a bulk insert.
 * APM callbacks are temporarily removed so these inserts do not pollute the
 * recorded command history, then reinstalled afterwards. */
static void
insert_10_docs (session_test_t *test)
{
   mongoc_bulk_operation_t *bulk;
   bson_error_t error;
   int i;
   bool r;
   /* disable callbacks, we're not testing insert's lsid */
   mongoc_client_set_apm_callbacks (test->session_client, NULL, NULL);
   bulk = mongoc_collection_create_bulk_operation_with_opts (
      test->session_collection, NULL);
   for (i = 0; i < 10; i++) {
      mongoc_bulk_operation_insert (bulk, tmp_bson ("{}"));
   }
   r = (bool) mongoc_bulk_operation_execute (bulk, NULL, &error);
   ASSERT_OR_PRINT (r, error);
   mongoc_bulk_operation_destroy (bulk);
   set_session_test_callbacks (test);
}
/* Operation under test: the generic command helper (no readConcern
 * inheritance). lsid expectations are verified by the APM callbacks. */
static void
test_cmd (session_test_t *test)
{
   bson_t *listcollections = tmp_bson ("{'listCollections': 1}");
   bool ok = mongoc_client_command_with_opts (test->client,
                                              "db",
                                              listcollections,
                                              NULL,
                                              &test->opts,
                                              NULL,
                                              &test->error);

   test->succeeded = ok;
}
/* Operation under test: the read-command helper (inherits read prefs and
 * readConcern). Result and error are stored on the test struct. */
static void
test_read_cmd (session_test_t *test)
{
   test->succeeded =
      mongoc_client_read_command_with_opts (test->client,
                                            "db",
                                            tmp_bson ("{'listCollections': 1}"),
                                            NULL,
                                            &test->opts,
                                            NULL,
                                            &test->error);
}
/* Operation under test: the write-command helper, issuing a single-document
 * delete as a raw command. */
static void
test_write_cmd (session_test_t *test)
{
   bson_t *delete_cmd =
      tmp_bson ("{'delete': 'collection', 'deletes': [{'q': {}, 'limit': 1}]}");
   bool ok = mongoc_client_write_command_with_opts (
      test->client, "db", delete_cmd, &test->opts, NULL, &test->error);

   test->succeeded = ok;
}
/* Operation under test: the read-write command helper, using an aggregate
 * with $out (reads and writes in one command). */
static void
test_read_write_cmd (session_test_t *test)
{
   bson_t *cmd = tmp_bson ("{"
                           "   'aggregate': 'collection',"
                           "   'cursor': {},"
                           "   'pipeline': [{'$out': 'collection2'}]"
                           "}");
   test->succeeded = mongoc_client_read_write_command_with_opts (
      test->client, "db", cmd, NULL, &test->opts, NULL, &test->error);
}
/* Operation under test: the database-level generic command helper. */
static void
test_db_cmd (session_test_t *test)
{
   bson_t *listcollections = tmp_bson ("{'listCollections': 1}");

   test->succeeded = mongoc_database_command_with_opts (
      test->db, listcollections, NULL, &test->opts, NULL, &test->error);
}
/* Operation under test: collection count. -1 signals failure.
 * NOTE(review): mongoc_collection_count_with_opts is a deprecated helper in
 * newer driver versions — presumably kept here to test its session support. */
static void
test_count (session_test_t *test)
{
   test->succeeded =
      (-1 != mongoc_collection_count_with_opts (test->collection,
                                                MONGOC_QUERY_NONE,
                                                NULL,
                                                0,
                                                0,
                                                &test->opts,
                                                NULL,
                                                &test->error));
}
/* Operation under test: find with a small batch size so the cursor issues
 * getMore commands, exercising lsid reuse across batches. */
static void
test_cursor (session_test_t *test)
{
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   /* ensure multiple batches */
   insert_10_docs (test);
   cursor = mongoc_collection_find_with_opts (
      test->collection, tmp_bson ("{}"), &test->opts, NULL);
   mongoc_cursor_set_batch_size (cursor, 2);
   /* drain the cursor; per-document contents are irrelevant here */
   while (mongoc_cursor_next (cursor, &doc)) {
   }
   test->succeeded = !mongoc_cursor_error (cursor, &test->error);
   mongoc_cursor_destroy (cursor);
}
/* Operation under test: drop the collection (seeded first so "drop" has
 * something to remove). */
static void
test_drop (session_test_t *test)
{
   /* create the collection so that "drop" can succeed */
   insert_10_docs (test);
   test->succeeded = mongoc_collection_drop_with_opts (
      test->collection, &test->opts, &test->error);
}
/* Operation under test: dropIndexes. An index named "foo" is created first
 * (via the session_db, so the setup also succeeds in wrong-client runs). */
static void
test_drop_index (session_test_t *test)
{
   bson_error_t error;
   bool r;
   /* create the index so that "dropIndexes" can succeed */
   r = mongoc_database_write_command_with_opts (
      test->session_db,
      tmp_bson ("{'createIndexes': '%s',"
                " 'indexes': [{'key': {'a': 1}, 'name': 'foo'}]}",
                test->session_collection->collection),
      &test->opts,
      NULL,
      &error);
   ASSERT_OR_PRINT (r, error);
   test->succeeded = mongoc_collection_drop_index_with_opts (
      test->collection, "foo", &test->opts, &test->error);
}
/* Operation under test: createIndexes via the (deprecated) index helper;
 * deprecation warnings are suppressed for the call. */
static void
test_create_index (session_test_t *test)
{
   BEGIN_IGNORE_DEPRECATIONS
   test->succeeded =
      mongoc_collection_create_index_with_opts (test->collection,
                                                tmp_bson ("{'a': 1}"),
                                                NULL,
                                                &test->opts,
                                                NULL,
                                                &test->error);
   END_IGNORE_DEPRECATIONS
}
/* Operation under test: replaceOne with empty filter and replacement. */
static void
test_replace_one (session_test_t *test)
{
   test->succeeded = mongoc_collection_replace_one (test->collection,
                                                    tmp_bson ("{}"),
                                                    tmp_bson ("{}"),
                                                    &test->opts,
                                                    NULL,
                                                    &test->error);
}
/* Operation under test: updateOne with a $set modifier. */
static void
test_update_one (session_test_t *test)
{
   test->succeeded =
      mongoc_collection_update_one (test->collection,
                                    tmp_bson ("{}"),
                                    tmp_bson ("{'$set': {'x': 1}}"),
                                    &test->opts,
                                    NULL,
                                    &test->error);
}
/* Operation under test: updateMany with a $set modifier. */
static void
test_update_many (session_test_t *test)
{
   test->succeeded =
      mongoc_collection_update_many (test->collection,
                                     tmp_bson ("{}"),
                                     tmp_bson ("{'$set': {'x': 1}}"),
                                     &test->opts,
                                     NULL,
                                     &test->error);
}
/* Operation under test: insertOne of an empty document. */
static void
test_insert_one (session_test_t *test)
{
   bson_t *doc = tmp_bson ("{}");
   bool ok = mongoc_collection_insert_one (
      test->collection, doc, &test->opts, NULL, &test->error);

   test->succeeded = ok;
}
/* Operation under test: insertMany with two empty documents. */
static void
test_insert_many (session_test_t *test)
{
   bson_t *docs[2] = {tmp_bson ("{}"), tmp_bson ("{}")};
   test->succeeded = mongoc_collection_insert_many (test->collection,
                                                    (const bson_t **) docs,
                                                    2,
                                                    &test->opts,
                                                    NULL,
                                                    &test->error);
}
/* Operation under test: deleteOne with an empty filter. */
static void
test_delete_one (session_test_t *test)
{
   bson_t *filter = tmp_bson ("{}");
   bool ok = mongoc_collection_delete_one (
      test->collection, filter, &test->opts, NULL, &test->error);

   test->succeeded = ok;
}
/* Operation under test: deleteMany with an empty filter. */
static void
test_delete_many (session_test_t *test)
{
   test->succeeded = mongoc_collection_delete_many (
      test->collection, tmp_bson ("{}"), &test->opts, NULL, &test->error);
}
/* Operation under test: renameCollection. Runs on a copy of the collection
 * handle because the rename helper mutates the struct's name in place. */
static void
test_rename (session_test_t *test)
{
   mongoc_collection_t *collection;
   /* ensure "rename" can succeed */
   insert_10_docs (test);
   /* mongoc_collection_rename_with_opts mutates the struct! */
   collection = mongoc_collection_copy (test->collection);
   test->succeeded = mongoc_collection_rename_with_opts (
      collection, "db", "newname", true, &test->opts, &test->error);
   mongoc_collection_destroy (collection);
}
/* Operation under test: findAndModify via the fam-opts API; the test's opts
 * (which may carry the session) are appended into the fam options. */
static void
test_fam (session_test_t *test)
{
   mongoc_find_and_modify_opts_t *fam_opts;
   fam_opts = mongoc_find_and_modify_opts_new ();
   mongoc_find_and_modify_opts_set_update (fam_opts,
                                           tmp_bson ("{'$set': {'x': 1}}"));
   BSON_ASSERT (mongoc_find_and_modify_opts_append (fam_opts, &test->opts));
   test->succeeded = mongoc_collection_find_and_modify_with_opts (
      test->collection, tmp_bson ("{}"), fam_opts, NULL, &test->error);
   mongoc_find_and_modify_opts_destroy (fam_opts);
}
/* Operation under test: dropDatabase. */
static void
test_db_drop (session_test_t *test)
{
   bool ok =
      mongoc_database_drop_with_opts (test->db, &test->opts, &test->error);

   test->succeeded = ok;
}
/* Operation under test: GridFS find. APM callbacks are disabled while the
 * gridfs handle is created (its setup commands are not under test). */
static void
test_gridfs_find (session_test_t *test)
{
   mongoc_gridfs_t *gfs;
   bson_error_t error;
   mongoc_gridfs_file_list_t *list;
   mongoc_gridfs_file_t *f;
   /* work around lack of mongoc_client_get_gridfs_with_opts for now, can't yet
    * include lsid with the GridFS createIndexes command */
   mongoc_client_set_apm_callbacks (test->client, NULL, NULL);
   gfs = mongoc_client_get_gridfs (test->client, "test", NULL, &error);
   ASSERT_OR_PRINT (gfs, error);
   set_session_test_callbacks (test);
   list = mongoc_gridfs_find_with_opts (gfs, tmp_bson ("{}"), &test->opts);
   f = mongoc_gridfs_file_list_next (list);
   test->succeeded = !mongoc_gridfs_file_list_error (list, &test->error);
   if (f) {
      mongoc_gridfs_file_destroy (f);
   }
   mongoc_gridfs_file_list_destroy (list);
   mongoc_gridfs_destroy (gfs);
}
/* Operation under test: GridFS find-one. Success means no error was set;
 * a NULL file with no error (nothing found) still counts as success. */
static void
test_gridfs_find_one (session_test_t *test)
{
   mongoc_gridfs_t *gfs;
   bson_error_t error;
   mongoc_gridfs_file_t *f;
   /* work around lack of mongoc_client_get_gridfs_with_opts for now, can't yet
    * include lsid with the GridFS createIndexes command */
   mongoc_client_set_apm_callbacks (test->client, NULL, NULL);
   gfs = mongoc_client_get_gridfs (test->client, "test", NULL, &error);
   ASSERT_OR_PRINT (gfs, error);
   set_session_test_callbacks (test);
   f = mongoc_gridfs_find_one_with_opts (
      gfs, tmp_bson ("{}"), &test->opts, &test->error);
   test->succeeded = test->error.domain == 0;
   if (f) {
      mongoc_gridfs_file_destroy (f);
   }
   mongoc_gridfs_destroy (gfs);
}
/* Operation under test: open a change stream on the collection and check
 * it reports no error document. */
static void
test_watch (session_test_t *test)
{
   mongoc_change_stream_t *change_stream;
   insert_10_docs (test);
   change_stream =
      mongoc_collection_watch (test->collection, tmp_bson ("{}"), &test->opts);
   test->succeeded =
      !mongoc_change_stream_error_document (change_stream, &test->error, NULL);
   mongoc_change_stream_destroy (change_stream);
}
/* Operation under test: aggregate with batchSize 2, forcing getMore commands
 * across the 10 seeded documents. Works on a copy of test->opts so the
 * shared opts are not polluted with batchSize. */
static void
test_aggregate (session_test_t *test)
{
   bson_t opts;
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   /* ensure multiple batches */
   insert_10_docs (test);
   bson_copy_to (&test->opts, &opts);
   BSON_APPEND_INT32 (&opts, "batchSize", 2);
   cursor = mongoc_collection_aggregate (
      test->collection, MONGOC_QUERY_NONE, tmp_bson ("{}"), &opts, NULL);
   while (mongoc_cursor_next (cursor, &doc)) {
   }
   test->succeeded = !mongoc_cursor_error (cursor, &test->error);
   mongoc_cursor_destroy (cursor);
   bson_destroy (&opts);
}
/* Operation under test: create collection. The target name is dropped first
 * (best-effort, result ignored) so "create" can succeed. */
static void
test_create (session_test_t *test)
{
   mongoc_collection_t *collection;
   /* ensure "create" can succeed */
   mongoc_database_write_command_with_opts (test->session_db,
                                            tmp_bson ("{'drop': 'newname'}"),
                                            &test->opts,
                                            NULL,
                                            NULL);
   collection = mongoc_database_create_collection (
      test->db, "newname", &test->opts, &test->error);
   test->succeeded = (collection != NULL);
   if (collection) {
      mongoc_collection_destroy (collection);
   }
}
/* Operation under test: listDatabases via the names helper; a non-NULL
 * result indicates success. */
static void
test_database_names (session_test_t *test)
{
   char **names = mongoc_client_get_database_names_with_opts (
      test->client, &test->opts, &test->error);

   test->succeeded = (names != NULL);
   if (names) {
      bson_strfreev (names);
   }
}
/* Operation under test: listDatabases via a cursor, drained to completion. */
static void
test_find_databases (session_test_t *test)
{
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   cursor = mongoc_client_find_databases_with_opts (test->client, &test->opts);
   while (mongoc_cursor_next (cursor, &doc)) {
   }
   test->succeeded = !mongoc_cursor_error (cursor, &test->error);
   mongoc_cursor_destroy (cursor);
}
/* Operation under test: listCollections via a cursor, drained to completion. */
static void
test_find_collections (session_test_t *test)
{
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   cursor = mongoc_database_find_collections_with_opts (test->db, &test->opts);
   while (mongoc_cursor_next (cursor, &doc)) {
   }
   test->succeeded = !mongoc_cursor_error (cursor, &test->error);
   mongoc_cursor_destroy (cursor);
}
/* Operation under test: listCollections via the names helper; a non-NULL
 * result indicates success. bson_strfreev tolerates NULL. */
static void
test_collection_names (session_test_t *test)
{
   char **strv;
   strv = mongoc_database_get_collection_names_with_opts (
      test->db, &test->opts, &test->error);
   test->succeeded = (strv != NULL);
   bson_strfreev (strv);
}
/* Operation under test: listIndexes via a cursor, drained to completion. */
static void
test_find_indexes (session_test_t *test)
{
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   /* ensure the collection exists so the listIndexes command succeeds */
   insert_10_docs (test);
   cursor =
      mongoc_collection_find_indexes_with_opts (test->collection, &test->opts);
   while (mongoc_cursor_next (cursor, &doc)) {
   }
   test->succeeded = !mongoc_cursor_error (cursor, &test->error);
   mongoc_cursor_destroy (cursor);
}
/* Shared body for the bulk-operation session tests: queue an insert, an
 * update, and a remove on `bulk`, checking wrong-client enforcement after
 * each queueing call, then execute and check the w:0 prohibition.
 * Takes ownership of `bulk` and destroys it. */
static void
_test_bulk (session_test_t *test, mongoc_bulk_operation_t *bulk)
{
   uint32_t i;
   test->succeeded = mongoc_bulk_operation_insert_with_opts (
      bulk, tmp_bson ("{}"), NULL, &test->error);
   check_sessions_from_same_client_enforced (test);
   test->succeeded = mongoc_bulk_operation_update_one_with_opts (
      bulk,
      tmp_bson ("{}"),
      tmp_bson ("{'$set': {'x': 1}}"),
      NULL,
      &test->error);
   check_sessions_from_same_client_enforced (test);
   test->succeeded = mongoc_bulk_operation_remove_one_with_opts (
      bulk, tmp_bson ("{}"), NULL, &test->error);
   check_sessions_from_same_client_enforced (test);
   /* execute returns 0 on failure, a nonzero "server id" on success */
   i = mongoc_bulk_operation_execute (bulk, NULL, &test->error);
   test->succeeded = (i != 0);
   check_sessions_with_w0_prohibited (test);
   mongoc_bulk_operation_destroy (bulk);
}
/* test the standard mongoc_collection_create_bulk_operation_with_opts */
static void
test_bulk (session_test_t *test)
{
   mongoc_bulk_operation_t *bulk;
   /* opts may carry the lsid; _test_bulk takes ownership of bulk */
   bulk = mongoc_collection_create_bulk_operation_with_opts (test->collection,
                                                             &test->opts);
   _test_bulk (test, bulk);
}
/* instead of the standard mongoc_collection_create_bulk_operation_with_opts,
* test a quirky way of setting the client session on an existing bulk */
static void
test_bulk_set_session (session_test_t *test)
{
   mongoc_bulk_operation_t *bulk;
   bson_iter_t iter;
   mongoc_client_session_t *cs;
   bson_error_t error;
   bool r;
   /* build the bulk by hand: client first, then session */
   bulk = mongoc_bulk_operation_new (true /* ordered */);
   mongoc_bulk_operation_set_client (bulk, test->client);
   mongoc_bulk_operation_set_database (bulk,
                                       mongoc_database_get_name (test->db));
   mongoc_bulk_operation_set_collection (
      bulk, mongoc_collection_get_name (test->collection));
   /* if the test opts carry a sessionId, look the session up and attach it */
   if (bson_iter_init_find (&iter, &test->opts, "sessionId")) {
      r = _mongoc_client_session_from_iter (
         test->session_client, &iter, &cs, &error);
      ASSERT_OR_PRINT (r, error);
      mongoc_bulk_operation_set_client_session (bulk, cs);
   }
   _test_bulk (test, bulk);
}
/* like test_bulk_set_session, but set session first, then client */
static void
test_bulk_set_client (session_test_t *test)
{
   mongoc_bulk_operation_t *bulk;
   bson_iter_t iter;
   mongoc_client_session_t *cs;
   bson_error_t error;
   bool r;
   bulk = mongoc_bulk_operation_new (true /* ordered */);
   /* attach the session before the client — the reverse order of
    * test_bulk_set_session — to cover both assignment orders */
   if (bson_iter_init_find (&iter, &test->opts, "sessionId")) {
      r = _mongoc_client_session_from_iter (
         test->session_client, &iter, &cs, &error);
      ASSERT_OR_PRINT (r, error);
      mongoc_bulk_operation_set_client_session (bulk, cs);
   }
   mongoc_bulk_operation_set_client (bulk, test->client);
   mongoc_bulk_operation_set_database (bulk,
                                       mongoc_database_get_name (test->db));
   mongoc_bulk_operation_set_collection (
      bulk, mongoc_collection_get_name (test->collection));
   _test_bulk (test, bulk);
}
/* Verify a cursor acquires an implicit session, reuses its lsid for getMore,
 * and returns the server session to the pool as soon as the cursor is
 * exhausted (before mongoc_cursor_destroy). Also checks pool accounting
 * against a second, explicitly created session. */
static void
test_cursor_implicit_session (void *ctx)
{
   session_test_t *test;
   mongoc_topology_t *topology;
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   mongoc_client_session_t *cs;
   bson_t find_lsid;
   bson_error_t error;
   mongoc_server_session_t *ss;
   test = session_test_new (CORRECT_CLIENT, NOT_CAUSAL);
   test->expect_explicit_lsid = false;
   topology = test->client->topology;
   cs = mongoc_client_start_session (test->client, NULL, &error);
   ASSERT_OR_PRINT (cs, error);
   mongoc_collection_drop_with_opts (test->session_collection, NULL, NULL);
   insert_10_docs (test);
   cursor = mongoc_collection_find_with_opts (
      test->collection, tmp_bson ("{}"), &test->opts, NULL);
   BSON_ASSERT (!cursor->client_session);
   mongoc_cursor_set_batch_size (cursor, 2);
   /* start the cursor. it makes an implicit session & sends it with "find" */
   BSON_ASSERT (mongoc_cursor_next (cursor, &doc));
   BSON_ASSERT (cursor->client_session);
   BSON_ASSERT (!cursor->explicit_session);
   bson_copy_to (&cursor->client_session->server_session->lsid, &find_lsid);
   /* the implicit session is checked out, so the pool is empty */
   ASSERT_CMPSIZE_T (
      mongoc_server_session_pool_size (topology->session_pool), ==, 0);
   ASSERT_SESSIONS_MATCH (&test->sent_lsid, &find_lsid);
   /* push a new server session into the pool. server session is only pushed
    * if it is used. therefore mark session as used prior to
    * destroying session by sending a ping */
   bson_reinit (&test->sent_lsid);
   send_ping (test->client, cs);
   mongoc_client_session_destroy (cs);
   BSON_ASSERT (mongoc_server_session_pool_size (topology->session_pool) == 1);
   ss = mongoc_server_session_pool_get_existing (topology->session_pool);
   BSON_ASSERT (ss);
   ASSERT_SESSIONS_DIFFER (&find_lsid, &ss->lsid);
   mongoc_server_session_pool_return (ss);
   /* "getMore" uses the same lsid as "find" did */
   bson_reinit (&test->sent_lsid);
   ASSERT_CURSOR_COUNT (9, cursor);
   ASSERT_SESSIONS_MATCH (&test->sent_lsid, &find_lsid);
   ASSERT_OR_PRINT (!mongoc_cursor_error (cursor, &error), error);
   /* lsid returned after last batch, doesn't wait for mongoc_cursor_destroy */
   check_session_returned (test, &find_lsid);
   ASSERT_CMPSIZE_T (
      mongoc_server_session_pool_size (topology->session_pool), ==, 2);
   bson_destroy (&find_lsid);
   mongoc_cursor_destroy (cursor);
   session_test_destroy (test);
}
/* Verify a change stream acquires an implicit session for its "aggregate"
 * and reuses the same lsid for subsequent "getMore" commands, with correct
 * server-session pool accounting alongside an explicit session. */
static void
test_change_stream_implicit_session (void *ctx)
{
   session_test_t *test;
   mongoc_topology_t *topology;
   mongoc_client_session_t *cs;
   bson_error_t error;
   mongoc_change_stream_t *change_stream;
   bson_t pipeline = BSON_INITIALIZER;
   const bson_t *doc;
   bson_t aggregate_lsid;
   mongoc_server_session_t *ss;
   test = session_test_new (CORRECT_CLIENT, NOT_CAUSAL);
   test->expect_explicit_lsid = false;
   topology = test->client->topology;
   cs = mongoc_client_start_session (test->client, NULL, &error);
   ASSERT_OR_PRINT (cs, error);
   change_stream =
      mongoc_collection_watch (test->session_collection, &pipeline, NULL);
   bson_destroy (&pipeline);
   /* sent_lsid was recorded by the APM callback during "aggregate" */
   bson_copy_to (&test->sent_lsid, &aggregate_lsid);
   ASSERT_CMPSIZE_T (
      mongoc_server_session_pool_size (topology->session_pool), ==, 0);
   BSON_ASSERT (change_stream->implicit_session);
   /* push a new server session into the pool. server session is only pushed
    * if it is used. therefore mark session as used prior to
    * destroying session by sending a ping */
   bson_reinit (&test->sent_lsid);
   send_ping (test->client, cs);
   mongoc_client_session_destroy (cs);
   ASSERT_CMPSIZE_T (
      mongoc_server_session_pool_size (topology->session_pool), ==, 1);
   ss = mongoc_server_session_pool_get_existing (topology->session_pool);
   BSON_ASSERT (ss);
   ASSERT_SESSIONS_DIFFER (&aggregate_lsid, &ss->lsid);
   mongoc_server_session_pool_return (ss);
   /* "getMore" uses the same lsid as "aggregate" did */
   bson_reinit (&test->sent_lsid);
   mongoc_change_stream_next (change_stream, &doc);
   ASSERT_SESSIONS_MATCH (
      &test->sent_lsid, &change_stream->implicit_session->server_session->lsid);
   ASSERT_SESSIONS_MATCH (
      &test->sent_lsid,
      &change_stream->cursor->client_session->server_session->lsid);
   ASSERT_SESSIONS_MATCH (&test->sent_lsid, &aggregate_lsid);
   ASSERT_OR_PRINT (
      !mongoc_change_stream_error_document (change_stream, &error, NULL),
      error);
   bson_destroy (&aggregate_lsid);
   mongoc_change_stream_destroy (change_stream);
   session_test_destroy (test);
}
/* Verify that a server-side command error still advances the causally
 * consistent session's operation time. */
static void
test_cmd_error (void *ctx)
{
   session_test_t *test;
   bson_error_t error;
   test = session_test_new (CORRECT_CLIENT, CAUSAL);
   /*
    * explicit session. command error still updates operation time
    */
   test->expect_explicit_lsid = true;
   ASSERT_OR_PRINT (
      mongoc_client_session_append (test->cs, &test->opts, &error), error);
   BSON_ASSERT (test->cs->operation_timestamp == 0);
   /* 'bad' is not a command; the server returns an error reply */
   BSON_ASSERT (!mongoc_client_command_with_opts (test->session_client,
                                                  "db",
                                                  tmp_bson ("{'bad': 1}"),
                                                  NULL,
                                                  &test->opts,
                                                  NULL,
                                                  NULL));
   BSON_ASSERT (test->cs->operation_timestamp != 0);
   session_test_destroy (test);
}
/* Verify causal consistency's interaction with readConcern:
 * afterClusterTime is sent after the first exchange on a causal session,
 * is merged into an explicit read concern when one is present, and is
 * omitted entirely for non-causal sessions. */
static void
test_read_concern (void *ctx)
{
   session_test_t *test;
   mongoc_read_concern_t *rc;
   mongoc_session_opt_t *cs_opts;
   bson_error_t error;

   test = session_test_new (CORRECT_CLIENT, CAUSAL);
   test->expect_explicit_lsid = true;
   ASSERT_OR_PRINT (
      mongoc_client_session_append (test->cs, &test->opts, &error), error);

   /* first exchange sets session's operationTime */
   test_read_cmd (test);
   check_success (test);
   BSON_ASSERT (!bson_has_field (last_non_getmore_cmd (test), "readConcern"));

   /*
    * default: no explicit read concern, driver sends afterClusterTime
    */
   test_read_cmd (test);
   check_success (test);
   ASSERT_MATCH (last_non_getmore_cmd (test),
                 "{"
                 " 'readConcern': {"
                 " 'level': {'$exists': false},"
                 " 'afterClusterTime': {'$exists': true}"
                 " }"
                 "}");

   /*
    * explicit read concern
    */
   rc = mongoc_read_concern_new ();
   mongoc_read_concern_set_level (rc, MONGOC_READ_CONCERN_LEVEL_LOCAL);
   BSON_ASSERT (mongoc_read_concern_append (rc, &test->opts));
   test_read_cmd (test);
   check_success (test);
   ASSERT_MATCH (last_non_getmore_cmd (test),
                 "{"
                 " 'readConcern': {"
                 " 'level': 'local',"
                 " 'afterClusterTime': {'$exists': true}"
                 " }"
                 "}");

   /*
    * explicit read concern, not causal
    */
   cs_opts = mongoc_session_opts_new ();
   mongoc_session_opts_set_causal_consistency (cs_opts, false);
   /* replace the causal session with a non-causal one and rebuild opts */
   mongoc_client_session_destroy (test->cs);
   test->cs = mongoc_client_start_session (test->client, cs_opts, &error);
   ASSERT_OR_PRINT (test->cs, error);
   bson_reinit (&test->opts);
   ASSERT_OR_PRINT (
      mongoc_client_session_append (test->cs, &test->opts, &error), error);
   BSON_ASSERT (mongoc_read_concern_append (rc, &test->opts));
   /* set new session's operationTime */
   test_read_cmd (test);
   check_success (test);
   ASSERT_CMPUINT32 (test->cs->operation_timestamp, >, (uint32_t) 0);
   /* afterClusterTime is not sent */
   test_read_cmd (test);
   check_success (test);
   ASSERT_MATCH (last_non_getmore_cmd (test),
                 "{"
                 " 'readConcern': {"
                 " 'level': 'local',"
                 " 'afterClusterTime': {'$exists': false}"
                 " }"
                 "}");

   /*
    * no read concern, not causal
    */
   bson_reinit (&test->opts);
   ASSERT_OR_PRINT (
      mongoc_client_session_append (test->cs, &test->opts, &error), error);
   /* afterClusterTime is not sent */
   test_read_cmd (test);
   check_success (test);
   ASSERT_MATCH (last_non_getmore_cmd (test),
                 "{'readConcern': {'$exists': false}}");

   mongoc_session_opts_destroy (cs_opts);
   mongoc_read_concern_destroy (rc);
   session_test_destroy (test);
}
/* Run one session test with w:0 (unacknowledged) write concern.
 *   test_fn:     the operation to exercise
 *   explicit_cs: attach an explicit client session to the opts
 *   inherit_wc:  true  -> set w:0 on client/db/collection and inherit it;
 *                false -> append w:0 directly to the command opts */
static void
_test_unacknowledged (session_test_fn_t test_fn,
                      bool explicit_cs,
                      bool inherit_wc)
{
   session_test_t *test;
   mongoc_write_concern_t *wc;
   bson_error_t error;

   /* The following tests assert that unacknowledged command does not set the
    * operationTime. Additionally, the "started" APM callback asserts that the
    * command does not include an lsid. */
   test = session_test_new (CORRECT_CLIENT, CAUSAL);
   test->expect_explicit_lsid = explicit_cs;
   test->acknowledged = false;

   /* w:0 == fire-and-forget */
   wc = mongoc_write_concern_new ();
   mongoc_write_concern_set_w (wc, 0);

   if (explicit_cs) {
      ASSERT_OR_PRINT (
         mongoc_client_session_append (test->cs, &test->opts, &error), error);
   }

   if (inherit_wc) {
      mongoc_client_set_write_concern (test->client, wc);
      mongoc_database_set_write_concern (test->db, wc);
      mongoc_collection_set_write_concern (test->collection, wc);
   } else {
      /* NOTE(review): the "_bad" append variant presumably bypasses the
       * validity check that would otherwise reject an unacknowledged write
       * concern in opts -- confirm against the definition of
       * mongoc_write_concern_append_bad. */
      BSON_ASSERT (mongoc_write_concern_append_bad (wc, &test->opts));
   }

   test_fn (test);
   check_success (test);
   if (test->succeeded) {
      ASSERT_MATCH (last_non_getmore_cmd (test), "{'writeConcern': {'w': 0}}");
      /* unacknowledged replies carry no operationTime */
      ASSERT_CMPUINT32 (test->cs->operation_timestamp, ==, (uint32_t) 0);
   }

   mongoc_write_concern_destroy (wc);
   session_test_destroy (test);
}
/* Trampoline: explicit session, inherited write concern. */
static void
test_unacknowledged_explicit_cs_inherit_wc (void *ctx)
{
   _test_unacknowledged (
      (session_test_fn_t) ((TestFnCtx *) ctx)->test_fn, true, true);
}
/* NOTE(review): the name says implicit_cs, but this passes explicit_cs=true,
 * inherit_wc=false. The add_unacknowledged_test macro selects this function
 * exactly for (_explicit_cs=true, _inherit_wc=false), so registered behavior
 * is correct despite the misleading name. Renaming would require changing the
 * macro and all four trampolines together -- confirm before touching. */
static void
test_unacknowledged_implicit_cs_explicit_wc (void *ctx)
{
   _test_unacknowledged (
      (session_test_fn_t) ((TestFnCtx *) ctx)->test_fn, true, false);
}
/* Trampoline: implicit session, inherited write concern. */
static void
test_unacknowledged_implicit_cs_inherit_wc (void *ctx)
{
   _test_unacknowledged (
      (session_test_fn_t) ((TestFnCtx *) ctx)->test_fn, false, true);
}
/* NOTE(review): the name says explicit_cs, but this passes explicit_cs=false,
 * inherit_wc=false. The add_unacknowledged_test macro selects this function
 * exactly for (_explicit_cs=false, _inherit_wc=false), so registered behavior
 * is correct despite the misleading name (see the macro's selection logic). */
static void
test_unacknowledged_explicit_cs_explicit_wc (void *ctx)
{
   _test_unacknowledged (
      (session_test_fn_t) ((TestFnCtx *) ctx)->test_fn, false, false);
}
/* Register a session test. _allow_read_concern selects the runner variant:
 * true for operations that may carry readConcern, false for those that must
 * not. Both runners require crypto and a server reporting $clusterTime. */
#define add_session_test(_suite, _name, _test_fn, _allow_read_concern)   \
   TestSuite_AddFullWithTestFn (                                         \
      _suite,                                                            \
      _name,                                                             \
      (_allow_read_concern) ? run_session_test : run_session_test_no_rc, \
      NULL,                                                              \
      _test_fn,                                                          \
      test_framework_skip_if_no_cluster_time,                            \
      test_framework_skip_if_no_crypto)

/* Same as add_session_test, with extra trailing skip-check callbacks. */
#define add_session_test_wc(_suite, _name, _test_fn, _allow_read_concern, ...) \
   TestSuite_AddFullWithTestFn (                                         \
      _suite,                                                            \
      _name,                                                             \
      (_allow_read_concern) ? run_session_test : run_session_test_no_rc, \
      NULL,                                                              \
      _test_fn,                                                          \
      test_framework_skip_if_no_cluster_time,                            \
      test_framework_skip_if_no_crypto,                                  \
      __VA_ARGS__)

/* Register an unacknowledged-write test for one explicit/implicit-session x
 * inherited/explicit-write-concern combination.
 * NOTE(review): for the two mixed combinations the selected trampoline's NAME
 * is swapped relative to its arguments, but each trampoline passes the flag
 * values matching the (_explicit_cs, _inherit_wc) pair that selects it, so
 * the registered behavior is correct -- see the trampolines above. */
#define add_unacknowledged_test(                                  \
   _suite, _name, _test_fn, _explicit_cs, _inherit_wc)            \
   TestSuite_AddFullWithTestFn (                                  \
      _suite,                                                     \
      _name,                                                      \
      (_explicit_cs)                                              \
         ? (_inherit_wc ? test_unacknowledged_explicit_cs_inherit_wc  \
                        : test_unacknowledged_implicit_cs_explicit_wc) \
         : (_inherit_wc ? test_unacknowledged_implicit_cs_inherit_wc  \
                        : test_unacknowledged_explicit_cs_explicit_wc), \
      NULL,                                                       \
      _test_fn,                                                   \
      test_framework_skip_if_no_cluster_time,                     \
      test_framework_skip_if_no_crypto)
/* json_test_config_t operation callback: resolve the named session (if any)
 * from the operation document, then dispatch to json_test_operation. */
static bool
_test_run_operation (json_test_ctx_t *ctx,
                     const bson_t *test,
                     const bson_t *operation)
{
   mongoc_client_session_t *session = NULL;
   bson_t reply;

   /* Look up the session to use by name. Really, json_test_operation should
    * probably handle this. Let's wait until unified test runner is spec'ed. */
   if (bson_has_field (operation, "arguments.session")) {
      const char *session_name =
         bson_lookup_utf8 (operation, "arguments.session");
      session = session_from_name (ctx, session_name);
   }

   json_test_operation (ctx, test, operation, ctx->collection, session, &reply);
   bson_destroy (&reply);

   return true;
}
/* Run one JSON spec scenario through the general JSON test runner,
 * routing operations through _test_run_operation. */
static void
test_sessions_spec_cb (bson_t *scenario)
{
   json_test_config_t config = JSON_TEST_CONFIG_INIT;

   config.scenario = scenario;
   config.command_started_events_only = true;
   config.run_operation_cb = _test_run_operation;

   run_json_general_test (&config);
}
/* Test that a session is made dirty after a network error, and that it is not
 * added back to the session pool.
 *   retry_succeeds: true  -> failpoint fires once, so the retried insert
 *                            succeeds;
 *                   false -> failpoint fires twice, so the retry fails too.
 * Either way the session must end up dirty and must not be pooled again. */
static void
_test_session_dirty_helper (bool retry_succeeds)
{
   mongoc_client_t *client;
   mongoc_collection_t *coll;
   mongoc_client_session_t *session;
   bson_t opts;
   bool ret;
   bson_error_t error;
   bson_t *failpoint_cmd;
   int pooled_session_count_pre;
   int pooled_session_count_post;
   int fail_count;
   mongoc_uri_t *uri;

   /* retryable writes must be enabled for the retried-insert path */
   uri = test_framework_get_uri ();
   mongoc_uri_set_option_as_bool (uri, MONGOC_URI_RETRYWRITES, true);
   client = test_framework_client_new_from_uri (uri, NULL);
   test_framework_set_ssl_opts (client);

   session = mongoc_client_start_session (client, NULL /* opts */, &error);
   ASSERT_OR_PRINT (session, error);
   coll = mongoc_client_get_collection (client, "test", "test");
   bson_init (&opts);
   ret = mongoc_client_session_append (session, &opts, &error);
   ASSERT_OR_PRINT (ret, error);

   /* a clean insert leaves the session clean */
   ret = mongoc_collection_insert_one (
      coll, tmp_bson ("{}"), &opts, NULL /* reply */, &error);
   ASSERT_OR_PRINT (ret, error);
   BSON_ASSERT (!session->server_session->dirty);

   if (retry_succeeds) {
      /* Only fail once, so retried insert succeeds. */
      fail_count = 1;
   } else {
      /* Fail twice, so retried insert fails as well. */
      fail_count = 2;
   }

   /* Enable failpoint: close the connection on the next insert(s). */
   failpoint_cmd = BCON_NEW ("configureFailPoint",
                             "failCommand",
                             "mode",
                             "{",
                             "times",
                             BCON_INT32 (fail_count),
                             "}",
                             "data",
                             "{",
                             "failCommands",
                             "[",
                             "insert",
                             "]",
                             "closeConnection",
                             BCON_BOOL (true),
                             "}");
   ret = mongoc_client_command_simple (client,
                                       "admin",
                                       failpoint_cmd,
                                       NULL /* read prefs */,
                                       NULL /* reply */,
                                       &error);
   ASSERT_OR_PRINT (ret, error);

   ret = mongoc_collection_insert_one (
      coll, tmp_bson ("{}"), &opts, NULL /* reply */, &error);
   if (retry_succeeds) {
      ASSERT_OR_PRINT (ret, error);
   } else {
      BSON_ASSERT (!ret);
      ASSERT_ERROR_CONTAINS (error,
                             MONGOC_ERROR_STREAM,
                             MONGOC_ERROR_STREAM_SOCKET,
                             "socket error");
   }

   /* Regardless of whether the retry succeeded, the session should be marked
    * dirty */
   BSON_ASSERT (session->server_session->dirty);

   pooled_session_count_pre =
      mongoc_server_session_pool_size (client->topology->session_pool);
   mongoc_client_session_destroy (session);
   pooled_session_count_post =
      mongoc_server_session_pool_size (client->topology->session_pool);
   /* Check that destroying the session did not add it back to the pool. */
   ASSERT_CMPINT (pooled_session_count_pre, ==, pooled_session_count_post);

   /* best-effort cleanup: disable the failpoint (result intentionally
    * unchecked) */
   mongoc_client_command_simple (
      client,
      "admin",
      tmp_bson ("{'configureFailPoint': 'failCommand', 'mode': 'off'}"),
      NULL /* read prefs */,
      NULL /* reply */,
      &error);

   bson_destroy (&opts);
   bson_destroy (failpoint_cmd);
   mongoc_collection_destroy (coll);
   mongoc_client_destroy (client);
   mongoc_uri_destroy (uri);
}
/* Run the dirty-session test for both the retry-succeeds and retry-fails
 * paths. */
static void
test_session_dirty (void *unused)
{
   _test_session_dirty_helper (true /* retry succeeds */);
   _test_session_dirty_helper (false /* retry fails */);
}
void
test_sessions_snapshot_prose_test_1 (void *ctx)
{
mongoc_client_t *client = NULL;
mongoc_session_opt_t *session_opts = NULL;
bson_error_t error;
bool r;
client = test_framework_new_default_client ();
BSON_ASSERT (client);
session_opts = mongoc_session_opts_new ();
mongoc_session_opts_set_causal_consistency (session_opts, true);
mongoc_session_opts_set_snapshot (session_opts, true);
/* assert that starting session with causal consistency and snapshot enabled
* results in an error. */
r = mongoc_client_start_session (client, session_opts, &error);
ASSERT (!r);
ASSERT_ERROR_CONTAINS (
error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_SESSION_FAILURE,
"Only one of causal consistency and snapshot can be enabled.");
mongoc_session_opts_destroy (session_opts);
mongoc_client_destroy (client);
}
/* Register every session test in this file with the test suite. Grouped
 * roughly as: option/pool unit tests, endSessions tests, per-operation
 * session tests (add_session_test), the unacknowledged-write matrix
 * (add_unacknowledged_test), and data-driven JSON spec tests. */
void
test_session_install (TestSuite *suite)
{
   char resolved[PATH_MAX];

   TestSuite_Add (suite, "/Session/opts/clone", test_session_opts_clone);
   TestSuite_Add (suite,
                  "/Session/opts/causal_consistency_and_snapshot",
                  test_session_opts_causal_consistency_and_snapshot);
   TestSuite_AddFull (suite, "/Session/no_crypto", test_session_no_crypto,
                      NULL, NULL,
                      TestSuite_CheckLive,
                      test_framework_skip_if_no_sessions,
                      test_framework_skip_if_crypto);

   /* server-session pool behavior */
   TestSuite_AddFull (suite, "/Session/lifo/single",
                      test_session_pool_lifo_single,
                      NULL, NULL,
                      test_framework_skip_if_no_sessions,
                      test_framework_skip_if_no_crypto);
   TestSuite_AddFull (suite, "/Session/lifo/pooled",
                      test_session_pool_lifo_pooled,
                      NULL, NULL,
                      test_framework_skip_if_no_sessions,
                      test_framework_skip_if_no_crypto);
   TestSuite_AddFull (suite, "/Session/timeout/single",
                      test_session_pool_timeout_single,
                      NULL, NULL,
                      test_framework_skip_if_no_sessions,
                      test_framework_skip_if_no_crypto,
                      test_framework_skip_if_slow);
   TestSuite_AddFull (suite, "/Session/timeout/pooled",
                      test_session_pool_timeout_pooled,
                      NULL, NULL,
                      test_framework_skip_if_no_sessions,
                      test_framework_skip_if_no_crypto,
                      test_framework_skip_if_slow);
   TestSuite_AddFull (suite, "/Session/reap/single",
                      test_session_pool_reap_single,
                      NULL, NULL,
                      test_framework_skip_if_no_sessions,
                      test_framework_skip_if_no_crypto,
                      test_framework_skip_if_slow);
   TestSuite_AddFull (suite, "/Session/reap/pooled",
                      test_session_pool_reap_pooled,
                      NULL, NULL,
                      test_framework_skip_if_no_sessions,
                      test_framework_skip_if_no_crypto,
                      test_framework_skip_if_slow);
   TestSuite_AddFull (suite, "/Session/id_bad", test_session_id_bad,
                      NULL, NULL,
                      test_framework_skip_if_no_sessions,
                      test_framework_skip_if_no_crypto);
   TestSuite_AddFull (suite, "/Session/supported/single",
                      test_session_supported_single,
                      NULL, NULL,
                      TestSuite_CheckLive,
                      test_framework_skip_if_no_crypto);
   TestSuite_AddFull (suite, "/Session/supported/pooled",
                      test_session_supported_pooled,
                      NULL, NULL,
                      TestSuite_CheckLive,
                      test_framework_skip_if_no_crypto);

   /* endSessions behavior */
   TestSuite_AddMockServerTest (suite, "/Session/end/mock/single",
                                test_mock_end_sessions_single,
                                test_framework_skip_if_no_crypto);
   TestSuite_AddMockServerTest (suite, "/Session/end/mock/pooled",
                                test_mock_end_sessions_pooled,
                                test_framework_skip_if_no_crypto);
   TestSuite_AddMockServerTest (suite, "/Session/end/mock/disconnected",
                                test_mock_end_sessions_server_disconnect,
                                test_framework_skip_if_no_crypto);
   TestSuite_AddFull (suite, "/Session/end/single", test_end_sessions_single,
                      NULL, NULL,
                      test_framework_skip_if_no_crypto,
                      test_framework_skip_if_max_wire_version_less_than_6);
   TestSuite_AddFull (suite, "/Session/end/pooled", test_end_sessions_pooled,
                      NULL, NULL,
                      test_framework_skip_if_no_crypto,
                      test_framework_skip_if_max_wire_version_less_than_6);
   TestSuite_AddFull (suite, "/Session/end/many/single",
                      test_end_sessions_many_single,
                      NULL, NULL,
                      test_framework_skip_if_no_crypto,
                      test_framework_skip_if_max_wire_version_less_than_6,
                      test_framework_skip_if_slow);
   TestSuite_AddFull (suite, "/Session/end/many/pooled",
                      test_end_sessions_many_pooled,
                      NULL, NULL,
                      test_framework_skip_if_no_crypto,
                      test_framework_skip_if_max_wire_version_less_than_6,
                      test_framework_skip_if_slow);
   TestSuite_AddFull (suite, "/Session/advance_cluster_time",
                      test_session_advance_cluster_time,
                      NULL, NULL,
                      test_framework_skip_if_no_crypto,
                      test_framework_skip_if_no_sessions);
   TestSuite_AddFull (suite, "/Session/advance_operation_time",
                      test_session_advance_operation_time,
                      NULL, NULL,
                      test_framework_skip_if_no_crypto,
                      test_framework_skip_if_no_sessions);

   /* "true" is for tests that expect readConcern: afterClusterTime for causally
    * consistent sessions, "false" is for tests that prohibit readConcern */
   add_session_test (suite, "/Session/cmd", test_cmd, false);
   add_session_test (suite, "/Session/read_cmd", test_read_cmd, true);
   add_session_test (suite, "/Session/write_cmd", test_write_cmd, false);
   add_session_test (
      suite, "/Session/read_write_cmd", test_read_write_cmd, true);
   add_session_test (suite, "/Session/db_cmd", test_db_cmd, false);
   TestSuite_AddFullWithTestFn (suite, "/Session/count",
                                (TestFuncWC) run_count_test,
                                NULL,
                                test_count,
                                test_framework_skip_if_no_cluster_time,
                                test_framework_skip_if_no_crypto);
   add_session_test (suite, "/Session/cursor", test_cursor, true);
   add_session_test (suite, "/Session/drop", test_drop, false);
   add_session_test (suite, "/Session/drop_index", test_drop_index, false);
   add_session_test (suite, "/Session/create_index", test_create_index, false);
   add_session_test (suite, "/Session/replace_one", test_replace_one, false);
   add_session_test (suite, "/Session/update_one", test_update_one, false);
   add_session_test (suite, "/Session/update_many", test_update_many, false);
   add_session_test (suite, "/Session/insert_one", test_insert_one, false);
   add_session_test (suite, "/Session/insert_many", test_insert_many, false);
   add_session_test (suite, "/Session/delete_one", test_delete_one, false);
   add_session_test (suite, "/Session/delete_many", test_delete_many, false);
   add_session_test (suite, "/Session/rename", test_rename, false);
   add_session_test (suite, "/Session/fam", test_fam, true);
   add_session_test (suite, "/Session/db_drop", test_db_drop, false);
   add_session_test (suite, "/Session/gridfs_find", test_gridfs_find, true);
   add_session_test (
      suite, "/Session/gridfs_find_one", test_gridfs_find_one, true);
   add_session_test_wc (suite, "/Session/watch", test_watch, true,
                        test_framework_skip_if_not_rs_version_6);
   add_session_test (suite, "/Session/aggregate", test_aggregate, true);
   add_session_test (suite, "/Session/create", test_create, false);
   add_session_test (
      suite, "/Session/database_names", test_database_names, true);
   add_session_test (
      suite, "/Session/find_databases", test_find_databases, true);
   add_session_test (
      suite, "/Session/find_collections", test_find_collections, true);
   add_session_test (
      suite, "/Session/collection_names", test_collection_names, true);
   add_session_test (suite, "/Session/bulk", test_bulk, false);
   add_session_test (suite, "/Session/find_indexes", test_find_indexes, true);
   TestSuite_AddFullWithTestFn (suite, "/Session/bulk_set_session",
                                run_session_test_bulk_operation,
                                NULL,
                                test_bulk_set_session,
                                test_framework_skip_if_no_cluster_time,
                                test_framework_skip_if_no_crypto);
   TestSuite_AddFullWithTestFn (suite, "/Session/bulk_set_client",
                                run_session_test_bulk_operation,
                                NULL,
                                test_bulk_set_client,
                                test_framework_skip_if_no_cluster_time,
                                test_framework_skip_if_no_crypto);
   TestSuite_AddFull (suite, "/Session/cursor_implicit_session",
                      test_cursor_implicit_session,
                      NULL, NULL,
                      test_framework_skip_if_no_cluster_time,
                      test_framework_skip_if_no_crypto);
   TestSuite_AddFull (suite, "/Session/change_stream_implicit_session",
                      test_change_stream_implicit_session,
                      NULL, NULL,
                      test_framework_skip_if_no_cluster_time,
                      test_framework_skip_if_no_crypto);
   TestSuite_AddFull (suite, "/Session/cmd_error", test_cmd_error,
                      NULL, NULL,
                      test_framework_skip_if_no_cluster_time,
                      test_framework_skip_if_no_crypto);
   TestSuite_AddFull (suite, "/Session/read_concern", test_read_concern,
                      NULL, NULL,
                      test_framework_skip_if_no_cluster_time,
                      test_framework_skip_if_no_crypto);

   /* unacknowledged-write matrix:
    * {operation} x {explicit/implicit session} x {inherited/explicit wc} */
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/insert_one/explicit_cs/inherit_wc",
      test_insert_one, true, true);
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/insert_one/explicit_cs/explicit_wc",
      test_insert_one, true, false);
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/insert_one/implicit_cs/inherit_wc",
      test_insert_one, false, true);
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/insert_one/implicit_cs/explicit_wc",
      test_insert_one, false, false);
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/bulk/explicit_cs/inherit_wc",
      test_bulk, true, true);
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/bulk/explicit_cs/explicit_wc",
      test_bulk, true, false);
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/bulk/implicit_cs/inherit_wc",
      test_bulk, false, true);
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/bulk/implicit_cs/explicit_wc",
      test_bulk, false, false);

   /* find_and_modify_with_opts only inherits acknowledged write concerns, so
    * skip tests that inherit a write concern. Technically, an explicit
    * unacknowledged write concern doesn't make much sense with findAndModify,
    * but this is testing the common code path for command execution. */
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/find_and_modify/explicit_cs/explicit_wc",
      test_fam, true, false);
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/find_and_modify/implicit_cs/explicit_wc",
      test_fam, false, false);

   /* command_with_opts also does not inherit write concerns, but we still want
    * to test the common code path for command execution. */
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/db_cmd/explicit_cs/explicit_wc",
      test_db_cmd, true, false);
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/db_cmd/implicit_cs/explicit_wc",
      test_db_cmd, false, false);
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/read_write_cmd/explicit_cs/inherit_wc",
      test_read_write_cmd, true, true);
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/read_write_cmd/explicit_cs/explicit_wc",
      test_read_write_cmd, true, false);
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/read_write_cmd/implicit_cs/inherit_wc",
      test_read_write_cmd, false, true);
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/read_write_cmd/implicit_cs/explicit_wc",
      test_read_write_cmd, false, false);
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/write_cmd/explicit_cs/inherit_wc",
      test_write_cmd, true, true);
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/write_cmd/explicit_cs/explicit_wc",
      test_write_cmd, true, false);
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/write_cmd/implicit_cs/inherit_wc",
      test_write_cmd, false, true);
   add_unacknowledged_test (
      suite,
      "/Session/unacknowledged/write_cmd/implicit_cs/explicit_wc",
      test_write_cmd, false, false);

   /* data-driven spec tests from JSON files */
   ASSERT (realpath (JSON_DIR "/sessions/legacy", resolved));
   install_json_test_suite_with_check (suite,
                                       resolved,
                                       test_sessions_spec_cb,
                                       test_framework_skip_if_no_sessions);
   TestSuite_AddFull (
      suite,
      "/Session/dirty",
      test_session_dirty,
      NULL /* dtor */,
      NULL /* ctx */,
      test_framework_skip_if_no_sessions,
      test_framework_skip_if_no_failpoint,
      /* Tests with retryable writes, requires non-standalone. */
      test_framework_skip_if_single);
   TestSuite_AddFull (suite, "/Session/snapshot/prose_test_1",
                      test_sessions_snapshot_prose_test_1,
                      NULL, NULL,
                      test_framework_skip_if_no_sessions,
                      test_framework_skip_if_no_crypto);
}
|
703266.c | // -*-Mode: C++;-*- // technically C99
// * BeginRiceCopyright *****************************************************
//
// $HeadURL$
// $Id$
//
// --------------------------------------------------------------------------
// Part of HPCToolkit (hpctoolkit.org)
//
// Information about sources of support for research and development of
// HPCToolkit is at 'hpctoolkit.org' and in 'README.Acknowledgments'.
// --------------------------------------------------------------------------
//
// Copyright ((c)) 2002-2018, Rice University
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of Rice University (RICE) nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// This software is provided by RICE and contributors "as is" and any
// express or implied warranties, including, but not limited to, the
// implied warranties of merchantability and fitness for a particular
// purpose are disclaimed. In no event shall RICE or contributors be
// liable for any direct, indirect, incidental, special, exemplary, or
// consequential damages (including, but not limited to, procurement of
// substitute goods or services; loss of use, data, or profits; or
// business interruption) however caused and on any theory of liability,
// whether in contract, strict liability, or tort (including negligence
// or otherwise) arising in any way out of the use of this software, even
// if advised of the possibility of such damage.
//
// ******************************************************* EndRiceCopyright *
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <messages/messages.h>
#include <lib/support-lean/OSUtil.h>
#include "sample_prob.h"
#define HPCRUN_SAMPLE_PROB "HPCRUN_PROCESS_FRACTION"
#define DEFAULT_PROB 0.1
#define HASH_PRIME 2001001003
#define HASH_GEN 4011
static int is_init = 0;
static pid_t orig_pid = 0;
static int sample_prob_ans = 1;
static char *sample_prob_str = NULL;
static int prob_str_broken = 0;
static int prob_str_mesg = 0;
// -------------------------------------------------------------------
// This file implements probability-based sampling. All processes
// continue to take samples, but if HPCRUN_SAMPLE_PROB is set in the
// environment, then only a fraction of the processes (based on a
// pseudo-random seed) open their .log and .hpcrun files and write out
// their results.
//
// HPCRUN_SAMPLE_PROB may be written as a a floating point number or
// as a fraction. So, '0.10' and '1/10' are equivalent.
//
// The decision of which processes are active is process-wide, not
// per-thread (for now).
// -------------------------------------------------------------------
// Accept 0.ddd as floating point or x/y as fraction.
// Note: must delay printing any errors.
//
/* Parse a probability given either as a fraction "x/y" or as a plain
 * floating-point literal. On malformed input, record the failure in
 * prob_str_broken and fall back to DEFAULT_PROB (errors are printed later,
 * after the log files are open). */
static float
string_to_prob(char *str)
{
  int num, den;
  float prob;

  if (strchr(str, '/') == NULL) {
    /* plain floating-point form, e.g. "0.10" */
    if (sscanf(str, "%f", &prob) == 1) {
      return prob;
    }
    prob_str_broken = 1;
    return DEFAULT_PROB;
  }

  /* fraction form, e.g. "1/10"; denominator must be positive */
  if (sscanf(str, "%d/%d", &num, &den) == 2 && den > 0) {
    return (float)num / (float)den;
  }
  prob_str_broken = 1;
  return DEFAULT_PROB;
}
// Combine the hostid, the time of day in microseconds and
// /dev/urandom (if available), run it through a hash function and
// produce a pseudo-random value in the range [0.0, 1.0).
//
// This is a simple hash function based on the exponential mod
// function with good cryptographic properties. MD5 or SHA-1 would
// make sense, but those require bringing in extra libraries.
//
// Anyway, the choice of seed is far more important than the hash
// function here.
//
/* Seed from hostid + time-of-day + /dev/urandom, hash with modular
 * exponentiation, and map the result into [0.0, 1.0). */
static float
random_hash_prob(void)
{
  struct timeval tv;
  uint64_t a, b, x, rand;
  ssize_t len;
  int fd;

  // Mix in /dev/urandom if available. Check the read() result (previously
  // ignored): on error or EOF, fall back to hostid + time alone. A short
  // read is acceptable -- any extra entropy helps.
  rand = 0;
  fd = open("/dev/urandom", O_RDONLY);
  if (fd >= 0) {
    len = read(fd, &rand, sizeof(rand));
    if (len <= 0) {
      rand = 0;
    }
    close(fd);
  }

  gettimeofday(&tv, NULL);

  // Build the seed: hostid in the high bits, microseconds and urandom
  // folded in below; mask the top 4 bits, then reduce mod HASH_PRIME.
  x = (((uint64_t) OSUtil_hostid()) << 24) + (tv.tv_usec << 4) + rand;
  x = (x & ~(((uint64_t) 15) << 60)) % HASH_PRIME;

  // Compute gen^x (mod prime) by square-and-multiply.
  // Invariant: a * (b ^ x) = gen^(orig x) (mod prime).
  a = 1;
  b = HASH_GEN;
  while (x > 0) {
    if (x % 2 == 0) {
      b = (b * b) % HASH_PRIME;
      x = x/2;
    } else {
      a = (a * b) % HASH_PRIME;
      x = x - 1;
    }
  }

  return (float)a / (float)HASH_PRIME;
}
/* Decide (once per process) whether this process is in the sampled set.
 * Recomputed only on first call or after fork, so the answer is stable
 * within one process. With HPCRUN_SAMPLE_PROB unset, every process wins. */
void
hpcrun_sample_prob_init(void)
{
  pid_t cur_pid = getpid();

  // For consistency, don't recompute the sample probability if the
  // pid hasn't changed. But do recompute in the child after fork.
  if (is_init && cur_pid == orig_pid) {
    return;
  }
  orig_pid = cur_pid;

  sample_prob_str = getenv(HPCRUN_SAMPLE_PROB);
  sample_prob_ans = (sample_prob_str == NULL)
    ? 1
    : (random_hash_prob() < string_to_prob(sample_prob_str));

  is_init = 1;
}
/* Return nonzero iff this process should write out its results,
 * lazily initializing the decision on first use. */
int
hpcrun_sample_prob_active(void)
{
  if (is_init == 0) {
    hpcrun_sample_prob_init();
  }
  return sample_prob_ans;
}
// We can't print messages while computing the sample probability
// because that would trigger opening the log files and recomputing
// the sample probability. Instead, we have to record the failure and
// depend on the caller to call us again after the log files are
// opened.
//
/* Report a malformed HPCRUN_SAMPLE_PROB value at most once, and only in
 * processes that are active (only they have open log files for EMSG). */
void
hpcrun_sample_prob_mesg(void)
{
  if (prob_str_broken && (! prob_str_mesg) && sample_prob_ans) {
    EMSG("malformed probability in %s (%s), using default value of %f",
         HPCRUN_SAMPLE_PROB, sample_prob_str, DEFAULT_PROB);
    prob_str_mesg = 1;
  }
}
|
837282.c | #include "test_ip4.h"
#include "lwip/ip4.h"
#include "lwip/inet_chksum.h"
#include "lwip/stats.h"
#include "lwip/prot/ip.h"
#include "lwip/prot/ip4.h"
#include "lwip/tcpip.h"
#if !LWIP_IPV4 || !IP_REASSEMBLY || !MIB2_STATS || !IPFRAG_STATS
#error "This tests needs LWIP_IPV4, IP_REASSEMBLY; MIB2- and IPFRAG-statistics enabled"
#endif
/* Helper functions */
/* Build a minimal IPv4 fragment (header plus uninitialized payload bytes)
 * and feed it to ip4_input() on the first netif.
 *   ip_id: IP identification shared by all fragments of one datagram
 *   start: payload byte offset of this fragment (must be 8-byte aligned)
 *   len:   payload length (multiple of 8 unless this is the last fragment)
 *   last:  nonzero for the final fragment (MF flag cleared) */
static void
create_ip4_input_fragment(u16_t ip_id, u16_t start, u16_t len, int last)
{
  struct pbuf *p;
  struct netif *input_netif = netif_list; /* just use any netif */
  fail_unless((start & 7) == 0);
  fail_unless(((len & 7) == 0) || last);
  fail_unless(input_netif != NULL);
  p = pbuf_alloc(PBUF_RAW, len + sizeof(struct ip_hdr), PBUF_RAM);
  fail_unless(p != NULL);
  if (p != NULL) {
    err_t err;
    struct ip_hdr *iphdr = (struct ip_hdr *)p->payload;
    /* IPv4, 20-byte header (no options) */
    IPH_VHL_SET(iphdr, 4, sizeof(struct ip_hdr) / 4);
    IPH_TOS_SET(iphdr, 0);
    IPH_LEN_SET(iphdr, lwip_htons(p->tot_len));
    IPH_ID_SET(iphdr, lwip_htons(ip_id));
    /* fragment offset is in 8-byte units; MF set on all but the last */
    if (last) {
      IPH_OFFSET_SET(iphdr, lwip_htons(start / 8));
    } else {
      IPH_OFFSET_SET(iphdr, lwip_htons((start / 8) | IP_MF));
    }
    IPH_TTL_SET(iphdr, 5);
    IPH_PROTO_SET(iphdr, IP_PROTO_UDP);
    IPH_CHKSUM_SET(iphdr, 0);
    /* source = netif address + 1 -- presumably so the packet looks like it
     * comes from a neighbor rather than from ourselves; TODO confirm */
    ip4_addr_copy(iphdr->src, *netif_ip4_addr(input_netif));
    iphdr->src.addr = lwip_htonl(lwip_htonl(iphdr->src.addr) + 1);
    ip4_addr_copy(iphdr->dest, *netif_ip4_addr(input_netif));
    /* checksum over the header only, computed after zeroing the field */
    IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, sizeof(struct ip_hdr)));
    err = ip4_input(p, input_netif);
    if (err != ERR_OK) {
      pbuf_free(p);
    }
    fail_unless(err == ERR_OK);
  }
}
/* Setups/teardown functions */
/* Per-test setup: verify there are no stray allocations before the test
 * starts (the system-timeout pool is excluded). */
static void
ip4_setup(void)
{
  lwip_check_ensure_no_alloc(SKIP_POOL(MEMP_SYS_TIMEOUT));
}
/* Per-test teardown: free any packet queued on the loopback netif, let the
 * stack release pending memory, then verify nothing is still allocated. */
static void
ip4_teardown(void)
{
  if (netif_list->loop_first != NULL) {
    pbuf_free(netif_list->loop_first);
    netif_list->loop_first = NULL;
  }
  netif_list->loop_last = NULL;
  /* poll until all memory is released... */
  tcpip_thread_poll_one();
  lwip_check_ensure_no_alloc(SKIP_POOL(MEMP_SYS_TIMEOUT));
}
/* Test functions */
START_TEST(test_ip4_reass)
{
const u16_t ip_id = 128;
LWIP_UNUSED_ARG(_i);
memset(&lwip_stats.mib2, 0, sizeof(lwip_stats.mib2));
create_ip4_input_fragment(ip_id, 8*200, 200, 1);
fail_unless(lwip_stats.ip_frag.recv == 1);
fail_unless(lwip_stats.ip_frag.err == 0);
fail_unless(lwip_stats.ip_frag.memerr == 0);
fail_unless(lwip_stats.ip_frag.drop == 0);
fail_unless(lwip_stats.mib2.ipreasmoks == 0);
create_ip4_input_fragment(ip_id, 0*200, 200, 0);
fail_unless(lwip_stats.ip_frag.recv == 2);
fail_unless(lwip_stats.ip_frag.err == 0);
fail_unless(lwip_stats.ip_frag.memerr == 0);
fail_unless(lwip_stats.ip_frag.drop == 0);
fail_unless(lwip_stats.mib2.ipreasmoks == 0);
create_ip4_input_fragment(ip_id, 1*200, 200, 0);
fail_unless(lwip_stats.ip_frag.recv == 3);
fail_unless(lwip_stats.ip_frag.err == 0);
fail_unless(lwip_stats.ip_frag.memerr == 0);
fail_unless(lwip_stats.ip_frag.drop == 0);
fail_unless(lwip_stats.mib2.ipreasmoks == 0);
create_ip4_input_fragment(ip_id, 2*200, 200, 0);
fail_unless(lwip_stats.ip_frag.recv == 4);
fail_unless(lwip_stats.ip_frag.err == 0);
fail_unless(lwip_stats.ip_frag.memerr == 0);
fail_unless(lwip_stats.ip_frag.drop == 0);
fail_unless(lwip_stats.mib2.ipreasmoks == 0);
create_ip4_input_fragment(ip_id, 3*200, 200, 0);
fail_unless(lwip_stats.ip_frag.recv == 5);
fail_unless(lwip_stats.ip_frag.err == 0);
fail_unless(lwip_stats.ip_frag.memerr == 0);
fail_unless(lwip_stats.ip_frag.drop == 0);
fail_unless(lwip_stats.mib2.ipreasmoks == 0);
create_ip4_input_fragment(ip_id, 4*200, 200, 0);
fail_unless(lwip_stats.ip_frag.recv == 6);
fail_unless(lwip_stats.ip_frag.err == 0);
fail_unless(lwip_stats.ip_frag.memerr == 0);
fail_unless(lwip_stats.ip_frag.drop == 0);
fail_unless(lwip_stats.mib2.ipreasmoks == 0);
create_ip4_input_fragment(ip_id, 7*200, 200, 0);
fail_unless(lwip_stats.ip_frag.recv == 7);
fail_unless(lwip_stats.ip_frag.err == 0);
fail_unless(lwip_stats.ip_frag.memerr == 0);
fail_unless(lwip_stats.ip_frag.drop == 0);
fail_unless(lwip_stats.mib2.ipreasmoks == 0);
create_ip4_input_fragment(ip_id, 6*200, 200, 0);
fail_unless(lwip_stats.ip_frag.recv == 8);
fail_unless(lwip_stats.ip_frag.err == 0);
fail_unless(lwip_stats.ip_frag.memerr == 0);
fail_unless(lwip_stats.ip_frag.drop == 0);
fail_unless(lwip_stats.mib2.ipreasmoks == 0);
create_ip4_input_fragment(ip_id, 5*200, 200, 0);
fail_unless(lwip_stats.ip_frag.recv == 9);
fail_unless(lwip_stats.ip_frag.err == 0);
fail_unless(lwip_stats.ip_frag.memerr == 0);
fail_unless(lwip_stats.ip_frag.drop == 0);
fail_unless(lwip_stats.mib2.ipreasmoks == 1);
}
END_TEST
/** Create the suite including all tests for this module */
Suite *
ip4_suite(void)
{
  /* table of tests; the count is derived from the array itself */
  testfunc tests[] = {
    TESTFUNC(test_ip4_reass),
  };
  return create_suite("IPv4", tests, sizeof(tests)/sizeof(testfunc), ip4_setup, ip4_teardown);
}
|
662851.c | #include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
MODULE_AUTHOR("Washington Ruan");
MODULE_DESCRIPTION("A very simple loadable module that does almost nothing.");
int int_simple(void)
{
printk("in init module simple\n");
return 0;
}
void cleanup_simple(void)
{
printk("in cleanup module simple\n");
}
/* Wire the entry/exit handlers into the module loader. */
module_init(int_simple);
module_exit(cleanup_simple);
/* GPL license declaration avoids tainting the kernel on load. */
MODULE_LICENSE("GPL");
363384.c | /*
* The time server.
*/
#include <stdio.h>
#include <string.h>
#include <timeserver.h>
#include <syscall.h>
#include <limits.h>
/* NOTE(review): NOTIFIER_PRIORITY appears unused below -- Init() computes
   MyPriority() + 1 instead; confirm which is authoritative. */
#define NOTIFIER_PRIORITY (TIMESERVER_PRIORITY + 1)
/* Maximum number of processes that may be delayed simultaneously. */
#define WAITERS_MAX (32)
/* The current time, split into minutes / seconds / milliseconds. */
static int curmin;
static int cursec;
static int curms;
/* The waiting processes; a slot with pid == 0 is unused. */
static struct {
    int pid;              /* process blocked awaiting its delay; 0 = free */
    int remaining_delay;  /* ticks left before the process is woken */
} waiters[WAITERS_MAX];
/* Tracks free space in the waiters array: indices at free_top and above
   are free, so free_top also equals the number of slots in use. */
static int free_waiter_slots[WAITERS_MAX];
static int free_top;
/* Handles a timer tick. */
static void TimerTick ();
/* Adds a process to wait. */
static int AddDelay (int pid, int delay);
/* Set up and run the server. */
static void Init ();
static void Run ();
/* Entry point: initialize server state and helper tasks, then serve
   requests forever; Exit() is only reached if Run() ever returns. */
int main ()
{
    Init();
    Run();
    Exit();
}
/* One-time setup: zero the clock, build the free-slot stack, register
 * under TIMESERVER_NAME, then spawn and configure the notifier and
 * courier helpers one priority level above us.  Any creation or
 * handshake failure is fatal (Exit). */
static void Init ()
{
    curms = 0;
    cursec = 0;
    curmin = 0;
    /* initialize the waiters: no pids in use, every slot on the free stack */
    for (int i = 0; i < WAITERS_MAX; i++) {
        waiters[i].pid = 0;
        free_waiter_slots[i] = i;
    }
    free_top = 0;
    /* register ourselves */
    if (!RegisterAs(TIMESERVER_NAME)) {
        printf("RegisterAs failed!\n");
    }
    /* create our notifier and courier */
    int high_pri = MyPriority() + 1;
    int notifier_pid = Create("time_notifier", high_pri);
    if (notifier_pid <= 0) {
        printf("time server: Create notifier failed (%s)\n", strerror(notifier_pid));
        Exit();
    }
    int courier_pid = Create("time_courier", high_pri);
    if (courier_pid <= 0) {
        printf("time server: Create courier failed (%s)\n", strerror(courier_pid));
        Exit();
    }
    /* initialize them: the courier just needs a kick, the notifier gets
       the tick rate and the courier pid to forward ticks through */
    int result;
    result = Send(courier_pid, 0, 0, 0, 0);
    if (result < 0) {
        printf("time server: init Send to courier failed (%s)\n", strerror(result));
        Exit();
    }
    TimeNotifierCfg_t cfg = { TIMESERVER_HZ, courier_pid };
    result = Send(notifier_pid, (char *)&cfg, sizeof(cfg), 0, 0);
    if (result < 0) {
        printf("time server: init Send to notifier failed (%s)\n", strerror(result));
        Exit();
    }
}
/* Main request loop; never returns.  Protocol (single int request):
 *   > 0                -- delay that many ticks; the reply is deferred
 *                         until TimerTick expires the slot.  On a full
 *                         table we reply immediately with an empty
 *                         message (NOTE(review): indistinguishable from
 *                         a successful wake-up to the client -- confirm
 *                         callers treat an early reply as failure).
 *   TIMESERVER_GETTIME -- reply with the current Time_t snapshot.
 *   TIMESERVER_TICK    -- courier heartbeat: ack first, then advance. */
static void Run ()
{
    int request;
    for (;;) {
        int pid = Receive((char *)&request, sizeof(request));
        if (pid > 0) {
            if (request > 0) { /* delay */
                if (AddDelay(pid, request) < 0) {
                    Reply(pid, 0, 0); /* reply right away with failure */
                }
            } else if (request == TIMESERVER_GETTIME) { /* get time */
                Time_t time = { curms, cursec, curmin };
                Reply(pid, (char *)&time, sizeof(time));
            } else if (request == TIMESERVER_TICK) { /* tick */
                Reply(pid, 0, 0);
                TimerTick();
            }
        } else {
            printf("time server: Receive failed (%s)\n", strerror(pid));
        }
    }
}
/* Advance the clock by one tick (1 ms) and wake every waiter whose
 * countdown has just expired.  An expired slot is returned to the free
 * stack before its blocked process is replied to. */
static void TimerTick ()
{
    /* roll the ms / sec / min counters forward */
    curms++;
    if (curms >= 1000) {
        curms = 0;
        cursec++;
        if (cursec >= 60) {
            cursec = 0;
            curmin++;
        }
    }
    /* scan for waiters whose delay just ran out */
    for (int slot = 0; slot < WAITERS_MAX; slot++) {
        if (!waiters[slot].pid)
            continue;                       /* unused slot */
        if (--waiters[slot].remaining_delay != 0)
            continue;                       /* still counting down */
        /* free the slot, then wake the process */
        int woken_pid = waiters[slot].pid;
        waiters[slot].pid = 0;
        free_waiter_slots[--free_top] = slot;
        int status = Reply(woken_pid, 0, 0);
        if (status < 0) {
            printf("time server: could not wake up %d (%s)\n", woken_pid, strerror(status));
        }
    }
}
/* Reserve a waiter slot so 'pid' is woken after 'delay' ticks.
 * Returns 0 on success, -1 when all WAITERS_MAX slots are in use
 * (the caller then replies immediately to unblock the process). */
static int AddDelay (int pid, int delay)
{
    if (free_top >= WAITERS_MAX)
        return -1;                      /* table full */
    int slot = free_waiter_slots[free_top];
    free_top += 1;
    waiters[slot].pid = pid;
    waiters[slot].remaining_delay = delay;
    return 0;
}
|
104690.c | #include <defs.h>
#include <stdio.h>
#include <string.h>
#include <console.h>
#include <kdebug.h>
#include <picirq.h>
#include <trap.h>
#include <clock.h>
#include <intr.h>
#include <pmm.h>
#include <vmm.h>
#include <ide.h>
#include <swap.h>
#include <proc.h>
int kern_init(void) __attribute__((noreturn));
static void lab1_switch_test(void);
/* Kernel entry point after the bootloader: zero the BSS, bring the
   subsystems up in dependency order, then idle forever (noreturn). */
int
kern_init(void) {
    extern char edata[], end[];
    /* clear BSS (edata..end) before any global variable is used */
    memset(edata, 0, end - edata);
    cons_init();                // init the console
    const char *message = "(THU.CST) os is loading ...";
    cprintf("%s\n\n", message);
    print_kerninfo();
    grade_backtrace();          // exercise the stack-backtrace grader
    pmm_init();                 // init physical memory management
    pic_init();                 // init interrupt controller
    idt_init();                 // init interrupt descriptor table
    vmm_init();                 // init virtual memory management
    proc_init();                // init process table
    ide_init();                 // init ide devices
    swap_init();                // init swap
    clock_init();               // init clock interrupt
    intr_enable();              // enable irq interrupt
    //LAB1: CHALLENGE 1 If you try to do it, uncomment lab1_switch_test()
    // user/kernel mode switch test
    //lab1_switch_test();
    cpu_idle();                 // run idle process; never returns
}
/* Innermost frame of the backtrace exercise; the arguments are unused
   on purpose -- they only populate the stack frame so mon_backtrace
   has distinctive values to display.  noinline keeps the frame real. */
void __attribute__((noinline))
grade_backtrace2(int arg0, int arg1, int arg2, int arg3) {
    mon_backtrace(0, NULL, NULL);
}
/* Middle frame: forwards both the values and the stack addresses of its
   own arguments so each frame's contents differ in the backtrace.
   The (int) casts of pointers assume a 32-bit flat address space. */
void __attribute__((noinline))
grade_backtrace1(int arg0, int arg1) {
    grade_backtrace2(arg0, (int)&arg0, arg1, (int)&arg1);
}
/* Outer frame: arg1 is deliberately unused -- it only pads the frame. */
void __attribute__((noinline))
grade_backtrace0(int arg0, int arg1, int arg2) {
    grade_backtrace1(arg0, arg2);
}
/* Kick off the three-frame backtrace demo with recognizable values.
   Note the (int) cast of a function pointer assumes 32-bit pointers. */
void
grade_backtrace(void) {
    grade_backtrace0(0, (int)kern_init, 0xffff0000);
}
/* Dump the current segment registers (cs/ds/es/ss); the low two bits of
   %cs are the current privilege level, so "@ring" shows which mode the
   CPU is executing in. */
static void
lab1_print_cur_status(void) {
    static int round = 0;   /* distinguishes successive dumps */
    uint16_t reg1, reg2, reg3, reg4;
    asm volatile (
        "mov %%cs, %0;"
        "mov %%ds, %1;"
        "mov %%es, %2;"
        "mov %%ss, %3;"
        : "=m"(reg1), "=m"(reg2), "=m"(reg3), "=m"(reg4));
    cprintf("%d: @ring %d\n", round, reg1 & 3);
    cprintf("%d: cs = %x\n", round, reg1);
    cprintf("%d: ds = %x\n", round, reg2);
    cprintf("%d: es = %x\n", round, reg3);
    cprintf("%d: ss = %x\n", round, reg4);
    round ++;
}
/* LAB1 CHALLENGE 1: switch the CPU to user mode (unimplemented stub). */
static void
lab1_switch_to_user(void) {
    //LAB1 CHALLENGE 1 : TODO
}
/* LAB1 CHALLENGE 1: switch back to kernel mode (unimplemented stub). */
static void
lab1_switch_to_kernel(void) {
    //LAB1 CHALLENGE 1 : TODO
}
/* Print the ring status before and after each (currently stubbed) mode
   switch so the transitions can be verified once implemented. */
static void
lab1_switch_test(void) {
    lab1_print_cur_status();
    cprintf("+++ switch to user mode +++\n");
    lab1_switch_to_user();
    lab1_print_cur_status();
    cprintf("+++ switch to kernel mode +++\n");
    lab1_switch_to_kernel();
    lab1_print_cur_status();
}
|
918639.c | #include <spell.h>
inherit SPELL;
// Spell definition: "frightful blast", a level-1 warlock blast essence
// applied to eldritch blast rather than cast directly (see preSpell).
void create() {
    ::create();
    set_author("nienne");
    set_spell_name("frightful blast");
    set_spell_level(([ "warlock" : 1 ]));
    set_syntax("<blasttype frightful>");
    set_description("An eldritch blast with this essence applied will cause the target/s to "
    "become shaken for 1min. This will inflict a penalty to their attack rolls, saving throws, and "
    "skill checks. Subsequent eldritch blasts will not stack this effect, but will refresh its "
    "duration.\nSee also: <help blasttype>.");
    set_save("will");
}
// Block direct invocation: this essence only modifies an eldritch blast,
// so standalone casting is refused (returning 0 aborts the cast).
int preSpell() {
    tell_object(caster,"You can't cast this as an invocation! See <help blasttype> for syntax.");
    return 0;
}
899948.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint8_fp64
// op(A') function: GB_tran__lnot_uint8_fp64
// C type: uint8_t
// A type: double
// cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8)
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
double
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z ; GB_CAST_UNSIGNED(z,x,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = !(cast_to_uint8 (Ax [p]) != 0) for p = 0..anz-1, parallelized
   over nthreads.  Auto-generated: if any of LNOT/UINT8/FP64 are compiled
   out (GB_DISABLE), return GrB_NO_VALUE so the caller falls back to the
   generic worker.  Do not hand-edit; regenerate instead. */
GrB_Info GB_unop__lnot_uint8_fp64
(
    uint8_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = lnot (cast (A')): transpose, typecast, and apply the unary op in
   one pass.  The loop body lives in the included template file, which
   expands using the GB_* macros defined above.  Auto-generated. */
GrB_Info GB_tran__lnot_uint8_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
584234.c | #include <stdio.h>
/* Read NUM_RESPONSES survey ratings (1..MAX_RATING) from stdin and
 * print a histogram of how often each rating was chosen.  Out-of-range
 * ratings are reported and discarded; non-numeric input stops reading
 * early (the original looped on an uninitialized/stale 'response'). */
int main (void)
{
    enum { MAX_RATING = 10, NUM_RESPONSES = 20 };
    int ratingCounters[MAX_RATING + 1], i, response;

    /* slot 0 is unused: ratings index the array 1..MAX_RATING directly */
    for ( i = 1; i <= MAX_RATING; ++i )
        ratingCounters[i] = 0;

    printf ("Enter your responses\n");

    for ( i = 1; i <= NUM_RESPONSES; ++i )
    {
        /* check scanf: on a conversion failure 'response' would be
           indeterminate and the bad token would be re-read forever */
        if ( scanf ("%i", &response) != 1 )
        {
            printf ("Non-numeric input -- stopping early\n");
            break;
        }
        if ( response < 1 || response > MAX_RATING )
            printf ("Bad response: %i\n", response);
        else
            ++ratingCounters[response];
    }

    printf ("\n\nRating Number of Responses\n");
    printf ("------ -------------------\n");

    for ( i = 1; i <= MAX_RATING; ++i )
        printf ("%4i%14i\n", i, ratingCounters[i]);

    return 0;
}
183982.c | /* ---------------------------------------------------------------------
*
* -- Automatically Tuned Linear Algebra Software (ATLAS)
* (C) Copyright 2000 All Rights Reserved
*
* -- ATLAS routine -- Version 3.9.24 -- December 25, 2000
*
* Author : Antoine P. Petitet
* Originally developed at the University of Tennessee,
* Innovative Computing Laboratory, Knoxville TN, 37996-1301, USA.
*
* ---------------------------------------------------------------------
*
* -- Copyright notice and Licensing terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions, and the following disclaimer in
* the documentation and/or other materials provided with the distri-
* bution.
* 3. The name of the University, the ATLAS group, or the names of its
* contributors may not be used to endorse or promote products deri-
* ved from this software without specific written permission.
*
* -- Disclaimer:
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
* CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEO-
* RY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (IN-
* CLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---------------------------------------------------------------------
*/
/*
* Include files
*/
#include "atlas_refmisc.h"
#include "atlas_reflevel1.h"
/*
 * ATL_crefaxpy performs the single-precision complex AXPY operation
 *
 *    y := y + alpha * x,
 *
 * where alpha is a scalar and x and y are n-vectors stored as
 * interleaved (real, imaginary) float pairs.
 *
 * N      (input)  length of the vectors; must be at least zero.
 * ALPHA  (input)  the complex scalar { re, im }; when alpha is zero the
 *                 routine returns without reading X or touching Y.
 * X      (input)  first entry of the incremented source array, of size
 *                 at least ( 1 + ( n - 1 ) * abs( INCX ) ) complex elements.
 * INCX   (input)  stride, in complex elements, between entries of X;
 *                 must be nonzero, may be negative.
 * Y      (in/out) first entry of the incremented destination array; on
 *                 exit each visited entry is updated with alpha * x.
 * INCY   (input)  stride, in complex elements, of Y; must be nonzero.
 */
void ATL_crefaxpy
(
   const int                  N,
   const float                * ALPHA,
   const float                * X,
   const int                  INCX,
   float                      * Y,
   const int                  INCY
)
{
   register const float alpha_re = ALPHA[0], alpha_im = ALPHA[1];
   register float xre, xim, yre, yim;
   register int j;
   const int xstep = 2 * INCX, ystep = 2 * INCY;

   /* quick return when there is no work or alpha == 0 */
   if( ( N <= 0 ) || Mszero( alpha_re, alpha_im ) ) return;

   for( j = 0; j < N; j++ )
   {
      /* y := y + alpha * x, expanded over real/imaginary parts */
      xre = X[0]; xim = X[1];
      yre = Y[0]; yim = Y[1];
      Y[0] = yre + alpha_re * xre - alpha_im * xim;
      Y[1] = yim + alpha_im * xre + alpha_re * xim;
      X += xstep;
      Y += ystep;
   }
/*
 * End of ATL_crefaxpy
 */
}
|
482257.c | /* NetHack 3.7 minion.c $NHDT-Date: 1624322864 2021/06/22 00:47:44 $ $NHDT-Branch: NetHack-3.7 $:$NHDT-Revision: 1.60 $ */
/* Copyright (c) Stichting Mathematisch Centrum, Amsterdam, 1985. */
/*-Copyright (c) Robert Patrick Rankin, 2008. */
/* NetHack may be freely redistributed. See license for details. */
#include "hack.h"
/* used to pick among the four basic elementals without worrying whether
they've been reordered (difficulty reassessment?) or any new ones have
been introduced (hybrid types added to 'E'-class?) */
static const int elementals[4] = {
PM_AIR_ELEMENTAL, PM_FIRE_ELEMENTAL,
PM_EARTH_ELEMENTAL, PM_WATER_ELEMENTAL
};
/* Attach (and zero-fill) an emin extension to mtmp so minion state can
   be tracked; allocates the mextra container first if absent. */
void
newemin(struct monst *mtmp)
{
    if (!mtmp->mextra)
        mtmp->mextra = newmextra();
    if (!EMIN(mtmp)) {
        EMIN(mtmp) = (struct emin *) alloc(sizeof(struct emin));
        (void) memset((genericptr_t) EMIN(mtmp), 0, sizeof(struct emin));
    }
}
/* Release mtmp's emin extension (if any) and clear its minion flag. */
void
free_emin(struct monst *mtmp)
{
    if (mtmp->mextra && EMIN(mtmp)) {
        free((genericptr_t) EMIN(mtmp));
        EMIN(mtmp) = (struct emin *) 0;
    }
    mtmp->isminion = 0;
}
/* Count live monsters on the current level.  When 'spotted' is TRUE only
   monsters the hero can currently see or sense are counted; dead
   monsters and off-map vault guards (isgd with mx == 0) never count. */
int
monster_census(boolean spotted) /* seen|sensed vs all */
{
    int total = 0;
    struct monst *m;

    for (m = fmon; m; m = m->nmon) {
        if (DEADMONSTER(m) || (m->isgd && m->mx == 0))
            continue;
        if (!spotted || canspotmon(m))
            ++total;
    }
    return total;
}
/* mon summons a monster */
/* Have 'mon' (or the Wizard of Yendor when mon is null) summon demonic
 * or angelic reinforcements matching its class and alignment.  Returns
 * the net increase in the level's monster population; 0 when blocked
 * (e.g. wielded Demonbane) or nothing was created. */
int
msummon(struct monst *mon)
{
    struct permonst *ptr;
    int dtype = NON_PM, cnt = 0, result = 0, census;
    boolean xlight;
    aligntyp atyp;
    struct monst *mtmp;

    if (mon) {
        ptr = mon->data;
        /* Demonbane suppresses demonic summoning entirely */
        if (uwep && uwep->oartifact == ART_DEMONBANE && is_demon(ptr)) {
            if (canseemon(mon))
                pline("%s looks puzzled for a moment.", Monnam(mon));
            return 0;
        }
        atyp = mon->ispriest ? EPRI(mon)->shralign
               : mon->isminion ? EMIN(mon)->min_align
                 : (ptr->maligntyp == A_NONE) ? A_NONE
                   : sgn(ptr->maligntyp);
    } else {
        ptr = &mons[PM_WIZARD_OF_YENDOR];
        atyp = (ptr->maligntyp == A_NONE) ? A_NONE : sgn(ptr->maligntyp);
    }

    /* pick what gets summoned (dtype) and how many (cnt), with rarer
       summoners having a chance at higher-tier reinforcements */
    if (is_dprince(ptr) || (ptr == &mons[PM_WIZARD_OF_YENDOR])) {
        dtype = (!rn2(20)) ? dprince(atyp) : (!rn2(4)) ? dlord(atyp)
                                                       : ndemon(atyp);
        cnt = ((dtype != NON_PM)
               && !rn2(4) && is_ndemon(&mons[dtype])) ? 2 : 1;
    } else if (is_dlord(ptr)) {
        dtype = (!rn2(50)) ? dprince(atyp) : (!rn2(20)) ? dlord(atyp)
                                                        : ndemon(atyp);
        cnt = ((dtype != NON_PM)
               && !rn2(4) && is_ndemon(&mons[dtype])) ? 2 : 1;
    } else if (is_ndemon(ptr)) {
        dtype = (!rn2(20)) ? dlord(atyp) : (!rn2(6)) ? ndemon(atyp)
                                                     : monsndx(ptr);
        cnt = 1;
    } else if (is_lminion(mon)) {
        dtype = (is_lord(ptr) && !rn2(20))
                    ? llord()
                    : (is_lord(ptr) || !rn2(6)) ? lminion() : monsndx(ptr);
        cnt = ((dtype != NON_PM)
               && !rn2(4) && !is_lord(&mons[dtype])) ? 2 : 1;
    } else if (ptr == &mons[PM_ANGEL]) {
        /* non-lawful angels can also summon */
        if (!rn2(6)) {
            switch (atyp) { /* see summon_minion */
            case A_NEUTRAL:
                dtype = elementals[rn2(SIZE(elementals))];
                break;
            case A_CHAOTIC:
            case A_NONE:
                dtype = ndemon(atyp);
                break;
            }
        } else {
            dtype = PM_ANGEL;
        }
        cnt = ((dtype != NON_PM)
               && !rn2(4) && !is_lord(&mons[dtype])) ? 2 : 1;
    }

    if (dtype == NON_PM)
        return 0;

    /* sanity checks */
    if (cnt > 1 && (mons[dtype].geno & G_UNIQ) != 0)
        cnt = 1;

    /*
     * If this daemon is unique and being re-summoned (the only way we
     * could get this far with an extinct dtype), try another.
     */
    if ((g.mvitals[dtype].mvflags & G_GONE) != 0) {
        dtype = ndemon(atyp);
        if (dtype == NON_PM)
            return 0;
    }

    /* some candidates can generate a group of monsters, so simple
       count of non-null makemon() result is not sufficient */
    census = monster_census(FALSE);
    xlight = FALSE;
    while (cnt > 0) {
        mtmp = makemon(&mons[dtype], u.ux, u.uy, MM_EMIN);
        if (mtmp) {
            result++;
            /* an angel's alignment should match the summoner */
            if (dtype == PM_ANGEL) {
                mtmp->isminion = 1;
                EMIN(mtmp)->min_align = atyp;
                /* renegade if same alignment but not peaceful
                   or peaceful but different alignment */
                EMIN(mtmp)->renegade =
                    (atyp != u.ualign.type) ^ !mtmp->mpeaceful;
            }
            if (mtmp->data->mlet == S_ANGEL && !Blind) {
                /* for any 'A', 'cloud of smoke' will be 'flash of light';
                   if more than one monster is being created, that message
                   might be skipped for this monster but show 'mtmp' anyway */
                show_transient_light((struct obj *) 0, mtmp->mx, mtmp->my);
                xlight = TRUE;
                /* we don't do this for 'burst of flame' (fire elemental)
                   because those monsters become their own light source */
            }
            if (cnt == 1 && canseemon(mtmp)) {
                const char *cloud = 0,
                           *what = msummon_environ(mtmp->data, &cloud);

                pline("%s appears in a %s of %s!", Amonnam(mtmp),
                      cloud, what);
            }
        }
        cnt--;
    }
    if (xlight) {
        /* Note: if we forced --More-- here, the 'A's would be visible for
           long enough to be seen, but like with clairvoyance, some players
           would be annoyed at the disruption of having to acknowledge it */
        transient_light_cleanup();
    }

    /* how many monsters exist now compared to before? */
    if (result)
        result = monster_census(FALSE) - census;
    return result;
}
/* Summon a hostile minion of the given alignment next to the hero as
 * divine punishment; when 'talk' is set the deity announces it first. */
void
summon_minion(aligntyp alignment, boolean talk)
{
    register struct monst *mon;
    int mnum;

    switch ((int) alignment) {
    case A_LAWFUL:
        mnum = lminion();
        break;
    case A_NEUTRAL:
        mnum = elementals[rn2(SIZE(elementals))];
        break;
    case A_CHAOTIC:
    case A_NONE:
        mnum = ndemon(alignment);
        break;
    default:
        impossible("unaligned player?");
        mnum = ndemon(A_NONE);
        break;
    }
    if (mnum == NON_PM) {
        mon = 0;
    } else if (mnum == PM_ANGEL) {
        mon = makemon(&mons[mnum], u.ux, u.uy, MM_EMIN);
        if (mon) {
            mon->isminion = 1;
            EMIN(mon)->min_align = alignment;
            EMIN(mon)->renegade = FALSE;
        }
    } else if (mnum != PM_SHOPKEEPER && mnum != PM_GUARD
               && mnum != PM_ALIGNED_CLERIC && mnum != PM_HIGH_CLERIC) {
        /* This was mons[mnum].pxlth == 0 but is this restriction
           appropriate or necessary now that the structures are separate? */
        mon = makemon(&mons[mnum], u.ux, u.uy, MM_EMIN);
        if (mon) {
            mon->isminion = 1;
            EMIN(mon)->min_align = alignment;
            EMIN(mon)->renegade = FALSE;
        }
    } else {
        /* professional types keep their own extended structures */
        mon = makemon(&mons[mnum], u.ux, u.uy, NO_MM_FLAGS);
    }
    if (mon) {
        if (talk) {
            if (!Deaf)
                pline_The("voice of %s booms:", align_gname(alignment));
            else
                You_feel("%s booming voice:",
                         s_suffix(align_gname(alignment)));
            verbalize("Thou shalt pay for thine indiscretion!");
            if (canspotmon(mon))
                pline("%s appears before you.", Amonnam(mon));
            mon->mstrategy &= ~STRAT_APPEARMSG;
        }
        mon->mpeaceful = FALSE;
        /* don't call set_malign(); player was naughty */
    }
}
/* Athome: demon is in Gehennom and not a shapechanged impostor. */
#define Athome (Inhell && (mtmp->cham == NON_PM))

/* returns 1 if it won't attack. */
/* Demon lord/prince parley: demand gold for safe passage; the demon
 * either departs (return 1) or the encounter turns hostile (return 0). */
int
demon_talk(register struct monst *mtmp)
{
    long cash, demand, offer;

    /* demon-slaying artifacts preclude any negotiation */
    if (uwep && (uwep->oartifact == ART_EXCALIBUR
                 || uwep->oartifact == ART_DEMONBANE)) {
        if (canspotmon(mtmp))
            pline("%s looks very angry.", Amonnam(mtmp));
        else
            You_feel("tension building.");
        mtmp->mpeaceful = mtmp->mtame = 0;
        set_malign(mtmp);
        newsym(mtmp->mx, mtmp->my);
        return 0;
    }

    if (is_fainted()) {
        reset_faint(); /* if fainted - wake up */
    } else {
        stop_occupation();
        if (g.multi > 0) {
            nomul(0);
            unmul((char *) 0);
        }
    }

    /* Slight advantage given. */
    if (is_dprince(mtmp->data) && mtmp->minvis) {
        boolean wasunseen = !canspotmon(mtmp);

        mtmp->minvis = mtmp->perminvis = 0;
        if (wasunseen && canspotmon(mtmp)) {
            pline("%s appears before you.", Amonnam(mtmp));
            mtmp->mstrategy &= ~STRAT_APPEARMSG;
        }
        newsym(mtmp->mx, mtmp->my);
    }
    if (g.youmonst.data->mlet == S_DEMON) { /* Won't blackmail their own. */
        if (!Deaf)
            pline("%s says, \"Good hunting, %s.\"", Amonnam(mtmp),
                  flags.female ? "Sister" : "Brother");
        else if (canseemon(mtmp))
            pline("%s says something.", Amonnam(mtmp));
        if (!tele_restrict(mtmp))
            (void) rloc(mtmp, TRUE);
        return 1;
    }
    cash = money_cnt(g.invent);
    /* base demand, scaled down by charisma and by being away from home,
       and discounted for matching alignment */
    demand = 9000 + d(10, 100);
    demand = demand * 10 / ACURR(A_CHA);
    if (!Athome)
        demand /= 5;
    if (sgn(u.ualign.type) == sgn(mtmp->data->maligntyp))
        demand = demand * 9 / 10;

    /* NOTE(review): demand is always > 0 here, so the !demand test below
       (meant to catch "you have no gold") can never trigger; upstream
       NetHack derives demand from 'cash' -- verify intended behavior. */
    if (!demand || g.multi < 0) { /* you have no gold or can't move */
        mtmp->mpeaceful = 0;
        set_malign(mtmp);
        return 0;
    } else {
        /* make sure that the demand is unmeetable if the monster
           has the Amulet, preventing monster from being satisfied
           and removed from the game (along with said Amulet...) */
        /* [actually the Amulet is safe; it would be dropped when
           mongone() gets rid of the monster; force combat anyway;
           also make it unmeetable if the player is Deaf, to simplify
           handling that case as player-won't-pay] */
        if (mon_has_amulet(mtmp) || Deaf)
            /* 125: 5*25 in case hero has maximum possible charisma */
            demand = cash + (long) rn1(1000, 125);

        if (!Deaf)
            pline("%s demands %ld %s for safe passage.",
                  Amonnam(mtmp), demand, currency(demand));
        else if (canseemon(mtmp))
            pline("%s seems to be demanding something.", Amonnam(mtmp));

        offer = 0L;
        if (!Deaf && ((offer = bribe(mtmp)) >= demand)) {
            pline("%s vanishes, laughing about cowardly mortals.",
                  Amonnam(mtmp));
        } else if (offer > 0L
                   && (long) rnd(5 * ACURR(A_CHA)) > (demand - offer)) {
            /* partial payment may still satisfy a charismatic hero */
            pline("%s scowls at you menacingly, then vanishes.",
                  Amonnam(mtmp));
        } else {
            pline("%s gets angry...", Amonnam(mtmp));
            mtmp->mpeaceful = 0;
            set_malign(mtmp);
            return 0;
        }
    }
    mongone(mtmp);
    return 1;
}
/* Prompt for a gold offer to mtmp; clamps the offer to the hero's purse,
 * transfers the gold, and returns the amount actually handed over
 * (0 for refusals, negative input, or unparseable input). */
long
bribe(struct monst *mtmp)
{
    char buf[BUFSZ] = DUMMY;
    long offer;
    long umoney = money_cnt(g.invent);

    getlin("How much will you offer?", buf);
    if (sscanf(buf, "%ld", &offer) != 1)
        offer = 0L;

    /*Michael Paddon -- fix for negative offer to monster*/
    /*JAR880815 - */
    if (offer < 0L) {
        You("try to shortchange %s, but fumble.", mon_nam(mtmp));
        return 0L;
    } else if (offer == 0L) {
        You("refuse.");
        return 0L;
    } else if (offer >= umoney) {
        You("give %s all your gold.", mon_nam(mtmp));
        offer = umoney;
    } else {
        You("give %s %ld %s.", mon_nam(mtmp), offer, currency(offer));
    }
    (void) money2mon(mtmp, offer);
    g.context.botl = 1; /* gold changed; refresh status line */
    return offer;
}
/* Pick a random extant demon prince of the given alignment (A_NONE
 * accepts any); in the endgame, or after 20 failed tries, fall back to
 * a demon lord instead. */
int
dprince(aligntyp atyp)
{
    int tryct, pm;

    for (tryct = !In_endgame(&u.uz) ? 20 : 0; tryct > 0; --tryct) {
        pm = rn1(PM_DEMOGORGON + 1 - PM_ORCUS, PM_ORCUS);
        if (!(g.mvitals[pm].mvflags & G_GONE)
            && (atyp == A_NONE || sgn(mons[pm].maligntyp) == sgn(atyp)))
            return pm;
    }
    return dlord(atyp); /* approximate */
}
/* Pick a random extant demon lord of the given alignment (A_NONE
 * accepts any); in the endgame, or after 20 failed tries, fall back to
 * an ordinary demon. */
int
dlord(aligntyp atyp)
{
    int tryct, pm;

    for (tryct = !In_endgame(&u.uz) ? 20 : 0; tryct > 0; --tryct) {
        pm = rn1(PM_YEENOGHU + 1 - PM_JUIBLEX, PM_JUIBLEX);
        if (!(g.mvitals[pm].mvflags & G_GONE)
            && (atyp == A_NONE || sgn(mons[pm].maligntyp) == sgn(atyp)))
            return pm;
    }
    return ndemon(atyp); /* approximate */
}
/* create lawful (good) lord */
/* Return the Archon if any remain; otherwise fall back to a lesser
 * lawful minion. */
int
llord(void)
{
    if (!(g.mvitals[PM_ARCHON].mvflags & G_GONE))
        return PM_ARCHON;
    return lminion(); /* approximate */
}
/* Pick a random non-lord angelic minion (up to 20 tries); returns
 * NON_PM if mkclass keeps yielding lords or nothing. */
int
lminion(void)
{
    int tryct;
    struct permonst *ptr;

    for (tryct = 0; tryct < 20; tryct++) {
        ptr = mkclass(S_ANGEL, 0);
        if (ptr && !is_lord(ptr))
            return monsndx(ptr);
    }
    return NON_PM;
}
/* Pick an ordinary (non-lord, non-prince) demon of the requested
 * alignment in a single try; returns NON_PM on failure. */
int
ndemon(aligntyp atyp) /* A_NONE is used for 'any alignment' */
{
    struct permonst *ptr;

    /*
     * 3.6.2: [fixed #H2204, 22-Dec-2010, eight years later...]
     * pick a correctly aligned demon in one try. This used to
     * use mkclass() to choose a random demon type and keep trying
     * (up to 20 times) until it got one with the desired alignment.
     * mkclass_aligned() skips wrongly aligned potential candidates.
     * [The only neutral demons are djinni and mail daemon and
     * mkclass() won't pick them, but call it anyway in case either
     * aspect of that changes someday.]
     */
#if 0
    if (atyp == A_NEUTRAL)
        return NON_PM;
#endif
    ptr = mkclass_aligned(S_DEMON, 0, atyp);

    return (ptr && is_ndemon(ptr)) ? monsndx(ptr) : NON_PM;
}
/* guardian angel has been affected by conflict so is abandoning hero */
/* Remove the (optional) guardian and replace it with 2-4 hostile
 * angels adjacent to the hero. */
void
lose_guardian_angel(struct monst *mon) /* if null, angel hasn't been created yet */
{
    coord mm;
    int i;

    if (mon) {
        if (canspotmon(mon)) {
            if (!Deaf) {
                pline("%s rebukes you, saying:", Monnam(mon));
                verbalize("Since you desire conflict, have some more!");
            } else {
                pline("%s vanishes!", Monnam(mon));
            }
        }
        mongone(mon);
    }
    /* create 2 to 4 hostile angels to replace the lost guardian */
    for (i = rn1(3, 2); i > 0; --i) {
        mm.x = u.ux;
        mm.y = u.uy;
        if (enexto(&mm, mm.x, mm.y, &mons[PM_ANGEL]))
            (void) mk_roamer(&mons[PM_ANGEL], u.ualign.type, mm.x, mm.y,
                             FALSE);
    }
}
/* just entered the Astral Plane; receive tame guardian angel if worthy */
/* Worthy means alignment record > 8; Conflict instead spawns hostile
 * angels.  The guardian is leveled up and equipped (blessed weapon,
 * reflection) to survive endgame opposition. */
void
gain_guardian_angel(void)
{
    struct monst *mtmp;
    struct obj *otmp;
    coord mm;

    Hear_again(); /* attempt to cure any deafness now (divine
                     message will be heard even if that fails) */
    if (Conflict) {
        if (!Deaf)
            pline("A voice booms:");
        else
            You_feel("a booming voice:");
        verbalize("Thy desire for conflict shall be fulfilled!");
        /* send in some hostile angels instead */
        lose_guardian_angel((struct monst *) 0);
    } else if (u.ualign.record > 8) { /* fervent */
        if (!Deaf)
            pline("A voice whispers:");
        else
            You_feel("a soft voice:");
        verbalize("Thou hast been worthy of me!");
        mm.x = u.ux;
        mm.y = u.uy;
        if (enexto(&mm, mm.x, mm.y, &mons[PM_ANGEL])
            && (mtmp = mk_roamer(&mons[PM_ANGEL], u.ualign.type, mm.x, mm.y,
                                 TRUE)) != 0) {
            mtmp->mstrategy &= ~STRAT_APPEARMSG;
            /* guardian angel -- the one case mtame doesn't imply an
             * edog structure, so we don't want to call tamedog().
             * [Note: this predates mon->mextra which allows a monster
             * to have both emin and edog at the same time.]
             */
            mtmp->mtame = 10;
            /* for 'hilite_pet'; after making tame, before next message */
            newsym(mtmp->mx, mtmp->my);
            if (!Blind)
                pline("An angel appears near you.");
            else
                You_feel("the presence of a friendly angel near you.");
            /* make him strong enough vs. endgame foes */
            mtmp->m_lev = rn1(8, 15);
            mtmp->mhp = mtmp->mhpmax =
                d((int) mtmp->m_lev, 10) + 30 + rnd(30);
            if ((otmp = select_hwep(mtmp)) == 0) {
                otmp = mksobj(SABER, FALSE, FALSE);
                if (mpickobj(mtmp, otmp))
                    panic("merged weapon?");
            }
            bless(otmp);
            if (otmp->spe < 4)
                otmp->spe += rnd(4);
            if ((otmp = which_armor(mtmp, W_ARMS)) == 0
                || otmp->otyp != SHIELD_OF_REFLECTION) {
                (void) mongets(mtmp, AMULET_OF_REFLECTION);
                m_dowear(mtmp, TRUE);
            }
        }
    }
}
/*minion.c*/
|
930169.c | /*Program to traverse a linked list*/
#include <stdio.h>
/* Build a three-node singly linked list on the stack and print each
   node's value by following next pointers until the terminating null. */
int main (void)
{
    struct entry {
        int value;
        struct entry *next;
    };

    /* link the nodes at initialization time, last node first */
    struct entry third  = { 300, (struct entry *) 0 };
    struct entry second = { 200, &third };
    struct entry first  = { 100, &second };

    for (const struct entry *node = &first; node != (struct entry *) 0;
         node = node->next)
        printf ("%i\n", node->value);

    return 0;
}
|
463499.c | /*
* Copyright (c) 2003, 2007-8 Matteo Frigo
* Copyright (c) 2003, 2007-8 Massachusetts Institute of Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Sat Nov 15 20:47:30 EST 2008 */
#include "codelet-dft.h"
#ifdef HAVE_FMA
/* Generated by: ../../../genfft/gen_twiddle_c -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -n 2 -name t1fv_2 -include t1f.h */
/*
* This function contains 3 FP additions, 2 FP multiplications,
* (or, 3 additions, 2 multiplications, 0 fused multiply/add),
* 5 stack variables, 0 constants, and 4 memory accesses
*/
#include "t1f.h"
/* Size-2 decimation-in-time twiddle codelet (SIMD, FMA build).
   Auto-generated by genfft -- regenerate instead of hand-editing.
   Each iteration: vector-load both inputs, twiddle the second (BYTWJ),
   then store the butterfly sum and difference back in place. */
static void t1fv_2(R *ri, R *ii, const R *W, stride rs, INT mb, INT me, INT ms)
{
    INT m;
    R *x;
    x = ri;
    for (m = mb, W = W + (mb * ((TWVL / VL) * 2)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 2), MAKE_VOLATILE_STRIDE(rs)) {
        V T1, T2, T3;
        T1 = LD(&(x[0]), ms, &(x[0]));
        T2 = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)]));
        T3 = BYTWJ(&(W[0]), T2);
        ST(&(x[0]), VADD(T1, T3), ms, &(x[0]));
        ST(&(x[WS(rs, 1)]), VSUB(T1, T3), ms, &(x[WS(rs, 1)]));
    }
}
/* Twiddle-instruction table and codelet descriptor for the planner. */
static const tw_instr twinstr[] = {
    VTW(0, 1),
    {TW_NEXT, VL, 0}
};
static const ct_desc desc = { 2, "t1fv_2", twinstr, &GENUS, {3, 2, 0, 0}, 0, 0, 0 };
/* Register this codelet with the planner as a DIT twiddle kernel. */
void X(codelet_t1fv_2) (planner *p) {
    X(kdft_dit_register) (p, t1fv_2, &desc);
}
#else /* HAVE_FMA */
/* Generated by: ../../../genfft/gen_twiddle_c -simd -compact -variables 4 -pipeline-latency 8 -n 2 -name t1fv_2 -include t1f.h */
/*
* This function contains 3 FP additions, 2 FP multiplications,
* (or, 3 additions, 2 multiplications, 0 fused multiply/add),
* 5 stack variables, 0 constants, and 4 memory accesses
*/
#include "t1f.h"
/* Size-2 decimation-in-time twiddle codelet (SIMD, non-FMA build).
   Auto-generated by genfft -- regenerate instead of hand-editing.
   Identical math to the FMA variant; only instruction scheduling
   (store order, temporary declaration order) differs. */
static void t1fv_2(R *ri, R *ii, const R *W, stride rs, INT mb, INT me, INT ms)
{
    INT m;
    R *x;
    x = ri;
    for (m = mb, W = W + (mb * ((TWVL / VL) * 2)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 2), MAKE_VOLATILE_STRIDE(rs)) {
        V T1, T3, T2;
        T1 = LD(&(x[0]), ms, &(x[0]));
        T2 = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)]));
        T3 = BYTWJ(&(W[0]), T2);
        ST(&(x[WS(rs, 1)]), VSUB(T1, T3), ms, &(x[WS(rs, 1)]));
        ST(&(x[0]), VADD(T1, T3), ms, &(x[0]));
    }
}
/* Twiddle-instruction table and codelet descriptor for the planner. */
static const tw_instr twinstr[] = {
    VTW(0, 1),
    {TW_NEXT, VL, 0}
};
static const ct_desc desc = { 2, "t1fv_2", twinstr, &GENUS, {3, 2, 0, 0}, 0, 0, 0 };
/* Register this codelet with the planner as a DIT twiddle kernel. */
void X(codelet_t1fv_2) (planner *p) {
    X(kdft_dit_register) (p, t1fv_2, &desc);
}
#endif /* HAVE_FMA */
|
764832.c |
#line 1 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
#include <assert.h>
#include <ruby.h>
#if defined(_WIN32)
#include <stddef.h>
#endif
#ifdef HAVE_RUBY_RE_H
#include <ruby/re.h>
#else
#include <re.h>
#endif
#ifdef HAVE_RUBY_ENCODING_H
#include <ruby/encoding.h>
#define ENCODED_STR_NEW(ptr, len) \
rb_enc_str_new(ptr, len, rb_utf8_encoding())
#else
#define ENCODED_STR_NEW(ptr, len) \
rb_str_new(ptr, len)
#endif
#ifndef RSTRING_PTR
#define RSTRING_PTR(s) (RSTRING(s)->ptr)
#endif
#ifndef RSTRING_LEN
#define RSTRING_LEN(s) (RSTRING(s)->len)
#endif
#define DATA_GET(FROM, TYPE, NAME) \
Data_Get_Struct(FROM, TYPE, NAME); \
if (NAME == NULL) { \
rb_raise(rb_eArgError, "NULL found for " # NAME " when it shouldn't be."); \
}
/* Mutable state threaded through the Ragel-generated Gherkin scanner.
   The size_t fields are byte offsets into the input buffer ('data' in
   the scan loop); see the PTR_TO/LEN macros below that decode them.
   NOTE(review): individual field roles are inferred from those macros
   and the .rl grammar -- confirm against ragel/i18n/hu.c.rl. */
typedef struct lexer_state {
    int content_len;      /* total length of the input being lexed */
    int line_number;      /* line reported to the listener */
    int current_line;     /* line where the current keyword started */
    int start_col;        /* column of the current keyword */
    size_t mark;          /* generic scratch mark */
    size_t keyword_start;
    size_t keyword_end;
    size_t next_keyword_start;
    size_t content_start;
    size_t content_end;
    size_t docstring_content_type_start;
    size_t docstring_content_type_end;
    size_t query_start;
    size_t last_newline;  /* offset just past the previous newline */
    size_t final_newline;
} lexer_state;
static VALUE mGherkin;
static VALUE mGherkinLexer;
static VALUE mCLexer;
static VALUE cI18nLexer;
static VALUE rb_eGherkinLexingError;
/* Offset helpers: the lexer stores byte offsets relative to "data";
 * LEN converts a (stored offset, pointer) pair to a length, MARK stores
 * the offset of pointer P into field M, PTR_TO converts back. */
#define LEN(AT, P) (P - data - lexer->AT)
#define MARK(M, P) (lexer->M = (P) - data)
#define PTR_TO(P) (data + lexer->P)
/* Emit a multiline keyword event (feature/scenario/...), then rewind p
 * to just before the recorded content end so trailing input is rescanned. */
#define STORE_KW_END_CON(EVENT) \
store_multiline_kw_con(listener, # EVENT, \
PTR_TO(keyword_start), LEN(keyword_start, PTR_TO(keyword_end - 1)), \
PTR_TO(content_start), LEN(content_start, PTR_TO(content_end)), \
lexer->current_line, lexer->start_col); \
if (lexer->content_end != 0) { \
p = PTR_TO(content_end - 1); \
} \
lexer->content_end = 0
/* Emit a tag/comment attribute spanning content_start..p. */
#define STORE_ATTR(ATTR) \
store_attr(listener, # ATTR, \
PTR_TO(content_start), LEN(content_start, p), \
lexer->line_number)
#line 254 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
/** Data **/
#line 89 "ext/gherkin_lexer_hu/gherkin_lexer_hu.c"
static const char _lexer_actions[] = {
0, 1, 0, 1, 1, 1, 2, 1,
3, 1, 4, 1, 5, 1, 6, 1,
7, 1, 8, 1, 9, 1, 10, 1,
11, 1, 12, 1, 13, 1, 16, 1,
17, 1, 18, 1, 19, 1, 20, 1,
21, 1, 22, 1, 23, 2, 1, 18,
2, 4, 5, 2, 13, 0, 2, 14,
15, 2, 17, 0, 2, 17, 2, 2,
17, 16, 2, 17, 19, 2, 18, 6,
2, 18, 7, 2, 18, 8, 2, 18,
9, 2, 18, 10, 2, 18, 16, 2,
20, 21, 2, 22, 0, 2, 22, 2,
2, 22, 16, 2, 22, 19, 3, 3,
14, 15, 3, 5, 14, 15, 3, 11,
14, 15, 3, 12, 14, 15, 3, 13,
14, 15, 3, 14, 15, 18, 3, 17,
0, 11, 3, 17, 14, 15, 4, 1,
14, 15, 18, 4, 4, 5, 14, 15,
4, 17, 0, 14, 15, 5, 17, 0,
11, 14, 15
};
static const short _lexer_key_offsets[] = {
0, 0, 19, 20, 21, 22, 24, 26,
44, 45, 46, 48, 50, 55, 60, 65,
70, 74, 78, 80, 81, 82, 83, 84,
85, 86, 87, 88, 89, 90, 91, 92,
93, 94, 95, 100, 107, 112, 115, 116,
117, 118, 119, 120, 121, 123, 124, 125,
126, 127, 128, 129, 130, 131, 132, 133,
134, 135, 136, 137, 138, 139, 140, 141,
142, 143, 144, 146, 147, 148, 149, 150,
151, 152, 153, 154, 155, 156, 171, 173,
175, 177, 195, 197, 198, 199, 200, 201,
202, 203, 204, 205, 206, 221, 223, 225,
227, 229, 231, 233, 235, 237, 239, 241,
243, 245, 247, 249, 251, 253, 255, 259,
261, 263, 265, 267, 269, 271, 274, 276,
278, 280, 282, 284, 286, 288, 290, 292,
294, 296, 298, 300, 302, 304, 306, 308,
310, 312, 314, 316, 319, 321, 323, 325,
327, 329, 331, 333, 335, 337, 339, 341,
343, 345, 347, 349, 351, 353, 355, 357,
359, 360, 361, 362, 363, 364, 365, 366,
367, 368, 369, 370, 381, 383, 385, 387,
389, 391, 393, 395, 397, 399, 401, 403,
405, 407, 409, 411, 413, 415, 417, 419,
421, 423, 425, 427, 429, 431, 433, 435,
438, 440, 442, 444, 446, 448, 450, 452,
454, 456, 458, 460, 462, 464, 466, 468,
470, 472, 474, 476, 478, 480, 482, 484,
486, 488, 490, 492, 494, 496, 498, 499,
500, 501, 502, 503, 504, 505, 506, 507,
508, 509, 510, 511, 518, 520, 522, 524,
526, 528, 530, 532, 534, 536, 540, 546,
549, 551, 557, 575, 577, 579, 581, 583,
585, 587, 589, 591, 593, 595, 597, 599,
601, 603, 607, 609, 611, 613, 615, 617,
619, 622, 624, 626, 628, 630, 632, 634,
636, 638, 640, 642, 644, 646, 648, 650,
652, 654, 656, 658, 660, 662, 664, 666,
668, 670, 672, 674, 676, 678, 680, 682,
684, 686, 688, 690, 691, 692, 707, 709,
711, 713, 715, 717, 719, 721, 723, 725,
727, 729, 731, 733, 735, 737, 739, 741,
745, 747, 749, 751, 753, 755, 757, 760,
762, 764, 766, 768, 770, 772, 774, 776,
778, 780, 782, 784, 786, 788, 790, 792,
794, 796, 798, 800, 802, 805, 807, 809,
811, 813, 815, 817, 819, 821, 824, 826,
828, 830, 832, 834, 836, 838, 840, 842,
844, 846, 848, 850, 852, 854, 856, 858,
859, 860
};
static const char _lexer_trans_keys[] = {
-61, -17, 10, 32, 34, 35, 37, 42,
64, 65, 68, 70, 72, 74, 77, 80,
124, 9, 13, -119, 115, 32, 10, 13,
10, 13, -61, 10, 32, 34, 35, 37,
42, 64, 65, 68, 70, 72, 74, 77,
80, 124, 9, 13, 34, 34, 10, 13,
10, 13, 10, 32, 34, 9, 13, 10,
32, 34, 9, 13, 10, 32, 34, 9,
13, 10, 32, 34, 9, 13, 10, 32,
9, 13, 10, 32, 9, 13, 10, 13,
10, 95, 70, 69, 65, 84, 85, 82,
69, 95, 69, 78, 68, 95, 37, 13,
32, 64, 9, 10, 9, 10, 13, 32,
64, 11, 12, 10, 32, 64, 9, 13,
100, 107, 109, 111, 116, 116, 107, 111,
114, 101, 105, 110, 110, 121, 105, 98,
101, 110, 101, 111, 114, 103, 97, 116,
-61, -77, 107, -61, -74, 110, 121, 118,
32, 58, 118, -61, -95, 122, 108, 97,
116, 58, 10, 10, -61, 10, 32, 35,
37, 42, 64, 65, 68, 70, 72, 74,
77, 9, 13, -119, 10, 10, 115, 10,
32, -61, 10, 32, 34, 35, 37, 42,
64, 65, 68, 70, 72, 74, 77, 80,
124, 9, 13, -61, 97, -95, 116, 116,
-61, -87, 114, 58, 10, 10, -61, 10,
32, 35, 37, 42, 64, 65, 68, 70,
72, 74, 77, 9, 13, -119, 10, 10,
115, 10, 32, 10, 95, 10, 70, 10,
69, 10, 65, 10, 84, 10, 85, 10,
82, 10, 69, 10, 95, 10, 69, 10,
78, 10, 68, 10, 95, 10, 37, 10,
100, 107, 109, 10, 111, 10, 116, 10,
116, 10, 107, 10, 111, 10, 114, 10,
101, 105, 10, 110, 10, 110, 10, 121,
10, 105, 10, 98, 10, 101, 10, 110,
10, 101, 10, 111, 10, 114, 10, 103,
10, 97, 10, 116, -61, 10, -77, 10,
10, 107, -61, 10, -74, 10, 10, 110,
10, 121, 10, 118, 10, 32, 58, 10,
118, -61, 10, -95, 10, 10, 122, 10,
108, 10, 97, 10, 116, 10, 58, 10,
97, 10, 101, 10, 108, 10, 108, 10,
101, 10, 109, 10, 122, -59, 10, -111,
10, 10, 97, 10, 106, 10, 100, 101,
108, 108, 101, 109, 122, -59, -111, 58,
10, 10, 10, 32, 35, 37, 64, 70,
72, 74, 80, 9, 13, 10, 95, 10,
70, 10, 69, 10, 65, 10, 84, 10,
85, 10, 82, 10, 69, 10, 95, 10,
69, 10, 78, 10, 68, 10, 95, 10,
37, 10, 111, 10, 114, 10, 103, 10,
97, 10, 116, -61, 10, -77, 10, 10,
107, -61, 10, -74, 10, 10, 110, 10,
121, 10, 118, 10, 32, 58, 10, 118,
-61, 10, -95, 10, 10, 122, 10, 108,
10, 97, 10, 116, 10, 58, -61, 10,
-95, 10, 10, 116, 10, 116, -61, 10,
-87, 10, 10, 114, 10, 101, 10, 108,
10, 108, 10, 101, 10, 109, 10, 122,
-59, 10, -111, 10, -61, 10, -87, 10,
10, 108, 10, 100, -61, 10, -95, 10,
10, 107, 97, 106, 100, -61, -87, 108,
100, -61, -95, 107, 58, 10, 10, 10,
32, 35, 74, 124, 9, 13, 10, 101,
10, 108, 10, 108, 10, 101, 10, 109,
10, 122, -59, 10, -111, 10, 10, 58,
32, 124, 9, 13, 10, 32, 92, 124,
9, 13, 10, 92, 124, 10, 92, 10,
32, 92, 124, 9, 13, -61, 10, 32,
34, 35, 37, 42, 64, 65, 68, 70,
72, 74, 77, 80, 124, 9, 13, 10,
95, 10, 70, 10, 69, 10, 65, 10,
84, 10, 85, 10, 82, 10, 69, 10,
95, 10, 69, 10, 78, 10, 68, 10,
95, 10, 37, 10, 100, 107, 109, 10,
111, 10, 116, 10, 116, 10, 107, 10,
111, 10, 114, 10, 101, 105, 10, 110,
10, 110, 10, 121, 10, 105, 10, 98,
10, 101, 10, 110, 10, 101, 10, 111,
10, 114, 10, 103, 10, 97, 10, 116,
-61, 10, -77, 10, 10, 107, -61, 10,
-74, 10, 10, 110, 10, 121, 10, 118,
10, 58, 10, 97, 10, 101, 10, 108,
10, 108, 10, 101, 10, 109, 10, 122,
-59, 10, -111, 10, 10, 97, 10, 106,
10, 100, 10, 10, -61, 10, 32, 35,
37, 42, 64, 65, 68, 70, 72, 74,
77, 9, 13, -119, 10, 10, 115, 10,
32, 10, 95, 10, 70, 10, 69, 10,
65, 10, 84, 10, 85, 10, 82, 10,
69, 10, 95, 10, 69, 10, 78, 10,
68, 10, 95, 10, 37, 10, 100, 107,
109, 10, 111, 10, 116, 10, 116, 10,
107, 10, 111, 10, 114, 10, 101, 105,
10, 110, 10, 110, 10, 121, 10, 105,
10, 98, 10, 101, 10, 110, 10, 101,
10, 111, 10, 114, 10, 103, 10, 97,
10, 116, -61, 10, -77, 10, 10, 107,
-61, 10, -74, 10, 10, 110, 10, 121,
10, 118, 10, 32, 58, 10, 118, -61,
10, -95, 10, 10, 122, 10, 108, 10,
97, 10, 116, 10, 58, -61, 10, 97,
-95, 10, 10, 116, 10, 116, -61, 10,
-87, 10, 10, 114, 10, 101, 10, 108,
10, 108, 10, 101, 10, 109, 10, 122,
-59, 10, -111, 10, 10, 97, 10, 106,
10, 100, -69, -65, 0
};
static const char _lexer_single_lengths[] = {
0, 17, 1, 1, 1, 2, 2, 16,
1, 1, 2, 2, 3, 3, 3, 3,
2, 2, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 3, 5, 3, 3, 1, 1,
1, 1, 1, 1, 2, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 13, 2, 2,
2, 16, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 13, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 4, 2,
2, 2, 2, 2, 2, 3, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 3, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 9, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 3,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 5, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 4, 3,
2, 4, 16, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 4, 2, 2, 2, 2, 2, 2,
3, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 1, 1, 13, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 4,
2, 2, 2, 2, 2, 2, 3, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 3, 2, 2, 2,
2, 2, 2, 2, 2, 3, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 1,
1, 0
};
static const char _lexer_range_lengths[] = {
0, 1, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 1, 0,
0, 1, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0
};
static const short _lexer_index_offsets[] = {
0, 0, 19, 21, 23, 25, 28, 31,
49, 51, 53, 56, 59, 64, 69, 74,
79, 83, 87, 90, 92, 94, 96, 98,
100, 102, 104, 106, 108, 110, 112, 114,
116, 118, 120, 125, 132, 137, 141, 143,
145, 147, 149, 151, 153, 156, 158, 160,
162, 164, 166, 168, 170, 172, 174, 176,
178, 180, 182, 184, 186, 188, 190, 192,
194, 196, 198, 201, 203, 205, 207, 209,
211, 213, 215, 217, 219, 221, 236, 239,
242, 245, 263, 266, 268, 270, 272, 274,
276, 278, 280, 282, 284, 299, 302, 305,
308, 311, 314, 317, 320, 323, 326, 329,
332, 335, 338, 341, 344, 347, 350, 355,
358, 361, 364, 367, 370, 373, 377, 380,
383, 386, 389, 392, 395, 398, 401, 404,
407, 410, 413, 416, 419, 422, 425, 428,
431, 434, 437, 440, 444, 447, 450, 453,
456, 459, 462, 465, 468, 471, 474, 477,
480, 483, 486, 489, 492, 495, 498, 501,
504, 506, 508, 510, 512, 514, 516, 518,
520, 522, 524, 526, 537, 540, 543, 546,
549, 552, 555, 558, 561, 564, 567, 570,
573, 576, 579, 582, 585, 588, 591, 594,
597, 600, 603, 606, 609, 612, 615, 618,
622, 625, 628, 631, 634, 637, 640, 643,
646, 649, 652, 655, 658, 661, 664, 667,
670, 673, 676, 679, 682, 685, 688, 691,
694, 697, 700, 703, 706, 709, 712, 714,
716, 718, 720, 722, 724, 726, 728, 730,
732, 734, 736, 738, 745, 748, 751, 754,
757, 760, 763, 766, 769, 772, 776, 782,
786, 789, 795, 813, 816, 819, 822, 825,
828, 831, 834, 837, 840, 843, 846, 849,
852, 855, 860, 863, 866, 869, 872, 875,
878, 882, 885, 888, 891, 894, 897, 900,
903, 906, 909, 912, 915, 918, 921, 924,
927, 930, 933, 936, 939, 942, 945, 948,
951, 954, 957, 960, 963, 966, 969, 972,
975, 978, 981, 984, 986, 988, 1003, 1006,
1009, 1012, 1015, 1018, 1021, 1024, 1027, 1030,
1033, 1036, 1039, 1042, 1045, 1048, 1051, 1054,
1059, 1062, 1065, 1068, 1071, 1074, 1077, 1081,
1084, 1087, 1090, 1093, 1096, 1099, 1102, 1105,
1108, 1111, 1114, 1117, 1120, 1123, 1126, 1129,
1132, 1135, 1138, 1141, 1144, 1148, 1151, 1154,
1157, 1160, 1163, 1166, 1169, 1172, 1176, 1179,
1182, 1185, 1188, 1191, 1194, 1197, 1200, 1203,
1206, 1209, 1212, 1215, 1218, 1221, 1224, 1227,
1229, 1231
};
static const short _lexer_trans_targs[] = {
2, 391, 7, 7, 8, 18, 20, 4,
34, 37, 52, 53, 82, 160, 230, 233,
253, 7, 0, 3, 0, 4, 0, 5,
0, 7, 19, 6, 7, 19, 6, 2,
7, 7, 8, 18, 20, 4, 34, 37,
52, 53, 82, 160, 230, 233, 253, 7,
0, 9, 0, 10, 0, 12, 11, 11,
12, 11, 11, 13, 13, 14, 13, 13,
13, 13, 14, 13, 13, 13, 13, 15,
13, 13, 13, 13, 16, 13, 13, 7,
17, 17, 0, 7, 17, 17, 0, 7,
19, 18, 7, 0, 21, 0, 22, 0,
23, 0, 24, 0, 25, 0, 26, 0,
27, 0, 28, 0, 29, 0, 30, 0,
31, 0, 32, 0, 33, 0, 393, 0,
0, 0, 0, 0, 35, 36, 7, 36,
36, 34, 35, 35, 7, 36, 34, 36,
0, 38, 41, 44, 0, 39, 0, 40,
0, 4, 0, 42, 0, 43, 0, 4,
0, 45, 41, 0, 46, 0, 47, 0,
48, 0, 49, 0, 50, 0, 51, 0,
4, 0, 4, 0, 54, 0, 55, 0,
56, 0, 57, 0, 58, 0, 59, 0,
60, 0, 61, 0, 62, 0, 63, 0,
64, 0, 65, 0, 66, 0, 67, 315,
0, 68, 0, 69, 0, 70, 0, 71,
0, 72, 0, 73, 0, 74, 0, 75,
0, 77, 76, 77, 76, 78, 77, 77,
7, 259, 80, 7, 273, 288, 289, 303,
304, 312, 77, 76, 79, 77, 76, 77,
80, 76, 77, 81, 76, 2, 7, 7,
8, 18, 20, 4, 34, 37, 52, 53,
82, 160, 230, 233, 253, 7, 0, 83,
4, 0, 84, 0, 85, 0, 86, 0,
87, 0, 88, 0, 89, 0, 90, 0,
92, 91, 92, 91, 93, 92, 92, 7,
96, 95, 7, 110, 125, 126, 148, 149,
157, 92, 91, 94, 92, 91, 92, 95,
91, 92, 81, 91, 92, 97, 91, 92,
98, 91, 92, 99, 91, 92, 100, 91,
92, 101, 91, 92, 102, 91, 92, 103,
91, 92, 104, 91, 92, 105, 91, 92,
106, 91, 92, 107, 91, 92, 108, 91,
92, 109, 91, 92, 7, 91, 92, 111,
114, 117, 91, 92, 112, 91, 92, 113,
91, 92, 95, 91, 92, 115, 91, 92,
116, 91, 92, 95, 91, 92, 118, 114,
91, 92, 119, 91, 92, 120, 91, 92,
121, 91, 92, 122, 91, 92, 123, 91,
92, 124, 91, 92, 95, 91, 92, 95,
91, 92, 127, 91, 92, 128, 91, 92,
129, 91, 92, 130, 91, 92, 131, 91,
132, 92, 91, 133, 92, 91, 92, 134,
91, 135, 92, 91, 136, 92, 91, 92,
137, 91, 92, 138, 91, 92, 139, 91,
92, 140, 81, 91, 92, 141, 91, 142,
92, 91, 143, 92, 91, 92, 144, 91,
92, 145, 91, 92, 146, 91, 92, 147,
91, 92, 81, 91, 92, 95, 91, 92,
150, 91, 92, 151, 91, 92, 152, 91,
92, 153, 91, 92, 154, 91, 92, 155,
91, 156, 92, 91, 147, 92, 91, 92,
158, 91, 92, 159, 91, 92, 95, 91,
161, 0, 162, 0, 163, 0, 164, 0,
165, 0, 166, 0, 167, 0, 168, 0,
169, 0, 171, 170, 171, 170, 171, 171,
7, 172, 7, 186, 208, 215, 223, 171,
170, 171, 173, 170, 171, 174, 170, 171,
175, 170, 171, 176, 170, 171, 177, 170,
171, 178, 170, 171, 179, 170, 171, 180,
170, 171, 181, 170, 171, 182, 170, 171,
183, 170, 171, 184, 170, 171, 185, 170,
171, 7, 170, 171, 187, 170, 171, 188,
170, 171, 189, 170, 171, 190, 170, 171,
191, 170, 192, 171, 170, 193, 171, 170,
171, 194, 170, 195, 171, 170, 196, 171,
170, 171, 197, 170, 171, 198, 170, 171,
199, 170, 171, 200, 81, 170, 171, 201,
170, 202, 171, 170, 203, 171, 170, 171,
204, 170, 171, 205, 170, 171, 206, 170,
171, 207, 170, 171, 81, 170, 209, 171,
170, 210, 171, 170, 171, 211, 170, 171,
212, 170, 213, 171, 170, 214, 171, 170,
171, 207, 170, 171, 216, 170, 171, 217,
170, 171, 218, 170, 171, 219, 170, 171,
220, 170, 171, 221, 170, 222, 171, 170,
207, 171, 170, 224, 171, 170, 225, 171,
170, 171, 226, 170, 171, 227, 170, 228,
171, 170, 229, 171, 170, 171, 207, 170,
231, 0, 232, 0, 4, 0, 234, 0,
235, 0, 236, 0, 237, 0, 238, 0,
239, 0, 240, 0, 241, 0, 243, 242,
243, 242, 243, 243, 7, 244, 7, 243,
242, 243, 245, 242, 243, 246, 242, 243,
247, 242, 243, 248, 242, 243, 249, 242,
243, 250, 242, 251, 243, 242, 252, 243,
242, 243, 81, 242, 253, 254, 253, 0,
258, 257, 256, 254, 257, 255, 0, 256,
254, 255, 0, 256, 255, 258, 257, 256,
254, 257, 255, 2, 258, 258, 8, 18,
20, 4, 34, 37, 52, 53, 82, 160,
230, 233, 253, 258, 0, 77, 260, 76,
77, 261, 76, 77, 262, 76, 77, 263,
76, 77, 264, 76, 77, 265, 76, 77,
266, 76, 77, 267, 76, 77, 268, 76,
77, 269, 76, 77, 270, 76, 77, 271,
76, 77, 272, 76, 77, 7, 76, 77,
274, 277, 280, 76, 77, 275, 76, 77,
276, 76, 77, 80, 76, 77, 278, 76,
77, 279, 76, 77, 80, 76, 77, 281,
277, 76, 77, 282, 76, 77, 283, 76,
77, 284, 76, 77, 285, 76, 77, 286,
76, 77, 287, 76, 77, 80, 76, 77,
80, 76, 77, 290, 76, 77, 291, 76,
77, 292, 76, 77, 293, 76, 77, 294,
76, 295, 77, 76, 296, 77, 76, 77,
297, 76, 298, 77, 76, 299, 77, 76,
77, 300, 76, 77, 301, 76, 77, 302,
76, 77, 81, 76, 77, 80, 76, 77,
305, 76, 77, 306, 76, 77, 307, 76,
77, 308, 76, 77, 309, 76, 77, 310,
76, 311, 77, 76, 302, 77, 76, 77,
313, 76, 77, 314, 76, 77, 80, 76,
317, 316, 317, 316, 318, 317, 317, 7,
321, 320, 7, 335, 350, 351, 373, 380,
388, 317, 316, 319, 317, 316, 317, 320,
316, 317, 81, 316, 317, 322, 316, 317,
323, 316, 317, 324, 316, 317, 325, 316,
317, 326, 316, 317, 327, 316, 317, 328,
316, 317, 329, 316, 317, 330, 316, 317,
331, 316, 317, 332, 316, 317, 333, 316,
317, 334, 316, 317, 7, 316, 317, 336,
339, 342, 316, 317, 337, 316, 317, 338,
316, 317, 320, 316, 317, 340, 316, 317,
341, 316, 317, 320, 316, 317, 343, 339,
316, 317, 344, 316, 317, 345, 316, 317,
346, 316, 317, 347, 316, 317, 348, 316,
317, 349, 316, 317, 320, 316, 317, 320,
316, 317, 352, 316, 317, 353, 316, 317,
354, 316, 317, 355, 316, 317, 356, 316,
357, 317, 316, 358, 317, 316, 317, 359,
316, 360, 317, 316, 361, 317, 316, 317,
362, 316, 317, 363, 316, 317, 364, 316,
317, 365, 81, 316, 317, 366, 316, 367,
317, 316, 368, 317, 316, 317, 369, 316,
317, 370, 316, 317, 371, 316, 317, 372,
316, 317, 81, 316, 374, 317, 320, 316,
375, 317, 316, 317, 376, 316, 317, 377,
316, 378, 317, 316, 379, 317, 316, 317,
372, 316, 317, 381, 316, 317, 382, 316,
317, 383, 316, 317, 384, 316, 317, 385,
316, 317, 386, 316, 387, 317, 316, 372,
317, 316, 317, 389, 316, 317, 390, 316,
317, 320, 316, 392, 0, 7, 0, 0,
0
};
static const unsigned char _lexer_trans_actions[] = {
29, 0, 54, 0, 5, 1, 0, 29,
1, 29, 29, 29, 29, 29, 29, 29,
35, 0, 43, 0, 43, 0, 43, 0,
43, 149, 126, 57, 110, 23, 0, 29,
54, 0, 5, 1, 0, 29, 1, 29,
29, 29, 29, 29, 29, 29, 35, 0,
43, 0, 43, 0, 43, 139, 48, 9,
106, 11, 0, 134, 45, 45, 45, 3,
122, 33, 33, 33, 0, 122, 33, 33,
33, 0, 122, 33, 0, 33, 0, 102,
7, 7, 43, 54, 0, 0, 43, 114,
25, 0, 54, 43, 0, 43, 0, 43,
0, 43, 0, 43, 0, 43, 0, 43,
0, 43, 0, 43, 0, 43, 0, 43,
0, 43, 0, 43, 0, 43, 0, 43,
43, 43, 43, 43, 0, 27, 118, 27,
27, 51, 27, 0, 54, 0, 1, 0,
43, 0, 0, 0, 43, 0, 43, 0,
43, 0, 43, 0, 43, 0, 43, 0,
43, 0, 0, 43, 0, 43, 0, 43,
0, 43, 0, 43, 0, 43, 0, 43,
0, 43, 0, 43, 0, 43, 0, 43,
0, 43, 0, 43, 0, 43, 0, 43,
0, 43, 0, 43, 0, 43, 0, 43,
0, 43, 0, 43, 0, 43, 0, 0,
43, 0, 43, 0, 43, 0, 43, 0,
43, 0, 43, 0, 43, 0, 43, 0,
43, 144, 57, 54, 0, 84, 54, 0,
78, 33, 84, 78, 84, 84, 84, 84,
84, 84, 0, 0, 0, 54, 0, 54,
0, 0, 54, 19, 0, 63, 130, 31,
60, 57, 31, 63, 57, 63, 63, 63,
63, 63, 63, 63, 66, 31, 43, 0,
0, 43, 0, 43, 0, 43, 0, 43,
0, 43, 0, 43, 0, 43, 0, 43,
144, 57, 54, 0, 84, 54, 0, 72,
33, 84, 72, 84, 84, 84, 84, 84,
84, 0, 0, 0, 54, 0, 54, 0,
0, 54, 15, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 0, 54, 0, 0, 54, 0,
0, 54, 0, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 0, 54, 15, 0, 54, 0,
0, 0, 0, 54, 0, 0, 54, 0,
0, 54, 0, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
0, 54, 0, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 0, 54, 0, 0, 54, 0,
0, 54, 0, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
0, 54, 0, 0, 54, 0, 54, 0,
0, 0, 54, 0, 0, 54, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 15, 0, 54, 0, 0, 0,
54, 0, 0, 54, 0, 54, 0, 0,
54, 0, 0, 54, 0, 0, 54, 0,
0, 54, 15, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 0, 54, 0, 0, 54, 0,
0, 0, 54, 0, 0, 54, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
0, 43, 0, 43, 0, 43, 0, 43,
0, 43, 0, 43, 0, 43, 0, 43,
0, 43, 144, 57, 54, 0, 54, 0,
69, 33, 69, 84, 84, 84, 84, 0,
0, 54, 0, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 0, 54, 0, 0, 54, 0,
0, 54, 0, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 13, 0, 54, 0, 0, 54, 0,
0, 54, 0, 0, 54, 0, 0, 54,
0, 0, 0, 54, 0, 0, 54, 0,
54, 0, 0, 0, 54, 0, 0, 54,
0, 54, 0, 0, 54, 0, 0, 54,
0, 0, 54, 0, 13, 0, 54, 0,
0, 0, 54, 0, 0, 54, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 0, 54, 13, 0, 0, 54,
0, 0, 54, 0, 54, 0, 0, 54,
0, 0, 0, 54, 0, 0, 54, 0,
54, 0, 0, 54, 0, 0, 54, 0,
0, 54, 0, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 0, 54, 0,
0, 54, 0, 0, 54, 0, 0, 54,
0, 54, 0, 0, 54, 0, 0, 0,
54, 0, 0, 54, 0, 54, 0, 0,
0, 43, 0, 43, 0, 43, 0, 43,
0, 43, 0, 43, 0, 43, 0, 43,
0, 43, 0, 43, 0, 43, 144, 57,
54, 0, 54, 0, 81, 84, 81, 0,
0, 54, 0, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 0, 0, 54, 0, 0, 54,
0, 54, 21, 0, 0, 0, 0, 43,
54, 37, 37, 87, 37, 37, 43, 0,
39, 0, 43, 0, 0, 54, 0, 0,
39, 0, 0, 96, 54, 0, 93, 90,
41, 96, 90, 96, 96, 96, 96, 96,
96, 96, 99, 0, 43, 54, 0, 0,
54, 0, 0, 54, 0, 0, 54, 0,
0, 54, 0, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 0, 54, 0, 0, 54, 0,
0, 54, 0, 0, 54, 19, 0, 54,
0, 0, 0, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 0, 54, 0, 0, 54, 0,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 0, 54, 0, 0, 54, 0,
0, 54, 0, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 0, 54, 0, 0, 54, 0,
0, 0, 54, 0, 0, 54, 0, 54,
0, 0, 0, 54, 0, 0, 54, 0,
54, 0, 0, 54, 0, 0, 54, 0,
0, 54, 19, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 0, 54, 0, 0, 54, 0,
0, 0, 54, 0, 0, 54, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
144, 57, 54, 0, 84, 54, 0, 75,
33, 84, 75, 84, 84, 84, 84, 84,
84, 0, 0, 0, 54, 0, 54, 0,
0, 54, 17, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 0, 54, 0, 0, 54, 0,
0, 54, 0, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 0, 54, 17, 0, 54, 0,
0, 0, 0, 54, 0, 0, 54, 0,
0, 54, 0, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
0, 54, 0, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 0, 54, 0, 0, 54, 0,
0, 54, 0, 0, 54, 0, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
0, 54, 0, 0, 54, 0, 54, 0,
0, 0, 54, 0, 0, 54, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 17, 0, 54, 0, 0, 0,
54, 0, 0, 54, 0, 54, 0, 0,
54, 0, 0, 54, 0, 0, 54, 0,
0, 54, 17, 0, 0, 54, 0, 0,
0, 54, 0, 54, 0, 0, 54, 0,
0, 0, 54, 0, 0, 54, 0, 54,
0, 0, 54, 0, 0, 54, 0, 0,
54, 0, 0, 54, 0, 0, 54, 0,
0, 54, 0, 0, 0, 54, 0, 0,
54, 0, 54, 0, 0, 54, 0, 0,
54, 0, 0, 0, 43, 0, 43, 0,
0
};
static const unsigned char _lexer_eof_actions[] = {
0, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43,
43, 43
};
/* Ragel machine entry points (generated): initial state, first accepting
 * state, the dead/error state, and the "main" machine entry. */
static const int lexer_start = 1;
static const int lexer_first_final = 393;
static const int lexer_error = 0;
static const int lexer_en_main = 1;
#line 258 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
static VALUE
unindent(VALUE con, int start_col)
{
/* Strip up to start_col leading tabs/spaces from every line of con,
 * mutating it in place via String#gsub!.  The pattern buffer is ample
 * for any int start_col; snprintf truncates safely if it is not. */
char pattern[32];
VALUE regex;
snprintf(pattern, sizeof(pattern), "^[\t ]{0,%d}", start_col);
regex = rb_reg_regcomp(rb_str_new2(pattern));
rb_funcall(con, rb_intern("gsub!"), 2, regex, rb_str_new2(""));
return Qnil;
}
static void
store_kw_con(VALUE listener, const char * event_name,
const char * keyword_at, size_t keyword_length,
const char * at, size_t length,
int current_line)
{
/* Fire a single-line keyword event:
 * listener.<event_name>(keyword, stripped_content, line). */
VALUE keyword = ENCODED_STR_NEW(keyword_at, keyword_length);
VALUE content = ENCODED_STR_NEW(at, length);
rb_funcall(content, rb_intern("strip!"), 0);
rb_funcall(listener, rb_intern(event_name), 3, keyword, content, INT2FIX(current_line));
}
static void
store_multiline_kw_con(VALUE listener, const char * event_name,
const char * keyword_at, size_t keyword_length,
const char * at, size_t length,
int current_line, int start_col)
{
/* Fire a multiline keyword event (feature/background/scenario/...):
 * after unindenting, the first content line becomes the name and the
 * remainder the description: listener.<event_name>(kw, name, desc, line). */
VALUE split;
VALUE con = Qnil, kw = Qnil, name = Qnil, desc = Qnil;
kw = ENCODED_STR_NEW(keyword_at, keyword_length);
con = ENCODED_STR_NEW(at, length);
unindent(con, start_col);
split = rb_str_split(con, "\n");
name = rb_funcall(split, rb_intern("shift"), 0);
desc = rb_ary_join(split, rb_str_new2( "\n" ));
if( name == Qnil )
{
name = rb_str_new2("");
}
/* BUG FIX: String#size returns a boxed Fixnum VALUE; the original code
 * compared it against the raw integer 0 (Qfalse), which can never match.
 * Compare against the boxed zero INT2FIX(0) so an empty description is
 * actually normalized to a fresh empty string as intended. */
if( rb_funcall(desc, rb_intern("size"), 0) == INT2FIX(0) )
{
desc = rb_str_new2("");
}
rb_funcall(name, rb_intern("strip!"), 0);
rb_funcall(desc, rb_intern("rstrip!"), 0);
rb_funcall(listener, rb_intern(event_name), 4, kw, name, desc, INT2FIX(current_line));
}
static void
store_attr(VALUE listener, const char * attr_type,
const char * at, size_t length,
int line)
{
/* Forward a tag/comment attribute to the listener:
 * listener.<attr_type>(value, line). */
VALUE value = ENCODED_STR_NEW(at, length);
rb_funcall(listener, rb_intern(attr_type), 2, value, INT2FIX(line));
}
static void
store_docstring_content(VALUE listener,
int start_col,
const char *type_at, size_t type_length,
const char *at, size_t length,
int current_line)
{
/* Fire a doc_string event: unindent the body, drop a trailing CR,
 * unescape \"\"\" back to triple quotes, strip the content-type line,
 * then listener.doc_string(content_type, content, line). */
VALUE con = ENCODED_STR_NEW(at, length);
VALUE con_type = ENCODED_STR_NEW(type_at, type_length);
VALUE trailing_cr;
VALUE escaped_quotes;
unindent(con, start_col);
trailing_cr = rb_reg_regcomp(rb_str_new2("\r\\Z"));
escaped_quotes = rb_reg_regcomp(rb_str_new2("\\\\\"\\\\\"\\\\\""));
rb_funcall(con, rb_intern("sub!"), 2, trailing_cr, rb_str_new2(""));
rb_funcall(con_type, rb_intern("strip!"), 0);
rb_funcall(con, rb_intern("gsub!"), 2, escaped_quotes, rb_str_new2("\"\"\""));
rb_funcall(listener, rb_intern("doc_string"), 3, con_type, con, INT2FIX(current_line));
}
/* Abort the scan by raising Gherkin's lexing-error exception, reporting
 * the line number and the offending text. */
static void
raise_lexer_error(const char * at, int line)
{
rb_raise(rb_eGherkinLexingError, "Lexing error on line %d: '%s'. See http://wiki.github.com/cucumber/gherkin/lexingerror for more information.", line, at);
}
static void lexer_init(lexer_state *lexer) {
/* Reset the scanner so the same lexer can scan a fresh document: all
 * offset marks return to 0 ("unset") and line numbering restarts at 1.
 * (query_start is intentionally not touched, matching prior behavior.) */
lexer->mark = 0;
lexer->keyword_start = 0;
lexer->keyword_end = 0;
lexer->next_keyword_start = 0;
lexer->content_start = 0;
lexer->content_end = 0;
lexer->docstring_content_type_start = 0;
lexer->docstring_content_type_end = 0;
lexer->last_newline = 0;
lexer->final_newline = 0;
lexer->content_len = 0;
lexer->start_col = 0;
lexer->line_number = 1;
}
/* Ruby allocator: wraps a freshly initialized lexer_state in a new
 * instance of klass.  Passing -1 as the free function tells Ruby to
 * release the struct with its default free when the object is GC'd. */
static VALUE CLexer_alloc(VALUE klass)
{
VALUE obj;
lexer_state *lxr = ALLOC(lexer_state);
lexer_init(lxr);
obj = Data_Wrap_Struct(klass, NULL, -1, lxr);
return obj;
}
static VALUE CLexer_init(VALUE self, VALUE listener)
{
/* #initialize(listener): remember the listener object and reset the
 * wrapped lexer_state to its pristine scanning state. */
lexer_state *lxr = NULL;
rb_iv_set(self, "@listener", listener);
DATA_GET(self, lexer_state, lxr);
lexer_init(lxr);
return self;
}
static VALUE CLexer_scan(VALUE self, VALUE input)
{
VALUE input_copy;
char *data;
size_t len;
VALUE listener = rb_iv_get(self, "@listener");
lexer_state *lexer;
lexer = NULL;
DATA_GET(self, lexer_state, lexer);
input_copy = rb_str_dup(input);
rb_str_append(input_copy, rb_str_new2("\n%_FEATURE_END_%"));
data = RSTRING_PTR(input_copy);
len = RSTRING_LEN(input_copy);
if (len == 0) {
rb_raise(rb_eGherkinLexingError, "No content to lex.");
} else {
const char *p, *pe, *eof;
int cs = 0;
VALUE current_row = Qnil;
p = data;
pe = data + len;
eof = pe;
assert(*pe == '\0' && "pointer does not end on NULL");
#line 980 "ext/gherkin_lexer_hu/gherkin_lexer_hu.c"
{
cs = lexer_start;
}
#line 425 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
#line 987 "ext/gherkin_lexer_hu/gherkin_lexer_hu.c"
{
int _klen;
unsigned int _trans;
const char *_acts;
unsigned int _nacts;
const char *_keys;
if ( p == pe )
goto _test_eof;
if ( cs == 0 )
goto _out;
_resume:
_keys = _lexer_trans_keys + _lexer_key_offsets[cs];
_trans = _lexer_index_offsets[cs];
_klen = _lexer_single_lengths[cs];
if ( _klen > 0 ) {
const char *_lower = _keys;
const char *_mid;
const char *_upper = _keys + _klen - 1;
while (1) {
if ( _upper < _lower )
break;
_mid = _lower + ((_upper-_lower) >> 1);
if ( (*p) < *_mid )
_upper = _mid - 1;
else if ( (*p) > *_mid )
_lower = _mid + 1;
else {
_trans += (_mid - _keys);
goto _match;
}
}
_keys += _klen;
_trans += _klen;
}
_klen = _lexer_range_lengths[cs];
if ( _klen > 0 ) {
const char *_lower = _keys;
const char *_mid;
const char *_upper = _keys + (_klen<<1) - 2;
while (1) {
if ( _upper < _lower )
break;
_mid = _lower + (((_upper-_lower) >> 1) & ~1);
if ( (*p) < _mid[0] )
_upper = _mid - 2;
else if ( (*p) > _mid[1] )
_lower = _mid + 2;
else {
_trans += ((_mid - _keys)>>1);
goto _match;
}
}
_trans += _klen;
}
_match:
cs = _lexer_trans_targs[_trans];
if ( _lexer_trans_actions[_trans] == 0 )
goto _again;
_acts = _lexer_actions + _lexer_trans_actions[_trans];
_nacts = (unsigned int) *_acts++;
while ( _nacts-- > 0 )
{
switch ( *_acts++ )
{
case 0:
#line 83 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
MARK(content_start, p);
lexer->current_line = lexer->line_number;
lexer->start_col = lexer->content_start - lexer->last_newline - (lexer->keyword_end - lexer->keyword_start) + 2;
}
break;
case 1:
#line 89 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
MARK(content_start, p);
}
break;
case 2:
#line 93 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
lexer->current_line = lexer->line_number;
lexer->start_col = p - data - lexer->last_newline;
}
break;
case 3:
#line 98 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
int len = LEN(content_start, PTR_TO(final_newline));
int type_len = LEN(docstring_content_type_start, PTR_TO(docstring_content_type_end));
if (len < 0) len = 0;
if (type_len < 0) len = 0;
store_docstring_content(listener, lexer->start_col, PTR_TO(docstring_content_type_start), type_len, PTR_TO(content_start), len, lexer->current_line);
}
break;
case 4:
#line 108 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
MARK(docstring_content_type_start, p);
}
break;
case 5:
#line 112 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
MARK(docstring_content_type_end, p);
}
break;
case 6:
#line 116 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
STORE_KW_END_CON(feature);
}
break;
case 7:
#line 120 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
STORE_KW_END_CON(background);
}
break;
case 8:
#line 124 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
STORE_KW_END_CON(scenario);
}
break;
case 9:
#line 128 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
STORE_KW_END_CON(scenario_outline);
}
break;
case 10:
#line 132 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
STORE_KW_END_CON(examples);
}
break;
case 11:
#line 136 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
store_kw_con(listener, "step",
PTR_TO(keyword_start), LEN(keyword_start, PTR_TO(keyword_end)),
PTR_TO(content_start), LEN(content_start, p),
lexer->current_line);
}
break;
case 12:
#line 143 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
STORE_ATTR(comment);
lexer->mark = 0;
}
break;
case 13:
#line 148 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
STORE_ATTR(tag);
lexer->mark = 0;
}
break;
case 14:
#line 153 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
lexer->line_number += 1;
MARK(final_newline, p);
}
break;
case 15:
#line 158 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
MARK(last_newline, p + 1);
}
break;
case 16:
#line 162 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
if (lexer->mark == 0) {
MARK(mark, p);
}
}
break;
case 17:
#line 168 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
MARK(keyword_end, p);
MARK(keyword_start, PTR_TO(mark));
MARK(content_start, p + 1);
lexer->mark = 0;
}
break;
case 18:
#line 175 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
MARK(content_end, p);
}
break;
case 19:
#line 179 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
p = p - 1;
lexer->current_line = lexer->line_number;
current_row = rb_ary_new();
}
break;
case 20:
#line 185 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
MARK(content_start, p);
}
break;
case 21:
#line 189 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
VALUE re_pipe, re_newline, re_backslash;
VALUE con = ENCODED_STR_NEW(PTR_TO(content_start), LEN(content_start, p));
rb_funcall(con, rb_intern("strip!"), 0);
re_pipe = rb_reg_regcomp(rb_str_new2("\\\\\\|"));
re_newline = rb_reg_regcomp(rb_str_new2("\\\\n"));
re_backslash = rb_reg_regcomp(rb_str_new2("\\\\\\\\"));
rb_funcall(con, rb_intern("gsub!"), 2, re_pipe, rb_str_new2("|"));
rb_funcall(con, rb_intern("gsub!"), 2, re_newline, rb_str_new2("\n"));
rb_funcall(con, rb_intern("gsub!"), 2, re_backslash, rb_str_new2("\\"));
rb_ary_push(current_row, con);
}
break;
case 22:
#line 203 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
rb_funcall(listener, rb_intern("row"), 2, current_row, INT2FIX(lexer->current_line));
}
break;
case 23:
#line 207 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
int line;
if (cs < lexer_first_final) {
size_t count = 0;
VALUE newstr_val;
char *newstr;
int newstr_count = 0;
size_t len;
const char *buff;
if (lexer->last_newline != 0) {
len = LEN(last_newline, eof);
buff = PTR_TO(last_newline);
} else {
len = strlen(data);
buff = data;
}
// Allocate as a ruby string so that it gets cleaned up by GC
newstr_val = rb_str_new(buff, len);
newstr = RSTRING_PTR(newstr_val);
for (count = 0; count < len; count++) {
if(buff[count] == 10) {
newstr[newstr_count] = '\0'; // terminate new string at first newline found
break;
} else {
if (buff[count] == '%') {
newstr[newstr_count++] = buff[count];
newstr[newstr_count] = buff[count];
} else {
newstr[newstr_count] = buff[count];
}
}
newstr_count++;
}
line = lexer->line_number;
lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
raise_lexer_error(newstr, line);
} else {
rb_funcall(listener, rb_intern("eof"), 0);
}
}
break;
#line 1277 "ext/gherkin_lexer_hu/gherkin_lexer_hu.c"
}
}
_again:
if ( cs == 0 )
goto _out;
if ( ++p != pe )
goto _resume;
_test_eof: {}
if ( p == eof )
{
const char *__acts = _lexer_actions + _lexer_eof_actions[cs];
unsigned int __nacts = (unsigned int) *__acts++;
while ( __nacts-- > 0 ) {
switch ( *__acts++ ) {
case 23:
#line 207 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
{
int line;
if (cs < lexer_first_final) {
size_t count = 0;
VALUE newstr_val;
char *newstr;
int newstr_count = 0;
size_t len;
const char *buff;
if (lexer->last_newline != 0) {
len = LEN(last_newline, eof);
buff = PTR_TO(last_newline);
} else {
len = strlen(data);
buff = data;
}
// Allocate as a ruby string so that it gets cleaned up by GC
newstr_val = rb_str_new(buff, len);
newstr = RSTRING_PTR(newstr_val);
for (count = 0; count < len; count++) {
if(buff[count] == 10) {
newstr[newstr_count] = '\0'; // terminate new string at first newline found
break;
} else {
if (buff[count] == '%') {
newstr[newstr_count++] = buff[count];
newstr[newstr_count] = buff[count];
} else {
newstr[newstr_count] = buff[count];
}
}
newstr_count++;
}
line = lexer->line_number;
lexer_init(lexer); // Re-initialize so we can scan again with the same lexer
raise_lexer_error(newstr, line);
} else {
rb_funcall(listener, rb_intern("eof"), 0);
}
}
break;
#line 1340 "ext/gherkin_lexer_hu/gherkin_lexer_hu.c"
}
}
}
_out: {}
}
#line 426 "/Users/ahellesoy/github/gherkin/tasks/../ragel/i18n/hu.c.rl"
assert(p <= pe && "data overflow after parsing execute");
assert(lexer->content_start <= len && "content starts after data end");
assert(lexer->mark < len && "mark is after data end");
// Reset lexer by re-initializing the whole thing
lexer_init(lexer);
if (cs == lexer_error) {
rb_raise(rb_eGherkinLexingError, "Invalid format, lexing fails.");
} else {
return Qtrue;
}
}
}
/*
 * Ruby extension entry point, invoked automatically by the interpreter
 * when the shared object is required.  Registers the Hungarian ("hu")
 * Gherkin lexer class Gherkin::CLexer::Hu with an allocator plus two
 * instance methods: initialize (1 arg — presumably the listener; confirm
 * against CLexer_init) and scan (1 arg — the feature text to lex).
 */
void Init_gherkin_lexer_hu()
{
  /* Resolve/define the module hierarchy: Gherkin, Gherkin::Lexer, Gherkin::CLexer */
  mGherkin = rb_define_module("Gherkin");
  mGherkinLexer = rb_define_module_under(mGherkin, "Lexer");
  /* LexingError is declared on the Ruby side; look it up for later raises */
  rb_eGherkinLexingError = rb_const_get(mGherkinLexer, rb_intern("LexingError"));
  mCLexer = rb_define_module_under(mGherkin, "CLexer");
  cI18nLexer = rb_define_class_under(mCLexer, "Hu", rb_cObject);
  rb_define_alloc_func(cI18nLexer, CLexer_alloc);
  rb_define_method(cI18nLexer, "initialize", CLexer_init, 1);
  rb_define_method(cI18nLexer, "scan", CLexer_scan, 1);
}
|
284529.c | /*
* Elliptic curve J-PAKE
*
* Copyright (C) 2006-2015, ARM Limited, All Rights Reserved
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of mbed TLS (https://tls.mbed.org)
*/
/*
* References in the code are to the Thread v1.0 Specification,
* available to members of the Thread Group http://threadgroup.org/
*/
#if !defined(MBEDTLS_CONFIG_FILE)
#include "mbedtls/config.h"
#else
#include MBEDTLS_CONFIG_FILE
#endif
#if defined(MBEDTLS_ECJPAKE_C)
#include "mbedtls/ecjpake.h"
#include "mbedtls/platform_util.h"
#include <string.h>
#if !defined(MBEDTLS_ECJPAKE_ALT)
/* Parameter validation macros based on platform_util.h */
#define ECJPAKE_VALIDATE_RET( cond ) \
MBEDTLS_INTERNAL_VALIDATE_RET( cond, MBEDTLS_ERR_ECP_BAD_INPUT_DATA )
#define ECJPAKE_VALIDATE( cond ) \
MBEDTLS_INTERNAL_VALIDATE( cond )
/*
 * Convert a mbedtls_ecjpake_role to an identifier string.
 * ctx->role is used directly as the index, and ID_PEER flips it with
 * (1 - role), so the two role values must be 0 and 1.
 */
static const char * const ecjpake_id[] = {
"client",
"server"
};
/* Identity string fed into the ZKP hash: ours, and the peer's */
#define ID_MINE ( ecjpake_id[ ctx->role ] )
#define ID_PEER ( ecjpake_id[ 1 - ctx->role ] )
/*
 * Initialize a context to a safe empty state.
 * No allocation happens here: the group, points and MPIs are merely
 * reset so that a later mbedtls_ecjpake_free() is always valid.
 */
void mbedtls_ecjpake_init( mbedtls_ecjpake_context *ctx )
{
    ECJPAKE_VALIDATE( ctx != NULL );

    ctx->md_info      = NULL;
    ctx->point_format = MBEDTLS_ECP_PF_UNCOMPRESSED;

    mbedtls_ecp_group_init( &ctx->grp );

    /* Our public values and the peer's */
    mbedtls_ecp_point_init( &ctx->Xm1 );
    mbedtls_ecp_point_init( &ctx->Xm2 );
    mbedtls_ecp_point_init( &ctx->Xp1 );
    mbedtls_ecp_point_init( &ctx->Xp2 );
    mbedtls_ecp_point_init( &ctx->Xp );

    /* Our private values and the shared secret */
    mbedtls_mpi_init( &ctx->xm1 );
    mbedtls_mpi_init( &ctx->xm2 );
    mbedtls_mpi_init( &ctx->s );
}
/*
 * Free a context.  NULL is accepted and ignored.
 */
void mbedtls_ecjpake_free( mbedtls_ecjpake_context *ctx )
{
    if( ctx == NULL )
        return;

    ctx->md_info = NULL;

    mbedtls_ecp_group_free( &ctx->grp );

    /* Release secret material first (MPIs), then the points */
    mbedtls_mpi_free( &ctx->xm1 );
    mbedtls_mpi_free( &ctx->xm2 );
    mbedtls_mpi_free( &ctx->s );

    mbedtls_ecp_point_free( &ctx->Xm1 );
    mbedtls_ecp_point_free( &ctx->Xm2 );
    mbedtls_ecp_point_free( &ctx->Xp1 );
    mbedtls_ecp_point_free( &ctx->Xp2 );
    mbedtls_ecp_point_free( &ctx->Xp );
}
/*
 * Setup context: select role, hash and curve, and store the pre-shared
 * secret as an MPI.  On any failure the context is freed before
 * returning, so the caller need not clean up separately.
 */
int mbedtls_ecjpake_setup( mbedtls_ecjpake_context *ctx,
                           mbedtls_ecjpake_role role,
                           mbedtls_md_type_t hash,
                           mbedtls_ecp_group_id curve,
                           const unsigned char *secret,
                           size_t len )
{
    int ret;

    ECJPAKE_VALIDATE_RET( ctx != NULL );
    ECJPAKE_VALIDATE_RET( role == MBEDTLS_ECJPAKE_CLIENT ||
                          role == MBEDTLS_ECJPAKE_SERVER );
    /* secret may be NULL only when it is empty */
    ECJPAKE_VALIDATE_RET( secret != NULL || len == 0 );

    ctx->role = role;

    /* Resolve the hash implementation; fail early if it is compiled out */
    if( ( ctx->md_info = mbedtls_md_info_from_type( hash ) ) == NULL )
        return( MBEDTLS_ERR_MD_FEATURE_UNAVAILABLE );

    MBEDTLS_MPI_CHK( mbedtls_ecp_group_load( &ctx->grp, curve ) );

    MBEDTLS_MPI_CHK( mbedtls_mpi_read_binary( &ctx->s, secret, len ) );

cleanup:
    if( ret != 0 )
        mbedtls_ecjpake_free( ctx );

    return( ret );
}
/*
 * Check whether the context has been fully set up (hash selected,
 * curve loaded, secret stored) and is ready for the handshake rounds.
 * Returns 0 if ready, MBEDTLS_ERR_ECP_BAD_INPUT_DATA otherwise.
 */
int mbedtls_ecjpake_check( const mbedtls_ecjpake_context *ctx )
{
    int ready;

    ECJPAKE_VALIDATE_RET( ctx != NULL );

    ready = ctx->md_info != NULL           &&
            ctx->grp.id != MBEDTLS_ECP_DP_NONE &&
            ctx->s.p    != NULL;

    return( ready ? 0 : MBEDTLS_ERR_ECP_BAD_INPUT_DATA );
}
/*
 * Serialize one EC point, prefixed by its length as a 32-bit big-endian
 * integer, advancing *p past what was written.
 */
static int ecjpake_write_len_point( unsigned char **p,
                                    const unsigned char *end,
                                    const mbedtls_ecp_group *grp,
                                    const int pf,
                                    const mbedtls_ecp_point *P )
{
    unsigned char *out = *p;
    size_t len;
    int ret;

    /* Need at least 4 bytes for the length plus 1 for the point */
    if( end < out || end - out < 5 )
        return( MBEDTLS_ERR_ECP_BUFFER_TOO_SMALL );

    ret = mbedtls_ecp_point_write_binary( grp, P, pf,
                                          &len, out + 4, end - ( out + 4 ) );
    if( ret != 0 )
        return( ret );

    /* 32-bit big-endian length prefix */
    out[0] = (unsigned char)( ( len >> 24 ) & 0xFF );
    out[1] = (unsigned char)( ( len >> 16 ) & 0xFF );
    out[2] = (unsigned char)( ( len >>  8 ) & 0xFF );
    out[3] = (unsigned char)( ( len       ) & 0xFF );

    *p = out + 4 + len;

    return( 0 );
}
/*
 * Size of the temporary buffer for ecjpake_hash:
 * 3 EC points plus their length, plus ID and its length (4 + 6 bytes)
 */
#define ECJPAKE_HASH_BUF_LEN ( 3 * ( 4 + MBEDTLS_ECP_MAX_PT_LEN ) + 4 + 6 )
/*
 * Compute hash for ZKP (7.4.2.2.2.1):
 *
 *   h = md( lenpoint(G) || lenpoint(V) || lenpoint(X) || len(id) || id ) mod N
 *
 * where lenpoint() is the 32-bit-length-prefixed point encoding written
 * by ecjpake_write_len_point(), and len(id) is also 32-bit big-endian.
 * The digest is reduced mod the group order N so it can be used directly
 * in Schnorr-style computations.
 */
static int ecjpake_hash( const mbedtls_md_info_t *md_info,
                         const mbedtls_ecp_group *grp,
                         const int pf,
                         const mbedtls_ecp_point *G,
                         const mbedtls_ecp_point *V,
                         const mbedtls_ecp_point *X,
                         const char *id,
                         mbedtls_mpi *h )
{
    int ret;
    unsigned char buf[ECJPAKE_HASH_BUF_LEN];
    unsigned char *p = buf;
    const unsigned char *end = buf + sizeof( buf );
    const size_t id_len = strlen( id );
    unsigned char hash[MBEDTLS_MD_MAX_SIZE];

    /* Write the three length-prefixed points to the temporary buffer */
    MBEDTLS_MPI_CHK( ecjpake_write_len_point( &p, end, grp, pf, G ) );
    MBEDTLS_MPI_CHK( ecjpake_write_len_point( &p, end, grp, pf, V ) );
    MBEDTLS_MPI_CHK( ecjpake_write_len_point( &p, end, grp, pf, X ) );

    if( end - p < 4 )
        return( MBEDTLS_ERR_ECP_BUFFER_TOO_SMALL );

    /* 32-bit big-endian length of the identity string */
    *p++ = (unsigned char)( ( id_len >> 24 ) & 0xFF );
    *p++ = (unsigned char)( ( id_len >> 16 ) & 0xFF );
    *p++ = (unsigned char)( ( id_len >> 8 ) & 0xFF );
    *p++ = (unsigned char)( ( id_len ) & 0xFF );

    if( end < p || (size_t)( end - p ) < id_len )
        return( MBEDTLS_ERR_ECP_BUFFER_TOO_SMALL );

    memcpy( p, id, id_len );
    p += id_len;

    /* Compute hash over everything written so far */
    MBEDTLS_MPI_CHK( mbedtls_md( md_info, buf, p - buf, hash ) );

    /* Turn it into an integer mod n */
    MBEDTLS_MPI_CHK( mbedtls_mpi_read_binary( h, hash,
                                              mbedtls_md_get_size( md_info ) ) );
    MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( h, h, &grp->N ) );

cleanup:
    return( ret );
}
/*
 * Parse a ECSchnorrZKP (7.4.2.2.2) and verify it (7.4.2.3.3).
 * On success *p is advanced past the parsed structure.  The proof is
 * accepted iff h(G,V,X)*X + r*G == V.
 */
static int ecjpake_zkp_read( const mbedtls_md_info_t *md_info,
                             const mbedtls_ecp_group *grp,
                             const int pf,
                             const mbedtls_ecp_point *G,
                             const mbedtls_ecp_point *X,
                             const char *id,
                             const unsigned char **p,
                             const unsigned char *end )
{
    int ret;
    mbedtls_ecp_point V, VV;
    mbedtls_mpi r, h;
    size_t r_len;

    mbedtls_ecp_point_init( &V );
    mbedtls_ecp_point_init( &VV );
    mbedtls_mpi_init( &r );
    mbedtls_mpi_init( &h );

    /*
     * struct {
     *     ECPoint V;
     *     opaque r<1..2^8-1>;
     * } ECSchnorrZKP;
     */
    /* Early return is safe here: nothing has been allocated yet, only
     * initialized, so skipping the frees below leaks nothing. */
    if( end < *p )
        return( MBEDTLS_ERR_ECP_BAD_INPUT_DATA );

    MBEDTLS_MPI_CHK( mbedtls_ecp_tls_read_point( grp, &V, p, end - *p ) );

    /* One length byte for r, then r itself */
    if( end < *p || (size_t)( end - *p ) < 1 )
    {
        ret = MBEDTLS_ERR_ECP_BAD_INPUT_DATA;
        goto cleanup;
    }

    r_len = *(*p)++;

    if( end < *p || (size_t)( end - *p ) < r_len )
    {
        ret = MBEDTLS_ERR_ECP_BAD_INPUT_DATA;
        goto cleanup;
    }

    MBEDTLS_MPI_CHK( mbedtls_mpi_read_binary( &r, *p, r_len ) );
    *p += r_len;

    /*
     * Verification: recompute h and check V == h*X + r*G
     */
    MBEDTLS_MPI_CHK( ecjpake_hash( md_info, grp, pf, G, &V, X, id, &h ) );
    MBEDTLS_MPI_CHK( mbedtls_ecp_muladd( (mbedtls_ecp_group *) grp,
                                         &VV, &h, X, &r, G ) );

    if( mbedtls_ecp_point_cmp( &VV, &V ) != 0 )
    {
        ret = MBEDTLS_ERR_ECP_VERIFY_FAILED;
        goto cleanup;
    }

cleanup:
    mbedtls_ecp_point_free( &V );
    mbedtls_ecp_point_free( &VV );
    mbedtls_mpi_free( &r );
    mbedtls_mpi_free( &h );

    return( ret );
}
/*
 * Generate ZKP (7.4.2.3.2) and write it as ECSchnorrZKP (7.4.2.2.2).
 * Proves knowledge of x such that X = x*G without revealing x:
 * pick ephemeral v, V = v*G, h = hash(G,V,X,id), r = v - x*h mod N.
 */
static int ecjpake_zkp_write( const mbedtls_md_info_t *md_info,
                              const mbedtls_ecp_group *grp,
                              const int pf,
                              const mbedtls_ecp_point *G,
                              const mbedtls_mpi *x,
                              const mbedtls_ecp_point *X,
                              const char *id,
                              unsigned char **p,
                              const unsigned char *end,
                              int (*f_rng)(void *, unsigned char *, size_t),
                              void *p_rng )
{
    int ret;
    mbedtls_ecp_point V;
    mbedtls_mpi v;
    mbedtls_mpi h; /* later recycled to hold r */
    size_t len;

    if( end < *p )
        return( MBEDTLS_ERR_ECP_BUFFER_TOO_SMALL );

    mbedtls_ecp_point_init( &V );
    mbedtls_mpi_init( &v );
    mbedtls_mpi_init( &h );

    /* Compute signature: ephemeral key pair (v, V), then r = v - x*h mod N */
    MBEDTLS_MPI_CHK( mbedtls_ecp_gen_keypair_base( (mbedtls_ecp_group *) grp,
                                                   G, &v, &V, f_rng, p_rng ) );
    MBEDTLS_MPI_CHK( ecjpake_hash( md_info, grp, pf, G, &V, X, id, &h ) );
    MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &h, &h, x ) ); /* x*h */
    MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &h, &v, &h ) ); /* v - x*h */
    MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &h, &h, &grp->N ) ); /* r */

    /* Write it out: point V, then one length byte, then r */
    MBEDTLS_MPI_CHK( mbedtls_ecp_tls_write_point( grp, &V,
                                                  pf, &len, *p, end - *p ) );
    *p += len;

    len = mbedtls_mpi_size( &h ); /* actually r */
    if( end < *p || (size_t)( end - *p ) < 1 + len || len > 255 )
    {
        ret = MBEDTLS_ERR_ECP_BUFFER_TOO_SMALL;
        goto cleanup;
    }

    *(*p)++ = (unsigned char)( len & 0xFF );
    MBEDTLS_MPI_CHK( mbedtls_mpi_write_binary( &h, *p, len ) ); /* r */
    *p += len;

cleanup:
    mbedtls_ecp_point_free( &V );
    mbedtls_mpi_free( &v );
    mbedtls_mpi_free( &h );

    return( ret );
}
/*
 * Parse a ECJPAKEKeyKP (7.4.2.2.1) and check proof.
 * Output: verified public key X (rejecting the point at infinity),
 * with *p advanced past the parsed structure.
 */
static int ecjpake_kkp_read( const mbedtls_md_info_t *md_info,
                             const mbedtls_ecp_group *grp,
                             const int pf,
                             const mbedtls_ecp_point *G,
                             mbedtls_ecp_point *X,
                             const char *id,
                             const unsigned char **p,
                             const unsigned char *end )
{
    int ret;

    if( end < *p )
        return( MBEDTLS_ERR_ECP_BAD_INPUT_DATA );

    /*
     * struct {
     *     ECPoint X;
     *     ECSchnorrZKP zkp;
     * } ECJPAKEKeyKP;
     */
    MBEDTLS_MPI_CHK( mbedtls_ecp_tls_read_point( grp, X, p, end - *p ) );
    /* A zero public key would make the ZKP meaningless; reject it */
    if( mbedtls_ecp_is_zero( X ) )
    {
        ret = MBEDTLS_ERR_ECP_INVALID_KEY;
        goto cleanup;
    }

    MBEDTLS_MPI_CHK( ecjpake_zkp_read( md_info, grp, pf, G, X, id, p, end ) );

cleanup:
    return( ret );
}
/*
 * Generate an ECJPAKEKeyKP.
 * Output: the serialized structure (point + Schnorr ZKP), plus the
 * freshly generated private/public key pair (x, X); *p is advanced.
 */
static int ecjpake_kkp_write( const mbedtls_md_info_t *md_info,
                              const mbedtls_ecp_group *grp,
                              const int pf,
                              const mbedtls_ecp_point *G,
                              mbedtls_mpi *x,
                              mbedtls_ecp_point *X,
                              const char *id,
                              unsigned char **p,
                              const unsigned char *end,
                              int (*f_rng)(void *, unsigned char *, size_t),
                              void *p_rng )
{
    int ret;
    size_t len;

    if( end < *p )
        return( MBEDTLS_ERR_ECP_BUFFER_TOO_SMALL );

    /* Generate key (7.4.2.3.1) and write it out */
    MBEDTLS_MPI_CHK( mbedtls_ecp_gen_keypair_base( (mbedtls_ecp_group *) grp, G, x, X,
                                                   f_rng, p_rng ) );
    MBEDTLS_MPI_CHK( mbedtls_ecp_tls_write_point( grp, X,
                                                  pf, &len, *p, end - *p ) );
    *p += len;

    /* Generate and write proof of knowledge of x */
    MBEDTLS_MPI_CHK( ecjpake_zkp_write( md_info, grp, pf, G, x, X, id,
                                        p, end, f_rng, p_rng ) );

cleanup:
    return( ret );
}
/*
 * Read a ECJPAKEKeyKPPairList (7.4.2.3) and check proofs.
 * Outputs: verified peer public keys Xa, Xb.  The whole buffer must be
 * consumed exactly; trailing bytes are an error.
 */
static int ecjpake_kkpp_read( const mbedtls_md_info_t *md_info,
                              const mbedtls_ecp_group *grp,
                              const int pf,
                              const mbedtls_ecp_point *G,
                              mbedtls_ecp_point *Xa,
                              mbedtls_ecp_point *Xb,
                              const char *id,
                              const unsigned char *buf,
                              size_t len )
{
    int ret;
    const unsigned char *p = buf;
    const unsigned char *end = buf + len;

    /*
     * struct {
     *     ECJPAKEKeyKP ecjpake_key_kp_pair_list[2];
     * } ECJPAKEKeyKPPairList;
     */
    MBEDTLS_MPI_CHK( ecjpake_kkp_read( md_info, grp, pf, G, Xa, id, &p, end ) );
    MBEDTLS_MPI_CHK( ecjpake_kkp_read( md_info, grp, pf, G, Xb, id, &p, end ) );

    /* Reject any leftover bytes after the two KeyKPs */
    if( p != end )
        ret = MBEDTLS_ERR_ECP_BAD_INPUT_DATA;

cleanup:
    return( ret );
}
/*
 * Generate a ECJPAKEKeyKPPairList.
 * Outputs: the serialized structure, plus two freshly generated
 * private/public key pairs (xm1, Xa) and (xm2, Xb); *olen gets the
 * number of bytes written.
 */
static int ecjpake_kkpp_write( const mbedtls_md_info_t *md_info,
                               const mbedtls_ecp_group *grp,
                               const int pf,
                               const mbedtls_ecp_point *G,
                               mbedtls_mpi *xm1,
                               mbedtls_ecp_point *Xa,
                               mbedtls_mpi *xm2,
                               mbedtls_ecp_point *Xb,
                               const char *id,
                               unsigned char *buf,
                               size_t len,
                               size_t *olen,
                               int (*f_rng)(void *, unsigned char *, size_t),
                               void *p_rng )
{
    int ret;
    unsigned char *p = buf;
    const unsigned char *end = buf + len;

    MBEDTLS_MPI_CHK( ecjpake_kkp_write( md_info, grp, pf, G, xm1, Xa, id,
                                        &p, end, f_rng, p_rng ) );
    MBEDTLS_MPI_CHK( ecjpake_kkp_write( md_info, grp, pf, G, xm2, Xb, id,
                                        &p, end, f_rng, p_rng ) );

    *olen = p - buf;

cleanup:
    return( ret );
}
/*
 * Read and process the first round message: the peer's two
 * knowledge-proved public keys, stored into Xp1 and Xp2 once their
 * ZKPs verify against the peer's identity string.
 */
int mbedtls_ecjpake_read_round_one( mbedtls_ecjpake_context *ctx,
                                    const unsigned char *buf,
                                    size_t len )
{
    int ret;

    ECJPAKE_VALIDATE_RET( ctx != NULL );
    ECJPAKE_VALIDATE_RET( buf != NULL );

    ret = ecjpake_kkpp_read( ctx->md_info, &ctx->grp, ctx->point_format,
                             &ctx->grp.G,
                             &ctx->Xp1, &ctx->Xp2, ID_PEER,
                             buf, len );

    return( ret );
}
/*
 * Generate and write the first round message: two fresh key pairs
 * (xm1/Xm1, xm2/Xm2) plus their Schnorr proofs, serialized into buf.
 * *olen receives the number of bytes written.
 */
int mbedtls_ecjpake_write_round_one( mbedtls_ecjpake_context *ctx,
                                     unsigned char *buf, size_t len, size_t *olen,
                                     int (*f_rng)(void *, unsigned char *, size_t),
                                     void *p_rng )
{
    int ret;

    ECJPAKE_VALIDATE_RET( ctx != NULL );
    ECJPAKE_VALIDATE_RET( buf != NULL );
    ECJPAKE_VALIDATE_RET( olen != NULL );
    ECJPAKE_VALIDATE_RET( f_rng != NULL );

    ret = ecjpake_kkpp_write( ctx->md_info, &ctx->grp, ctx->point_format,
                              &ctx->grp.G,
                              &ctx->xm1, &ctx->Xm1, &ctx->xm2, &ctx->Xm2,
                              ID_MINE, buf, len, olen, f_rng, p_rng );

    return( ret );
}
/*
 * Compute the sum of three points R = A + B + C.
 * Implemented as two muladd calls with unit scalars, since the ecp
 * layer exposes muladd but no plain point addition.
 */
static int ecjpake_ecp_add3( mbedtls_ecp_group *grp, mbedtls_ecp_point *R,
                             const mbedtls_ecp_point *A,
                             const mbedtls_ecp_point *B,
                             const mbedtls_ecp_point *C )
{
    int ret;
    mbedtls_mpi one;

    mbedtls_mpi_init( &one );

    MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &one, 1 ) );
    MBEDTLS_MPI_CHK( mbedtls_ecp_muladd( grp, R, &one, A, &one, B ) ); /* R = A + B */
    MBEDTLS_MPI_CHK( mbedtls_ecp_muladd( grp, R, &one, R, &one, C ) ); /* R = R + C */

cleanup:
    mbedtls_mpi_free( &one );

    return( ret );
}
/*
 * Read and process second round message (C: 7.4.2.5, S: 7.4.2.6).
 * Verifies the peer's combined public key Xp against the composite
 * generator G = Xm1 + Xm2 + Xp1.  A client additionally parses the
 * leading ECParameters and requires them to match the configured curve.
 */
int mbedtls_ecjpake_read_round_two( mbedtls_ecjpake_context *ctx,
                                    const unsigned char *buf,
                                    size_t len )
{
    int ret;
    const unsigned char *p = buf;
    const unsigned char *end = buf + len;
    mbedtls_ecp_group grp;
    mbedtls_ecp_point G;    /* C: GB, S: GA */

    ECJPAKE_VALIDATE_RET( ctx != NULL );
    ECJPAKE_VALIDATE_RET( buf != NULL );

    mbedtls_ecp_group_init( &grp );
    mbedtls_ecp_point_init( &G );

    /*
     * Server: GA = X3 + X4 + X1      (7.4.2.6.1)
     * Client: GB = X1 + X2 + X3      (7.4.2.5.1)
     * Unified: G = Xm1 + Xm2 + Xp1
     * We need that before parsing in order to check Xp as we read it
     */
    MBEDTLS_MPI_CHK( ecjpake_ecp_add3( &ctx->grp, &G,
                                       &ctx->Xm1, &ctx->Xm2, &ctx->Xp1 ) );

    /*
     * struct {
     *     ECParameters curve_params;   // only client reading server msg
     *     ECJPAKEKeyKP ecjpake_key_kp;
     * } Client/ServerECJPAKEParams;
     */
    if( ctx->role == MBEDTLS_ECJPAKE_CLIENT )
    {
        MBEDTLS_MPI_CHK( mbedtls_ecp_tls_read_group( &grp, &p, len ) );
        /* The server must use the same curve we were configured with */
        if( grp.id != ctx->grp.id )
        {
            ret = MBEDTLS_ERR_ECP_FEATURE_UNAVAILABLE;
            goto cleanup;
        }
    }

    MBEDTLS_MPI_CHK( ecjpake_kkp_read( ctx->md_info, &ctx->grp,
                                       ctx->point_format,
                                       &G, &ctx->Xp, ID_PEER, &p, end ) );

    /* Whole message must have been consumed */
    if( p != end )
    {
        ret = MBEDTLS_ERR_ECP_BAD_INPUT_DATA;
        goto cleanup;
    }

cleanup:
    mbedtls_ecp_group_free( &grp );
    mbedtls_ecp_point_free( &G );

    return( ret );
}
/*
 * Compute R = +/- X * S mod N, taking care not to leak S.
 * S is blinded as b = S + rnd * N before the multiplication, so the
 * multiply operates on a value unrelated to the secret's bit pattern;
 * the final reduction mod N removes the blinding again.
 */
static int ecjpake_mul_secret( mbedtls_mpi *R, int sign,
                               const mbedtls_mpi *X,
                               const mbedtls_mpi *S,
                               const mbedtls_mpi *N,
                               int (*f_rng)(void *, unsigned char *, size_t),
                               void *p_rng )
{
    int ret;
    mbedtls_mpi b; /* Blinding value, then s + N * blinding */

    mbedtls_mpi_init( &b );

    /* b = s + rnd-128-bit * N */
    MBEDTLS_MPI_CHK( mbedtls_mpi_fill_random( &b, 16, f_rng, p_rng ) );
    MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &b, &b, N ) );
    MBEDTLS_MPI_CHK( mbedtls_mpi_add_mpi( &b, &b, S ) );

    /* R = sign * X * b mod N */
    MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( R, X, &b ) );
    R->s *= sign;
    MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( R, R, N ) );

cleanup:
    mbedtls_mpi_free( &b );

    return( ret );
}
/*
 * Generate and write the second round message (S: 7.4.2.5, C: 7.4.2.6).
 * Derives the round-two key pair from the composite generator
 * G = Xp1 + Xp2 + Xm1 and the blinded secret, then serializes the
 * (server-only) curve parameters, the public key Xm and its ZKP.
 */
int mbedtls_ecjpake_write_round_two( mbedtls_ecjpake_context *ctx,
                                     unsigned char *buf, size_t len, size_t *olen,
                                     int (*f_rng)(void *, unsigned char *, size_t),
                                     void *p_rng )
{
    int ret;
    mbedtls_ecp_point G;    /* C: GA, S: GB */
    mbedtls_ecp_point Xm;   /* C: Xc, S: Xs */
    mbedtls_mpi xm;         /* C: xc, S: xs */
    unsigned char *p = buf;
    const unsigned char *end = buf + len;
    size_t ec_len;

    ECJPAKE_VALIDATE_RET( ctx != NULL );
    ECJPAKE_VALIDATE_RET( buf != NULL );
    ECJPAKE_VALIDATE_RET( olen != NULL );
    ECJPAKE_VALIDATE_RET( f_rng != NULL );

    mbedtls_ecp_point_init( &G );
    mbedtls_ecp_point_init( &Xm );
    mbedtls_mpi_init( &xm );

    /*
     * First generate private/public key pair (S: 7.4.2.5.1, C: 7.4.2.6.1)
     *
     * Client: GA = X1 + X3 + X4  | xs = x2 * s | Xc = xc * GA
     * Server: GB = X3 + X1 + X2  | xs = x4 * s | Xs = xs * GB
     * Unified: G = Xm1 + Xp1 + Xp2 | xm = xm2 * s | Xm = xm * G
     */
    MBEDTLS_MPI_CHK( ecjpake_ecp_add3( &ctx->grp, &G,
                                       &ctx->Xp1, &ctx->Xp2, &ctx->Xm1 ) );
    /* xm = xm2 * s, with s blinded against side channels */
    MBEDTLS_MPI_CHK( ecjpake_mul_secret( &xm, 1, &ctx->xm2, &ctx->s,
                                         &ctx->grp.N, f_rng, p_rng ) );
    MBEDTLS_MPI_CHK( mbedtls_ecp_mul( &ctx->grp, &Xm, &xm, &G, f_rng, p_rng ) );

    /*
     * Now write things out
     *
     * struct {
     *     ECParameters curve_params;   // only server writing its message
     *     ECJPAKEKeyKP ecjpake_key_kp;
     * } Client/ServerECJPAKEParams;
     */
    if( ctx->role == MBEDTLS_ECJPAKE_SERVER )
    {
        if( end < p )
        {
            ret = MBEDTLS_ERR_ECP_BUFFER_TOO_SMALL;
            goto cleanup;
        }
        MBEDTLS_MPI_CHK( mbedtls_ecp_tls_write_group( &ctx->grp, &ec_len,
                                                      p, end - p ) );
        p += ec_len;
    }

    if( end < p )
    {
        ret = MBEDTLS_ERR_ECP_BUFFER_TOO_SMALL;
        goto cleanup;
    }
    MBEDTLS_MPI_CHK( mbedtls_ecp_tls_write_point( &ctx->grp, &Xm,
                                                  ctx->point_format, &ec_len, p, end - p ) );
    p += ec_len;

    /* Proof of knowledge of xm, bound to the composite generator G */
    MBEDTLS_MPI_CHK( ecjpake_zkp_write( ctx->md_info, &ctx->grp,
                                        ctx->point_format,
                                        &G, &xm, &Xm, ID_MINE,
                                        &p, end, f_rng, p_rng ) );

    *olen = p - buf;

cleanup:
    mbedtls_ecp_point_free( &G );
    mbedtls_ecp_point_free( &Xm );
    mbedtls_mpi_free( &xm );

    return( ret );
}
/*
 * Derive PMS (7.4.2.7 / 7.4.2.8): PMS = md( x-coordinate of K ), where
 * K = ( Xp - Xp2 * xm2 * s ) * xm2.  buf must hold at least the digest
 * size of the configured hash; *olen receives that size.
 */
int mbedtls_ecjpake_derive_secret( mbedtls_ecjpake_context *ctx,
                                   unsigned char *buf, size_t len, size_t *olen,
                                   int (*f_rng)(void *, unsigned char *, size_t),
                                   void *p_rng )
{
    int ret;
    mbedtls_ecp_point K;
    mbedtls_mpi m_xm2_s, one;
    unsigned char kx[MBEDTLS_ECP_MAX_BYTES];
    size_t x_bytes;

    ECJPAKE_VALIDATE_RET( ctx != NULL );
    ECJPAKE_VALIDATE_RET( buf != NULL );
    ECJPAKE_VALIDATE_RET( olen != NULL );
    ECJPAKE_VALIDATE_RET( f_rng != NULL );

    *olen = mbedtls_md_get_size( ctx->md_info );
    if( len < *olen )
        return( MBEDTLS_ERR_ECP_BUFFER_TOO_SMALL );

    mbedtls_ecp_point_init( &K );
    mbedtls_mpi_init( &m_xm2_s );
    mbedtls_mpi_init( &one );

    MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &one, 1 ) );

    /*
     * Client: K = ( Xs - X4  * x2 * s ) * x2
     * Server: K = ( Xc - X2  * x4 * s ) * x4
     * Unified: K = ( Xp - Xp2 * xm2 * s ) * xm2
     */
    /* m_xm2_s = -xm2 * s mod N, with s blinded against side channels */
    MBEDTLS_MPI_CHK( ecjpake_mul_secret( &m_xm2_s, -1, &ctx->xm2, &ctx->s,
                                         &ctx->grp.N, f_rng, p_rng ) );
    MBEDTLS_MPI_CHK( mbedtls_ecp_muladd( &ctx->grp, &K,
                                         &one, &ctx->Xp,
                                         &m_xm2_s, &ctx->Xp2 ) );
    MBEDTLS_MPI_CHK( mbedtls_ecp_mul( &ctx->grp, &K, &ctx->xm2, &K,
                                      f_rng, p_rng ) );

    /* PMS = SHA-256( K.X ) -- more precisely, md( K.X ) for the chosen hash */
    x_bytes = ( ctx->grp.pbits + 7 ) / 8;
    MBEDTLS_MPI_CHK( mbedtls_mpi_write_binary( &K.X, kx, x_bytes ) );
    MBEDTLS_MPI_CHK( mbedtls_md( ctx->md_info, kx, x_bytes, buf ) );

cleanup:
    mbedtls_ecp_point_free( &K );
    mbedtls_mpi_free( &m_xm2_s );
    mbedtls_mpi_free( &one );

    return( ret );
}
#undef ID_MINE
#undef ID_PEER
#endif /* ! MBEDTLS_ECJPAKE_ALT */
#if defined(MBEDTLS_SELF_TEST)
#if defined(MBEDTLS_PLATFORM_C)
#include "mbedtls/platform.h"
#else
#include <stdio.h>
#define mbedtls_printf printf
#endif
#if !defined(MBEDTLS_ECP_DP_SECP256R1_ENABLED) || \
!defined(MBEDTLS_SHA256_C)
/*
 * Stub self test used when SECP256R1 or SHA-256 is compiled out:
 * the test vectors below cannot be exercised, so report success.
 */
int mbedtls_ecjpake_self_test( int verbose )
{
    (void) verbose;
    return( 0 );
}
#else
static const unsigned char ecjpake_test_password[] = {
0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x6a, 0x70, 0x61, 0x6b, 0x65, 0x74,
0x65, 0x73, 0x74
};
static const unsigned char ecjpake_test_x1[] = {
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c,
0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x21
};
static const unsigned char ecjpake_test_x2[] = {
0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c,
0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x81
};
static const unsigned char ecjpake_test_x3[] = {
0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c,
0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x81
};
static const unsigned char ecjpake_test_x4[] = {
0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc,
0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8,
0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe1
};
static const unsigned char ecjpake_test_cli_one[] = {
0x41, 0x04, 0xac, 0xcf, 0x01, 0x06, 0xef, 0x85, 0x8f, 0xa2, 0xd9, 0x19,
0x33, 0x13, 0x46, 0x80, 0x5a, 0x78, 0xb5, 0x8b, 0xba, 0xd0, 0xb8, 0x44,
0xe5, 0xc7, 0x89, 0x28, 0x79, 0x14, 0x61, 0x87, 0xdd, 0x26, 0x66, 0xad,
0xa7, 0x81, 0xbb, 0x7f, 0x11, 0x13, 0x72, 0x25, 0x1a, 0x89, 0x10, 0x62,
0x1f, 0x63, 0x4d, 0xf1, 0x28, 0xac, 0x48, 0xe3, 0x81, 0xfd, 0x6e, 0xf9,
0x06, 0x07, 0x31, 0xf6, 0x94, 0xa4, 0x41, 0x04, 0x1d, 0xd0, 0xbd, 0x5d,
0x45, 0x66, 0xc9, 0xbe, 0xd9, 0xce, 0x7d, 0xe7, 0x01, 0xb5, 0xe8, 0x2e,
0x08, 0xe8, 0x4b, 0x73, 0x04, 0x66, 0x01, 0x8a, 0xb9, 0x03, 0xc7, 0x9e,
0xb9, 0x82, 0x17, 0x22, 0x36, 0xc0, 0xc1, 0x72, 0x8a, 0xe4, 0xbf, 0x73,
0x61, 0x0d, 0x34, 0xde, 0x44, 0x24, 0x6e, 0xf3, 0xd9, 0xc0, 0x5a, 0x22,
0x36, 0xfb, 0x66, 0xa6, 0x58, 0x3d, 0x74, 0x49, 0x30, 0x8b, 0xab, 0xce,
0x20, 0x72, 0xfe, 0x16, 0x66, 0x29, 0x92, 0xe9, 0x23, 0x5c, 0x25, 0x00,
0x2f, 0x11, 0xb1, 0x50, 0x87, 0xb8, 0x27, 0x38, 0xe0, 0x3c, 0x94, 0x5b,
0xf7, 0xa2, 0x99, 0x5d, 0xda, 0x1e, 0x98, 0x34, 0x58, 0x41, 0x04, 0x7e,
0xa6, 0xe3, 0xa4, 0x48, 0x70, 0x37, 0xa9, 0xe0, 0xdb, 0xd7, 0x92, 0x62,
0xb2, 0xcc, 0x27, 0x3e, 0x77, 0x99, 0x30, 0xfc, 0x18, 0x40, 0x9a, 0xc5,
0x36, 0x1c, 0x5f, 0xe6, 0x69, 0xd7, 0x02, 0xe1, 0x47, 0x79, 0x0a, 0xeb,
0x4c, 0xe7, 0xfd, 0x65, 0x75, 0xab, 0x0f, 0x6c, 0x7f, 0xd1, 0xc3, 0x35,
0x93, 0x9a, 0xa8, 0x63, 0xba, 0x37, 0xec, 0x91, 0xb7, 0xe3, 0x2b, 0xb0,
0x13, 0xbb, 0x2b, 0x41, 0x04, 0xa4, 0x95, 0x58, 0xd3, 0x2e, 0xd1, 0xeb,
0xfc, 0x18, 0x16, 0xaf, 0x4f, 0xf0, 0x9b, 0x55, 0xfc, 0xb4, 0xca, 0x47,
0xb2, 0xa0, 0x2d, 0x1e, 0x7c, 0xaf, 0x11, 0x79, 0xea, 0x3f, 0xe1, 0x39,
0x5b, 0x22, 0xb8, 0x61, 0x96, 0x40, 0x16, 0xfa, 0xba, 0xf7, 0x2c, 0x97,
0x56, 0x95, 0xd9, 0x3d, 0x4d, 0xf0, 0xe5, 0x19, 0x7f, 0xe9, 0xf0, 0x40,
0x63, 0x4e, 0xd5, 0x97, 0x64, 0x93, 0x77, 0x87, 0xbe, 0x20, 0xbc, 0x4d,
0xee, 0xbb, 0xf9, 0xb8, 0xd6, 0x0a, 0x33, 0x5f, 0x04, 0x6c, 0xa3, 0xaa,
0x94, 0x1e, 0x45, 0x86, 0x4c, 0x7c, 0xad, 0xef, 0x9c, 0xf7, 0x5b, 0x3d,
0x8b, 0x01, 0x0e, 0x44, 0x3e, 0xf0
};
static const unsigned char ecjpake_test_srv_one[] = {
0x41, 0x04, 0x7e, 0xa6, 0xe3, 0xa4, 0x48, 0x70, 0x37, 0xa9, 0xe0, 0xdb,
0xd7, 0x92, 0x62, 0xb2, 0xcc, 0x27, 0x3e, 0x77, 0x99, 0x30, 0xfc, 0x18,
0x40, 0x9a, 0xc5, 0x36, 0x1c, 0x5f, 0xe6, 0x69, 0xd7, 0x02, 0xe1, 0x47,
0x79, 0x0a, 0xeb, 0x4c, 0xe7, 0xfd, 0x65, 0x75, 0xab, 0x0f, 0x6c, 0x7f,
0xd1, 0xc3, 0x35, 0x93, 0x9a, 0xa8, 0x63, 0xba, 0x37, 0xec, 0x91, 0xb7,
0xe3, 0x2b, 0xb0, 0x13, 0xbb, 0x2b, 0x41, 0x04, 0x09, 0xf8, 0x5b, 0x3d,
0x20, 0xeb, 0xd7, 0x88, 0x5c, 0xe4, 0x64, 0xc0, 0x8d, 0x05, 0x6d, 0x64,
0x28, 0xfe, 0x4d, 0xd9, 0x28, 0x7a, 0xa3, 0x65, 0xf1, 0x31, 0xf4, 0x36,
0x0f, 0xf3, 0x86, 0xd8, 0x46, 0x89, 0x8b, 0xc4, 0xb4, 0x15, 0x83, 0xc2,
0xa5, 0x19, 0x7f, 0x65, 0xd7, 0x87, 0x42, 0x74, 0x6c, 0x12, 0xa5, 0xec,
0x0a, 0x4f, 0xfe, 0x2f, 0x27, 0x0a, 0x75, 0x0a, 0x1d, 0x8f, 0xb5, 0x16,
0x20, 0x93, 0x4d, 0x74, 0xeb, 0x43, 0xe5, 0x4d, 0xf4, 0x24, 0xfd, 0x96,
0x30, 0x6c, 0x01, 0x17, 0xbf, 0x13, 0x1a, 0xfa, 0xbf, 0x90, 0xa9, 0xd3,
0x3d, 0x11, 0x98, 0xd9, 0x05, 0x19, 0x37, 0x35, 0x14, 0x41, 0x04, 0x19,
0x0a, 0x07, 0x70, 0x0f, 0xfa, 0x4b, 0xe6, 0xae, 0x1d, 0x79, 0xee, 0x0f,
0x06, 0xae, 0xb5, 0x44, 0xcd, 0x5a, 0xdd, 0xaa, 0xbe, 0xdf, 0x70, 0xf8,
0x62, 0x33, 0x21, 0x33, 0x2c, 0x54, 0xf3, 0x55, 0xf0, 0xfb, 0xfe, 0xc7,
0x83, 0xed, 0x35, 0x9e, 0x5d, 0x0b, 0xf7, 0x37, 0x7a, 0x0f, 0xc4, 0xea,
0x7a, 0xce, 0x47, 0x3c, 0x9c, 0x11, 0x2b, 0x41, 0xcc, 0xd4, 0x1a, 0xc5,
0x6a, 0x56, 0x12, 0x41, 0x04, 0x36, 0x0a, 0x1c, 0xea, 0x33, 0xfc, 0xe6,
0x41, 0x15, 0x64, 0x58, 0xe0, 0xa4, 0xea, 0xc2, 0x19, 0xe9, 0x68, 0x31,
0xe6, 0xae, 0xbc, 0x88, 0xb3, 0xf3, 0x75, 0x2f, 0x93, 0xa0, 0x28, 0x1d,
0x1b, 0xf1, 0xfb, 0x10, 0x60, 0x51, 0xdb, 0x96, 0x94, 0xa8, 0xd6, 0xe8,
0x62, 0xa5, 0xef, 0x13, 0x24, 0xa3, 0xd9, 0xe2, 0x78, 0x94, 0xf1, 0xee,
0x4f, 0x7c, 0x59, 0x19, 0x99, 0x65, 0xa8, 0xdd, 0x4a, 0x20, 0x91, 0x84,
0x7d, 0x2d, 0x22, 0xdf, 0x3e, 0xe5, 0x5f, 0xaa, 0x2a, 0x3f, 0xb3, 0x3f,
0xd2, 0xd1, 0xe0, 0x55, 0xa0, 0x7a, 0x7c, 0x61, 0xec, 0xfb, 0x8d, 0x80,
0xec, 0x00, 0xc2, 0xc9, 0xeb, 0x12
};
static const unsigned char ecjpake_test_srv_two[] = {
0x03, 0x00, 0x17, 0x41, 0x04, 0x0f, 0xb2, 0x2b, 0x1d, 0x5d, 0x11, 0x23,
0xe0, 0xef, 0x9f, 0xeb, 0x9d, 0x8a, 0x2e, 0x59, 0x0a, 0x1f, 0x4d, 0x7c,
0xed, 0x2c, 0x2b, 0x06, 0x58, 0x6e, 0x8f, 0x2a, 0x16, 0xd4, 0xeb, 0x2f,
0xda, 0x43, 0x28, 0xa2, 0x0b, 0x07, 0xd8, 0xfd, 0x66, 0x76, 0x54, 0xca,
0x18, 0xc5, 0x4e, 0x32, 0xa3, 0x33, 0xa0, 0x84, 0x54, 0x51, 0xe9, 0x26,
0xee, 0x88, 0x04, 0xfd, 0x7a, 0xf0, 0xaa, 0xa7, 0xa6, 0x41, 0x04, 0x55,
0x16, 0xea, 0x3e, 0x54, 0xa0, 0xd5, 0xd8, 0xb2, 0xce, 0x78, 0x6b, 0x38,
0xd3, 0x83, 0x37, 0x00, 0x29, 0xa5, 0xdb, 0xe4, 0x45, 0x9c, 0x9d, 0xd6,
0x01, 0xb4, 0x08, 0xa2, 0x4a, 0xe6, 0x46, 0x5c, 0x8a, 0xc9, 0x05, 0xb9,
0xeb, 0x03, 0xb5, 0xd3, 0x69, 0x1c, 0x13, 0x9e, 0xf8, 0x3f, 0x1c, 0xd4,
0x20, 0x0f, 0x6c, 0x9c, 0xd4, 0xec, 0x39, 0x22, 0x18, 0xa5, 0x9e, 0xd2,
0x43, 0xd3, 0xc8, 0x20, 0xff, 0x72, 0x4a, 0x9a, 0x70, 0xb8, 0x8c, 0xb8,
0x6f, 0x20, 0xb4, 0x34, 0xc6, 0x86, 0x5a, 0xa1, 0xcd, 0x79, 0x06, 0xdd,
0x7c, 0x9b, 0xce, 0x35, 0x25, 0xf5, 0x08, 0x27, 0x6f, 0x26, 0x83, 0x6c
};
static const unsigned char ecjpake_test_cli_two[] = {
0x41, 0x04, 0x69, 0xd5, 0x4e, 0xe8, 0x5e, 0x90, 0xce, 0x3f, 0x12, 0x46,
0x74, 0x2d, 0xe5, 0x07, 0xe9, 0x39, 0xe8, 0x1d, 0x1d, 0xc1, 0xc5, 0xcb,
0x98, 0x8b, 0x58, 0xc3, 0x10, 0xc9, 0xfd, 0xd9, 0x52, 0x4d, 0x93, 0x72,
0x0b, 0x45, 0x54, 0x1c, 0x83, 0xee, 0x88, 0x41, 0x19, 0x1d, 0xa7, 0xce,
0xd8, 0x6e, 0x33, 0x12, 0xd4, 0x36, 0x23, 0xc1, 0xd6, 0x3e, 0x74, 0x98,
0x9a, 0xba, 0x4a, 0xff, 0xd1, 0xee, 0x41, 0x04, 0x07, 0x7e, 0x8c, 0x31,
0xe2, 0x0e, 0x6b, 0xed, 0xb7, 0x60, 0xc1, 0x35, 0x93, 0xe6, 0x9f, 0x15,
0xbe, 0x85, 0xc2, 0x7d, 0x68, 0xcd, 0x09, 0xcc, 0xb8, 0xc4, 0x18, 0x36,
0x08, 0x91, 0x7c, 0x5c, 0x3d, 0x40, 0x9f, 0xac, 0x39, 0xfe, 0xfe, 0xe8,
0x2f, 0x72, 0x92, 0xd3, 0x6f, 0x0d, 0x23, 0xe0, 0x55, 0x91, 0x3f, 0x45,
0xa5, 0x2b, 0x85, 0xdd, 0x8a, 0x20, 0x52, 0xe9, 0xe1, 0x29, 0xbb, 0x4d,
0x20, 0x0f, 0x01, 0x1f, 0x19, 0x48, 0x35, 0x35, 0xa6, 0xe8, 0x9a, 0x58,
0x0c, 0x9b, 0x00, 0x03, 0xba, 0xf2, 0x14, 0x62, 0xec, 0xe9, 0x1a, 0x82,
0xcc, 0x38, 0xdb, 0xdc, 0xae, 0x60, 0xd9, 0xc5, 0x4c
};
static const unsigned char ecjpake_test_pms[] = {
0xf3, 0xd4, 0x7f, 0x59, 0x98, 0x44, 0xdb, 0x92, 0xa5, 0x69, 0xbb, 0xe7,
0x98, 0x1e, 0x39, 0xd9, 0x31, 0xfd, 0x74, 0x3b, 0xf2, 0x2e, 0x98, 0xf9,
0xb4, 0x38, 0xf7, 0x19, 0xd3, 0xc4, 0xf3, 0x51
};
/* Load my private keys and generate the corresponding public keys
 * (Xm1 = xm1*G, Xm2 = xm2*G).  Used to replay the fixed test vectors. */
static int ecjpake_test_load( mbedtls_ecjpake_context *ctx,
                              const unsigned char *xm1, size_t len1,
                              const unsigned char *xm2, size_t len2 )
{
    int ret;

    MBEDTLS_MPI_CHK( mbedtls_mpi_read_binary( &ctx->xm1, xm1, len1 ) );
    MBEDTLS_MPI_CHK( mbedtls_mpi_read_binary( &ctx->xm2, xm2, len2 ) );

    /* NULL RNG is acceptable here: no blinding needed in the self test */
    MBEDTLS_MPI_CHK( mbedtls_ecp_mul( &ctx->grp, &ctx->Xm1, &ctx->xm1,
                                      &ctx->grp.G, NULL, NULL ) );
    MBEDTLS_MPI_CHK( mbedtls_ecp_mul( &ctx->grp, &ctx->Xm2, &ctx->xm2,
                                      &ctx->grp.G, NULL, NULL ) );

cleanup:
    return( ret );
}
/* For tests we don't need a secure RNG;
 * use the LCG from Numerical Recipes for simplicity.
 * Emits the raw (native-endian) bytes of successive 32-bit states,
 * up to 4 bytes per state. */
static int ecjpake_lgc( void *p, unsigned char *out, size_t len )
{
    static uint32_t x = 42;
    size_t chunk = 0;

    (void) p;

    for( ; len > 0; len -= chunk, out += chunk )
    {
        x = x * 1664525u + 1013904223u;
        chunk = ( len < 4 ) ? len : 4;
        memcpy( out, &x, chunk );
    }

    return( 0 );
}
/* Minimal assertion helper for the self-test below: on failure set
 * ret = 1 and jump to the enclosing function's "cleanup" label. The
 * caller must declare "int ret" and provide a "cleanup:" label. */
#define TEST_ASSERT( x ) \
do { \
if( x ) \
ret = 0; \
else \
{ \
ret = 1; \
goto cleanup; \
} \
} while( 0 )
/*
 * Checkup routine: runs three ECJPAKE self-tests (context setup, a full
 * random handshake using the LCG above, and a handshake replayed from
 * the fixed reference vectors). Returns 0 on success, 1 on any failure.
 * When verbose is non-zero, progress is printed via mbedtls_printf.
 */
int mbedtls_ecjpake_self_test( int verbose )
{
int ret;
mbedtls_ecjpake_context cli;
mbedtls_ecjpake_context srv;
unsigned char buf[512], pms[32];
size_t len, pmslen;
mbedtls_ecjpake_init( &cli );
mbedtls_ecjpake_init( &srv );
if( verbose != 0 )
mbedtls_printf( " ECJPAKE test #0 (setup): " );
TEST_ASSERT( mbedtls_ecjpake_setup( &cli, MBEDTLS_ECJPAKE_CLIENT,
MBEDTLS_MD_SHA256, MBEDTLS_ECP_DP_SECP256R1,
ecjpake_test_password,
sizeof( ecjpake_test_password ) ) == 0 );
TEST_ASSERT( mbedtls_ecjpake_setup( &srv, MBEDTLS_ECJPAKE_SERVER,
MBEDTLS_MD_SHA256, MBEDTLS_ECP_DP_SECP256R1,
ecjpake_test_password,
sizeof( ecjpake_test_password ) ) == 0 );
if( verbose != 0 )
mbedtls_printf( "passed\n" );
if( verbose != 0 )
mbedtls_printf( " ECJPAKE test #1 (random handshake): " );
/* Full round-trip: both sides write/read round one and two, then the
 * derived secrets must match (len/pmslen and byte-for-byte). */
TEST_ASSERT( mbedtls_ecjpake_write_round_one( &cli,
buf, sizeof( buf ), &len, ecjpake_lgc, NULL ) == 0 );
TEST_ASSERT( mbedtls_ecjpake_read_round_one( &srv, buf, len ) == 0 );
TEST_ASSERT( mbedtls_ecjpake_write_round_one( &srv,
buf, sizeof( buf ), &len, ecjpake_lgc, NULL ) == 0 );
TEST_ASSERT( mbedtls_ecjpake_read_round_one( &cli, buf, len ) == 0 );
TEST_ASSERT( mbedtls_ecjpake_write_round_two( &srv,
buf, sizeof( buf ), &len, ecjpake_lgc, NULL ) == 0 );
TEST_ASSERT( mbedtls_ecjpake_read_round_two( &cli, buf, len ) == 0 );
TEST_ASSERT( mbedtls_ecjpake_derive_secret( &cli,
pms, sizeof( pms ), &pmslen, ecjpake_lgc, NULL ) == 0 );
TEST_ASSERT( mbedtls_ecjpake_write_round_two( &cli,
buf, sizeof( buf ), &len, ecjpake_lgc, NULL ) == 0 );
TEST_ASSERT( mbedtls_ecjpake_read_round_two( &srv, buf, len ) == 0 );
TEST_ASSERT( mbedtls_ecjpake_derive_secret( &srv,
buf, sizeof( buf ), &len, ecjpake_lgc, NULL ) == 0 );
TEST_ASSERT( len == pmslen );
TEST_ASSERT( memcmp( buf, pms, len ) == 0 );
if( verbose != 0 )
mbedtls_printf( "passed\n" );
if( verbose != 0 )
mbedtls_printf( " ECJPAKE test #2 (reference handshake): " );
/* Simulate generation of round one */
MBEDTLS_MPI_CHK( ecjpake_test_load( &cli,
ecjpake_test_x1, sizeof( ecjpake_test_x1 ),
ecjpake_test_x2, sizeof( ecjpake_test_x2 ) ) );
MBEDTLS_MPI_CHK( ecjpake_test_load( &srv,
ecjpake_test_x3, sizeof( ecjpake_test_x3 ),
ecjpake_test_x4, sizeof( ecjpake_test_x4 ) ) );
/* Read round one */
TEST_ASSERT( mbedtls_ecjpake_read_round_one( &srv,
ecjpake_test_cli_one,
sizeof( ecjpake_test_cli_one ) ) == 0 );
TEST_ASSERT( mbedtls_ecjpake_read_round_one( &cli,
ecjpake_test_srv_one,
sizeof( ecjpake_test_srv_one ) ) == 0 );
/* Skip generation of round two, read round two */
TEST_ASSERT( mbedtls_ecjpake_read_round_two( &cli,
ecjpake_test_srv_two,
sizeof( ecjpake_test_srv_two ) ) == 0 );
TEST_ASSERT( mbedtls_ecjpake_read_round_two( &srv,
ecjpake_test_cli_two,
sizeof( ecjpake_test_cli_two ) ) == 0 );
/* Server derives PMS */
TEST_ASSERT( mbedtls_ecjpake_derive_secret( &srv,
buf, sizeof( buf ), &len, ecjpake_lgc, NULL ) == 0 );
TEST_ASSERT( len == sizeof( ecjpake_test_pms ) );
TEST_ASSERT( memcmp( buf, ecjpake_test_pms, len ) == 0 );
memset( buf, 0, len ); /* Avoid interferences with next step */
/* Client derives PMS */
TEST_ASSERT( mbedtls_ecjpake_derive_secret( &cli,
buf, sizeof( buf ), &len, ecjpake_lgc, NULL ) == 0 );
TEST_ASSERT( len == sizeof( ecjpake_test_pms ) );
TEST_ASSERT( memcmp( buf, ecjpake_test_pms, len ) == 0 );
if( verbose != 0 )
mbedtls_printf( "passed\n" );
cleanup:
mbedtls_ecjpake_free( &cli );
mbedtls_ecjpake_free( &srv );
if( ret != 0 )
{
if( verbose != 0 )
mbedtls_printf( "failed\n" );
ret = 1;
}
if( verbose != 0 )
mbedtls_printf( "\n" );
return( ret );
}
#undef TEST_ASSERT
#endif /* MBEDTLS_ECP_DP_SECP256R1_ENABLED && MBEDTLS_SHA256_C */
#endif /* MBEDTLS_SELF_TEST */
#endif /* MBEDTLS_ECJPAKE_C */
|
462711.c | /*
Microsoft Azure IoT Client Libraries
Copyright (c) Microsoft Corporation
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the Software), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
*/
#include "testrunnerswitcher.h"
/* Test-runner entry point: execute the mqttapi_paho unit-test suite and
 * return the number of failed tests (0 means all passed).
 * NOTE(review): the count is narrowed from size_t to int and further to
 * the process exit status; large failure counts may be truncated. */
int main(void)
{
size_t failedTestCount = 0;
RUN_TEST_SUITE(mqttapi_paho_unittests, failedTestCount);
return failedTestCount;
}
|
445057.c | #include <sys/types.h>
#include <regex.h>
#include <stdlib.h>
/* Minimal regfree(): releases the compiled pattern buffer.
 * NOTE(review): POSIX specifies that regfree() frees the storage
 * regcomp() allocated *inside* preg, not preg itself; this
 * implementation assumes the matching regcomp() heap-allocates the
 * whole regex_t — verify against that implementation. */
void regfree(regex_t * preg)
{
free(preg);
}
/*
POSIX(2)
*/
|
605205.c | #include "private.h"
#include <Elementary.h>
#include <assert.h>
#include "config.h"
#include "termio.h"
#include "media.h"
#include "options.h"
#include "options_background.h"
#include "extns.h"
#include "media.h"
#include "main.h"
#include <sys/stat.h>
typedef struct _Background_Ctx {
Config *config;
Evas_Object *frame;
Evas_Object *flip;
Evas_Object *bg_grid;
Evas_Object *term;
Evas_Object *entry;
Evas_Object *bubble;
Evas_Object *op_trans;
Evas_Object *op_opacity;
Evas_Object *op_shine_slider;
Eina_Stringshare *system_path;
Eina_Stringshare *user_path;
Eina_List *background_list;
Ecore_Timer *bubble_disappear;
} Background_Ctx;
typedef struct _Background_Item
{
const char *path;
Eina_Bool selected;
Elm_Object_Item *item;
} Background_Item;
typedef struct _Insert_Gen_Grid_Item_Notify
{
Elm_Gengrid_Item_Class *class;
Background_Item *item;
} Insert_Gen_Grid_Item_Notify;
/* "delay,changed" callback for the shine slider: apply the new shine
 * value to every terminal in this window.
 * NOTE(review): config->shine is compared but never assigned here, and
 * config_save() is not called (unlike the sibling callbacks) —
 * presumably term_apply_shine() updates and persists it; confirm. */
static void
_cb_op_shine_sel(void *data,
Evas_Object *obj,
void *_event EINA_UNUSED)
{
Background_Ctx *ctx = data;
Config *config = ctx->config;
Term *term = termio_term_get(ctx->term);
Win *wn = term_win_get(term);
int shine = elm_slider_value_get(obj);
Eina_List *l, *wn_list;
/* Nothing to do when the value did not actually change. */
if (config->shine == shine)
return;
wn_list = win_terms_get(wn);
EINA_LIST_FOREACH(wn_list, l, term)
{
term_apply_shine(term, shine);
}
}
/* Translucency checkbox toggled: record the new state, grey the opacity
 * slider in/out accordingly, then persist and apply the change. */
static void
_cb_op_video_trans_chg(void *data,
                       Evas_Object *obj,
                       void *_event EINA_UNUSED)
{
   Background_Ctx *bctx = data;
   Config *cfg = bctx->config;
   Eina_Bool enabled = elm_check_state_get(obj);

   cfg->translucent = enabled;
   elm_object_disabled_set(bctx->op_opacity, !enabled);
   config_save(cfg);
   main_trans_update();
}
/* Opacity slider moved: always remember the value, but only persist and
 * re-apply it when translucency is currently enabled. */
static void
_cb_op_video_opacity_chg(void *data,
                         Evas_Object *obj,
                         void *_event EINA_UNUSED)
{
   Background_Ctx *bctx = data;
   Config *cfg = bctx->config;

   cfg->opacity = elm_slider_value_get(obj);
   if (!cfg->translucent)
     return;

   config_save(cfg);
   main_trans_update();
}
/* "done" callback of the directory selector: on success copy the chosen
 * path into the entry; in either case flip back to the front (grid)
 * page. Both original branches performed the same flip, so it is
 * factored out here. */
static void
_cb_fileselector(void *data,
                 Evas_Object *obj,
                 void *event)
{
   Background_Ctx *ctx = data;

   if (event)
     elm_object_text_set(ctx->entry, elm_fileselector_path_get(obj));

   elm_flip_go_to(ctx->flip, EINA_TRUE, ELM_FLIP_PAGE_LEFT);
}
/* Timer callback: dismiss the notification bubble and clear both the
 * bubble and timer handles. One-shot (ECORE_CALLBACK_CANCEL). */
static Eina_Bool
_cb_timer_bubble_disappear(void *data)
{
Background_Ctx *ctx = data;
evas_object_del(ctx->bubble);
ctx->bubble = NULL;
ctx->bubble_disappear = NULL;
return ECORE_CALLBACK_CANCEL;
}
/* Show a transient 200x50 notification bubble with |text| in the
 * bottom-right corner of the background grid; it auto-dismisses after
 * 2 seconds. Any bubble already on screen is removed first. */
static void
_bubble_show(Background_Ctx *ctx, char *text)
{
Evas_Object *opbox = elm_object_top_widget_get(ctx->bg_grid);
Evas_Object *o;
int x = 0, y = 0, w , h;
evas_object_geometry_get(ctx->bg_grid, &x, &y, &w ,&h);
/* Replace a still-visible bubble: cancel its timer and delete it now. */
if (ctx->bubble_disappear)
{
ecore_timer_del(ctx->bubble_disappear);
_cb_timer_bubble_disappear(ctx);
}
ctx->bubble = elm_bubble_add(opbox);
elm_bubble_pos_set(ctx->bubble, ELM_BUBBLE_POS_BOTTOM_RIGHT);
evas_object_resize(ctx->bubble, 200, 50);
evas_object_move(ctx->bubble, (x + w - 200), (y + h - 50));
evas_object_show(ctx->bubble);
o = elm_label_add(ctx->bubble);
elm_object_text_set(o, text);
elm_object_content_set(ctx->bubble, o);
ctx->bubble_disappear = ecore_timer_add(2.0,
_cb_timer_bubble_disappear, ctx);
}
/* Gengrid text_get: return a heap-allocated label for the item — the
 * file name of its path, or the localized "None" for the no-background
 * entry. Returns NULL when the path has no file component. */
static char *
_grid_text_get(void *data,
Evas_Object *_obj EINA_UNUSED,
const char *_part EINA_UNUSED)
{
   Background_Item *item = data;
   const char *file;

   if (!item->path)
     return strdup(_("None"));

   file = ecore_file_file_get(item->path);
   return file ? strdup(file) : NULL;
}
/* Gengrid content_get: build the thumbnail for one background item.
 * Edje files get a full MEDIA_BG media object, other paths a thumbnail;
 * the "no background" item shows the current theme's background group. */
static Evas_Object *
_grid_content_get(void *data, Evas_Object *obj, const char *part)
{
Background_Ctx *ctx = evas_object_data_get(obj, "ctx");
assert(ctx);
Background_Item *item = data;
Config *config = ctx->config;
/* Only the icon swallow part is populated. */
if (strcmp(part, "elm.swallow.icon"))
return NULL;
if (item->path)
{
int i;
Media_Type type;
for (i = 0; extn_edj[i]; i++)
{
if (eina_str_has_extension(item->path, extn_edj[i]))
return media_add(obj, item->path, config,
MEDIA_BG, MEDIA_TYPE_EDJE);
}
type = media_src_type_get(item->path);
return media_add(obj, item->path, config, MEDIA_THUMB, type);
}
else
{
Evas_Object *o, *oe;
char path[PATH_MAX];
if (!config->theme)
return NULL;
snprintf(path, PATH_MAX, "%s/themes/%s", elm_app_data_dir_get(),
config->theme);
o = elm_layout_add(obj);
oe = elm_layout_edje_get(o);
if (!edje_object_file_set(oe, path, "terminology/background"))
{
evas_object_del(o);
return NULL;
}
evas_object_show(o);
return o;
}
return NULL; /* unreached */
}
/* Grid item selected: set (or clear, for the "None" item) the configured
 * background, save the config and refresh all terminals' media. */
static void
_item_selected(void *data,
Evas_Object *obj,
void *_event EINA_UNUSED)
{
Background_Ctx *ctx = evas_object_data_get(obj, "ctx");
Background_Item *item = data;
Config *config = ctx->config;
if (!config) return;
if (!item->path)
{
// no background
eina_stringshare_del(config->background);
config->background = NULL;
config_save(config);
main_media_update(config);
}
else if (eina_stringshare_replace(&(config->background), item->path))
{
config_save(config);
main_media_update(config);
}
}
/* Append one background item to the gengrid and pre-select it when it
 * matches the configured background (or is the "None" item while no
 * background is configured). Always frees |msg_data| — the caller
 * transfers ownership of the notify struct (but not of its members). */
static void
_insert_gengrid_item(Background_Ctx *ctx,
Insert_Gen_Grid_Item_Notify *msg_data)
{
Insert_Gen_Grid_Item_Notify *insert_msg = msg_data;
Config *config = ctx->config;
if (insert_msg && insert_msg->item && insert_msg->class && config)
{
Background_Item *item = insert_msg->item;
Elm_Gengrid_Item_Class *item_class = insert_msg->class;
item->item = elm_gengrid_item_append(ctx->bg_grid, item_class, item,
_item_selected, item);
if ((!item->path) && (!config->background))
{
elm_gengrid_item_selected_set(item->item, EINA_TRUE);
elm_gengrid_item_bring_in(item->item,
ELM_GENGRID_ITEM_SCROLLTO_MIDDLE);
}
else if ((item->path) && (config->background))
{
if (strcmp(item->path, config->background) == 0)
{
elm_gengrid_item_selected_set(item->item, EINA_TRUE);
elm_gengrid_item_bring_in(item->item,
ELM_GENGRID_ITEM_SCROLLTO_MIDDLE);
}
}
}
free(msg_data);
}
/* Scan |root_path| for background files with a recognized extension
 * (image, scalable, edje, movie), append each as a Background_Item to
 * |list| and insert it into the gengrid. Returns the (possibly grown)
 * list. NOTE(review): despite the name, this does NOT recurse into
 * subdirectories — only one directory level is read. */
static Eina_List*
_rec_read_directorys(Background_Ctx *ctx, Eina_List *list,
const char *root_path, Elm_Gengrid_Item_Class *class)
{
Eina_List *childs = ecore_file_ls(root_path);
char *file_name, path[PATH_MAX];
int i, j;
Background_Item *item;
const char **extns[5] =
{ extn_img, extn_scale, extn_edj, extn_mov, NULL };
const char **ex;
Insert_Gen_Grid_Item_Notify *notify;
if (!childs) return list;
EINA_LIST_FREE(childs, file_name)
{
snprintf(path, PATH_MAX, "%s/%s", root_path, file_name);
/* Skip directories and hidden files. */
if ((!ecore_file_is_dir(path)) && (file_name[0] != '.'))
{
//file is found, search for correct file endings !
for (j = 0; extns[j]; j++)
{
ex = extns[j];
for (i = 0; ex[i]; i++)
{
if (eina_str_has_extension(file_name, ex[i]))
{
//File is found and valid
item = calloc(1, sizeof(Background_Item));
if (item)
{
item->path = eina_stringshare_add(path);
list = eina_list_append(list, item);
notify = calloc(1,
sizeof(Insert_Gen_Grid_Item_Notify));
if (notify)
{
//insert item to gengrid
notify->class = class;
notify->item = item;
//ecore_thread_feedback(th, notify);
_insert_gengrid_item(ctx, notify);
}
}
break;
}
}
}
}
free(file_name);
}
return list;
}
/* Rebuild the background grid from directory |data|: clear the grid and
 * the cached item list, re-add the "None" entry, then populate from the
 * directory's media files. */
static void
_refresh_directory(Background_Ctx *ctx, const char* data)
{
Background_Item *item;
Elm_Gengrid_Item_Class *item_class;
elm_gengrid_clear(ctx->bg_grid);
if (ctx->background_list)
{
EINA_LIST_FREE(ctx->background_list, item)
{
if (item->path)
eina_stringshare_del(item->path);
free(item);
}
ctx->background_list = NULL;
}
item_class = elm_gengrid_item_class_new();
item_class->func.text_get = _grid_text_get;
item_class->func.content_get = _grid_content_get;
/* The "None" entry has a NULL path. */
item = calloc(1, sizeof(Background_Item));
if (!item)
{
elm_gengrid_item_class_free(item_class);
return;
}
ctx->background_list = eina_list_append(ctx->background_list, item);
//Insert None Item
Insert_Gen_Grid_Item_Notify *notify = calloc(1,
sizeof(Insert_Gen_Grid_Item_Notify));
if (notify)
{
notify->class = item_class;
notify->item = item;
_insert_gengrid_item(ctx, notify);
}
ctx->background_list = _rec_read_directorys(ctx, ctx->background_list, data,
item_class);
elm_gengrid_item_class_free(item_class);
}
/* Repopulate the background grid from |path|, but only when that
 * directory actually exists. */
static void
_gengrid_refresh_samples(Background_Ctx *ctx, const char *path)
{
   if (ecore_file_exists(path))
     _refresh_directory(ctx, path);
}
/* Entry "changed" callback: treat the entry's text as a directory and
 * refresh the grid from it. */
static void
_cb_entry_changed(void *data,
Evas_Object *parent,
void *_event EINA_UNUSED)
{
Background_Ctx *ctx = data;
const char *path = elm_object_text_get(parent);
_gengrid_refresh_samples(ctx, path);
}
/* Common hoversel handler: with a |path| (System/User), put it in the
 * entry and show the grid page; with NULL ("Other"), flip to the
 * fileselector page seeded with the entry's current text. */
static void
_cb_hoversel_select(Background_Ctx *ctx, const Eina_Stringshare *path)
{
if (path)
{
elm_flip_go_to(ctx->flip, EINA_TRUE, ELM_FLIP_PAGE_LEFT);
elm_object_text_set(ctx->entry, path);
}
else
{
Evas_Object *o = elm_object_part_content_get(ctx->flip, "back");
elm_fileselector_path_set(o, elm_object_text_get(ctx->entry));
elm_flip_go_to(ctx->flip, EINA_FALSE, ELM_FLIP_PAGE_RIGHT);
}
}
/* Hoversel item callbacks: thin wrappers selecting the system directory,
 * the user directory, or the free-form fileselector respectively. */
static void
_cb_hoversel_select_system(void *data,
Evas_Object *obj EINA_UNUSED,
void *event_info EINA_UNUSED)
{
Background_Ctx *ctx = data;
_cb_hoversel_select(ctx, ctx->system_path);
}
static void
_cb_hoversel_select_user(void *data,
Evas_Object *obj EINA_UNUSED,
void *event_info EINA_UNUSED)
{
Background_Ctx *ctx = data;
_cb_hoversel_select(ctx, ctx->user_path);
}
static void
_cb_hoversel_select_none(void *data,
Evas_Object *obj EINA_UNUSED,
void *event_info EINA_UNUSED)
{
Background_Ctx *ctx = data;
_cb_hoversel_select(ctx, NULL);
}
/* Cache the system-wide backgrounds directory
 * ("<app data dir>/backgrounds/") in ctx->system_path as a
 * stringshare, creating or replacing the cached value as needed. */
static void
_system_background_dir_init(Background_Ctx *ctx)
{
   char buf[PATH_MAX];

   snprintf(buf, sizeof(buf), "%s/backgrounds/", elm_app_data_dir_get());
   if (!ctx->system_path)
     ctx->system_path = eina_stringshare_add(buf);
   else
     eina_stringshare_replace(&ctx->system_path, buf);
}
/* Cache (and create on disk if missing) the per-user background
 * directory "$HOME/.config/terminology/background/". Returns the cached
 * stringshare, or NULL when $HOME is unset. */
static const char*
_user_background_dir_init(Background_Ctx *ctx)
{
char path[PATH_MAX], *user;
user = getenv("HOME");
if(!user)
return NULL;
snprintf(path, PATH_MAX, "%s/.config/terminology/background/", user);
if (!ecore_file_exists(path))
ecore_file_mkpath(path);
if (!ctx->user_path)
ctx->user_path = eina_stringshare_add(path);
else
eina_stringshare_replace(&ctx->user_path, path);
return ctx->user_path;
}
/* Copy |background| into the user background directory. Returns a new
 * stringshare with the destination path (caller must release it with
 * eina_stringshare_del), or NULL on any failure. */
static const char*
_import_background(Background_Ctx *ctx, const char* background)
{
char path[PATH_MAX];
const char *filename = ecore_file_file_get(background);
if (!filename)
return NULL;
if (!_user_background_dir_init(ctx))
return NULL;
snprintf(path, PATH_MAX, "%s/%s", ctx->user_path, filename);
if (!ecore_file_cp(background, path))
return NULL;
return eina_stringshare_add(path);
}
/* Double-click on a grid item: import the currently configured
 * background into the user directory and point the config at the copy.
 *
 * Fixes over the previous version: ecore_file_dir_get() is no longer
 * called before the NULL check on config->background (it used to be fed
 * a possibly-NULL path), and config_background_dir is freed on every
 * exit path instead of leaking on the early returns. */
static void
_cb_grid_doubleclick(void *data,
                     Evas_Object *_obj EINA_UNUSED,
                     void *_event EINA_UNUSED)
{
   Background_Ctx *ctx = data;
   Config *config = ctx->config;
   char *config_background_dir;
   const char *newfile;

   /* Nothing selected yet: nothing to import. */
   if (!config->background)
     return;

   /* Make sure the per-user background directory exists. */
   if (!ctx->user_path)
     {
        if (!_user_background_dir_init(ctx))
          return;
     }

   config_background_dir = ecore_file_dir_get(config->background);
   if (!config_background_dir)
     return;

   /* Already inside the user directory: copying would be a no-op. */
   if (strncmp(config_background_dir, ctx->user_path,
               strlen(config_background_dir)) == 0)
     {
        _bubble_show(ctx, _("Source file is target file"));
        free(config_background_dir);
        return;
     }

   newfile = _import_background(ctx, config->background);
   if (newfile)
     {
        /* Switch the config to the imported copy and refresh the UI. */
        eina_stringshare_replace(&(config->background), newfile);
        config_save(config);
        main_media_update(config);
        eina_stringshare_del(newfile);
        _bubble_show(ctx, _("Picture imported"));
        elm_object_text_set(ctx->entry, config_background_dir);
     }
   else
     {
        _bubble_show(ctx, _("Failed"));
     }
   free(config_background_dir);
}
/* EVAS_CALLBACK_DEL handler on the frame: tear down everything owned by
 * the Background_Ctx (item list, cached paths) and free the ctx. */
static void
_parent_del_cb(void *data,
Evas *_e EINA_UNUSED,
Evas_Object *_obj EINA_UNUSED,
void *_event_info EINA_UNUSED)
{
Background_Ctx *ctx = data;
Background_Item *item;
EINA_LIST_FREE(ctx->background_list, item)
{
if (item->path)
eina_stringshare_del(item->path);
free(item);
}
ctx->background_list = NULL;
if (ctx->user_path)
{
eina_stringshare_del(ctx->user_path);
ctx->user_path = NULL;
}
if (ctx->system_path)
{
eina_stringshare_del(ctx->system_path);
ctx->system_path = NULL;
}
free(ctx);
}
/* Build the "Background" options panel inside |opbox| for terminal
 * |term|: shine slider, translucency toggle + opacity slider, a path
 * hoversel (System/User/Other), and a flip holding the background grid
 * on the front and a directory fileselector on the back. The allocated
 * Background_Ctx is freed by _parent_del_cb when the frame is deleted. */
void
options_background(Evas_Object *opbox, Evas_Object *term)
{
Evas_Object *o, *bx, *bx_front;
Config *config = termio_config_get(term);
char path[PATH_MAX];
Background_Ctx *ctx;
ctx = calloc(1, sizeof(*ctx));
assert(ctx);
ctx->config = config;
ctx->term = term;
ctx->frame = o = elm_frame_add(opbox);
evas_object_size_hint_weight_set(o, EVAS_HINT_EXPAND, EVAS_HINT_EXPAND);
evas_object_size_hint_align_set(o, EVAS_HINT_FILL, EVAS_HINT_FILL);
elm_object_text_set(o, _("Background"));
evas_object_show(o);
elm_box_pack_end(opbox, o);
evas_object_event_callback_add(ctx->frame, EVAS_CALLBACK_DEL,
_parent_del_cb, ctx);
bx = o = elm_box_add(ctx->frame);
evas_object_size_hint_weight_set(o, EVAS_HINT_EXPAND, EVAS_HINT_EXPAND);
evas_object_size_hint_align_set(o, EVAS_HINT_FILL, EVAS_HINT_FILL);
elm_object_content_set(ctx->frame, bx);
evas_object_show(o);
/* Shine label + slider (0..255). */
o = elm_label_add(opbox);
evas_object_size_hint_weight_set(o, 0.0, 0.0);
evas_object_size_hint_align_set(o, 0.0, 0.5);
elm_object_text_set(o, _("Shine:"));
elm_box_pack_end(bx, o);
evas_object_show(o);
ctx->op_shine_slider = o = elm_slider_add(opbox);
evas_object_size_hint_weight_set(o, EVAS_HINT_EXPAND, 0.0);
evas_object_size_hint_align_set(o, EVAS_HINT_FILL, 0.5);
elm_slider_span_size_set(o, 40);
elm_slider_unit_format_set(o, "%1.0f");
elm_slider_indicator_format_set(o, "%1.0f");
elm_slider_min_max_set(o, 0, 255);
elm_slider_step_set(o, 1);
elm_slider_value_set(o, config->shine);
elm_box_pack_end(bx, o);
evas_object_show(o);
evas_object_smart_callback_add(o, "delay,changed",
_cb_op_shine_sel, ctx);
/* Translucency toggle; the opacity slider is disabled while off. */
ctx->op_trans = o = elm_check_add(opbox);
evas_object_size_hint_weight_set(o, EVAS_HINT_EXPAND, 0.0);
evas_object_size_hint_align_set(o, EVAS_HINT_FILL, 0.5);
elm_object_text_set(o, _("Translucent"));
elm_check_state_set(o, config->translucent);
elm_box_pack_end(bx, o);
evas_object_show(o);
evas_object_smart_callback_add(o, "changed",
_cb_op_video_trans_chg, ctx);
ctx->op_opacity = o = elm_slider_add(opbox);
evas_object_size_hint_weight_set(o, EVAS_HINT_EXPAND, 0.0);
evas_object_size_hint_align_set(o, EVAS_HINT_FILL, 0.5);
elm_slider_span_size_set(o, 40);
elm_slider_unit_format_set(o, _("%1.0f%%"));
elm_slider_indicator_format_set(o, _("%1.0f%%"));
elm_slider_min_max_set(o, 0, 100);
elm_slider_value_set(o, config->opacity);
elm_object_disabled_set(o, !config->translucent);
elm_box_pack_end(bx, o);
evas_object_show(o);
evas_object_smart_callback_add(o, "changed",
_cb_op_video_opacity_chg, ctx);
o = elm_separator_add(opbox);
evas_object_size_hint_weight_set(o, EVAS_HINT_EXPAND, 0.0);
evas_object_size_hint_align_set(o, EVAS_HINT_FILL, 0.5);
elm_separator_horizontal_set(o, EINA_TRUE);
elm_box_pack_end(bx, o);
evas_object_show(o);
/* Path source selector. */
o = elm_hoversel_add(opbox);
evas_object_size_hint_weight_set(o, 0.0, 0.0);
evas_object_size_hint_align_set(o, EVAS_HINT_FILL, 0.0);
elm_object_text_set(o, _("Select Path"));
elm_box_pack_end(bx, o);
evas_object_show(o);
/* NOTE(review): the result of this snprintf into |path| is never used —
 * _system_background_dir_init() builds the same path itself. Dead code. */
snprintf(path, PATH_MAX, "%s/backgrounds/", elm_app_data_dir_get());
_system_background_dir_init(ctx);
elm_hoversel_item_add(o, _("System"), NULL, ELM_ICON_NONE,
_cb_hoversel_select_system, ctx);
if (_user_background_dir_init(ctx))
elm_hoversel_item_add(o, _("User"), NULL, ELM_ICON_NONE,
_cb_hoversel_select_user, ctx);
//In the other case it has failed, so dont show the user item
elm_hoversel_item_add(o, _("Other"), NULL, ELM_ICON_NONE,
_cb_hoversel_select_none, ctx);
/* Flip: front = entry + grid + hint label, back = fileselector. */
ctx->flip = o = elm_flip_add(opbox);
evas_object_size_hint_weight_set(o, EVAS_HINT_EXPAND, EVAS_HINT_EXPAND);
evas_object_size_hint_align_set(o, EVAS_HINT_FILL, EVAS_HINT_FILL);
elm_box_pack_end(bx, o);
evas_object_show(o);
bx_front = o = elm_box_add(opbox);
evas_object_size_hint_weight_set(o, EVAS_HINT_EXPAND, 0.0);
evas_object_size_hint_align_set(o, EVAS_HINT_FILL, 0.0);
elm_box_horizontal_set(o, EINA_FALSE);
elm_object_part_content_set(ctx->flip, "front", o);
evas_object_show(o);
ctx->entry = o = elm_entry_add(opbox);
evas_object_size_hint_weight_set(o, EVAS_HINT_EXPAND, 0.0);
evas_object_size_hint_align_set(o, EVAS_HINT_FILL, 0.0);
elm_entry_single_line_set(o, EINA_TRUE);
elm_entry_scrollable_set(o, EINA_TRUE);
elm_scroller_policy_set(o, ELM_SCROLLER_POLICY_OFF,
ELM_SCROLLER_POLICY_OFF);
evas_object_smart_callback_add(ctx->entry, "changed",
_cb_entry_changed, ctx);
elm_box_pack_end(bx_front, o);
evas_object_show(o);
ctx->bg_grid = o = elm_gengrid_add(opbox);
evas_object_data_set(ctx->bg_grid, "ctx", ctx);
evas_object_size_hint_weight_set(o, EVAS_HINT_EXPAND, EVAS_HINT_EXPAND);
evas_object_size_hint_align_set(o, EVAS_HINT_FILL, EVAS_HINT_FILL);
evas_object_smart_callback_add(o, "clicked,double",
_cb_grid_doubleclick, ctx);
elm_gengrid_item_size_set(o, elm_config_scale_get() * 100,
elm_config_scale_get() * 80);
elm_box_pack_end(bx_front, o);
evas_object_show(o);
o = elm_label_add(opbox);
evas_object_size_hint_weight_set(o, EVAS_HINT_EXPAND, 0.0);
evas_object_size_hint_align_set(o, EVAS_HINT_FILL, 0.0);
elm_object_text_set(o, _("Click on a picture to use it as background"));
elm_label_line_wrap_set(o, ELM_WRAP_WORD);
elm_box_pack_end(bx_front, o);
evas_object_show(o);
o = elm_fileselector_add(opbox);
evas_object_size_hint_weight_set(o, EVAS_HINT_EXPAND, EVAS_HINT_EXPAND);
evas_object_size_hint_align_set(o, EVAS_HINT_FILL, EVAS_HINT_FILL);
elm_object_part_content_set(ctx->flip, "back", o);
elm_fileselector_folder_only_set(o, EINA_TRUE);
evas_object_smart_callback_add(o, "done", _cb_fileselector, ctx);
evas_object_show(o);
/* Seed the entry (and thus the grid) with the directory of the current
 * background, falling back to the system directory. */
if (config->background)
{
char *config_background_dir;
config_background_dir = ecore_file_dir_get(config->background);
elm_object_text_set(ctx->entry, config_background_dir);
free(config_background_dir);
}
else
{
elm_object_text_set(ctx->entry, ctx->system_path);
}
}
|
622543.c |
/* dummy source file for compatibility purposes */
#if defined(HAVE_CDEFS_H)
#include <sys/cdefs.h>
#endif
/* Provide a fallback __warn_references when <sys/cdefs.h> doesn't:
 * with GCC (and long-warning support) it plants a .gnu* section that
 * the linker reports as a warning; otherwise it expands to nothing. */
#ifndef __warn_references
#if defined(__GNUC__) && defined (HAS_GNU_WARNING_LONG)
#define __warn_references(sym,msg) \
__asm__(".section .gnu" #sym ",\n\t.ascii \"" msg "\"\n\t.text");
#else
#define __warn_references(sym,msg) /* nothing */
#endif
#endif
#include "json_object.h"
/* Emit a link-time nudge for users of the legacy libjson name. */
__warn_references(json_object_get, "Warning: please link against libjson-c instead of libjson")
/* __asm__(".section .gnu.warning." __STRING(sym) \
" ; .ascii \"" msg "\" ; .text") */
|
838294.c | /*
* timb_dma.c timberdale FPGA DMA driver
* Copyright (c) 2010 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Supports:
* Timberdale FPGA DMA engine
*/
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/timb_dma.h>
#include "dmaengine.h"
#define DRIVER_NAME "timb-dma"
/* Global DMA registers */
#define TIMBDMA_ACR 0x34
#define TIMBDMA_32BIT_ADDR 0x01
#define TIMBDMA_ISR 0x080000
#define TIMBDMA_IPR 0x080004
#define TIMBDMA_IER 0x080008
/* Channel specific registers */
/* RX instances base addresses are 0x00, 0x40, 0x80 ...
* TX instances base addresses are 0x18, 0x58, 0x98 ...
*/
#define TIMBDMA_INSTANCE_OFFSET 0x40
#define TIMBDMA_INSTANCE_TX_OFFSET 0x18
/* RX registers, relative the instance base */
#define TIMBDMA_OFFS_RX_DHAR 0x00
#define TIMBDMA_OFFS_RX_DLAR 0x04
#define TIMBDMA_OFFS_RX_LR 0x0C
#define TIMBDMA_OFFS_RX_BLR 0x10
#define TIMBDMA_OFFS_RX_ER 0x14
#define TIMBDMA_RX_EN 0x01
/* bytes per Row, video specific register
* which is placed after the TX registers...
*/
#define TIMBDMA_OFFS_RX_BPRR 0x30
/* TX registers, relative the instance base */
#define TIMBDMA_OFFS_TX_DHAR 0x00
#define TIMBDMA_OFFS_TX_DLAR 0x04
#define TIMBDMA_OFFS_TX_BLR 0x0C
#define TIMBDMA_OFFS_TX_LR 0x14
#define TIMB_DMA_DESC_SIZE 8
struct timb_dma_desc {
struct list_head desc_node;
struct dma_async_tx_descriptor txd;
u8 *desc_list;
unsigned int desc_list_len;
bool interrupt;
};
struct timb_dma_chan {
struct dma_chan chan;
void __iomem *membase;
spinlock_t lock; /* Used to protect data structures,
especially the lists and descriptors,
from races between the tasklet and calls
from above */
bool ongoing;
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
unsigned int bytes_per_line;
enum dma_transfer_direction direction;
unsigned int descs; /* Descriptors to allocate */
unsigned int desc_elems; /* number of elems per descriptor */
};
struct timb_dma {
struct dma_device dma;
void __iomem *membase;
struct tasklet_struct tasklet;
struct timb_dma_chan channels[0];
};
/* Device of the DMA channel itself (for dev_dbg/dev_err). */
static struct device *chan2dev(struct dma_chan *chan)
{
return &chan->dev->device;
}
/* Device used for DMA mapping: two parents up from the channel device. */
static struct device *chan2dmadev(struct dma_chan *chan)
{
return chan2dev(chan)->parent->parent;
}
/* Recover the owning timb_dma from a channel pointer: channels[] is the
 * trailing array of struct timb_dma, so step back over the preceding
 * channels and the header. Relies on the struct timb_dma layout. */
static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
int id = td_chan->chan.chan_id;
return (struct timb_dma *)((u8 *)td_chan -
id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}
/* Must be called with the spinlock held.
 * Set this channel's bit in the global interrupt-enable register. */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
int id = td_chan->chan.chan_id;
struct timb_dma *td = tdchantotd(td_chan);
u32 ier;
/* enable interrupt for this channel */
ier = ioread32(td->membase + TIMBDMA_IER);
ier |= 1 << id;
dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
ier);
iowrite32(ier, td->membase + TIMBDMA_IER);
}
/* Should be called with the spinlock held.
 * Test this channel's bit in the interrupt-status register and, when
 * set, acknowledge it by writing it back. Returns true if the channel
 * had completed. NOTE(review): the base-pointer arithmetic here
 * duplicates tdchantotd() inline. */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
int id = td_chan->chan.chan_id;
struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan -
id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
u32 isr;
bool done = false;
dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);
isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
if (isr) {
iowrite32(isr, td->membase + TIMBDMA_ISR);
done = true;
}
return done;
}
/* Encode one scatterlist entry into an 8-byte hardware descriptor at
 * |dma_desc|. Layout written below: [0] flags (0x21 = transfer+valid,
 * OR 0x02 when this is the last element), [1] zero, [2..3] length
 * (little-endian), [4..7] bus address (little-endian).
 * Returns 0, or -EINVAL when the element is too large (> USHRT_MAX) or
 * its length is not 32-bit-word aligned. */
static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
struct scatterlist *sg, bool last)
{
if (sg_dma_len(sg) > USHRT_MAX) {
dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
return -EINVAL;
}
/* length must be word aligned */
if (sg_dma_len(sg) % sizeof(u32)) {
dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
sg_dma_len(sg));
return -EINVAL;
}
dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
dma_desc, (unsigned long long)sg_dma_address(sg));
dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;
dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;
dma_desc[1] = 0x00;
dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */
return 0;
}
/* Must be called with the spinlock held.
 * Kick off the first descriptor on the active list: program the
 * descriptor address (and for RX the bytes-per-line and enable bit),
 * mark the channel ongoing, and enable its IRQ if the descriptor wants
 * a completion interrupt. No-op (with an error log) if a transfer is
 * already running. */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
struct timb_dma_desc *td_desc;
if (td_chan->ongoing) {
dev_err(chan2dev(&td_chan->chan),
"Transfer already ongoing\n");
return;
}
td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
desc_node);
dev_dbg(chan2dev(&td_chan->chan),
"td_chan: %p, chan: %d, membase: %p\n",
td_chan, td_chan->chan.chan_id, td_chan->membase);
if (td_chan->direction == DMA_DEV_TO_MEM) {
/* descriptor address */
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
iowrite32(td_desc->txd.phys, td_chan->membase +
TIMBDMA_OFFS_RX_DLAR);
/* Bytes per line */
iowrite32(td_chan->bytes_per_line, td_chan->membase +
TIMBDMA_OFFS_RX_BPRR);
/* enable RX */
iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
} else {
/* address high */
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
iowrite32(td_desc->txd.phys, td_chan->membase +
TIMBDMA_OFFS_TX_DLAR);
}
td_chan->ongoing = true;
if (td_desc->interrupt)
__td_enable_chan_irq(td_chan);
}
/* Complete the descriptor at the head of the active list: stop the RX
 * engine, complete the cookie, move the descriptor to the free list,
 * unmap it and invoke the client's completion callback (callbacks may
 * not submit, so the lock need not be dropped). Safe no-op if the
 * active list is empty (descriptor was canceled). */
static void __td_finish(struct timb_dma_chan *td_chan)
{
dma_async_tx_callback callback;
void *param;
struct dma_async_tx_descriptor *txd;
struct timb_dma_desc *td_desc;
/* can happen if the descriptor is canceled */
if (list_empty(&td_chan->active_list))
return;
td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
desc_node);
txd = &td_desc->txd;
dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
txd->cookie);
/* make sure to stop the transfer */
if (td_chan->direction == DMA_DEV_TO_MEM)
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
/* Currently no support for stopping DMA transfers
else
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
*/
dma_cookie_complete(txd);
td_chan->ongoing = false;
callback = txd->callback;
param = txd->callback_param;
list_move(&td_desc->desc_node, &td_chan->free_list);
dma_descriptor_unmap(txd);
/*
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
*/
if (callback)
callback(param);
}
/* Build the interrupt-enable mask: one bit per channel that is both
 * ongoing and whose active descriptor requested an interrupt. */
static u32 __td_ier_mask(struct timb_dma *td)
{
int i;
u32 ret = 0;
for (i = 0; i < td->dma.chancnt; i++) {
struct timb_dma_chan *td_chan = td->channels + i;
if (td_chan->ongoing) {
struct timb_dma_desc *td_desc =
list_entry(td_chan->active_list.next,
struct timb_dma_desc, desc_node);
if (td_desc->interrupt)
ret |= 1 << i;
}
}
return ret;
}
/* Promote the first queued descriptor to the active list and start it.
 * Caller must guarantee the queue is non-empty and nothing is running
 * (enforced by the BUG_ONs). Spinlock must be held. */
static void __td_start_next(struct timb_dma_chan *td_chan)
{
struct timb_dma_desc *td_desc;
BUG_ON(list_empty(&td_chan->queue));
BUG_ON(td_chan->ongoing);
td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
desc_node);
dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
__func__, td_desc->txd.cookie);
list_move(&td_desc->desc_node, &td_chan->active_list);
__td_start_dma(td_chan);
}
/* tx_submit hook: assign a cookie and either start the descriptor
 * immediately (channel idle) or append it to the pending queue.
 * Returns the assigned cookie. */
static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
txd);
struct timb_dma_chan *td_chan = container_of(txd->chan,
struct timb_dma_chan, chan);
dma_cookie_t cookie;
spin_lock_bh(&td_chan->lock);
cookie = dma_cookie_assign(txd);
if (list_empty(&td_chan->active_list)) {
dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
txd->cookie);
list_add_tail(&td_desc->desc_node, &td_chan->active_list);
__td_start_dma(td_chan);
} else {
dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
txd->cookie);
list_add_tail(&td_desc->desc_node, &td_chan->queue);
}
spin_unlock_bh(&td_chan->lock);
return cookie;
}
/* Allocate one software descriptor plus its hardware descriptor list
 * (desc_elems * 8 bytes), initialize the async_tx descriptor and DMA-map
 * the list. Returns NULL on any failure, cleaning up partial
 * allocations via the goto chain. */
static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
struct dma_chan *chan = &td_chan->chan;
struct timb_dma_desc *td_desc;
int err;
td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
if (!td_desc) {
dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
goto out;
}
td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
if (!td_desc->desc_list) {
dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
goto err;
}
dma_async_tx_descriptor_init(&td_desc->txd, chan);
td_desc->txd.tx_submit = td_tx_submit;
td_desc->txd.flags = DMA_CTRL_ACK;
td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);
err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
if (err) {
dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
goto err;
}
return td_desc;
err:
kfree(td_desc->desc_list);
kfree(td_desc);
out:
return NULL;
}
/*
 * Undo td_alloc_init_desc(): unmap the hardware descriptor list and free
 * both the list and the software descriptor.
 */
static void td_free_desc(struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
	dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);
	kfree(td_desc->desc_list);
	kfree(td_desc);
}
/* Return a descriptor to the channel's free list (takes td_chan->lock). */
static void td_desc_put(struct timb_dma_chan *td_chan,
	struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);
	spin_lock_bh(&td_chan->lock);
	list_add(&td_desc->desc_node, &td_chan->free_list);
	spin_unlock_bh(&td_chan->lock);
}
/*
 * Take one client-ACKed descriptor off the channel's free list.
 * Returns NULL when no descriptor has been acknowledged yet.
 */
static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *desc, *tmp;
	struct timb_dma_desc *found = NULL;

	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(desc, tmp, &td_chan->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			found = desc;
			break;
		}
		dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
			desc);
	}
	spin_unlock_bh(&td_chan->lock);

	return found;
}
/*
 * dmaengine alloc_chan_resources hook: pre-allocate the channel's pool of
 * descriptors and reset its cookie counter.  Succeeds with a smaller pool
 * if only some allocations work; fails with -ENOMEM only when none do.
 */
static int td_alloc_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	int i;

	dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

	BUG_ON(!list_empty(&td_chan->free_list));

	for (i = 0; i < td_chan->descs; i++) {
		struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);

		if (!td_desc) {
			if (!i) {
				dev_err(chan2dev(chan),
					"Couldnt allocate any descriptors\n");
				return -ENOMEM;
			}
			/* got at least one descriptor: run with a short pool */
			break;
		}
		td_desc_put(td_chan, td_desc);
	}

	spin_lock_bh(&td_chan->lock);
	dma_cookie_init(chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}
/*
 * dmaengine free_chan_resources hook: release every descriptor in the
 * channel's free list.  Active and queued lists must already be empty
 * (the dmaengine core guarantees no transfers are outstanding here).
 */
static void td_free_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;
	LIST_HEAD(list);
	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	/* check that all descriptors are free */
	BUG_ON(!list_empty(&td_chan->active_list));
	BUG_ON(!list_empty(&td_chan->queue));
	/* detach the whole free list under the lock, then free outside it */
	spin_lock_bh(&td_chan->lock);
	list_splice_init(&td_chan->free_list, &list);
	spin_unlock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
			td_desc);
		td_free_desc(td_desc);
	}
}
/*
 * dmaengine tx_status hook: report transfer status purely from the cookie
 * bookkeeping kept by the dmaengine core.
 */
static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	enum dma_status status;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	status = dma_cookie_status(chan, cookie, txstate);

	dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, status);

	return status;
}
/*
 * dmaengine issue_pending hook: retire a finished transfer (if the hardware
 * reports completion) and start the next queued descriptor, all under the
 * channel lock.
 *
 * Fix: braced the nested ifs — the original had an unbraced
 * "if (...) /\* comment *\/ if (...)" sequence, a classic dangling-else /
 * misreading hazard (behavior unchanged).
 */
static void td_issue_pending(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	spin_lock_bh(&td_chan->lock);

	if (!list_empty(&td_chan->active_list)) {
		/* transfer ongoing: retire it if the hardware is done */
		if (__td_dma_done_ack(td_chan))
			__td_finish(td_chan);
	}

	if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
		__td_start_next(td_chan);

	spin_unlock_bh(&td_chan->lock);
}
/*
 * dmaengine prep_slave_sg hook: fill a hardware descriptor list from the
 * scatterlist and sync it to the device.  Returns the async tx descriptor,
 * or NULL on bad arguments / no free descriptor / descriptor overflow.
 *
 * Fix: the "No descriptor space" error path returned NULL without handing
 * the descriptor back via td_desc_put(), permanently leaking it from the
 * channel's free list.
 */
static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int desc_usage = 0;

	if (!sgl || !sg_len) {
		dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
		return NULL;
	}

	/* even channels are for RX, odd for TX */
	if (td_chan->direction != direction) {
		dev_err(chan2dev(chan),
			"Requesting channel in wrong direction\n");
		return NULL;
	}

	td_desc = td_desc_get(td_chan);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Not enough descriptors available\n");
		return NULL;
	}

	td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

	for_each_sg(sgl, sg, sg_len, i) {
		int err;
		if (desc_usage > td_desc->desc_list_len) {
			dev_err(chan2dev(chan), "No descriptor space\n");
			/* fix: return the descriptor instead of leaking it */
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
		err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
			i == (sg_len - 1));
		if (err) {
			dev_err(chan2dev(chan), "Failed to update desc: %d\n",
				err);
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
		desc_usage += TIMB_DMA_DESC_SIZE;
	}

	/* make the filled descriptor list visible to the device */
	dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_MEM_TO_DEV);

	return &td_desc->txd;
}
/*
 * dmaengine terminate_all hook: recycle every queued descriptor onto the
 * free list and retire the running transfer via __td_finish().
 * NOTE(review): the hardware does not support aborting a DMA in flight
 * (see the comment at the top of __td_finish), so the active transfer is
 * only torn down in software.
 */
static int td_terminate_all(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;
	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	/* first the easy part, put the queue into the free list */
	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
		desc_node)
		list_move(&td_desc->desc_node, &td_chan->free_list);
	/* now tear down the running */
	__td_finish(td_chan);
	spin_unlock_bh(&td_chan->lock);
	return 0;
}
/*
 * Bottom half: acknowledge pending channel interrupts, retire finished
 * descriptors, start queued work, and re-enable interrupts (the IRQ
 * handler masked them before scheduling us).
 */
static void td_tasklet(unsigned long data)
{
	struct timb_dma *td = (struct timb_dma *)data;
	u32 isr;
	u32 ipr;
	u32 ier;
	int i;
	isr = ioread32(td->membase + TIMBDMA_ISR);
	ipr = isr & __td_ier_mask(td);	/* only the channels we enabled */
	/* ack the interrupts */
	iowrite32(ipr, td->membase + TIMBDMA_ISR);
	for (i = 0; i < td->dma.chancnt; i++)
		if (ipr & (1 << i)) {
			struct timb_dma_chan *td_chan = td->channels + i;
			spin_lock(&td_chan->lock);
			__td_finish(td_chan);
			if (!list_empty(&td_chan->queue))
				__td_start_next(td_chan);
			spin_unlock(&td_chan->lock);
		}
	/* re-enable interrupts for channels that still expect one */
	ier = __td_ier_mask(td);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}
/*
 * Top-half interrupt handler: if this device raised the interrupt, mask
 * further interrupts and defer the real work to td_tasklet().
 */
static irqreturn_t td_irq(int irq, void *devid)
{
	struct timb_dma *td = devid;
	u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

	if (!ipr)
		return IRQ_NONE;

	/* disable interrupts, will be re-enabled in tasklet */
	iowrite32(0, td->membase + TIMBDMA_IER);
	tasklet_schedule(&td->tasklet);

	return IRQ_HANDLED;
}
/*
 * Platform probe: map the controller registers, install the interrupt
 * handler, describe the dmaengine capabilities, initialize every channel
 * from the platform data, and register with the dmaengine core.
 * Error paths unwind in reverse order via the goto chain at the bottom.
 */
static int td_probe(struct platform_device *pdev)
{
	struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct timb_dma *td;
	struct resource *iomem;
	int irq;
	int err;
	int i;
	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		return -EINVAL;
	}
	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -EINVAL;
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	if (!request_mem_region(iomem->start, resource_size(iomem),
		DRIVER_NAME))
		return -EBUSY;
	/* one allocation covers the controller plus its channel array */
	td = kzalloc(sizeof(struct timb_dma) +
		sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
	if (!td) {
		err = -ENOMEM;
		goto err_release_region;
	}
	dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);
	td->membase = ioremap(iomem->start, resource_size(iomem));
	if (!td->membase) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto err_free_mem;
	}
	/* 32bit addressing */
	iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);
	/* disable and clear any interrupts */
	iowrite32(0x0, td->membase + TIMBDMA_IER);
	iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);
	tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);
	err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_tasklet_kill;
	}
	/* dmaengine operations provided by this driver */
	td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
	td->dma.device_free_chan_resources = td_free_chan_resources;
	td->dma.device_tx_status = td_tx_status;
	td->dma.device_issue_pending = td_issue_pending;
	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
	td->dma.device_prep_slave_sg = td_prep_slave_sg;
	td->dma.device_terminate_all = td_terminate_all;
	td->dma.dev = &pdev->dev;
	INIT_LIST_HEAD(&td->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct timb_dma_chan *td_chan = &td->channels[i];
		struct timb_dma_platform_data_channel *pchan =
			pdata->channels + i;
		/* even channels are RX, odd are TX */
		if ((i % 2) == pchan->rx) {
			dev_err(&pdev->dev, "Wrong channel configuration\n");
			err = -EINVAL;
			goto err_free_irq;
		}
		td_chan->chan.device = &td->dma;
		dma_cookie_init(&td_chan->chan);
		spin_lock_init(&td_chan->lock);
		INIT_LIST_HEAD(&td_chan->active_list);
		INIT_LIST_HEAD(&td_chan->queue);
		INIT_LIST_HEAD(&td_chan->free_list);
		td_chan->descs = pchan->descriptors;
		td_chan->desc_elems = pchan->descriptor_elements;
		td_chan->bytes_per_line = pchan->bytes_per_line;
		td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
			DMA_MEM_TO_DEV;
		/* each RX/TX pair shares one hardware instance block */
		td_chan->membase = td->membase +
			(i / 2) * TIMBDMA_INSTANCE_OFFSET +
			(pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);
		dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
			i, td_chan->membase);
		list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
	}
	err = dma_async_device_register(&td->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register async device\n");
		goto err_free_irq;
	}
	platform_set_drvdata(pdev, td);
	dev_dbg(&pdev->dev, "Probe result: %d\n", err);
	return err;
err_free_irq:
	free_irq(irq, td);
err_tasklet_kill:
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
err_free_mem:
	kfree(td);
err_release_region:
	release_mem_region(iomem->start, resource_size(iomem));
	return err;
}
/*
 * Platform remove: unwind td_probe() — unregister from the dmaengine core,
 * then release IRQ, tasklet, register mapping, memory and the mem region.
 */
static int td_remove(struct platform_device *pdev)
{
	struct timb_dma *td = platform_get_drvdata(pdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);
	dma_async_device_unregister(&td->dma);
	free_irq(irq, td);
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
	kfree(td);
	release_mem_region(iomem->start, resource_size(iomem));
	dev_dbg(&pdev->dev, "Removed...\n");
	return 0;
}
/* Platform-driver glue: binds on platform devices named DRIVER_NAME. */
static struct platform_driver td_driver = {
	.driver = {
		.name = DRIVER_NAME,
	},
	.probe = td_probe,
	.remove = td_remove,
};
/* registers/unregisters td_driver at module load/unload */
module_platform_driver(td_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <[email protected]>");
MODULE_ALIAS("platform:"DRIVER_NAME);
|
52998.c | /* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE690_NULL_Deref_from_Return__char_malloc_64a.c
Label Definition File: CWE690_NULL_Deref_from_Return.free.label.xml
Template File: source-sinks-64a.tmpl.c
*/
/*
* @description
* CWE: 690 Unchecked Return Value To NULL Pointer
* BadSource: malloc Allocate data using malloc()
* Sinks:
* GoodSink: Check to see if the data allocation failed and if not, use data
* BadSink : Don't check for NULL and use data
* Flow Variant: 64 Data flow: void pointer to data passed from one function to another in different source files
*
* */
#include "std_testcase.h"
#include <wchar.h>
#ifndef OMITBAD
/* bad function declaration (sink lives in the companion _64b file) */
void CWE690_NULL_Deref_from_Return__char_malloc_64b_bad_sink(void * void_data_ptr);
/* Juliet CWE-690 bad variant: allocates without checking for NULL and hands
 * the pointer (via void* to char**) to a sink that dereferences it.
 * The missing check IS the testcase — do not "fix" it. */
void CWE690_NULL_Deref_from_Return__char_malloc_64_bad()
{
    char * data;
    data = NULL; /* Initialize data */
    /* FLAW: Allocate memory without checking if the memory allocation function failed */
    data = (char *)malloc(100);
    CWE690_NULL_Deref_from_Return__char_malloc_64b_bad_sink(&data);
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* goodB2G uses the BadSource with the GoodSink: the sink (in the companion
 * _64b file) performs the NULL check the source omits */
void CWE690_NULL_Deref_from_Return__char_malloc_64b_goodB2G_sink(void * void_data_ptr);
static void goodB2G()
{
    char * data;
    data = NULL; /* Initialize data */
    /* FLAW: Allocate memory without checking if the memory allocation function failed */
    data = (char *)malloc(100);
    CWE690_NULL_Deref_from_Return__char_malloc_64b_goodB2G_sink(&data);
}
/* public entry point exercised by the testcase harness */
void CWE690_NULL_Deref_from_Return__char_malloc_64_good()
{
    goodB2G();
}
#endif /* OMITGOOD */
/* Below is the main(). It is only used when building this testcase on
   its own for testing or for building a binary to use in testing binary
   analysis tools. It is not used when compiling all the testcases as one
   application, which is how source code analysis tools are tested. */
#ifdef INCLUDEMAIN
/* Harness entry point: runs the good variant, then the bad variant. */
int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    CWE690_NULL_Deref_from_Return__char_malloc_64_good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    CWE690_NULL_Deref_from_Return__char_malloc_64_bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}
#endif
|
243729.c | /*++
Copyright (c) 2013-2014, ARM Ltd. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
Module Name:
ArmGicDxe.c
Abstract:
Driver implementing the GIC interrupt controller protocol
--*/
#include <PiDxe.h>
#include "ArmGicDxe.h"
/* Cached Little Kernel API table; populated in InterruptDxeInitialize(). */
lkapi_t* mLKApi = NULL;
/**
Initialize the state information for the CPU Architectural Protocol
@param ImageHandle of the loaded driver
@param SystemTable Pointer to the System Table
@retval EFI_SUCCESS Protocol registered
@retval EFI_OUT_OF_RESOURCES Cannot allocate protocol data structure
@retval EFI_DEVICE_ERROR Hardware problems
@retval EFI_UNSUPPORTED GIC version not supported
**/
/**
  Initialize the GIC interrupt controller driver, dispatching to the V2 or
  V3 implementation based on the detected architecture revision, and hook
  the LK interrupt entry points into the LK API table.

  Fix: the original dereferenced mLKApi unconditionally; if GetLKApi() ever
  returns NULL this faulted. The handlers are now installed only when the
  API table is present.

  @param ImageHandle  of the loaded driver
  @param SystemTable  Pointer to the System Table
  @retval EFI_SUCCESS           Protocol registered
  @retval EFI_OUT_OF_RESOURCES  Cannot allocate protocol data structure
  @retval EFI_DEVICE_ERROR      Hardware problems
  @retval EFI_UNSUPPORTED       GIC version not supported
**/
EFI_STATUS
InterruptDxeInitialize (
  IN EFI_HANDLE ImageHandle,
  IN EFI_SYSTEM_TABLE *SystemTable
  )
{
  EFI_STATUS Status;
  ARM_GIC_ARCH_REVISION Revision;

  mLKApi = GetLKApi();

  // Dispatch on the GIC architecture revision reported by the hardware.
  Revision = ArmGicGetSupportedArchRevision ();
  if (Revision == ARM_GIC_ARCH_REVISION_2) {
    Status = GicV2DxeInitialize (ImageHandle, SystemTable);
  } else if (Revision == ARM_GIC_ARCH_REVISION_3) {
    Status = GicV3DxeInitialize (ImageHandle, SystemTable);
  } else {
    Status = EFI_UNSUPPORTED;
  }

  // Robustness fix: only hook the LK entry points when the table exists.
  if (mLKApi != NULL) {
    mLKApi->int_register_handler = lkapi_int_register_handler;
    mLKApi->int_mask = lkapi_int_mask;
    mLKApi->int_unmask = lkapi_int_unmask;
  }

  return Status;
}
|
94810.c | /***********************************************************
Copyright 1991 by Stichting Mathematisch Centrum, Amsterdam, The
Netherlands.
All Rights Reserved
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the names of Stichting Mathematisch
Centrum or CWI not be used in advertising or publicity pertaining to
distribution of the software without specific, written prior permission.
STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
******************************************************************/
/* Bitset primitives used by the parser generator */
#include "pgenheaders.h"
#include "bitset.h"
bitset
newbitset(nbits)
int nbits;
{
int nbytes = NBYTES(nbits);
bitset ss = NEW(BYTE, nbytes);
if (ss == NULL)
fatal("no mem for bitset");
ss += nbytes;
while (--nbytes >= 0)
*--ss = 0;
return ss;
}
/* Release a bitset previously obtained from newbitset(). */
void
delbitset(ss)
	bitset ss;
{
	DEL(ss);
}
/* Set bit ibit in ss.  Returns 1 if the bit was newly set, 0 if it was
   already set. */
int
addbit(ss, ibit)
	bitset ss;
	int ibit;
{
	int ibyte = BIT2BYTE(ibit);
	BYTE mask = BIT2MASK(ibit);

	if ((ss[ibyte] & mask) != 0)
		return 0; /* Bit already set */
	ss[ibyte] |= mask;
	return 1;
}
#if 0 /* Now a macro */
int
testbit(ss, ibit)
bitset ss;
int ibit;
{
return (ss[BIT2BYTE(ibit)] & BIT2MASK(ibit)) != 0;
}
#endif
int
samebitset(ss1, ss2, nbits)
bitset ss1, ss2;
int nbits;
{
int i;
for (i = NBYTES(nbits); --i >= 0; )
if (*ss1++ != *ss2++)
return 0;
return 1;
}
void
mergebitset(ss1, ss2, nbits)
bitset ss1, ss2;
int nbits;
{
int i;
for (i = NBYTES(nbits); --i >= 0; )
*ss1++ |= *ss2++;
}
|
908763.c | /*
tea6420 - i2c-driver for the tea6420 by SGS Thomson
Copyright (C) 1998-2003 Michael Hunold <[email protected]>
The tea6420 is a bus controlled audio-matrix with 5 stereo inputs,
4 stereo outputs and gain control for each output.
It is cascadable, i.e. it can be found at the adresses 0x98
and 0x9a on the i2c-bus.
For detailed informations download the specifications directly
from SGS Thomson at http://www.st.com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/ioctl.h>
#include <linux/i2c.h>
#include "tea6420.h"
static int debug = 0;	/* insmod parameter */
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off device debugging (default:off).");
/* debug print helper, active only when the 'debug' module param is set */
#define dprintk(args...) \
	do { if (debug) { printk("%s: %s()[%d]: ", KBUILD_MODNAME, __FUNCTION__, __LINE__); printk(args); } } while (0)
/* addresses to scan, found only at 0x4c and/or 0x4d (7-Bit) */
static unsigned short normal_i2c[] = { I2C_ADDR_TEA6420_1, I2C_ADDR_TEA6420_2, I2C_CLIENT_END };
/* magic definition of all other variables and things */
I2C_CLIENT_INSMOD;
/* forward declarations: defined near the bottom of this file */
static struct i2c_driver driver;
static struct i2c_client client_template;
/* make a connection between the input 'i' and the output 'o'
with gain 'g' for the tea6420-client 'client' (note: i = 6 means 'mute') */
static int tea6420_switch(struct i2c_client *client, int i, int o, int g)
{
u8 byte = 0;
int ret;
dprintk("adr:0x%02x, i:%d, o:%d, g:%d\n", client->addr, i, o, g);
/* check if the paramters are valid */
if (i < 1 || i > 6 || o < 1 || o > 4 || g < 0 || g > 6 || g % 2 != 0)
return -1;
byte = ((o - 1) << 5);
byte |= (i - 1);
/* to understand this, have a look at the tea6420-specs (p.5) */
switch (g) {
case 0:
byte |= (3 << 3);
break;
case 2:
byte |= (2 << 3);
break;
case 4:
byte |= (1 << 3);
break;
case 6:
break;
}
ret = i2c_smbus_write_byte(client, byte);
if (ret) {
dprintk("i2c_smbus_write_byte() failed, ret:%d\n", ret);
return -EIO;
}
return 0;
}
/* this function is called by i2c_probe */
static int tea6420_detect(struct i2c_adapter *adapter, int address, int kind)
{
struct i2c_client *client;
int err = 0, i = 0;
/* let's see whether this adapter can support what we need */
if (0 == i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE)) {
return 0;
}
/* allocate memory for client structure */
client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (0 == client) {
return -ENOMEM;
}
/* fill client structure */
memcpy(client, &client_template, sizeof(struct i2c_client));
client->addr = address;
client->adapter = adapter;
/* tell the i2c layer a new client has arrived */
if (0 != (err = i2c_attach_client(client))) {
kfree(client);
return err;
}
/* set initial values: set "mute"-input to all outputs at gain 0 */
err = 0;
for (i = 1; i < 5; i++) {
err += tea6420_switch(client, 6, i, 0);
}
if (err) {
dprintk("could not initialize tea6420\n");
kfree(client);
return -ENODEV;
}
printk("tea6420: detected @ 0x%02x on adapter %s\n", address, &client->adapter->name[0]);
return 0;
}
/*
 * Legacy attach_adapter callback: probe for tea6420 chips, but only on the
 * SAA7146 i2c adapter this driver is written for.
 */
static int attach(struct i2c_adapter *adapter)
{
	/* let's see whether this is a know adapter we can attach to */
	if (adapter->id == I2C_HW_SAA7146)
		return i2c_probe(adapter, &addr_data, &tea6420_detect);

	dprintk("refusing to probe on unknown adapter [name='%s',id=0x%x]\n", adapter->name, adapter->id);
	return -ENODEV;
}
/*
 * Legacy detach_client callback: unregister the client from the i2c core
 * and release its memory.
 *
 * Fix: the original freed the client even when i2c_detach_client() failed,
 * leaving the i2c core holding a dangling pointer (use-after-free).  Only
 * free after a successful detach.
 */
static int detach(struct i2c_client *client)
{
	int ret = i2c_detach_client(client);
	if (ret)
		return ret;
	kfree(client);
	return 0;
}
/*
 * Legacy ioctl-style command entry point.  Only TEA6420_SWITCH is
 * supported; it routes the requested input to the requested output.
 */
static int command(struct i2c_client *client, unsigned int cmd, void *arg)
{
	struct tea6420_multiplex *a = (struct tea6420_multiplex *)arg;

	if (cmd != TEA6420_SWITCH)
		return -ENOIOCTLCMD;

	return tea6420_switch(client, a->in, a->out, a->gain);
}
/* legacy i2c driver binding: adapter attach/detach plus the command hook */
static struct i2c_driver driver = {
	.driver = {
		.name = "tea6420",
	},
	.id = I2C_DRIVERID_TEA6420,
	.attach_adapter = attach,
	.detach_client = detach,
	.command = command,
};
/* template copied into each newly detected client in tea6420_detect() */
static struct i2c_client client_template = {
	.name = "tea6420",
	.driver = &driver,
};
/* module init/exit: register/unregister the i2c driver */
static int __init this_module_init(void)
{
	return i2c_add_driver(&driver);
}
static void __exit this_module_exit(void)
{
	i2c_del_driver(&driver);
}
module_init(this_module_init);
module_exit(this_module_exit);
MODULE_AUTHOR("Michael Hunold <[email protected]>");
MODULE_DESCRIPTION("tea6420 driver");
MODULE_LICENSE("GPL");
|
392410.c | /*
* Copyright (C) 2014 - 2015 Xilinx, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* Use of the Software is limited solely to applications:
* (a) running on a Xilinx device, or
* (b) that interact with a Xilinx device through a bus or interconnect.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* XILINX BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Except as contained in this notice, the name of the Xilinx shall not be used
* in advertising or otherwise to promote the sale, use or other dealings in
* this Software without prior written authorization from Xilinx.
*/
/**
* Implementation for the custom idling of of individual peripheral node.
*/
#include <sleep.h>
#include "pm_node_idle.h"
#include "pm_common.h"
#define MAX_TIMEOUT 0x1FFFFFFF
#if defined(XPAR_PSU_TTC_0_DEVICE_ID) || \
defined(XPAR_PSU_TTC_1_DEVICE_ID) || \
defined(XPAR_PSU_TTC_2_DEVICE_ID) || \
defined(XPAR_PSU_TTC_3_DEVICE_ID)
/**
* NodeTtcIdle() - Custom code to idle the TTC
*
* @BaseAddress: TTC base address
*/
/**
 * NodeTtcIdle() - Custom code to idle the TTC
 *
 * @BaseAddress: TTC base address
 *
 * Fix: stopping the counter must SET the disable (DIS) bit.  The original
 * wrote (Val | ~XTTCPS_CNT_CNTRL_DIS_MASK), which sets every bit EXCEPT
 * the disable bit and therefore never stops the counter.  (Per the ZynqMP
 * TTC register spec, DIS=1 halts counting — confirm against the header's
 * mask definition.)
 */
void NodeTtcIdle(u32 BaseAddress)
{
	/* Stop the TTC timer by setting the disable bit */
	u32 Val = Xil_In32(BaseAddress + XTTCPS_CNT_CNTRL_OFFSET);
	Xil_Out32(BaseAddress + XTTCPS_CNT_CNTRL_OFFSET, (Val | XTTCPS_CNT_CNTRL_DIS_MASK));
	/*
	 * Reset the counter control register
	 */
	XTtcPs_WriteReg(BaseAddress, XTTCPS_CNT_CNTRL_OFFSET, XTTCPS_CNT_CNTRL_RESET_VALUE);
	/*
	 * Clear counters interval values
	 */
	XTtcPs_WriteReg(BaseAddress, XTTCPS_INTERVAL_VAL_OFFSET, 0x0);
	/*
	 * Clear counters Match values
	 */
	XTtcPs_WriteReg(BaseAddress, XTTCPS_MATCH_0_OFFSET, 0x0);
	XTtcPs_WriteReg(BaseAddress, XTTCPS_MATCH_1_OFFSET, 0x0);
	XTtcPs_WriteReg(BaseAddress, XTTCPS_MATCH_2_OFFSET, 0x0);
	/*
	 * Disable counter's interrupts
	 */
	XTtcPs_WriteReg(BaseAddress, XTTCPS_IER_OFFSET, 0x0);
	/*
	 * Clear interrupts (status) for all the counters [clronrd]
	 */
	XTtcPs_ReadReg(BaseAddress, XTTCPS_ISR_OFFSET);
}
#endif
#if defined(XPAR_PSU_SD_0_DEVICE_ID) || \
defined(XPAR_PSU_SD_1_DEVICE_ID)
#define IOU_SLCR_BASE 0xFF180000
#define IOU_SD_CTRL_OFFSET 0x00000310
#define SD_SLEEP_TIME 1000 /* in ms */
#define EMMC_RESET_TIME 1 /* in ms */
#define SD0_EMMC_SEL_MASK (0x1 << 0)
#define SD1_EMMC_SEL_MASK (0x1 << 15)
/**
* NodeSdioIdle() - Custom code to idle the SDIO
*
* @BaseAddress: SDIO base address
*/
/**
 * NodeSdioIdle() - Custom code to idle the SDIO
 *
 * @BaseAddress: SDIO base address
 *
 * Fixes:
 *  - The Present State register was read with XSdPs_ReadReg8 (8-bit);
 *    XSDPS_PSR_CARD_INSRT_MASK is bit 16 of that 32-bit register, so the
 *    card-present test could never be true.  Read it 32-bit wide.
 *  - The unsigned post-decrement in the poll conditions ("&& Timeout--")
 *    wrapped Timeout to 0xFFFFFFFF on expiry, so "if (Timeout == 0)" never
 *    fired; use a pre-decrement compared against zero instead.
 */
void NodeSdioIdle(u32 BaseAddress)
{
	u16 EmmcStatus;
	u8 Val;
	u32 StatusReg;
	u32 PresentStateReg;
	u32 Timeout = MAX_TIMEOUT;
	u32 SdpsActive = (XSDPS_PSR_INHIBIT_CMD_MASK | XSDPS_PSR_INHIBIT_DAT_MASK
			| XSDPS_PSR_DAT_ACTIVE_MASK);
	/* 32-bit read: CARD_INSRT lives above bit 7 */
	PresentStateReg = XSdPs_ReadReg(BaseAddress, XSDPS_PRES_STATE_OFFSET);
	/* Check for Card Present */
	if (PresentStateReg & XSDPS_PSR_CARD_INSRT_MASK) {
		/* Check for SD idle */
		do {
			StatusReg = XSdPs_ReadReg(BaseAddress, XSDPS_PRES_STATE_OFFSET);
		} while ((StatusReg & SdpsActive) && (--Timeout != 0));
	}
	if (Timeout == 0) {
		PmDbg(DEBUG_DETAILED,"SD was still not idle\n");
	}
	/* Reset the eMMC card */
	EmmcStatus = Xil_In32(IOU_SLCR_BASE + IOU_SD_CTRL_OFFSET);
#ifdef XPAR_PSU_SD_0_DEVICE_ID
	if ((BaseAddress == XPAR_PSU_SD_0_BASEADDR)
			&& (EmmcStatus & SD0_EMMC_SEL_MASK)) {
		/* pulse the eMMC hardware-reset line */
		Val = XSdPs_ReadReg8(BaseAddress, XSDPS_POWER_CTRL_OFFSET);
		XSdPs_WriteReg8(BaseAddress, XSDPS_POWER_CTRL_OFFSET,
				Val | XSDPS_PC_EMMC_HW_RST_MASK);
		usleep(1000 * EMMC_RESET_TIME);
		Val = XSdPs_ReadReg8(BaseAddress, XSDPS_POWER_CTRL_OFFSET);
		XSdPs_WriteReg8(BaseAddress, XSDPS_POWER_CTRL_OFFSET,
				Val & ~XSDPS_PC_EMMC_HW_RST_MASK);
	}
#endif
#ifdef XPAR_PSU_SD_1_DEVICE_ID
	if ((BaseAddress == XPAR_PSU_SD_1_BASEADDR)
			&& (EmmcStatus & SD1_EMMC_SEL_MASK)) {
		/* pulse the eMMC hardware-reset line */
		Val = XSdPs_ReadReg8(BaseAddress, XSDPS_POWER_CTRL_OFFSET);
		XSdPs_WriteReg8(BaseAddress, XSDPS_POWER_CTRL_OFFSET,
				Val | XSDPS_PC_EMMC_HW_RST_MASK);
		usleep(1000 * EMMC_RESET_TIME);
		Val = XSdPs_ReadReg8(BaseAddress, XSDPS_POWER_CTRL_OFFSET);
		XSdPs_WriteReg8(BaseAddress, XSDPS_POWER_CTRL_OFFSET,
				Val & ~XSDPS_PC_EMMC_HW_RST_MASK);
	}
#endif
	/* Disable bus power */
	XSdPs_WriteReg8(BaseAddress, XSDPS_POWER_CTRL_OFFSET, 0);
	usleep(1000 * SD_SLEEP_TIME);
	/* "Software reset for all" is initiated */
	XSdPs_WriteReg8(BaseAddress, XSDPS_SW_RST_OFFSET, XSDPS_SWRST_ALL_MASK);
	Timeout = MAX_TIMEOUT;
	/* Proceed with initialization only after reset is complete */
	Val = XSdPs_ReadReg8(BaseAddress, XSDPS_SW_RST_OFFSET);
	while (((Val & XSDPS_SWRST_ALL_MASK) != 0U) && (--Timeout != 0)) {
		Val = XSdPs_ReadReg8(BaseAddress, XSDPS_SW_RST_OFFSET);
	}
	if (Timeout == 0) {
		PmDbg(DEBUG_DETAILED,"SD was still not reset\n");
	}
}
#endif
#if defined(XPAR_PSU_I2C_0_DEVICE_ID) || \
defined(XPAR_PSU_I2C_1_DEVICE_ID)
/**
* NodeI2cIdle() - Custom code to idle the I2c
*
* @BaseAddress: I2c base address
*/
/**
 * NodeI2cIdle() - Custom code to idle the I2c
 *
 * @BaseAddress: I2c base address
 *
 * Fix: the original loop condition used unsigned post-decrement
 * ("&& Timeout--"), so on expiry Timeout wrapped to 0xFFFFFFFF and the
 * "Timeout == 0" diagnostic could never fire; pre-decrement instead.
 */
void NodeI2cIdle(u32 BaseAddress)
{
	u32 StatusReg;
	u32 Timeout = MAX_TIMEOUT;
	/* Check whether the I2C bus is busy (BA flag in the status register) */
	do {
		StatusReg = XIicPs_ReadReg(BaseAddress,
					XIICPS_SR_OFFSET);
	} while (((StatusReg & XIICPS_SR_BA_MASK) != 0x0U) && (--Timeout != 0));
	if (Timeout == 0) {
		PmDbg(DEBUG_DETAILED,"i2c was still not idle\n");
	}
}
#endif
#if defined(XPAR_PSU_ETHERNET_0_DEVICE_ID) || \
defined(XPAR_PSU_ETHERNET_1_DEVICE_ID) || \
defined(XPAR_PSU_ETHERNET_2_DEVICE_ID) || \
defined(XPAR_PSU_ETHERNET_3_DEVICE_ID)
/**
* NodeGemIdle() - Custom code to idle the GEM
*
* @BaseAddress: GEM base address
*/
/**
 * NodeGemIdle() - Custom code to idle the GEM
 *
 * @BaseAddress: GEM base address
 *
 * Fix: the original poll condition used unsigned post-decrement
 * ("&& Timeout--"), so on expiry Timeout wrapped to 0xFFFFFFFF and the
 * "Timeout == 0" diagnostic could never fire; pre-decrement instead.
 */
void NodeGemIdle(u32 BaseAddress)
{
	u32 Reg;
	u32 Timeout = MAX_TIMEOUT;
	/* Make sure MDIO is in IDLE state */
	do {
		Reg = XEmacPs_ReadReg(BaseAddress, XEMACPS_NWSR_OFFSET);
	} while ((!(Reg & XEMACPS_NWSR_MDIOIDLE_MASK)) && (--Timeout != 0));
	if (Timeout == 0) {
		PmDbg(DEBUG_DETAILED,"gem was still not idle\n");
	}
	/* stop all transactions of the Ethernet */
	/* Disable all interrupts */
	XEmacPs_WriteReg(BaseAddress, XEMACPS_IDR_OFFSET,
			XEMACPS_IXR_ALL_MASK);
	/* Disable the receiver & transmitter */
	Reg = XEmacPs_ReadReg(BaseAddress, XEMACPS_NWCTRL_OFFSET);
	Reg &= (u32)(~XEMACPS_NWCTRL_RXEN_MASK);
	Reg &= (u32)(~XEMACPS_NWCTRL_TXEN_MASK);
	XEmacPs_WriteReg(BaseAddress, XEMACPS_NWCTRL_OFFSET, Reg);
}
#endif
#ifdef XPAR_PSU_QSPI_0_DEVICE_ID
/**
* NodeQspiIdle() - Custom code to idle the QSPI
*
* @BaseAddress: QSPI base address
*/
/**
 * NodeQspiIdle() - Custom code to idle the QSPI
 *
 * @BaseAddress: QSPI base address
 *
 * Fix: the original poll condition used unsigned post-decrement
 * ("&& Timeout--"), so on expiry Timeout wrapped to 0xFFFFFFFF and the
 * "Timeout == 0" diagnostic could never fire; pre-decrement instead.
 */
void NodeQspiIdle(u32 BaseAddress)
{
	u32 StatusReg;
	u32 Timeout = MAX_TIMEOUT;
	/*
	 * Wait for the transfer to finish by polling Tx fifo status.
	 */
	do {
		StatusReg = XQspiPsu_ReadReg(BaseAddress, XQSPIPSU_XFER_STS_OFFSET);
	} while ((StatusReg != 0) && (--Timeout != 0));
	if (Timeout == 0) {
		PmDbg(DEBUG_DETAILED,"QSPI was still not idle\n");
	}
}
#endif
|
980867.c | #include <std.h>
inherit WEAPON;
// Build the hook-fauchard: a 6-foot Derro pole arm with a chance-based
// trip/impale proc wired in via set_hit() -> extra_hit().
void create(){
    ::create();
    set_id(({"polearm","pole arm","hook-fauchard","spear","hook"}));
    set_name("hook-fauchard");
    set_short("Hook-fauchard");
    set_long("This is a 6 foot long Derro pole arm. It can be used to impale and then drag opponents off balance with its hooks");
    set_weight(10);
    set_size(2);
    set_value(10);
    set_wc(1,4);          // weapon class vs. small/medium targets
    set_large_wc(2,3);    // weapon class vs. large targets
    set_type("slash");
    set_prof_type("polearm");
    set_property("enchantment",2);
    set_hit((:TO,"extra_hit":));   // per-hit callback, see extra_hit() below
    set_weapon_prof("martial");
}
// Per-hit proc: roughly a 3-in-25 chance to trip the wielder's current
// attacker and deal 1-5 bonus damage; otherwise returns 1 (no bonus).
// The 'ob' parameter is overwritten with the current attacker on entry.
int extra_hit(object ob){
    int dam;   // NOTE(review): unused — kept for interface compatibility
    ob = ETO->query_current_attacker();
    if(!objectp(ob))return 1;
    if(random(25)>2) return 1;
    // changed from 15, was a little too frequent *Styx* 8/15/03
    message("my_action","%^BOLD%^%^WHITE%^You impale "+ob->query_cap_name()+" and then drag "+ob->query_objective()+" off balance with the hook!",ETO);
    message("other_action","%^BOLD%^%^WHITE%^" +ETO->query_cap_name()+ " impales "+ob->query_cap_name()+" with "+ETO->query_possessive()+" hooked pole arm and then pulls "+ob->query_objective()+" off ballance!",environment(ob),(({ETO,ob})));
    message("my_action","%^BOLD%^%^WHITE%^" +ETO->query_cap_name()+ " impales you with "+ETO->query_possessive()+" hooked pole arm and then pulls you off your feet!",ob);
    ob->set_tripped(1,"%^BOLD%^%^WHITE%^You are struggling to your feet!");
    return(random(5)+1);   // bonus damage 1-5
}
|
354598.c | #include <stdio.h>
/*
 * Read N and print "Ho " N-1 times followed by "Ho!" and a newline
 * (prints nothing for N < 1, matching the original loop bounds).
 *
 * Fix: the original ignored scanf's return value, so on malformed input N
 * was read uninitialized — undefined behavior.  Bail out instead.
 */
int main(void) {
    int N;
    if (scanf("%d", &N) != 1)
        return 1;
    for (int i = 1; i <= N; i++)
        if (i == N)
            printf("Ho!\n");
        else
            printf("Ho ");
    return 0;
}
25296.c | /*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "platform_api_vmcore.h"
#include "platform_api_extension.h"
/* Minimal assert for this platform: prints the failed expression and
 * location, then deliberately aborts — _count is printf(" ")'s return
 * value (1), so (_count / (_count - 1)) divides by zero on purpose, and
 * the trailing while(1) pins the thread if the trap returns. */
#define bh_assert(v) do { \
if (!(v)) { \
int _count; \
printf("\nASSERTION FAILED: %s, at %s, line %d\n", \
#v, __FILE__, __LINE__); \
_count = printf(" "); \
/* divived by 0 to make it abort */ \
printf("%d\n", _count / (_count - 1)); \
while (1); \
} \
} while (0)
/* One waiter in a thread's join list: the joiner blocks on 'sem'. */
typedef struct os_thread_wait_node {
    struct k_sem sem;
    os_thread_wait_list next;
} os_thread_wait_node;
/* Per-thread bookkeeping kept in the global thread_data_list. */
typedef struct os_thread_data {
    /* Next thread data */
    struct os_thread_data *next;
    /* Zephyr thread handle */
    korp_tid tid;
    /* Jeff thread local root */
    void *tlr;
    /* Lock for waiting list */
    struct k_mutex wait_list_lock;
    /* Waiting list of other threads who are joining this thread */
    os_thread_wait_list thread_wait_list;
    /* Thread stack size */
    unsigned stack_size;
    /* Thread stack (flexible tail: allocated with the stack appended) */
    char stack[1];
} os_thread_data;
/* Zephyr thread object plus deferred-free marker. */
typedef struct os_thread_obj {
    struct k_thread thread;
    /* Whether the thread is terminated and this thread object is to
       be freed in the future. */
    bool to_be_freed;
    struct os_thread_obj *next;
} os_thread_obj;
/* Set once the thread subsystem has been initialized. */
static bool is_thread_sys_inited = false;
/* Thread data of supervisor thread */
static os_thread_data supervisor_thread_data;
/* Lock for thread data list */
static struct k_mutex thread_data_lock;
/* Thread data list */
static os_thread_data *thread_data_list = NULL;
/* Lock for thread object list */
static struct k_mutex thread_obj_lock;
/* Thread object list */
static os_thread_obj *thread_obj_list = NULL;
/* Insert 'thread_data' at the head of the global thread data list.
 * Adding a record that is already in the list is a harmless no-op. */
static void thread_data_list_add(os_thread_data *thread_data)
{
    os_thread_data *node;
    bool present = false;

    k_mutex_lock(&thread_data_lock, K_FOREVER);
    /* Scan for a duplicate first; linking the same node twice would
     * corrupt the list. */
    for (node = thread_data_list; node != NULL; node = node->next) {
        if (node == thread_data) {
            present = true;
            break;
        }
    }
    if (!present) {
        /* Push onto the front of the list. */
        thread_data->next = thread_data_list;
        thread_data_list = thread_data;
    }
    k_mutex_unlock(&thread_data_lock);
}
/* Unlink 'thread_data' from the global thread data list, if present. */
static void thread_data_list_remove(os_thread_data *thread_data)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (thread_data_list == thread_data) {
        /* Removing the head: just advance it. */
        thread_data_list = thread_data_list->next;
    } else if (thread_data_list != NULL) {
        /* Find the predecessor of the node to unlink. */
        os_thread_data *prev = thread_data_list;

        while (prev->next != NULL && prev->next != thread_data)
            prev = prev->next;
        if (prev->next == thread_data)
            prev->next = thread_data->next;
    }
    k_mutex_unlock(&thread_data_lock);
}
/* Return the thread data record whose Zephyr thread id equals 'tid',
 * or NULL if no such record is registered. */
static os_thread_data *
thread_data_list_lookup(k_tid_t tid)
{
    os_thread_data *found = NULL;
    os_thread_data *node;

    k_mutex_lock(&thread_data_lock, K_FOREVER);
    for (node = thread_data_list; node != NULL; node = node->next) {
        if (node->tid == tid) {
            found = node;
            break;
        }
    }
    k_mutex_unlock(&thread_data_lock);
    return found;
}
/* Push 'thread_obj' onto the front of the global thread object list.
 * Unlike thread_data_list_add, no duplicate check is performed. */
static void thread_obj_list_add(os_thread_obj *thread_obj)
{
    k_mutex_lock(&thread_obj_lock, K_FOREVER);
    if (thread_obj_list != NULL)
        thread_obj->next = thread_obj_list;
    thread_obj_list = thread_obj;
    k_mutex_unlock(&thread_obj_lock);
}
/* Free every thread object whose thread has terminated (those marked
 * to_be_freed by os_thread_cleanup), unlinking them from the list. */
static void thread_obj_list_reclaim()
{
    os_thread_obj **link;

    k_mutex_lock(&thread_obj_lock, K_FOREVER);
    /* Walk via a pointer-to-link so head and interior removals are
     * handled uniformly. */
    link = &thread_obj_list;
    while (*link != NULL) {
        os_thread_obj *obj = *link;

        if (obj->to_be_freed) {
            *link = obj->next;
            BH_FREE(obj);
        } else {
            link = &obj->next;
        }
    }
    k_mutex_unlock(&thread_obj_lock);
}
/* Initialize the thread subsystem: set up the global list locks and
 * register the calling (supervisor) thread as the first thread data
 * record. Idempotent; always returns BHT_OK.
 * NOTE(review): no locking here — assumed to run before any other
 * thread uses this module; confirm with callers. */
int os_thread_sys_init()
{
if (is_thread_sys_inited)
return BHT_OK;
k_mutex_init(&thread_data_lock);
k_mutex_init(&thread_obj_lock);
/* Initialize supervisor thread data */
memset(&supervisor_thread_data, 0, sizeof(supervisor_thread_data));
supervisor_thread_data.tid = k_current_get();
/* Set as head of thread data list */
thread_data_list = &supervisor_thread_data;
is_thread_sys_inited = true;
return BHT_OK;
}
/* Tear down the thread subsystem. Only clears the init flag; the lists
 * and locks are left as-is. Safe to call when not initialized. */
void os_thread_sys_destroy(void)
{
    if (!is_thread_sys_inited)
        return;
    is_thread_sys_inited = false;
}
/* Return the thread data record of the calling thread, or NULL if the
 * caller was never registered. */
static os_thread_data *
thread_data_current()
{
    return thread_data_list_lookup(k_current_get());
}
/* Final bookkeeping for a terminating thread: wake all joiners, remove
 * the thread's data record, mark its thread object for deferred freeing
 * (actual free happens in thread_obj_list_reclaim), and release the
 * thread data. Called from os_thread_wrapper after the start routine
 * returns. */
static void os_thread_cleanup(void)
{
os_thread_data *thread_data = thread_data_current();
bh_assert(thread_data != NULL);
k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
if (thread_data->thread_wait_list) {
/* Signal each joining thread */
os_thread_wait_list head = thread_data->thread_wait_list;
while (head) {
os_thread_wait_list next = head->next;
k_sem_give(&head->sem);
/* head will be freed by joining thread */
head = next;
}
thread_data->thread_wait_list = NULL;
}
k_mutex_unlock(&thread_data->wait_list_lock);
thread_data_list_remove(thread_data);
/* Set flag to true for the next thread creating to
free the thread object */
/* NOTE(review): this cast relies on 'tid' (set to k_current_get() in
os_thread_wrapper) pointing at the os_thread_obj allocated in
os_thread_create_with_prio — valid because struct k_thread is that
object's first member; confirm against the Zephyr API. */
((os_thread_obj*) thread_data->tid)->to_be_freed = true;
BH_FREE(thread_data);
}
/* Trampoline matching Zephyr's three-argument thread entry signature:
 * records the new thread's id, registers its data record (harmless if
 * os_thread_create_with_prio already did — the add dedupes), runs the
 * user start routine, then performs cleanup. */
static void os_thread_wrapper(void *start, void *arg, void *thread_data)
{
/* Set thread custom data */
((os_thread_data*) thread_data)->tid = k_current_get();
thread_data_list_add(thread_data);
((thread_start_routine_t) start)(arg);
os_thread_cleanup();
}
/* Create a thread with the default priority; see
 * os_thread_create_with_prio() for the full contract. */
int os_thread_create(korp_tid *p_tid, thread_start_routine_t start, void *arg,
                     unsigned int stack_size)
{
    int prio = BH_THREAD_DEFAULT_PRIORITY;

    return os_thread_create_with_prio(p_tid, start, arg, stack_size, prio);
}
/*
 * Create a thread running start(arg) with the given stack size and Zephyr
 * priority. On success stores the new thread handle in *p_tid and returns
 * BHT_OK; returns BHT_ERROR on bad arguments, allocation failure, or
 * thread-creation failure (all partially acquired resources are released).
 */
int os_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
                               void *arg, unsigned int stack_size, int prio)
{
    korp_tid tid;
    korp_tid new_tid;
    os_thread_data *thread_data;
    unsigned thread_data_size;

    if (!p_tid || !stack_size)
        return BHT_ERROR;

    /* Free the thread objects of terminated threads */
    thread_obj_list_reclaim();

    /* Create and initialize thread object */
    if (!(tid = BH_MALLOC(sizeof(os_thread_obj))))
        return BHT_ERROR;
    memset(tid, 0, sizeof(os_thread_obj));

    /* Create and initialize thread data; the stack lives inline after
     * the struct, hence offsetof for the header size. */
    thread_data_size = offsetof(os_thread_data, stack) + stack_size;
    if (!(thread_data = BH_MALLOC(thread_data_size))) {
        BH_FREE(tid);
        return BHT_ERROR;
    }
    memset(thread_data, 0, thread_data_size);
    k_mutex_init(&thread_data->wait_list_lock);
    thread_data->stack_size = stack_size;
    thread_data->tid = tid;

    /* Create the thread.
     * BUGFIX: the original overwrote 'tid' with k_thread_create's return
     * value before the failure check, so on failure BH_FREE(tid) freed
     * NULL and the allocated thread object leaked. Keep the original
     * pointer so both allocations are released. */
    new_tid = k_thread_create(tid, (k_thread_stack_t *) thread_data->stack,
                              stack_size, os_thread_wrapper, start, arg,
                              thread_data, prio, 0, K_NO_WAIT);
    if (!new_tid) {
        BH_FREE(tid);
        BH_FREE(thread_data);
        return BHT_ERROR;
    }
    bh_assert(new_tid == thread_data->tid);

    /* Set thread custom data (os_thread_wrapper repeats this; the list
     * add is duplicate-safe). */
    thread_data_list_add(thread_data);
    thread_obj_list_add((os_thread_obj*) tid);
    *p_tid = tid;
    return BHT_OK;
}
korp_tid os_self_thread()
{
return (korp_tid) k_current_get();
}
/* Block until 'thread' terminates. The exit value is not supported
 * (value_ptr is ignored). Implemented by enqueueing a semaphore on the
 * target's wait list; os_thread_cleanup gives the semaphore when the
 * target finishes.
 * NOTE(review): if the target has already terminated and been reclaimed,
 * thread_data_list_lookup returns NULL and the subsequent dereference is
 * only guarded by bh_assert — confirm callers never join a finished
 * thread. */
int os_thread_join(korp_tid thread, void **value_ptr)
{
(void) value_ptr;
os_thread_data *thread_data;
os_thread_wait_node *node;
/* Create wait node and append it to wait list */
if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
return BHT_ERROR;
k_sem_init(&node->sem, 0, 1);
node->next = NULL;
/* Get thread data */
thread_data = thread_data_list_lookup(thread);
bh_assert(thread_data != NULL);
k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
if (!thread_data->thread_wait_list)
thread_data->thread_wait_list = node;
else {
/* Add to end of waiting list */
os_thread_wait_node *p = thread_data->thread_wait_list;
while (p->next)
p = p->next;
p->next = node;
}
k_mutex_unlock(&thread_data->wait_list_lock);
/* Wait the sem */
k_sem_take(&node->sem, K_FOREVER);
/* Wait some time for the thread to be actually terminated */
k_sleep(Z_TIMEOUT_MS(100));
/* Destroy resource */
BH_FREE(node);
return BHT_OK;
}
/* Initialize a mutex. The k_mutex_init result is not checked; always
 * returns BHT_OK. */
int os_mutex_init(korp_mutex *mutex)
{
k_mutex_init(mutex);
return BHT_OK;
}
/* Initialize a recursive mutex. Identical to os_mutex_init because a
 * Zephyr k_mutex already allows recursive locking by its owner
 * (NOTE(review): verify against the target Zephyr version's docs). */
int os_recursive_mutex_init(korp_mutex *mutex)
{
k_mutex_init(mutex);
return BHT_OK;
}
/* No-op: nothing to release for a k_mutex. Always returns BHT_OK. */
int os_mutex_destroy(korp_mutex *mutex)
{
(void) mutex;
return BHT_OK;
}
/* Block until 'mutex' is acquired; propagates k_mutex_lock's result. */
int os_mutex_lock(korp_mutex *mutex)
{
return k_mutex_lock(mutex, K_FOREVER);
}
/* Release 'mutex'; propagates k_mutex_unlock's result. */
int os_mutex_unlock(korp_mutex *mutex)
{
return k_mutex_unlock(mutex);
}
/* Initialize a condition variable: an empty FIFO list of waiters guarded
 * by its own lock. Always returns BHT_OK. */
int os_cond_init(korp_cond *cond)
{
k_mutex_init(&cond->wait_list_lock);
cond->thread_wait_list = NULL;
return BHT_OK;
}
/* No-op: the wait list is expected to be empty by the time this is
 * called; no resources are freed. Always returns BHT_OK. */
int os_cond_destroy(korp_cond *cond)
{
(void) cond;
return BHT_OK;
}
/* Common wait path for os_cond_wait / os_cond_reltimedwait: enqueue a
 * per-waiter semaphore on the cond's FIFO list, drop 'mutex' while
 * blocking on the semaphore (optionally with a 'mills' ms timeout),
 * re-acquire 'mutex', then unlink and free the wait node.
 * Note: the k_sem_take result is not checked, so a timeout is
 * indistinguishable from a signal — BHT_OK is returned either way.
 * Returns BHT_ERROR only on allocation failure. */
static int os_cond_wait_internal(korp_cond *cond, korp_mutex *mutex,
bool timed, int mills)
{
os_thread_wait_node *node;
/* Create wait node and append it to wait list */
if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
return BHT_ERROR;
k_sem_init(&node->sem, 0, 1);
node->next = NULL;
k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
if (!cond->thread_wait_list)
cond->thread_wait_list = node;
else {
/* Add to end of wait list */
os_thread_wait_node *p = cond->thread_wait_list;
while (p->next)
p = p->next;
p->next = node;
}
k_mutex_unlock(&cond->wait_list_lock);
/* Unlock mutex, wait sem and lock mutex again */
k_mutex_unlock(mutex);
k_sem_take(&node->sem, timed ? Z_TIMEOUT_MS(mills) : K_FOREVER);
k_mutex_lock(mutex, K_FOREVER);
/* Remove wait node from wait list */
k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
if (cond->thread_wait_list == node)
cond->thread_wait_list = node->next;
else {
/* Remove from the wait list; the node is always still linked here,
so the scan terminates at it */
os_thread_wait_node *p = cond->thread_wait_list;
while (p->next != node)
p = p->next;
p->next = node->next;
}
BH_FREE(node);
k_mutex_unlock(&cond->wait_list_lock);
return BHT_OK;
}
/* Block on 'cond' until signalled; 'mutex' must be held by the caller
 * and is held again on return. */
int os_cond_wait(korp_cond *cond, korp_mutex *mutex)
{
    const bool timed = false;

    return os_cond_wait_internal(cond, mutex, timed, 0);
}
/* Wait on 'cond' for at most 'useconds' microseconds; BHT_WAIT_FOREVER
 * means no timeout. The timeout is converted to milliseconds. */
int os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, int useconds)
{
    bool timed = (useconds != BHT_WAIT_FOREVER);
    int mills = timed ? useconds / 1000 : 0;

    return os_cond_wait_internal(cond, mutex, timed, mills);
}
/* Wake a single waiter on 'cond', if any. The head of the FIFO wait
 * list is the longest-waiting thread. */
int os_cond_signal(korp_cond *cond)
{
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list != NULL) {
        k_sem_give(&cond->thread_wait_list->sem);
    }
    k_mutex_unlock(&cond->wait_list_lock);
    return BHT_OK;
}
/* Return the lowest address of the calling thread's stack, or NULL when
 * the kernel was built without CONFIG_THREAD_STACK_INFO. */
uint8 *os_thread_get_stack_boundary()
{
#if defined(CONFIG_THREAD_STACK_INFO)
korp_tid thread = k_current_get();
return (uint8*)thread->stack_info.start;
#else
return NULL;
#endif
}
|
801716.c | /**
* \file src/MBCSR/MatTransMatMult/3x8.c
* \brief The \f$3\times 8\f$ MBCSR implementation
* of \f$A^TA\cdot x\f$ and \f$A^HA\cdot x\f$.
* \ingroup MATTYPE_MBCSR
*
* Automatically generated by ./gen_ata.sh on Wed Jun 8 15:55:46 PDT 2005.
*/
#include <assert.h>
#include <oski/config.h> /* for 'restrict' keyword */
#include <oski/common.h>
#include <oski/mangle.h>
#include <oski/vecview.h>
#include <oski/MBCSR/format.h>
#include <oski/MBCSR/module.h>
#if IS_VAL_COMPLEX
/** Complex-valued, so do not use explicit 'register' keyword. */
#define REGISTER
#else
/** Real-valued, so use explicit 'register' keyword. */
#define REGISTER register
#endif
#if defined(DO_NAME_MANGLING)
/** Mangled name for MBCSR_MatTransMatMult_v1_aX_b1_xs1_ys1. */
#define MBCSR_MatTransMatMult_v1_aX_b1_xs1_ys1 \
MANGLE_MOD_(MBCSR_MatTransMatMult_v1_aX_b1_xs1_ys1_3x8)
#endif
/**
* \brief The \f$3\times 8\f$ MBCSR implementation
* of \f$A^TA\cdot x\f$, where the source and destination
* vectors have unit-stride and unit-stride,
* respectively.
*
* Parameter 'tp' may be NULL if no intermediate vector output
* is desired.
*/
void
MBCSR_MatTransMatMult_v1_aX_b1_xs1_ys1( oski_index_t M,
const oski_index_t* restrict ptr, const oski_index_t* restrict ind,
const oski_value_t* restrict val, const oski_value_t* restrict diag,
oski_value_t alpha, const oski_value_t* restrict x,
oski_value_t* restrict y,
oski_value_t* restrict t, oski_index_t inct )
{
oski_index_t I;
oski_value_t* tp = t;
/* For each 3-row block row I: t_I = A(I,:)*x, then y += alpha*A(I,:)^T*t_I. */
for( I = 0; I < M; I++ )
{
oski_index_t K;
const oski_value_t* vp;
REGISTER oski_value_t _t0 = MAKE_VAL_COMPLEX(0.0, 0.0);
REGISTER oski_value_t _t1 = MAKE_VAL_COMPLEX(0.0, 0.0);
REGISTER oski_value_t _t2 = MAKE_VAL_COMPLEX(0.0, 0.0);
/* Gather pass: accumulate t_I over the off-diagonal 3x8 blocks. */
for( K = ptr[I], vp = val; K < ptr[I+1]; K++, vp += 3*8 )
{
oski_index_t j0 = ind[K];
const oski_value_t* xp = x + j0;
REGISTER oski_value_t _x0;
REGISTER oski_value_t _x1;
REGISTER oski_value_t _x2;
REGISTER oski_value_t _x3;
REGISTER oski_value_t _x4;
REGISTER oski_value_t _x5;
REGISTER oski_value_t _x6;
REGISTER oski_value_t _x7;
VAL_ASSIGN( _x0, xp[0] );
VAL_ASSIGN( _x1, xp[1] );
VAL_ASSIGN( _x2, xp[2] );
VAL_ASSIGN( _x3, xp[3] );
VAL_ASSIGN( _x4, xp[4] );
VAL_ASSIGN( _x5, xp[5] );
VAL_ASSIGN( _x6, xp[6] );
VAL_ASSIGN( _x7, xp[7] );
VAL_MAC( _t0, vp[0], _x0 );
VAL_MAC( _t1, vp[8], _x0 );
VAL_MAC( _t2, vp[16], _x0 );
VAL_MAC( _t0, vp[1], _x1 );
VAL_MAC( _t1, vp[9], _x1 );
VAL_MAC( _t2, vp[17], _x1 );
VAL_MAC( _t0, vp[2], _x2 );
VAL_MAC( _t1, vp[10], _x2 );
VAL_MAC( _t2, vp[18], _x2 );
VAL_MAC( _t0, vp[3], _x3 );
VAL_MAC( _t1, vp[11], _x3 );
VAL_MAC( _t2, vp[19], _x3 );
VAL_MAC( _t0, vp[4], _x4 );
VAL_MAC( _t1, vp[12], _x4 );
VAL_MAC( _t2, vp[20], _x4 );
VAL_MAC( _t0, vp[5], _x5 );
VAL_MAC( _t1, vp[13], _x5 );
VAL_MAC( _t2, vp[21], _x5 );
VAL_MAC( _t0, vp[6], _x6 );
VAL_MAC( _t1, vp[14], _x6 );
VAL_MAC( _t2, vp[22], _x6 );
VAL_MAC( _t0, vp[7], _x7 );
VAL_MAC( _t1, vp[15], _x7 );
VAL_MAC( _t2, vp[23], _x7 );
}
/* Diag block multiply (3x3 diagonal block, row-major in 'diag') */
{
const oski_value_t* xp = x + I*3;
const oski_value_t* dp = diag;
REGISTER oski_value_t _x0;
REGISTER oski_value_t _x1;
REGISTER oski_value_t _x2;
VAL_ASSIGN( _x0, xp[0] );
VAL_ASSIGN( _x1, xp[1] );
VAL_ASSIGN( _x2, xp[2] );
VAL_MAC( _t0, dp[0], _x0 );
VAL_MAC( _t1, dp[3], _x0 );
VAL_MAC( _t2, dp[6], _x0 );
VAL_MAC( _t0, dp[1], _x1 );
VAL_MAC( _t1, dp[4], _x1 );
VAL_MAC( _t2, dp[7], _x1 );
VAL_MAC( _t0, dp[2], _x2 );
VAL_MAC( _t1, dp[5], _x2 );
VAL_MAC( _t2, dp[8], _x2 );
}
/* Optionally stream out the unscaled intermediate t_I (stride inct). */
if( tp != NULL )
{
VAL_ASSIGN( tp[0], _t0 );
VAL_ASSIGN( tp[1*inct], _t1 );
VAL_ASSIGN( tp[2*inct], _t2 );
tp += 3*inct;
}
/* Scale by alpha before the transpose (scatter) pass. */
VAL_SCALE( _t0, alpha );
VAL_SCALE( _t1, alpha );
VAL_SCALE( _t2, alpha );
/* Scatter pass: y += A(I,:)^T * (alpha * t_I). */
for( K = ptr[I], vp = val; K < ptr[I+1]; K++, vp += 3*8 )
{
oski_index_t j0 = ind[K];
oski_value_t* yp = y + j0;
REGISTER oski_value_t _y0;
REGISTER oski_value_t _y1;
REGISTER oski_value_t _y2;
REGISTER oski_value_t _y3;
REGISTER oski_value_t _y4;
REGISTER oski_value_t _y5;
REGISTER oski_value_t _y6;
REGISTER oski_value_t _y7;
VAL_SET_ZERO( _y0 );
VAL_SET_ZERO( _y1 );
VAL_SET_ZERO( _y2 );
VAL_SET_ZERO( _y3 );
VAL_SET_ZERO( _y4 );
VAL_SET_ZERO( _y5 );
VAL_SET_ZERO( _y6 );
VAL_SET_ZERO( _y7 );
VAL_MAC( _y0, vp[0], _t0 );
VAL_MAC( _y1, vp[1], _t0 );
VAL_MAC( _y2, vp[2], _t0 );
VAL_MAC( _y3, vp[3], _t0 );
VAL_MAC( _y4, vp[4], _t0 );
VAL_MAC( _y5, vp[5], _t0 );
VAL_MAC( _y6, vp[6], _t0 );
VAL_MAC( _y7, vp[7], _t0 );
VAL_MAC( _y0, vp[8], _t1 );
VAL_MAC( _y1, vp[9], _t1 );
VAL_MAC( _y2, vp[10], _t1 );
VAL_MAC( _y3, vp[11], _t1 );
VAL_MAC( _y4, vp[12], _t1 );
VAL_MAC( _y5, vp[13], _t1 );
VAL_MAC( _y6, vp[14], _t1 );
VAL_MAC( _y7, vp[15], _t1 );
VAL_MAC( _y0, vp[16], _t2 );
VAL_MAC( _y1, vp[17], _t2 );
VAL_MAC( _y2, vp[18], _t2 );
VAL_MAC( _y3, vp[19], _t2 );
VAL_MAC( _y4, vp[20], _t2 );
VAL_MAC( _y5, vp[21], _t2 );
VAL_MAC( _y6, vp[22], _t2 );
VAL_MAC( _y7, vp[23], _t2 );
VAL_INC( yp[0], _y0 );
VAL_INC( yp[1], _y1 );
VAL_INC( yp[2], _y2 );
VAL_INC( yp[3], _y3 );
VAL_INC( yp[4], _y4 );
VAL_INC( yp[5], _y5 );
VAL_INC( yp[6], _y6 );
VAL_INC( yp[7], _y7 );
}
/* Diag block transpose-multiply */
{
oski_value_t* yp = y + I*3;
const oski_value_t* dp = diag;
REGISTER oski_value_t _y0;
REGISTER oski_value_t _y1;
REGISTER oski_value_t _y2;
VAL_SET_ZERO( _y0 );
VAL_SET_ZERO( _y1 );
VAL_SET_ZERO( _y2 );
VAL_MAC( _y0, dp[0], _t0 );
VAL_MAC( _y1, dp[1], _t0 );
VAL_MAC( _y2, dp[2], _t0 );
VAL_MAC( _y0, dp[3], _t1 );
VAL_MAC( _y1, dp[4], _t1 );
VAL_MAC( _y2, dp[5], _t1 );
VAL_MAC( _y0, dp[6], _t2 );
VAL_MAC( _y1, dp[7], _t2 );
VAL_MAC( _y2, dp[8], _t2 );
VAL_INC( yp[0], _y0 );
VAL_INC( yp[1], _y1 );
VAL_INC( yp[2], _y2 );
}
/* Advance to the next block row's values and diagonal block. */
val += (ptr[I+1]-ptr[I]) * 24;
diag += 3*3;
}
} /* MBCSR_MatTransMatMult_v1_aX_b1_xs1_ys1 */
#if defined(DO_NAME_MANGLING)
/** Mangled name for MBCSR_MatTransMatMult_v1_aX_b1_xsX_ysX. */
#define MBCSR_MatTransMatMult_v1_aX_b1_xsX_ysX \
MANGLE_MOD_(MBCSR_MatTransMatMult_v1_aX_b1_xsX_ysX_3x8)
#endif
/**
* \brief The \f$3\times 8\f$ MBCSR implementation
* of \f$A^TA\cdot x\f$, where the source and destination
* vectors have general-stride and general-stride,
* respectively.
*
* Parameter 'tp' may be NULL if no intermediate vector output
* is desired.
*/
void
MBCSR_MatTransMatMult_v1_aX_b1_xsX_ysX( oski_index_t M,
const oski_index_t* restrict ptr, const oski_index_t* restrict ind,
const oski_value_t* restrict val, const oski_value_t* restrict diag,
oski_value_t alpha, const oski_value_t* restrict x, oski_index_t incx,
oski_value_t* restrict y, oski_index_t incy,
oski_value_t* restrict t, oski_index_t inct )
{
oski_index_t I;
oski_value_t* tp = t;
/* Same algorithm as the xs1_ys1 variant, but x/y are accessed with
general strides incx/incy. */
for( I = 0; I < M; I++ )
{
oski_index_t K;
const oski_value_t* vp;
REGISTER oski_value_t _t0 = MAKE_VAL_COMPLEX(0.0, 0.0);
REGISTER oski_value_t _t1 = MAKE_VAL_COMPLEX(0.0, 0.0);
REGISTER oski_value_t _t2 = MAKE_VAL_COMPLEX(0.0, 0.0);
/* Gather pass: t_I = A(I,:)*x over the off-diagonal 3x8 blocks. */
for( K = ptr[I], vp = val; K < ptr[I+1]; K++, vp += 3*8 )
{
oski_index_t j0 = ind[K];
const oski_value_t* xp = x + j0*incx;
REGISTER oski_value_t _x0;
REGISTER oski_value_t _x1;
REGISTER oski_value_t _x2;
REGISTER oski_value_t _x3;
REGISTER oski_value_t _x4;
REGISTER oski_value_t _x5;
REGISTER oski_value_t _x6;
REGISTER oski_value_t _x7;
VAL_ASSIGN( _x0, xp[0] );
VAL_ASSIGN( _x1, xp[1*incx] );
VAL_ASSIGN( _x2, xp[2*incx] );
VAL_ASSIGN( _x3, xp[3*incx] );
VAL_ASSIGN( _x4, xp[4*incx] );
VAL_ASSIGN( _x5, xp[5*incx] );
VAL_ASSIGN( _x6, xp[6*incx] );
VAL_ASSIGN( _x7, xp[7*incx] );
VAL_MAC( _t0, vp[0], _x0 );
VAL_MAC( _t1, vp[8], _x0 );
VAL_MAC( _t2, vp[16], _x0 );
VAL_MAC( _t0, vp[1], _x1 );
VAL_MAC( _t1, vp[9], _x1 );
VAL_MAC( _t2, vp[17], _x1 );
VAL_MAC( _t0, vp[2], _x2 );
VAL_MAC( _t1, vp[10], _x2 );
VAL_MAC( _t2, vp[18], _x2 );
VAL_MAC( _t0, vp[3], _x3 );
VAL_MAC( _t1, vp[11], _x3 );
VAL_MAC( _t2, vp[19], _x3 );
VAL_MAC( _t0, vp[4], _x4 );
VAL_MAC( _t1, vp[12], _x4 );
VAL_MAC( _t2, vp[20], _x4 );
VAL_MAC( _t0, vp[5], _x5 );
VAL_MAC( _t1, vp[13], _x5 );
VAL_MAC( _t2, vp[21], _x5 );
VAL_MAC( _t0, vp[6], _x6 );
VAL_MAC( _t1, vp[14], _x6 );
VAL_MAC( _t2, vp[22], _x6 );
VAL_MAC( _t0, vp[7], _x7 );
VAL_MAC( _t1, vp[15], _x7 );
VAL_MAC( _t2, vp[23], _x7 );
}
/* Diag block multiply */
{
const oski_value_t* xp = x + I*3*incx;
const oski_value_t* dp = diag;
REGISTER oski_value_t _x0;
REGISTER oski_value_t _x1;
REGISTER oski_value_t _x2;
VAL_ASSIGN( _x0, xp[0] );
VAL_ASSIGN( _x1, xp[1*incx] );
VAL_ASSIGN( _x2, xp[2*incx] );
VAL_MAC( _t0, dp[0], _x0 );
VAL_MAC( _t1, dp[3], _x0 );
VAL_MAC( _t2, dp[6], _x0 );
VAL_MAC( _t0, dp[1], _x1 );
VAL_MAC( _t1, dp[4], _x1 );
VAL_MAC( _t2, dp[7], _x1 );
VAL_MAC( _t0, dp[2], _x2 );
VAL_MAC( _t1, dp[5], _x2 );
VAL_MAC( _t2, dp[8], _x2 );
}
/* Optionally stream out the unscaled intermediate t_I (stride inct). */
if( tp != NULL )
{
VAL_ASSIGN( tp[0], _t0 );
VAL_ASSIGN( tp[1*inct], _t1 );
VAL_ASSIGN( tp[2*inct], _t2 );
tp += 3*inct;
}
/* Scale by alpha before the transpose (scatter) pass. */
VAL_SCALE( _t0, alpha );
VAL_SCALE( _t1, alpha );
VAL_SCALE( _t2, alpha );
/* Scatter pass: y += A(I,:)^T * (alpha * t_I). */
for( K = ptr[I], vp = val; K < ptr[I+1]; K++, vp += 3*8 )
{
oski_index_t j0 = ind[K];
oski_value_t* yp = y + j0*incy;
REGISTER oski_value_t _y0;
REGISTER oski_value_t _y1;
REGISTER oski_value_t _y2;
REGISTER oski_value_t _y3;
REGISTER oski_value_t _y4;
REGISTER oski_value_t _y5;
REGISTER oski_value_t _y6;
REGISTER oski_value_t _y7;
VAL_SET_ZERO( _y0 );
VAL_SET_ZERO( _y1 );
VAL_SET_ZERO( _y2 );
VAL_SET_ZERO( _y3 );
VAL_SET_ZERO( _y4 );
VAL_SET_ZERO( _y5 );
VAL_SET_ZERO( _y6 );
VAL_SET_ZERO( _y7 );
VAL_MAC( _y0, vp[0], _t0 );
VAL_MAC( _y1, vp[1], _t0 );
VAL_MAC( _y2, vp[2], _t0 );
VAL_MAC( _y3, vp[3], _t0 );
VAL_MAC( _y4, vp[4], _t0 );
VAL_MAC( _y5, vp[5], _t0 );
VAL_MAC( _y6, vp[6], _t0 );
VAL_MAC( _y7, vp[7], _t0 );
VAL_MAC( _y0, vp[8], _t1 );
VAL_MAC( _y1, vp[9], _t1 );
VAL_MAC( _y2, vp[10], _t1 );
VAL_MAC( _y3, vp[11], _t1 );
VAL_MAC( _y4, vp[12], _t1 );
VAL_MAC( _y5, vp[13], _t1 );
VAL_MAC( _y6, vp[14], _t1 );
VAL_MAC( _y7, vp[15], _t1 );
VAL_MAC( _y0, vp[16], _t2 );
VAL_MAC( _y1, vp[17], _t2 );
VAL_MAC( _y2, vp[18], _t2 );
VAL_MAC( _y3, vp[19], _t2 );
VAL_MAC( _y4, vp[20], _t2 );
VAL_MAC( _y5, vp[21], _t2 );
VAL_MAC( _y6, vp[22], _t2 );
VAL_MAC( _y7, vp[23], _t2 );
VAL_INC( yp[0], _y0 );
VAL_INC( yp[1*incy], _y1 );
VAL_INC( yp[2*incy], _y2 );
VAL_INC( yp[3*incy], _y3 );
VAL_INC( yp[4*incy], _y4 );
VAL_INC( yp[5*incy], _y5 );
VAL_INC( yp[6*incy], _y6 );
VAL_INC( yp[7*incy], _y7 );
}
/* Diag block transpose-multiply */
{
oski_value_t* yp = y + I*3*incy;
const oski_value_t* dp = diag;
REGISTER oski_value_t _y0;
REGISTER oski_value_t _y1;
REGISTER oski_value_t _y2;
VAL_SET_ZERO( _y0 );
VAL_SET_ZERO( _y1 );
VAL_SET_ZERO( _y2 );
VAL_MAC( _y0, dp[0], _t0 );
VAL_MAC( _y1, dp[1], _t0 );
VAL_MAC( _y2, dp[2], _t0 );
VAL_MAC( _y0, dp[3], _t1 );
VAL_MAC( _y1, dp[4], _t1 );
VAL_MAC( _y2, dp[5], _t1 );
VAL_MAC( _y0, dp[6], _t2 );
VAL_MAC( _y1, dp[7], _t2 );
VAL_MAC( _y2, dp[8], _t2 );
VAL_INC( yp[0], _y0 );
VAL_INC( yp[1*incy], _y1 );
VAL_INC( yp[2*incy], _y2 );
}
/* Advance to the next block row's values and diagonal block. */
val += (ptr[I+1]-ptr[I]) * 24;
diag += 3*3;
}
} /* MBCSR_MatTransMatMult_v1_aX_b1_xsX_ysX */
#if defined(DO_NAME_MANGLING)
/** Mangled name for primary exportable symbol, SubmatReprTransSubmatReprMult */
#define SubmatReprTransSubmatReprMult \
MANGLE_MOD_(SubmatReprTransSubmatReprMult_3x8)
#endif
/**
* \brief Exported module wrapper for the \f$3\times 8\f$
* implementation of \f$A^TA\cdot x\f$.
*/
int
SubmatReprTransSubmatReprMult( const oski_submatMBCSR_t* A,
oski_value_t alpha, const oski_vecview_t x,
oski_vecview_t y, oski_vecview_t t )
{
oski_index_t j;
const oski_value_t* xp = x->val;
oski_value_t* yp = y->val;
oski_value_t* tp;
oski_index_t inct;
/* This wrapper is specialized for 3x8 blocks. */
assert( A != NULL );
assert( A->r == 3 );
assert( A->c == 8 );
/* Intermediate output t is optional; disable it when absent or empty. */
if( t == INVALID_VEC || t->num_rows == 0 || t->num_cols == 0 ) {
inct = 0;
tp = NULL;
} else {
inct = t->rowinc;
tp = t->val + A->offset * inct;
}
/* Process each column of the multivector, dispatching to the
unit-stride kernel when both x and y allow it. */
for( j = 0; j < x->num_cols; j++ )
{
if( x->rowinc == 1 && y->rowinc == 1 )
MBCSR_MatTransMatMult_v1_aX_b1_xs1_ys1(
A->num_block_rows, A->bptr, A->bind, A->bval, A->bdiag,
alpha, xp, yp, tp, inct );
else
MBCSR_MatTransMatMult_v1_aX_b1_xsX_ysX(
A->num_block_rows, A->bptr, A->bind, A->bval, A->bdiag,
alpha, xp, x->rowinc, yp, y->rowinc, tp, inct );
xp += x->colinc;
yp += y->colinc;
if( tp != NULL ) tp += t->colinc;
}
return 0;
}
#if defined(DO_NAME_MANGLING)
/** Mangled name for MBCSR_MatHermMatMult_v1_aX_b1_xs1_ys1. */
#define MBCSR_MatHermMatMult_v1_aX_b1_xs1_ys1 \
MANGLE_MOD_(MBCSR_MatHermMatMult_v1_aX_b1_xs1_ys1_3x8)
#endif
/**
* \brief The \f$3\times 8\f$ MBCSR implementation
* of \f$A^HA\cdot x\f$, where the source and destination
* vectors have unit-stride and unit-stride,
* respectively.
*
* Parameter 'tp' may be NULL if no intermediate vector output
* is desired.
*/
void
MBCSR_MatHermMatMult_v1_aX_b1_xs1_ys1( oski_index_t M,
const oski_index_t* restrict ptr, const oski_index_t* restrict ind,
const oski_value_t* restrict val, const oski_value_t* restrict diag,
oski_value_t alpha, const oski_value_t* restrict x,
oski_value_t* restrict y,
oski_value_t* restrict t, oski_index_t inct )
{
oski_index_t I;
oski_value_t* tp = t;
/* For each 3-row block row I: t_I = A(I,:)*x, then
y += alpha*conj(A(I,:))^T*t_I (scatter uses VAL_MAC_CONJ). */
for( I = 0; I < M; I++ )
{
oski_index_t K;
const oski_value_t* vp;
REGISTER oski_value_t _t0 = MAKE_VAL_COMPLEX(0.0, 0.0);
REGISTER oski_value_t _t1 = MAKE_VAL_COMPLEX(0.0, 0.0);
REGISTER oski_value_t _t2 = MAKE_VAL_COMPLEX(0.0, 0.0);
/* Gather pass (no conjugation): t_I = A(I,:)*x. */
for( K = ptr[I], vp = val; K < ptr[I+1]; K++, vp += 3*8 )
{
oski_index_t j0 = ind[K];
const oski_value_t* xp = x + j0;
REGISTER oski_value_t _x0;
REGISTER oski_value_t _x1;
REGISTER oski_value_t _x2;
REGISTER oski_value_t _x3;
REGISTER oski_value_t _x4;
REGISTER oski_value_t _x5;
REGISTER oski_value_t _x6;
REGISTER oski_value_t _x7;
VAL_ASSIGN( _x0, xp[0] );
VAL_ASSIGN( _x1, xp[1] );
VAL_ASSIGN( _x2, xp[2] );
VAL_ASSIGN( _x3, xp[3] );
VAL_ASSIGN( _x4, xp[4] );
VAL_ASSIGN( _x5, xp[5] );
VAL_ASSIGN( _x6, xp[6] );
VAL_ASSIGN( _x7, xp[7] );
VAL_MAC( _t0, vp[0], _x0 );
VAL_MAC( _t1, vp[8], _x0 );
VAL_MAC( _t2, vp[16], _x0 );
VAL_MAC( _t0, vp[1], _x1 );
VAL_MAC( _t1, vp[9], _x1 );
VAL_MAC( _t2, vp[17], _x1 );
VAL_MAC( _t0, vp[2], _x2 );
VAL_MAC( _t1, vp[10], _x2 );
VAL_MAC( _t2, vp[18], _x2 );
VAL_MAC( _t0, vp[3], _x3 );
VAL_MAC( _t1, vp[11], _x3 );
VAL_MAC( _t2, vp[19], _x3 );
VAL_MAC( _t0, vp[4], _x4 );
VAL_MAC( _t1, vp[12], _x4 );
VAL_MAC( _t2, vp[20], _x4 );
VAL_MAC( _t0, vp[5], _x5 );
VAL_MAC( _t1, vp[13], _x5 );
VAL_MAC( _t2, vp[21], _x5 );
VAL_MAC( _t0, vp[6], _x6 );
VAL_MAC( _t1, vp[14], _x6 );
VAL_MAC( _t2, vp[22], _x6 );
VAL_MAC( _t0, vp[7], _x7 );
VAL_MAC( _t1, vp[15], _x7 );
VAL_MAC( _t2, vp[23], _x7 );
}
/* Diag block multiply */
{
const oski_value_t* xp = x + I*3;
const oski_value_t* dp = diag;
REGISTER oski_value_t _x0;
REGISTER oski_value_t _x1;
REGISTER oski_value_t _x2;
VAL_ASSIGN( _x0, xp[0] );
VAL_ASSIGN( _x1, xp[1] );
VAL_ASSIGN( _x2, xp[2] );
VAL_MAC( _t0, dp[0], _x0 );
VAL_MAC( _t1, dp[3], _x0 );
VAL_MAC( _t2, dp[6], _x0 );
VAL_MAC( _t0, dp[1], _x1 );
VAL_MAC( _t1, dp[4], _x1 );
VAL_MAC( _t2, dp[7], _x1 );
VAL_MAC( _t0, dp[2], _x2 );
VAL_MAC( _t1, dp[5], _x2 );
VAL_MAC( _t2, dp[8], _x2 );
}
/* Optionally stream out the unscaled intermediate t_I (stride inct). */
if( tp != NULL )
{
VAL_ASSIGN( tp[0], _t0 );
VAL_ASSIGN( tp[1*inct], _t1 );
VAL_ASSIGN( tp[2*inct], _t2 );
tp += 3*inct;
}
/* Scale by alpha before the conjugate-transpose (scatter) pass. */
VAL_SCALE( _t0, alpha );
VAL_SCALE( _t1, alpha );
VAL_SCALE( _t2, alpha );
/* Scatter pass: y += conj(A(I,:))^T * (alpha * t_I). */
for( K = ptr[I], vp = val; K < ptr[I+1]; K++, vp += 3*8 )
{
oski_index_t j0 = ind[K];
oski_value_t* yp = y + j0;
REGISTER oski_value_t _y0;
REGISTER oski_value_t _y1;
REGISTER oski_value_t _y2;
REGISTER oski_value_t _y3;
REGISTER oski_value_t _y4;
REGISTER oski_value_t _y5;
REGISTER oski_value_t _y6;
REGISTER oski_value_t _y7;
VAL_SET_ZERO( _y0 );
VAL_SET_ZERO( _y1 );
VAL_SET_ZERO( _y2 );
VAL_SET_ZERO( _y3 );
VAL_SET_ZERO( _y4 );
VAL_SET_ZERO( _y5 );
VAL_SET_ZERO( _y6 );
VAL_SET_ZERO( _y7 );
VAL_MAC_CONJ( _y0, vp[0], _t0 );
VAL_MAC_CONJ( _y1, vp[1], _t0 );
VAL_MAC_CONJ( _y2, vp[2], _t0 );
VAL_MAC_CONJ( _y3, vp[3], _t0 );
VAL_MAC_CONJ( _y4, vp[4], _t0 );
VAL_MAC_CONJ( _y5, vp[5], _t0 );
VAL_MAC_CONJ( _y6, vp[6], _t0 );
VAL_MAC_CONJ( _y7, vp[7], _t0 );
VAL_MAC_CONJ( _y0, vp[8], _t1 );
VAL_MAC_CONJ( _y1, vp[9], _t1 );
VAL_MAC_CONJ( _y2, vp[10], _t1 );
VAL_MAC_CONJ( _y3, vp[11], _t1 );
VAL_MAC_CONJ( _y4, vp[12], _t1 );
VAL_MAC_CONJ( _y5, vp[13], _t1 );
VAL_MAC_CONJ( _y6, vp[14], _t1 );
VAL_MAC_CONJ( _y7, vp[15], _t1 );
VAL_MAC_CONJ( _y0, vp[16], _t2 );
VAL_MAC_CONJ( _y1, vp[17], _t2 );
VAL_MAC_CONJ( _y2, vp[18], _t2 );
VAL_MAC_CONJ( _y3, vp[19], _t2 );
VAL_MAC_CONJ( _y4, vp[20], _t2 );
VAL_MAC_CONJ( _y5, vp[21], _t2 );
VAL_MAC_CONJ( _y6, vp[22], _t2 );
VAL_MAC_CONJ( _y7, vp[23], _t2 );
VAL_INC( yp[0], _y0 );
VAL_INC( yp[1], _y1 );
VAL_INC( yp[2], _y2 );
VAL_INC( yp[3], _y3 );
VAL_INC( yp[4], _y4 );
VAL_INC( yp[5], _y5 );
VAL_INC( yp[6], _y6 );
VAL_INC( yp[7], _y7 );
}
/* Diag block transpose-multiply */
{
oski_value_t* yp = y + I*3;
const oski_value_t* dp = diag;
REGISTER oski_value_t _y0;
REGISTER oski_value_t _y1;
REGISTER oski_value_t _y2;
VAL_SET_ZERO( _y0 );
VAL_SET_ZERO( _y1 );
VAL_SET_ZERO( _y2 );
VAL_MAC_CONJ( _y0, dp[0], _t0 );
VAL_MAC_CONJ( _y1, dp[1], _t0 );
VAL_MAC_CONJ( _y2, dp[2], _t0 );
VAL_MAC_CONJ( _y0, dp[3], _t1 );
VAL_MAC_CONJ( _y1, dp[4], _t1 );
VAL_MAC_CONJ( _y2, dp[5], _t1 );
VAL_MAC_CONJ( _y0, dp[6], _t2 );
VAL_MAC_CONJ( _y1, dp[7], _t2 );
VAL_MAC_CONJ( _y2, dp[8], _t2 );
VAL_INC( yp[0], _y0 );
VAL_INC( yp[1], _y1 );
VAL_INC( yp[2], _y2 );
}
/* Advance to the next block row's values and diagonal block. */
val += (ptr[I+1]-ptr[I]) * 24;
diag += 3*3;
}
} /* MBCSR_MatHermMatMult_v1_aX_b1_xs1_ys1 */
#if defined(DO_NAME_MANGLING)
/** Mangled name for MBCSR_MatHermMatMult_v1_aX_b1_xsX_ysX. */
#define MBCSR_MatHermMatMult_v1_aX_b1_xsX_ysX \
MANGLE_MOD_(MBCSR_MatHermMatMult_v1_aX_b1_xsX_ysX_3x8)
#endif
/**
* \brief The \f$3\times 8\f$ MBCSR implementation
* of \f$A^HA\cdot x\f$, where the source and destination
* vectors have general-stride and general-stride,
* respectively.
*
* Parameter 'tp' may be NULL if no intermediate vector output
* is desired.
*/
void
MBCSR_MatHermMatMult_v1_aX_b1_xsX_ysX( oski_index_t M,
const oski_index_t* restrict ptr, const oski_index_t* restrict ind,
const oski_value_t* restrict val, const oski_value_t* restrict diag,
oski_value_t alpha, const oski_value_t* restrict x, oski_index_t incx,
oski_value_t* restrict y, oski_index_t incy,
oski_value_t* restrict t, oski_index_t inct )
{
oski_index_t I;
oski_value_t* tp = t;
/* Same algorithm as the Hermitian xs1_ys1 variant, but x/y are
accessed with general strides incx/incy. */
for( I = 0; I < M; I++ )
{
oski_index_t K;
const oski_value_t* vp;
REGISTER oski_value_t _t0 = MAKE_VAL_COMPLEX(0.0, 0.0);
REGISTER oski_value_t _t1 = MAKE_VAL_COMPLEX(0.0, 0.0);
REGISTER oski_value_t _t2 = MAKE_VAL_COMPLEX(0.0, 0.0);
/* Gather pass (no conjugation): t_I = A(I,:)*x. */
for( K = ptr[I], vp = val; K < ptr[I+1]; K++, vp += 3*8 )
{
oski_index_t j0 = ind[K];
const oski_value_t* xp = x + j0*incx;
REGISTER oski_value_t _x0;
REGISTER oski_value_t _x1;
REGISTER oski_value_t _x2;
REGISTER oski_value_t _x3;
REGISTER oski_value_t _x4;
REGISTER oski_value_t _x5;
REGISTER oski_value_t _x6;
REGISTER oski_value_t _x7;
VAL_ASSIGN( _x0, xp[0] );
VAL_ASSIGN( _x1, xp[1*incx] );
VAL_ASSIGN( _x2, xp[2*incx] );
VAL_ASSIGN( _x3, xp[3*incx] );
VAL_ASSIGN( _x4, xp[4*incx] );
VAL_ASSIGN( _x5, xp[5*incx] );
VAL_ASSIGN( _x6, xp[6*incx] );
VAL_ASSIGN( _x7, xp[7*incx] );
VAL_MAC( _t0, vp[0], _x0 );
VAL_MAC( _t1, vp[8], _x0 );
VAL_MAC( _t2, vp[16], _x0 );
VAL_MAC( _t0, vp[1], _x1 );
VAL_MAC( _t1, vp[9], _x1 );
VAL_MAC( _t2, vp[17], _x1 );
VAL_MAC( _t0, vp[2], _x2 );
VAL_MAC( _t1, vp[10], _x2 );
VAL_MAC( _t2, vp[18], _x2 );
VAL_MAC( _t0, vp[3], _x3 );
VAL_MAC( _t1, vp[11], _x3 );
VAL_MAC( _t2, vp[19], _x3 );
VAL_MAC( _t0, vp[4], _x4 );
VAL_MAC( _t1, vp[12], _x4 );
VAL_MAC( _t2, vp[20], _x4 );
VAL_MAC( _t0, vp[5], _x5 );
VAL_MAC( _t1, vp[13], _x5 );
VAL_MAC( _t2, vp[21], _x5 );
VAL_MAC( _t0, vp[6], _x6 );
VAL_MAC( _t1, vp[14], _x6 );
VAL_MAC( _t2, vp[22], _x6 );
VAL_MAC( _t0, vp[7], _x7 );
VAL_MAC( _t1, vp[15], _x7 );
VAL_MAC( _t2, vp[23], _x7 );
}
/* Diag block multiply */
{
const oski_value_t* xp = x + I*3*incx;
const oski_value_t* dp = diag;
REGISTER oski_value_t _x0;
REGISTER oski_value_t _x1;
REGISTER oski_value_t _x2;
VAL_ASSIGN( _x0, xp[0] );
VAL_ASSIGN( _x1, xp[1*incx] );
VAL_ASSIGN( _x2, xp[2*incx] );
VAL_MAC( _t0, dp[0], _x0 );
VAL_MAC( _t1, dp[3], _x0 );
VAL_MAC( _t2, dp[6], _x0 );
VAL_MAC( _t0, dp[1], _x1 );
VAL_MAC( _t1, dp[4], _x1 );
VAL_MAC( _t2, dp[7], _x1 );
VAL_MAC( _t0, dp[2], _x2 );
VAL_MAC( _t1, dp[5], _x2 );
VAL_MAC( _t2, dp[8], _x2 );
}
/* Optionally stream out the unscaled intermediate t_I (stride inct). */
if( tp != NULL )
{
VAL_ASSIGN( tp[0], _t0 );
VAL_ASSIGN( tp[1*inct], _t1 );
VAL_ASSIGN( tp[2*inct], _t2 );
tp += 3*inct;
}
/* Scale by alpha before the conjugate-transpose (scatter) pass. */
VAL_SCALE( _t0, alpha );
VAL_SCALE( _t1, alpha );
VAL_SCALE( _t2, alpha );
/* Scatter pass: y += conj(A(I,:))^T * (alpha * t_I). */
for( K = ptr[I], vp = val; K < ptr[I+1]; K++, vp += 3*8 )
{
oski_index_t j0 = ind[K];
oski_value_t* yp = y + j0*incy;
REGISTER oski_value_t _y0;
REGISTER oski_value_t _y1;
REGISTER oski_value_t _y2;
REGISTER oski_value_t _y3;
REGISTER oski_value_t _y4;
REGISTER oski_value_t _y5;
REGISTER oski_value_t _y6;
REGISTER oski_value_t _y7;
VAL_SET_ZERO( _y0 );
VAL_SET_ZERO( _y1 );
VAL_SET_ZERO( _y2 );
VAL_SET_ZERO( _y3 );
VAL_SET_ZERO( _y4 );
VAL_SET_ZERO( _y5 );
VAL_SET_ZERO( _y6 );
VAL_SET_ZERO( _y7 );
VAL_MAC_CONJ( _y0, vp[0], _t0 );
VAL_MAC_CONJ( _y1, vp[1], _t0 );
VAL_MAC_CONJ( _y2, vp[2], _t0 );
VAL_MAC_CONJ( _y3, vp[3], _t0 );
VAL_MAC_CONJ( _y4, vp[4], _t0 );
VAL_MAC_CONJ( _y5, vp[5], _t0 );
VAL_MAC_CONJ( _y6, vp[6], _t0 );
VAL_MAC_CONJ( _y7, vp[7], _t0 );
VAL_MAC_CONJ( _y0, vp[8], _t1 );
VAL_MAC_CONJ( _y1, vp[9], _t1 );
VAL_MAC_CONJ( _y2, vp[10], _t1 );
VAL_MAC_CONJ( _y3, vp[11], _t1 );
VAL_MAC_CONJ( _y4, vp[12], _t1 );
VAL_MAC_CONJ( _y5, vp[13], _t1 );
VAL_MAC_CONJ( _y6, vp[14], _t1 );
VAL_MAC_CONJ( _y7, vp[15], _t1 );
VAL_MAC_CONJ( _y0, vp[16], _t2 );
VAL_MAC_CONJ( _y1, vp[17], _t2 );
VAL_MAC_CONJ( _y2, vp[18], _t2 );
VAL_MAC_CONJ( _y3, vp[19], _t2 );
VAL_MAC_CONJ( _y4, vp[20], _t2 );
VAL_MAC_CONJ( _y5, vp[21], _t2 );
VAL_MAC_CONJ( _y6, vp[22], _t2 );
VAL_MAC_CONJ( _y7, vp[23], _t2 );
VAL_INC( yp[0], _y0 );
VAL_INC( yp[1*incy], _y1 );
VAL_INC( yp[2*incy], _y2 );
VAL_INC( yp[3*incy], _y3 );
VAL_INC( yp[4*incy], _y4 );
VAL_INC( yp[5*incy], _y5 );
VAL_INC( yp[6*incy], _y6 );
VAL_INC( yp[7*incy], _y7 );
}
/* Diag block transpose-multiply */
{
oski_value_t* yp = y + I*3*incy;
const oski_value_t* dp = diag;
REGISTER oski_value_t _y0;
REGISTER oski_value_t _y1;
REGISTER oski_value_t _y2;
VAL_SET_ZERO( _y0 );
VAL_SET_ZERO( _y1 );
VAL_SET_ZERO( _y2 );
VAL_MAC_CONJ( _y0, dp[0], _t0 );
VAL_MAC_CONJ( _y1, dp[1], _t0 );
VAL_MAC_CONJ( _y2, dp[2], _t0 );
VAL_MAC_CONJ( _y0, dp[3], _t1 );
VAL_MAC_CONJ( _y1, dp[4], _t1 );
VAL_MAC_CONJ( _y2, dp[5], _t1 );
VAL_MAC_CONJ( _y0, dp[6], _t2 );
VAL_MAC_CONJ( _y1, dp[7], _t2 );
VAL_MAC_CONJ( _y2, dp[8], _t2 );
VAL_INC( yp[0], _y0 );
VAL_INC( yp[1*incy], _y1 );
VAL_INC( yp[2*incy], _y2 );
}
/* Advance to the next block row's values and diagonal block. */
val += (ptr[I+1]-ptr[I]) * 24;
diag += 3*3;
}
} /* MBCSR_MatHermMatMult_v1_aX_b1_xsX_ysX */
#if defined(DO_NAME_MANGLING)
/** Mangled name for primary exportable symbol, SubmatReprHermSubmatReprMult */
#define SubmatReprHermSubmatReprMult \
MANGLE_MOD_(SubmatReprHermSubmatReprMult_3x8)
#endif
/**
* \brief Exported module wrapper for the \f$3\times 8\f$
* implementation of \f$A^HA\cdot x\f$.
*/
int
SubmatReprHermSubmatReprMult( const oski_submatMBCSR_t* A,
oski_value_t alpha, const oski_vecview_t x,
oski_vecview_t y, oski_vecview_t t )
{
oski_index_t j;
const oski_value_t* xp = x->val;
oski_value_t* yp = y->val;
oski_value_t* tp;
oski_index_t inct;
/* This wrapper is specialized for 3x8 blocks. */
assert( A != NULL );
assert( A->r == 3 );
assert( A->c == 8 );
/* Intermediate output t is optional; disable it when absent or empty. */
if( t == INVALID_VEC || t->num_rows == 0 || t->num_cols == 0 ) {
inct = 0;
tp = NULL;
} else {
inct = t->rowinc;
tp = t->val + A->offset * inct;
}
/* Process each column of the multivector, dispatching to the
unit-stride kernel when both x and y allow it. */
for( j = 0; j < x->num_cols; j++ )
{
if( x->rowinc == 1 && y->rowinc == 1 )
MBCSR_MatHermMatMult_v1_aX_b1_xs1_ys1(
A->num_block_rows, A->bptr, A->bind, A->bval, A->bdiag,
alpha, xp, yp, tp, inct );
else
MBCSR_MatHermMatMult_v1_aX_b1_xsX_ysX(
A->num_block_rows, A->bptr, A->bind, A->bval, A->bdiag,
alpha, xp, x->rowinc, yp, y->rowinc, tp, inct );
xp += x->colinc;
yp += y->colinc;
if( tp != NULL ) tp += t->colinc;
}
return 0;
}
/* eof */
|
889812.c | /* $OpenBSD: auth-krb5.c,v 1.23 2018/07/09 21:35:50 markus Exp $ */
/*
* Kerberos v5 authentication and ticket-passing routines.
*
* From: FreeBSD: src/crypto/openssh/auth-krb5.c,v 1.6 2001/02/13 16:58:04 assar
*/
/*
* Copyright (c) 2002 Daniel Kouril. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "includes.h"
#include <sys/types.h>
#include <pwd.h>
#include <stdarg.h>
#include "xmalloc.h"
#include "ssh.h"
#include "packet.h"
#include "log.h"
#include "sshbuf.h"
#include "sshkey.h"
#include "misc.h"
#include "servconf.h"
#include "uidswap.h"
#include "hostfile.h"
#include "auth.h"
#ifdef KRB5
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <krb5.h>
extern ServerOptions options;
/*
 * Lazily create the per-authctxt Kerberos library context.
 * Returns 0 on success or a krb5 error code from krb5_init_context().
 */
static int
krb5_init(void *context)
{
	Authctxt *authctxt = (Authctxt *)context;

	if (authctxt->krb5_ctx != NULL)
		return (0);	/* already initialized */
	return (krb5_init_context(&authctxt->krb5_ctx));
}
/*
 * Validate "password" for the user in authctxt against Kerberos and, on
 * success, leave a forwardable credential cache recorded in
 * authctxt->krb5_fwd_ccache / krb5_ccname.
 *
 * Returns 1 on successful authentication of a valid user, 0 to fall back
 * to local password authentication (when permitted), and -1 on hard
 * failure.  All krb5 state created here is torn down by
 * krb5_cleanup_proc() on the error path.
 */
int
auth_krb5_password(Authctxt *authctxt, const char *password)
{
#ifndef HEIMDAL
	krb5_creds creds;
	krb5_principal server;
#endif
	krb5_error_code problem;
	krb5_ccache ccache = NULL;
	int len;
	char *client, *platform_client;
	const char *errmsg;

	/* get platform-specific kerberos client principal name (if it exists) */
	platform_client = platform_krb5_get_principal_name(authctxt->pw->pw_name);
	client = platform_client ? platform_client : authctxt->pw->pw_name;

	/* Run the library calls with the target user's uid where possible. */
	temporarily_use_uid(authctxt->pw);

	problem = krb5_init(authctxt);
	if (problem)
		goto out;

	problem = krb5_parse_name(authctxt->krb5_ctx, client,
	    &authctxt->krb5_user);
	if (problem)
		goto out;

#ifdef HEIMDAL
	/* Heimdal path: verify into a temporary memory ccache, then copy
	 * the result into a file-based forwardable cache. */
# ifdef HAVE_KRB5_CC_NEW_UNIQUE
	problem = krb5_cc_new_unique(authctxt->krb5_ctx,
	     krb5_mcc_ops.prefix, NULL, &ccache);
# else
	problem = krb5_cc_gen_new(authctxt->krb5_ctx, &krb5_mcc_ops, &ccache);
# endif
	if (problem)
		goto out;

	problem = krb5_cc_initialize(authctxt->krb5_ctx, ccache,
		authctxt->krb5_user);
	if (problem)
		goto out;

	/* krb5_verify_user() may need root (e.g. to read the host keytab),
	 * so drop the temporary uid around just this call. */
	restore_uid();
	problem = krb5_verify_user(authctxt->krb5_ctx, authctxt->krb5_user,
	    ccache, password, 1, NULL);
	temporarily_use_uid(authctxt->pw);

	if (problem)
		goto out;

# ifdef HAVE_KRB5_CC_NEW_UNIQUE
	problem = krb5_cc_new_unique(authctxt->krb5_ctx,
	     krb5_fcc_ops.prefix, NULL, &authctxt->krb5_fwd_ccache);
# else
	problem = krb5_cc_gen_new(authctxt->krb5_ctx, &krb5_fcc_ops,
	    &authctxt->krb5_fwd_ccache);
# endif
	if (problem)
		goto out;

	problem = krb5_cc_copy_cache(authctxt->krb5_ctx, ccache,
	    authctxt->krb5_fwd_ccache);
	krb5_cc_destroy(authctxt->krb5_ctx, ccache);
	ccache = NULL;
	if (problem)
		goto out;
#else
	/* MIT path: obtain initial creds with the password, then verify
	 * them against the host service key to defeat KDC spoofing. */
	problem = krb5_get_init_creds_password(authctxt->krb5_ctx, &creds,
	    authctxt->krb5_user, (char *)password, NULL, NULL, 0, NULL, NULL);
	if (problem)
		goto out;

	problem = krb5_sname_to_principal(authctxt->krb5_ctx, NULL, NULL,
	    KRB5_NT_SRV_HST, &server);
	if (problem)
		goto out;

	restore_uid();
	problem = krb5_verify_init_creds(authctxt->krb5_ctx, &creds, server,
	    NULL, NULL, NULL);
	krb5_free_principal(authctxt->krb5_ctx, server);
	temporarily_use_uid(authctxt->pw);
	if (problem)
		goto out;

	/* Authorization: is this principal allowed to log in as pw_name? */
	if (!krb5_kuserok(authctxt->krb5_ctx, authctxt->krb5_user,
		authctxt->pw->pw_name)) {
		problem = -1;	/* sentinel: not a krb5 error code */
		goto out;
	}

	problem = ssh_krb5_cc_gen(authctxt->krb5_ctx, &authctxt->krb5_fwd_ccache);
	if (problem)
		goto out;

	problem = krb5_cc_initialize(authctxt->krb5_ctx, authctxt->krb5_fwd_ccache,
				     authctxt->krb5_user);
	if (problem)
		goto out;

	problem= krb5_cc_store_cred(authctxt->krb5_ctx, authctxt->krb5_fwd_ccache,
				 &creds);
	if (problem)
		goto out;
#endif

	/* Record the ccache name ("FILE:" or "API:" prefix + path) so it
	 * can be exported to the session environment. */
	authctxt->krb5_ticket_file = (char *)krb5_cc_get_name(authctxt->krb5_ctx, authctxt->krb5_fwd_ccache);

	len = strlen(authctxt->krb5_ticket_file) + 6;
	authctxt->krb5_ccname = xmalloc(len);
#ifdef USE_CCAPI
	snprintf(authctxt->krb5_ccname, len, "API:%s",
		authctxt->krb5_ticket_file);
#else
	snprintf(authctxt->krb5_ccname, len, "FILE:%s",
		authctxt->krb5_ticket_file);
#endif

#ifdef USE_PAM
	if (options.use_pam)
		do_pam_putenv("KRB5CCNAME", authctxt->krb5_ccname);
#endif

 out:
	restore_uid();

	free(platform_client);

	if (problem) {
		if (ccache)
			krb5_cc_destroy(authctxt->krb5_ctx, ccache);

		/* problem == -1 is our own authorization sentinel, not a
		 * library error, so it has no error message. */
		if (authctxt->krb5_ctx != NULL && problem!=-1) {
			errmsg = krb5_get_error_message(authctxt->krb5_ctx,
			    problem);
			debug("Kerberos password authentication failed: %s",
			    errmsg);
			krb5_free_error_message(authctxt->krb5_ctx, errmsg);
		} else
			debug("Kerberos password authentication failed: %d",
			    problem);

		krb5_cleanup_proc(authctxt);

		if (options.kerberos_or_local_passwd)
			return (-1);
		else
			return (0);
	}
	return (authctxt->valid ? 1 : 0);
}
/*
 * Release all Kerberos state held by the authctxt: the forwarded
 * credential cache, the client principal, and finally the library
 * context itself (which must outlive the first two).
 */
void
krb5_cleanup_proc(Authctxt *authctxt)
{
	debug("krb5_cleanup_proc called");

	if (authctxt->krb5_fwd_ccache != NULL) {
		krb5_cc_destroy(authctxt->krb5_ctx, authctxt->krb5_fwd_ccache);
		authctxt->krb5_fwd_ccache = NULL;
	}
	if (authctxt->krb5_user != NULL) {
		krb5_free_principal(authctxt->krb5_ctx, authctxt->krb5_user);
		authctxt->krb5_user = NULL;
	}
	if (authctxt->krb5_ctx != NULL) {
		krb5_free_context(authctxt->krb5_ctx);
		authctxt->krb5_ctx = NULL;
	}
}
#ifndef HEIMDAL
krb5_error_code
ssh_krb5_cc_gen(krb5_context ctx, krb5_ccache *ccache) {
int ret, oerrno;
char ccname[40];
mode_t old_umask;
#ifdef USE_CCAPI
char cctemplate[] = "API:krb5cc_%d";
#else
char cctemplate[] = "FILE:/tmp/krb5cc_%d_XXXXXXXXXX";
int tmpfd;
#endif
ret = snprintf(ccname, sizeof(ccname),
cctemplate, geteuid());
if (ret < 0 || (size_t)ret >= sizeof(ccname))
return ENOMEM;
#ifndef USE_CCAPI
old_umask = umask(0177);
tmpfd = mkstemp(ccname + strlen("FILE:"));
oerrno = errno;
umask(old_umask);
if (tmpfd == -1) {
logit("mkstemp(): %.100s", strerror(oerrno));
return oerrno;
}
if (fchmod(tmpfd,S_IRUSR | S_IWUSR) == -1) {
oerrno = errno;
logit("fchmod(): %.100s", strerror(oerrno));
close(tmpfd);
return oerrno;
}
close(tmpfd);
#endif
return (krb5_cc_resolve(ctx, ccname, ccache));
}
#endif /* !HEIMDAL */
#endif /* KRB5 */
|
826050.c | //#if 0
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include "ibm.h"
#include "mem.h"
#include "x86.h"
#include "x86_flags.h"
#include "386_common.h"
#include "cpu.h"
#include "config.h"
#include "paths.h"
#include "i440bx.h"
/*Controls whether the accessed bit in a descriptor is set when CS is loaded.*/
#define CS_ACCESSED

/*Controls whether the accessed bit in a descriptor is set when a data or stack
  selector is loaded.*/
#define SEL_ACCESSED

/* Counters, apparently for debugging only — TODO confirm no external reader. */
int stimes = 0;
int dtimes = 0;
int btimes = 0;

int is486=1;

/* Error code associated with the currently pending exception; pushed on the
   stack by x86_doabrt() for faults that carry one. */
uint32_t abrt_error;

/* Nonzero while control is being transferred through a 16-/32-bit call gate. */
int cgate16,cgate32;

#define breaknullsegs 0

/* Width (16 or 32) of the gate being serviced; x86_doabrt() uses it to decide
   whether to push a word- or dword-sized error code. */
int intgatesize;

void taskswitch286(uint16_t seg, uint16_t *segdat, int is32);
void taskswitch386(uint16_t seg, uint16_t *segdat);

/*NOT PRESENT is INT 0B
  GPF is INT 0D*/

/* Log file opened lazily by x86abort(). */
FILE *pclogf;
/*
 * Fatal-error helper: format a message, append it to pcem.log (opened
 * lazily on first use), dump the CPU registers, and terminate the
 * emulator.  Never returns.
 */
void x86abort(const char *format, ...)
{
        char buf[256];

        if (!pclogf)
        {
                strcpy(buf, logs_path);
                put_backslash(buf);
                strcat(buf, "pcem.log");
                pclogf=fopen(buf, "wt");
        }
        va_list ap;
        va_start(ap, format);
        /* vsnprintf, not vsprintf: a long caller-supplied message must not
           overflow the 256-byte stack buffer (the message is truncated
           instead). */
        vsnprintf(buf, sizeof(buf), format, ap);
        va_end(ap);
        fputs(buf,pclogf);
        fflush(pclogf);
        dumpregs();
        exit(-1);
}
/*
 * Reset one segment register to its power-on state: ring-0 data access
 * rights and a 64KB limit.  CS additionally gets the architectural
 * reset base/selector; every other segment starts at base 0.
 */
static void seg_reset(x86seg *s)
{
        s->access = (0 << 5) | 2;
        s->access2 = 0;
        s->limit = 0xFFFF;
        s->limit_low = 0;
        s->limit_high = 0xffff;

        if (s != &cpu_state.seg_cs)
        {
                s->base = 0;
                s->seg = 0;
                return;
        }

        // TODO - When the PC is reset, initialization of the CS descriptor must be like the annotated line below.
        //s->base = AT ? (cpu_16bitbus ? 0xFF0000 : 0xFFFF0000) : 0xFFFF0;
        s->base = AT ? 0xF0000 : 0xFFFF0;
        s->seg = AT ? 0xF000 : 0xFFFF;
}
void x86seg_reset()
{
seg_reset(&cpu_state.seg_cs);
seg_reset(&cpu_state.seg_ds);
seg_reset(&cpu_state.seg_es);
seg_reset(&cpu_state.seg_fs);
seg_reset(&cpu_state.seg_gs);
seg_reset(&cpu_state.seg_ss);
}
/*
 * Deliver the pending CPU exception x86_abrt.  Rolls the PC back to the
 * start of the faulting instruction and restores the pre-fault CPL,
 * then dispatches: protected mode goes through pmodeint(); real mode
 * pushes FLAGS/CS/IP and vectors through the IVT directly.  For
 * protected-mode faults that carry an error code, abrt_error is pushed
 * afterwards, sized by intgatesize.
 */
void x86_doabrt(int x86_abrt)
{
//        ingpf = 1;
        /* Restart at the faulting instruction with the pre-fault CPL. */
        cpu_state.pc = cpu_state.oldpc;
        cpu_state.seg_cs.access = oldcpl << 5;

//        pclog("x86_doabrt - %02X %08X %04X:%08X %i\n", x86_abrt, abrt_error, CS, pc, ins);
/*        if (CS == 0x3433 && pc == 0x000006B0)
        {
                pclog("Quit it\n");
                dumpregs();
                exit(-1);
        }*/
//        pclog("GPF! - error %04X %04X(%08X):%08X %02X %02X %i %04X %i %i\n",error,CS,cs,pc,opcode,opcode2,ins,flags&I_FLAG,IOPL, dtimes);

        if (msw & 1)
                pmodeint(x86_abrt, 0);
        else
        {
                /* Real mode: vector through the IVT at idt.base. */
                uint32_t addr = (x86_abrt << 2) + idt.base;
                if (stack32)
                {
                        writememw(ss,ESP-2,cpu_state.flags);
                        writememw(ss,ESP-4,CS);
                        writememw(ss,ESP-6,cpu_state.pc);
                        ESP-=6;
                }
                else
                {
                        writememw(ss,((SP-2)&0xFFFF),cpu_state.flags);
                        writememw(ss,((SP-4)&0xFFFF),CS);
                        writememw(ss,((SP-6)&0xFFFF),cpu_state.pc);
                        SP-=6;
                }

                cpu_state.flags &= ~I_FLAG;
                cpu_state.flags &= ~T_FLAG;
                cpu_state.pc=readmemw(0,addr);
                loadcs(readmemw(0,addr+2));
                return;
        }

        /* Nested fault (or CPU reset) while delivering — bail out. */
        if (cpu_state.abrt || x86_was_reset) return;

        /* Push the error code, sized by the gate just taken. */
        if (intgatesize == 16)
        {
                if (stack32)
                {
                        writememw(ss, ESP-2, abrt_error);
                        ESP-=2;
                }
                else
                {
                        writememw(ss, ((SP-2)&0xFFFF), abrt_error);
                        SP-=2;
                }
        }
        else
        {
                if (stack32)
                {
                        writememl(ss, ESP-4, abrt_error);
                        ESP-=4;
                }
                else
                {
                        writememl(ss, ((SP-4)&0xFFFF), abrt_error);
                        SP-=4;
                }
        }
}
/* Flag a pending General Protection Fault with the given error code.
   The string argument exists only to document the call site. */
void x86gpf(char *s, uint16_t error)
{
        abrt_error = error;
        cpu_state.abrt = ABRT_GPF;
}
/* Flag a GPF that the caller anticipates (e.g. V86 trap handling); the
   EXPECTED bit lets the dispatcher treat it less noisily. */
void x86gpf_expected(char *s, uint16_t error)
{
        abrt_error = error;
        cpu_state.abrt = ABRT_GPF | ABRT_EXPECTED;
}
/* Flag a pending Stack Segment fault with the given error code. */
void x86ss(char *s, uint16_t error)
{
        abrt_error = error;
        cpu_state.abrt = ABRT_SS;
}
/* Flag a pending Invalid TSS fault with the given error code. */
void x86ts(char *s, uint16_t error)
{
        abrt_error = error;
        cpu_state.abrt = ABRT_TS;
}
/* Flag a pending Segment Not Present fault with the given error code. */
void x86np(char *s, uint16_t error)
{
        abrt_error = error;
        cpu_state.abrt = ABRT_NP;
}
/* Record the current stack width (B bit of SS) and mirror it into the
   cached CPU status word. */
static void set_stack32(int s)
{
        stack32 = s;
        if (s)
                cpu_cur_status |= CPU_STATUS_STACK32;
        else
                cpu_cur_status &= ~CPU_STATUS_STACK32;
}
/* Record the default operand/address size (D bit of CS): use32 is 0x300
   for 32-bit code segments, 0 for 16-bit; the cached CPU status word is
   kept in sync. */
static void set_use32(int u)
{
        use32 = u ? 0x300 : 0;
        if (u)
                cpu_cur_status |= CPU_STATUS_USE32;
        else
                cpu_cur_status &= ~CPU_STATUS_USE32;
}
/*
 * Unpack a raw 8-byte descriptor (as four little-endian words in segdat)
 * into the segment-register cache *s: limit (with 4K granularity scaling),
 * base (24-bit on pre-386), access rights, and the effective low/high
 * bounds used by the limit checker — inverted for expand-down segments.
 * Also refreshes the cached "flat DS/SS" status bits.
 */
static void do_seg_load(x86seg *s, uint16_t *segdat)
{
        s->limit = segdat[0] | ((segdat[3] & 0xF) << 16);
        s->limit_raw = s->limit;
        if (segdat[3] & 0x80)
                s->limit = (s->limit << 12) | 0xFFF;    /* page granularity */
        s->base = segdat[1] | ((segdat[2] & 0xFF) << 16);
        if (is386)
                s->base |= ((segdat[3] >> 8) << 24);    /* bits 31:24 of base */
        s->access = segdat[2] >> 8;
        s->access2 = segdat[3] & 0xf0;
        if ((segdat[2] & 0x1800) != 0x1000 || !(segdat[2] & (1 << 10))) /*expand-down*/
        {
                s->limit_high = s->limit;
                s->limit_low = 0;
        }
        else
        {
                /* Expand-down: valid offsets run from limit+1 up to 64K/4G. */
                s->limit_high = (segdat[3] & 0x40) ? 0xffffffff : 0xffff;
                s->limit_low = s->limit + 1;
        }

//        if (output) pclog("SEG : base=%08x limit=%08x low=%08x high=%08x\n", s->base, s->limit, s->limit_low, s->limit_high);

        if (s == &cpu_state.seg_ds)
        {
                if (s->base == 0 && s->limit_low == 0 && s->limit_high == 0xffffffff)
                        cpu_cur_status &= ~CPU_STATUS_NOTFLATDS;
                else
                        cpu_cur_status |= CPU_STATUS_NOTFLATDS;
        }
        if (s == &cpu_state.seg_ss)
        {
                if (s->base == 0 && s->limit_low == 0 && s->limit_high == 0xffffffff)
                        cpu_cur_status &= ~CPU_STATUS_NOTFLATSS;
                else
                        cpu_cur_status |= CPU_STATUS_NOTFLATSS;
        }
}
/* Give a segment the fixed virtual-8086 attributes: ring-3 writable data
   access rights and a flat 64KB limit. */
static void do_seg_v86_init(x86seg *s)
{
        s->access = (3 << 5) | 2;
        s->access2 = 0;
        s->limit_low = 0;
        s->limit = 0xffff;
        s->limit_high = 0xffff;
}
/*
 * Re-validate a cached segment (used after operations that may have
 * invalidated it): the selector must lie inside its descriptor table
 * and the cached access rights must describe a segment readable at the
 * current privilege level.  On failure the segment is reloaded with the
 * null selector.
 */
static void check_seg_valid(x86seg *s)
{
        int dpl = (s->access >> 5) & 3;
        int valid;

        /* Selector index must fall within the LDT or GDT limit. */
        if (s->seg & 4)
                valid = ((s->seg & ~7) < ldt.limit);
        else
                valid = ((s->seg & ~7) < gdt.limit);

        /* The switch below can only clear 'valid', so skip it when the
           table check already failed — same outcome, less work. */
        if (valid)
        {
                switch (s->access & 0x1f)
                {
                        case 0x10: case 0x11: case 0x12: case 0x13: /*Data segments*/
                        case 0x14: case 0x15: case 0x16: case 0x17:
                        case 0x1A: case 0x1B: /*Readable non-conforming code*/
                        if ((s->seg & 3) > dpl || (CPL) > dpl)
                                valid = 0;      /* RPL/CPL exceeds descriptor DPL */
                        break;

                        case 0x1E: case 0x1F: /*Readable conforming code*/
                        break;

                        default:
                        valid = 0;
                        break;
                }
        }

        if (!valid)
                loadseg(0, s);
}
/*
 * Load selector 'seg' into data/stack segment register *s.
 *
 * In protected (non-V86) mode this performs the full descriptor fetch
 * and privilege/type validation; SS gets the stricter stack-segment
 * rules (RPL == DPL == CPL, writable data type, present).  A null
 * selector is legal for everything except SS and marks the segment
 * unusable (base = -1).  In real/V86 mode the base is simply seg<<4.
 *
 * Returns nonzero when a fault was raised (via x86gpf/x86ss/x86np) and
 * the load did not complete; 0 (or the pending abrt state) otherwise.
 */
int loadseg(uint16_t seg, x86seg *s)
{
        uint16_t segdat[4];
        uint32_t addr;
        int dpl;

        if (msw&1 && !(cpu_state.eflags&VM_FLAG))
        {
//                intcount++;
                /* Null selector: fault for SS, otherwise mark unusable. */
                if (!(seg&~3))
                {
                        if (s==&cpu_state.seg_ss)
                        {
                                pclog("SS selector = NULL!\n");
                                x86ss(NULL,0);
                                return 1;
//                                dumpregs();
//                                exit(-1);
                        }
//                        if (s->base!=-1) pclog("NEW! ");
                        s->seg=0;
                        s->access = 0;
                        s->base=-1;
                        if (s == &cpu_state.seg_ds)
                                cpu_cur_status |= CPU_STATUS_NOTFLATDS;
//                        pclog("NULL selector %s%s%s%s %04X(%06X):%06X\n",(s==&_ds)?"DS":"",(s==&_es)?"ES":"",(s==&_fs)?"FS":"",(s==&_gs)?"GS":"",CS,cs,pc);
                        return 0;
                }
//                if (s==&_ss) pclog("Load SS %04X\n",seg);
//                pclog("Protected mode seg load!\n");
                /* Locate the descriptor in the LDT or GDT. */
                addr=seg&~7;
                if (seg&4)
                {
                        if (addr>=ldt.limit)
                        {
                                pclog("Bigger than LDT limit %04X %04X %02X\n",seg,ldt.limit, 0/*rmdat*/);
//                                dumppic();
//                                dumpregs();
//                                exit(-1);
                                x86gpf(NULL,seg&~3);
                                return 1;
                        }
                        addr+=ldt.base;
                }
                else
                {
                        if (addr>=gdt.limit)
                        {
                                pclog("Bigger than GDT limit %04X %04X 1\n",seg,gdt.limit);
//                                dumpregs();
//                                exit(-1);
                                x86gpf(NULL,seg&~3);
                                return 1;
                        }
                        addr+=gdt.base;
                }
                /* Fetch the 8-byte descriptor with paging checks bypassed. */
                cpl_override=1;
                segdat[0]=readmemw(0,addr);
                segdat[1]=readmemw(0,addr+2);
                segdat[2]=readmemw(0,addr+4);
                segdat[3]=readmemw(0,addr+6); cpl_override=0; if (cpu_state.abrt) return 1;
                dpl=(segdat[2]>>13)&3;
                if (s==&cpu_state.seg_ss)
                {
                        /* Stack segment: RPL and DPL must equal CPL, type
                           must be writable data, and it must be present. */
                        if (!(seg&~3))
                        {
                                pclog("Load SS null selector\n");
                                x86gpf(NULL,seg&~3);
                                return 1;
                        }
                        if ((seg&3)!=CPL || dpl!=CPL)
                        {
                                pclog("Invalid SS permiss\n");
                                x86gpf(NULL,seg&~3);
//                                x86abort("Invalid SS permiss for %04X!\n",seg&0xFFFC);
                                return 1;
                        }
                        switch ((segdat[2]>>8)&0x1F)
                        {
                                case 0x12: case 0x13: case 0x16: case 0x17: /*r/w*/
                                break;
                                default:
                                pclog("Invalid SS type\n");
                                x86gpf(NULL,seg&~3);
//                                x86abort("Invalid SS segment type for %04X!\n",seg&0xFFFC);
                                return 1;
                        }
                        if (!(segdat[2]&0x8000))
                        {
                                pclog("Load SS not present!\n");
                                x86ss(NULL,seg&~3);
                                return 1;
                        }
                        set_stack32((segdat[3] & 0x40) ? 1 : 0);
//                        pclog("Load SS %04x %04x %04x %04x\n", segdat[0], segdat[1], segdat[2], segdat[3]);
                }
                else if (s!=&cpu_state.seg_cs)
                {
                        /* Plain data segment load: readable type required,
                           and RPL/CPL may not exceed the descriptor DPL for
                           non-conforming types. */
                        if (output) pclog("Seg data %04X %04X %04X %04X\n", segdat[0], segdat[1], segdat[2], segdat[3]);
                        if (output) pclog("Seg type %03X\n",segdat[2]&0x1F00);
                        switch ((segdat[2]>>8)&0x1F)
                        {
                                case 0x10: case 0x11: case 0x12: case 0x13: /*Data segments*/
                                case 0x14: case 0x15: case 0x16: case 0x17:
                                case 0x1A: case 0x1B: /*Readable non-conforming code*/
                                if ((seg&3)>dpl || (CPL)>dpl)
                                {
                                        pclog("Data seg fail - %04X:%08X %04X %i %04X\n",CS,cpu_state.pc,seg,dpl,segdat[2]);
                                        x86gpf(NULL,seg&~3);
//                                        x86abort("Data segment load - level too low!\n",seg&0xFFFC);
                                        return 1;
                                }
                                break;
                                case 0x1E: case 0x1F: /*Readable conforming code*/
                                break;
                                default:
                                pclog("Invalid segment type for %04X! %04X\n",seg&0xFFFC,segdat[2]);
                                x86gpf(NULL,seg&~3);
                                return 1;
                        }
                }

                if (!(segdat[2] & 0x8000))
                {
                        x86np("Load data seg not present", seg & 0xfffc);
                        return 1;
                }
                s->seg = seg;
                do_seg_load(s, segdat);

#ifndef CS_ACCESSED
                if (s != &_cs)
                {
#endif
#ifdef SEL_ACCESSED
                        cpl_override = 1;
                        writememw(0, addr+4, segdat[2] | 0x100); /*Set accessed bit*/
                        cpl_override = 0;
#endif
#ifndef CS_ACCESSED
                }
#endif
                s->checked = 0;
                if (s == &cpu_state.seg_ds)
                        codegen_flat_ds = 0;
                if (s == &cpu_state.seg_ss)
                        codegen_flat_ss = 0;
        }
        else
        {
                /* Real or virtual-8086 mode: base = selector << 4. */
                s->access = (3 << 5) | 2;
                s->access2 = 0;
                s->base = seg << 4;
                s->seg = seg;
                s->checked = 1;
                if (s == &cpu_state.seg_ds)
                        codegen_flat_ds = 0;
                if (s == &cpu_state.seg_ss)
                        codegen_flat_ss = 0;
                if (s == &cpu_state.seg_ss && (cpu_state.eflags & VM_FLAG))
                        set_stack32(0);
        }

        /* Refresh the cached "flat segment" status bits. */
        if (s == &cpu_state.seg_ds)
        {
                if (s->base == 0 && s->limit_low == 0 && s->limit_high == 0xffffffff)
                        cpu_cur_status &= ~CPU_STATUS_NOTFLATDS;
                else
                        cpu_cur_status |= CPU_STATUS_NOTFLATDS;
        }
        if (s == &cpu_state.seg_ss)
        {
                if (s->base == 0 && s->limit_low == 0 && s->limit_high == 0xffffffff)
                        cpu_cur_status &= ~CPU_STATUS_NOTFLATSS;
                else
                        cpu_cur_status |= CPU_STATUS_NOTFLATSS;
        }

        return cpu_state.abrt;
}
/* Descriptor privilege level (bits 14:13 of the access word) of each of the
   up-to-three descriptors a control-transfer routine may hold at once. */
#define DPL ((segdat[2]>>13)&3)
#define DPL2 ((segdat2[2]>>13)&3)
#define DPL3 ((segdat3[2]>>13)&3)
/*
 * Load selector 'seg' into CS for a plain (non-branching) load.
 *
 * Protected mode validates the descriptor: null selectors fault; normal
 * code segments must satisfy the conforming/non-conforming privilege
 * rules and be present; system descriptors are rejected here (gates are
 * only legal for jmp/call/int paths).  Real/V86 mode just sets
 * base = seg << 4 with the appropriate ring-0/ring-3 access byte.
 */
void loadcs(uint16_t seg)
{
        uint16_t segdat[4];
        uint32_t addr;
        if (output) pclog("Load CS %04X\n",seg);
        if (msw&1 && !(cpu_state.eflags&VM_FLAG))
        {
//                intcount++;
//                flushmmucache();
//                pclog("Load CS %04X\n",seg);
                if (!(seg&~3))
                {
                        pclog("Trying to load CS with NULL selector! lcs\n");
//                        dumpregs();
//                        exit(-1);
                        x86gpf(NULL,0);
                        return;
                }
//                pclog("Protected mode CS load! %04X\n",seg);
                /* Locate the descriptor in the LDT or GDT. */
                addr=seg&~7;
                if (seg&4)
                {
                        if (addr>=ldt.limit)
                        {
                                pclog("Bigger than LDT limit %04X %04X CS\n",seg,ldt.limit);
                                x86gpf(NULL,seg&~3);
                                return;
                        }
                        addr+=ldt.base;
                }
                else
                {
                        if (addr>=gdt.limit)
                        {
                                pclog("Bigger than GDT limit %04X %04X CS\n",seg,gdt.limit);
                                x86gpf(NULL,seg&~3);
                                return;
                        }
                        addr+=gdt.base;
                }
                cpl_override=1;
                segdat[0]=readmemw(0,addr);
                segdat[1]=readmemw(0,addr+2);
                segdat[2]=readmemw(0,addr+4);
                segdat[3]=readmemw(0,addr+6); cpl_override=0; if (cpu_state.abrt) return;
                if (optype==JMP) pclog("Code seg - %04X - %04X %04X %04X %04X\n",seg,segdat[0],segdat[1],segdat[2],segdat[3]);
//                if (!(segdat[2]&0x8000)) x86abort("Code segment not present!\n");
//                if (output) pclog("Segdat2 %04X\n",segdat[2]);
                if (segdat[2]&0x1000) /*Normal code segment*/
                {
                        if (!(segdat[2]&0x400)) /*Not conforming*/
                        {
                                if ((seg&3)>CPL)
                                {
                                        x86gpf(NULL,seg&~3);
                                        pclog("loadcs RPL > CPL %04X %04X %i\n",segdat[2],seg,CPL);
                                        return;
                                }
                                if (CPL != DPL)
                                {
                                        x86gpf(NULL,seg&~3);
                                        return;
                                }
                        }
                        /* Conforming segments may not raise privilege. */
                        if (CPL < DPL)
                        {
                                x86gpf(NULL,seg&~3);
                                return;
                        }
                        if (!(segdat[2]&0x8000))
                        {
                                x86np("Load CS not present", seg & 0xfffc);
                                return;
                        }
                        set_use32(segdat[3] & 0x40);
                        CS=(seg&~3)|CPL;        /* force RPL = CPL */
                        do_seg_load(&cpu_state.seg_cs, segdat);
                        use32=(segdat[3]&0x40)?0x300:0;
                        if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
                        oldcpl = CPL;
#ifdef CS_ACCESSED
                        cpl_override = 1;
                        writememw(0, addr+4, segdat[2] | 0x100); /*Set accessed bit*/
                        cpl_override = 0;
#endif
//                        if (output) pclog("Load CS %08X\n",_cs.base);
//                        CS=(CS&0xFFFC)|((_cs.access>>5)&3);
                }
                else /*System segment*/
                {
                        if (!(segdat[2]&0x8000))
                        {
                                x86np("Load CS system seg not present\n", seg & 0xfffc);
                                return;
                        }
                        /* No gate type is legal for a plain CS load. */
                        switch (segdat[2]&0xF00)
                        {
                                default:
                                pclog("Bad CS %02X %i special descriptor %03X %04X\n",0/*rmdat*/,optype,segdat[2]&0xF00,seg);
                                x86gpf(NULL,seg&~3);
                                return;
                        }
                }
//                pclog("CS = %04X base=%06X limit=%04X access=%02X %04X\n",CS,cs,_cs.limit,_cs.access,addr);
//                dumpregs();
//                exit(-1);
        }
        else
        {
                /* Real or virtual-8086 mode CS load. */
                cpu_state.seg_cs.base=seg<<4;
                cpu_state.seg_cs.limit=0xFFFF;
                cpu_state.seg_cs.limit_low = 0;
                cpu_state.seg_cs.limit_high = 0xffff;
                CS=seg;
                if (cpu_state.eflags&VM_FLAG) cpu_state.seg_cs.access=(3<<5) | 2;
                else                          cpu_state.seg_cs.access=(0<<5) | 2;
                if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
                oldcpl = CPL;
        }
}
/*
 * Load CS for a far JMP to 'seg' (old_pc = address of the jmp, used when
 * the target turns out to be a task gate).
 *
 * Protected mode handles three target kinds: normal code segments
 * (privilege-checked, accessed bit set, RPL forced to CPL), 286/386
 * call gates (descriptor chased to the target code segment; same-or-
 * conforming privilege only — far jmp cannot change CPL), and task
 * gates (delegated to taskswitch286).  Real/V86 mode is a plain
 * base = seg << 4 load.
 */
void loadcsjmp(uint16_t seg, uint32_t old_pc)
{
        uint16_t segdat[4];
        uint32_t addr;
        uint16_t type,seg2;
        uint32_t newpc;
//        pclog("Load CS JMP %04X\n",seg);
        if (msw&1 && !(cpu_state.eflags&VM_FLAG))
        {
                if (!(seg&~3))
                {
                        pclog("Trying to load CS with NULL selector! lcsjmp\n");
                        x86gpf(NULL,0);
                        return;
//                        dumpregs();
//                        exit(-1);
                }
                /* Locate the descriptor in the LDT or GDT. */
                addr=seg&~7;
                if (seg&4)
                {
                        if (addr>=ldt.limit)
                        {
                                pclog("Bigger than LDT limit %04X %04X CS\n",seg,ldt.limit);
                                x86gpf(NULL,seg&~3);
                                return;
                        }
                        addr+=ldt.base;
                }
                else
                {
                        if (addr>=gdt.limit)
                        {
                                pclog("Bigger than GDT limit %04X %04X CS\n",seg,gdt.limit);
                                x86gpf(NULL,seg&~3);
                                return;
                        }
                        addr+=gdt.base;
                }
                cpl_override=1;
                segdat[0]=readmemw(0,addr);
                segdat[1]=readmemw(0,addr+2);
                segdat[2]=readmemw(0,addr+4);
                segdat[3]=readmemw(0,addr+6); cpl_override=0; if (cpu_state.abrt) return;
                if (output) pclog("%04X %04X %04X %04X\n",segdat[0],segdat[1],segdat[2],segdat[3]);
                if (segdat[2]&0x1000) /*Normal code segment*/
                {
//                        pclog("Normal CS\n");
                        if (!(segdat[2]&0x400)) /*Not conforming*/
                        {
                                if ((seg&3)>CPL)
                                {
                                        x86gpf(NULL,seg&~3);
                                        return;
                                }
                                if (CPL != DPL)
                                {
                                        x86gpf(NULL,seg&~3);
                                        return;
                                }
                        }
                        if (CPL < DPL)
                        {
                                x86gpf(NULL,seg&~3);
                                return;
                        }
                        if (!(segdat[2]&0x8000))
                        {
                                x86np("Load CS JMP not present\n", seg & 0xfffc);
                                return;
                        }
                        set_use32(segdat[3]&0x40);

#ifdef CS_ACCESSED
                        cpl_override = 1;
                        writememw(0, addr+4, segdat[2] | 0x100); /*Set accessed bit*/
                        cpl_override = 0;
#endif

                        /* RPL of the live CS is forced to CPL; patch the
                           cached DPL to match before loading. */
                        CS = (seg & ~3) | CPL;
                        segdat[2] = (segdat[2] & ~(3 << (5+8))) | (CPL << (5+8));

                        do_seg_load(&cpu_state.seg_cs, segdat);
                        if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
                        oldcpl = CPL;
/*                        if (segdat[3]&0x40)
                        {
                                use32=0x300;
                                cpu_cur_status |= CPU_STATUS_USE32;
                        }
                        else
                        {
                                use32=0;
                                cpu_cur_status &= ~CPU_STATUS_USE32;
                        }*/
                        cycles -= timing_jmp_pm;
                }
                else /*System segment*/
                {
//                        pclog("System CS\n");
                        if (!(segdat[2]&0x8000))
                        {
                                x86np("Load CS JMP system selector not present\n", seg & 0xfffc);
                                return;
                        }
                        type=segdat[2]&0xF00;
                        newpc=segdat[0];
                        if (type&0x800) newpc|=segdat[3]<<16;   /* 386 gate: 32-bit offset */
                        switch (type)
                        {
                                case 0x400: /*Call gate*/
                                case 0xC00:
//                                pclog("Call gate\n");
                                cgate32=(type&0x800);
                                cgate16=!cgate32;
                                cpu_state.oldpc = cpu_state.pc;
                                if ((DPL < CPL) || (DPL < (seg&3)))
                                {
                                        x86gpf(NULL,seg&~3);
                                        return;
                                }
                                if (!(segdat[2]&0x8000))
                                {
                                        x86np("Load CS JMP call gate not present\n", seg & 0xfffc);
                                        return;
                                }
                                /* Chase the gate's target code selector. */
                                seg2=segdat[1];

                                if (!(seg2&~3))
                                {
                                        pclog("Trying to load CS with NULL selector! lcsjmpcg\n");
                                        x86gpf(NULL,0);
                                        return;
//                                        dumpregs();
//                                        exit(-1);
                                }
                                addr=seg2&~7;
                                if (seg2&4)
                                {
                                        if (addr>=ldt.limit)
                                        {
                                                pclog("Bigger than LDT limit %04X %04X CSJ\n",seg2,gdt.limit);
                                                x86gpf(NULL,seg2&~3);
                                                return;
                                        }
                                        addr+=ldt.base;
                                }
                                else
                                {
                                        if (addr>=gdt.limit)
                                        {
                                                pclog("Bigger than GDT limit %04X %04X CSJ\n",seg2,gdt.limit);
                                                x86gpf(NULL,seg2&~3);
                                                return;
                                        }
                                        addr+=gdt.base;
                                }
                                cpl_override=1;
                                segdat[0]=readmemw(0,addr);
                                segdat[1]=readmemw(0,addr+2);
                                segdat[2]=readmemw(0,addr+4);
                                segdat[3]=readmemw(0,addr+6); cpl_override=0; if (cpu_state.abrt) return;

                                if (DPL > CPL)
                                {
                                        x86gpf(NULL,seg2&~3);
                                        return;
                                }
                                if (!(segdat[2]&0x8000))
                                {
                                        x86np("Load CS JMP from call gate not present\n", seg2 & 0xfffc);
                                        return;
                                }

                                switch (segdat[2]&0x1F00)
                                {
                                        case 0x1800: case 0x1900: case 0x1A00: case 0x1B00: /*Non-conforming code*/
                                        if (DPL > CPL)
                                        {
                                                pclog("Call gate DPL > CPL");
                                                x86gpf(NULL,seg2&~3);
                                                return;
                                        }
                                        /* fallthrough */
                                        case 0x1C00: case 0x1D00: case 0x1E00: case 0x1F00: /*Conforming*/
                                        CS=seg2;
                                        do_seg_load(&cpu_state.seg_cs, segdat);
                                        if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
                                        oldcpl = CPL;
                                        set_use32(segdat[3]&0x40);
                                        cpu_state.pc=newpc;

#ifdef CS_ACCESSED
                                        cpl_override = 1;
                                        writememw(0, addr+4, segdat[2] | 0x100); /*Set accessed bit*/
                                        cpl_override = 0;
#endif
                                        break;

                                        default:
                                        pclog("JMP Call gate bad segment type\n");
                                        x86gpf(NULL,seg2&~3);
                                        return;
                                }
                                cycles -= timing_jmp_pm_gate;
                                break;

                                case 0x100: /*286 Task gate*/
                                case 0x900: /*386 Task gate*/
//                                pclog("Task gate\n");
                                /* Restart at the jmp itself so the outgoing
                                   TSS records the correct return point. */
                                cpu_state.pc = old_pc;
                                optype=JMP;
                                cpl_override=1;
                                taskswitch286(seg,segdat,segdat[2]&0x800);
                                cpu_state.flags &= ~NT_FLAG;
                                cpl_override=0;
//                                case 0xB00: /*386 Busy task gate*/
//                                if (optype==JMP) pclog("Task switch!\n");
//                                taskswitch386(seg,segdat);
                                return;

                                default:
                                pclog("Bad JMP CS %02X %i special descriptor %03X %04X\n",0/*rmdat*/,optype,segdat[2]&0xF00,seg);
                                x86gpf(NULL,0);
                                return;
//                                dumpregs();
//                                exit(-1);
                        }
                }
//                pclog("CS = %04X base=%06X limit=%04X access=%02X %04X\n",CS,cs,_cs.limit,_cs.access,addr);
//                dumpregs();
//                exit(-1);
        }
        else
        {
                /* Real or virtual-8086 mode CS load. */
                cpu_state.seg_cs.base=seg<<4;
                cpu_state.seg_cs.limit=0xFFFF;
                cpu_state.seg_cs.limit_low = 0;
                cpu_state.seg_cs.limit_high = 0xffff;
                CS=seg;
                if (cpu_state.eflags&VM_FLAG) cpu_state.seg_cs.access=(3<<5) | 2;
                else                          cpu_state.seg_cs.access=(0<<5) | 2;
                if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
                oldcpl = CPL;
                cycles -= timing_jmp_rm;
        }
}
/* Push a 16-bit value on the emulated stack, honouring the current
   stack width; the stack pointer is only decremented once the memory
   write has succeeded (no adjustment on abort). */
void PUSHW(uint16_t v)
{
        if (stack32)
        {
                writememw(ss, ESP - 2, v);
                if (!cpu_state.abrt)
                        ESP -= 2;
        }
        else
        {
                writememw(ss, (SP - 2) & 0xFFFF, v);
                if (!cpu_state.abrt)
                        SP -= 2;
        }
}
/* Push a 32-bit value on the emulated stack; as with PUSHW, the stack
   pointer is left untouched if the write aborts. */
void PUSHL(uint32_t v)
{
        if (stack32)
        {
                writememl(ss, ESP - 4, v);
                if (!cpu_state.abrt)
                        ESP -= 4;
        }
        else
        {
                writememl(ss, (SP - 4) & 0xFFFF, v);
                if (!cpu_state.abrt)
                        SP -= 4;
        }
}
/* Pop a 16-bit value from the emulated stack.  Returns 0 and leaves the
   stack pointer unchanged if the read aborts. */
uint16_t POPW()
{
        uint16_t ret;

        if (stack32)
        {
                ret = readmemw(ss, ESP);
                if (cpu_state.abrt)
                        return 0;
                ESP += 2;
        }
        else
        {
                ret = readmemw(ss, SP);
                if (cpu_state.abrt)
                        return 0;
                SP += 2;
        }
        return ret;
}
/* Pop a 32-bit value from the emulated stack.  Returns 0 and leaves the
   stack pointer unchanged if the read aborts. */
uint32_t POPL()
{
        uint32_t ret;

        if (stack32)
        {
                ret = readmeml(ss, ESP);
                if (cpu_state.abrt)
                        return 0;
                ESP += 4;
        }
        else
        {
                ret = readmeml(ss, SP);
                if (cpu_state.abrt)
                        return 0;
                SP += 4;
        }
        return ret;
}
void loadcscall(uint16_t seg, uint32_t old_pc)
{
uint16_t seg2;
uint16_t segdat[4],segdat2[4],newss;
uint32_t addr,oldssbase=ss, oaddr;
uint32_t newpc;
int count;
uint32_t oldss,oldsp,newsp,oldsp2;
int type;
uint16_t tempw;
int csout = output;
if (msw&1 && !(cpu_state.eflags&VM_FLAG))
{
//flushmmucache();
if (csout) pclog("Protected mode CS load! %04X\n",seg);
if (!(seg&~3))
{
pclog("Trying to load CS with NULL selector! lcscall\n");
x86gpf(NULL,0);
return;
// dumpregs();
// exit(-1);
}
addr=seg&~7;
if (seg&4)
{
if (addr>=ldt.limit)
{
pclog("Bigger than LDT limit %04X %04X CSC\n",seg,gdt.limit);
x86gpf(NULL,seg&~3);
return;
}
addr+=ldt.base;
}
else
{
if (addr>=gdt.limit)
{
pclog("Bigger than GDT limit %04X %04X CSC\n",seg,gdt.limit);
x86gpf(NULL,seg&~3);
return;
}
addr+=gdt.base;
}
cpl_override=1;
segdat[0]=readmemw(0,addr);
segdat[1]=readmemw(0,addr+2);
segdat[2]=readmemw(0,addr+4);
segdat[3]=readmemw(0,addr+6); cpl_override=0; if (cpu_state.abrt) return;
type=segdat[2]&0xF00;
newpc=segdat[0];
if (type&0x800) newpc|=segdat[3]<<16;
if (csout) pclog("Code seg call - %04X - %04X %04X %04X\n",seg,segdat[0],segdat[1],segdat[2]);
if (segdat[2]&0x1000)
{
if (!(segdat[2]&0x400)) /*Not conforming*/
{
if ((seg&3)>CPL)
{
if (csout) pclog("Not conforming, RPL > CPL\n");
x86gpf(NULL,seg&~3);
return;
}
if (CPL != DPL)
{
if (csout) pclog("Not conforming, CPL != DPL (%i %i)\n",CPL,DPL);
x86gpf(NULL,seg&~3);
return;
}
}
if (CPL < DPL)
{
if (csout) pclog("CPL < DPL\n");
x86gpf(NULL,seg&~3);
return;
}
if (!(segdat[2]&0x8000))
{
if (csout) pclog("Not present\n");
x86np("Load CS call not present", seg & 0xfffc);
return;
}
set_use32(segdat[3]&0x40);
#ifdef CS_ACCESSED
cpl_override = 1;
writememw(0, addr+4, segdat[2] | 0x100); /*Set accessed bit*/
cpl_override = 0;
#endif
/*Conforming segments don't change CPL, so preserve existing CPL*/
if (segdat[2]&0x400)
{
seg = (seg & ~3) | CPL;
segdat[2] = (segdat[2] & ~(3 << (5+8))) | (CPL << (5+8));
}
else /*On non-conforming segments, set RPL = CPL*/
seg = (seg & ~3) | CPL;
CS=seg;
do_seg_load(&cpu_state.seg_cs, segdat);
if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
oldcpl = CPL;
/* if (segdat[3]&0x40)
{
use32=0x300;
cpu_cur_status |= CPU_STATUS_USE32;
}
else
{
use32=0;
cpu_cur_status &= ~CPU_STATUS_USE32;
}*/
if (csout) pclog("Complete\n");
cycles -= timing_call_pm;
}
else
{
type=segdat[2]&0xF00;
if (csout) pclog("Type %03X\n",type);
switch (type)
{
case 0x400: /*Call gate*/
case 0xC00: /*386 Call gate*/
if (output) pclog("Callgate %08X\n", cpu_state.pc);
cgate32=(type&0x800);
cgate16=!cgate32;
count=segdat[2]&31;
if ((DPL < CPL) || (DPL < (seg&3)))
{
x86gpf(NULL,seg&~3);
return;
}
if (!(segdat[2]&0x8000))
{
if (output) pclog("Call gate not present %04X\n",seg);
x86np("Call gate not present\n", seg & 0xfffc);
return;
}
seg2=segdat[1];
if (output) pclog("New address : %04X:%08X\n", seg2, newpc);
if (!(seg2&~3))
{
pclog("Trying to load CS with NULL selector! lcscallcg\n");
x86gpf(NULL,0);
return;
// dumpregs();
// exit(-1);
}
addr=seg2&~7;
if (seg2&4)
{
if (addr>=ldt.limit)
{
pclog("Bigger than LDT limit %04X %04X CSC\n",seg2,gdt.limit);
x86gpf(NULL,seg2&~3);
return;
}
addr+=ldt.base;
}
else
{
if (addr>=gdt.limit)
{
pclog("Bigger than GDT limit %04X %04X CSC\n",seg2,gdt.limit);
x86gpf(NULL,seg2&~3);
return;
}
addr+=gdt.base;
}
cpl_override=1;
segdat[0]=readmemw(0,addr);
segdat[1]=readmemw(0,addr+2);
segdat[2]=readmemw(0,addr+4);
segdat[3]=readmemw(0,addr+6); cpl_override=0; if (cpu_state.abrt) return;
if (output) pclog("Code seg2 call - %04X - %04X %04X %04X\n",seg2,segdat[0],segdat[1],segdat[2]);
if (DPL > CPL)
{
x86gpf(NULL,seg2&~3);
return;
}
if (!(segdat[2]&0x8000))
{
if (output) pclog("Call gate CS not present %04X\n",seg2);
x86np("Call gate CS not present", seg2 & 0xfffc);
return;
}
switch (segdat[2]&0x1F00)
{
case 0x1800: case 0x1900: case 0x1A00: case 0x1B00: /*Non-conforming code*/
if (DPL < CPL)
{
uint16_t oldcs = CS;
oaddr = addr;
/*Load new stack*/
oldss=SS;
oldsp=oldsp2=ESP;
cpl_override=1;
if (tr.access&8)
{
addr = 4 + tr.base + (DPL * 8);
newss=readmemw(0,addr+4);
newsp=readmeml(0,addr);
}
else
{
addr = 2 + tr.base + (DPL * 4);
newss=readmemw(0,addr+2);
newsp=readmemw(0,addr);
}
cpl_override=0;
if (cpu_state.abrt) return;
if (output) pclog("New stack %04X:%08X\n",newss,newsp);
if (!(newss&~3))
{
pclog("Call gate loading null SS\n");
x86ts(NULL,newss&~3);
return;
}
addr=newss&~7;
if (newss&4)
{
if (addr>=ldt.limit)
{
x86abort("Bigger than LDT limit %04X %08X %04X CSC SS\n",newss,addr,ldt.limit);
x86ts(NULL,newss&~3);
return;
}
addr+=ldt.base;
}
else
{
if (addr>=gdt.limit)
{
x86abort("Bigger than GDT limit %04X %04X CSC\n",newss,gdt.limit);
x86ts(NULL,newss&~3);
return;
}
addr+=gdt.base;
}
cpl_override=1;
if (output) pclog("Read stack seg\n");
segdat2[0]=readmemw(0,addr);
segdat2[1]=readmemw(0,addr+2);
segdat2[2]=readmemw(0,addr+4);
segdat2[3]=readmemw(0,addr+6); cpl_override=0; if (cpu_state.abrt) return;
if (output) pclog("Read stack seg done!\n");
if (((newss & 3) != DPL) || (DPL2 != DPL))
{
pclog("Call gate loading SS with wrong permissions %04X %04X %i %i %04X %04X\n", newss, seg2, DPL, DPL2, segdat[2], segdat2[2]);
// dumpregs();
// exit(-1);
x86ts(NULL,newss&~3);
return;
}
if ((segdat2[2]&0x1A00)!=0x1200)
{
pclog("Call gate loading SS wrong type\n");
x86ts(NULL,newss&~3);
return;
}
if (!(segdat2[2]&0x8000))
{
pclog("Call gate loading SS not present\n");
x86ss("Call gate loading SS not present\n", newss & 0xfffc);
return;
}
if (!stack32) oldsp &= 0xFFFF;
SS=newss;
set_stack32((segdat2[3] & 0x40) ? 1 : 0);
if (stack32) ESP=newsp;
else SP=newsp;
do_seg_load(&cpu_state.seg_ss, segdat2);
if (output) pclog("Set access 1\n");
#ifdef SEL_ACCESSED
cpl_override = 1;
writememw(0, addr+4, segdat2[2] | 0x100); /*Set accessed bit*/
cpl_override = 0;
#endif
CS=seg2;
do_seg_load(&cpu_state.seg_cs, segdat);
if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
oldcpl = CPL;
set_use32(segdat[3]&0x40);
cpu_state.pc=newpc;
if (output) pclog("Set access 2\n");
#ifdef CS_ACCESSED
cpl_override = 1;
writememw(0, oaddr+4, segdat[2] | 0x100); /*Set accessed bit*/
cpl_override = 0;
#endif
if (output) pclog("Type %04X\n",type);
if (type==0xC00)
{
PUSHL(oldss);
PUSHL(oldsp2);
if (cpu_state.abrt)
{
pclog("ABRT PUSHL\n");
SS = oldss;
ESP = oldsp2;
CS = oldcs;
return;
}
// if (output) pclog("Stack now %04X:%08X\n",SS,ESP);
if (count)
{
while (count)
{
count--;
PUSHL(readmeml(oldssbase,oldsp+(count*4)));
if (cpu_state.abrt)
{
pclog("ABRT COPYL\n");
SS = oldss;
ESP = oldsp2;
CS = oldcs;
return;
}
}
}
// x86abort("Call gate with count %i\n",count);
}
else
{
if (output) pclog("Stack %04X\n",SP);
PUSHW(oldss);
if (output) pclog("Write SS to %04X:%04X\n",SS,SP);
PUSHW(oldsp2);
if (cpu_state.abrt)
{
pclog("ABRT PUSHW\n");
SS = oldss;
ESP = oldsp2;
CS = oldcs;
return;
}
if (output) pclog("Write SP to %04X:%04X\n",SS,SP);
// if (output) pclog("Stack %04X %i %04X:%04X\n",SP,count,oldssbase,oldsp);
// if (output) pclog("PUSH %04X %04X %i %i now %04X:%08X\n",oldss,oldsp,count,stack32,SS,ESP);
if (count)
{
while (count)
{
count--;
tempw=readmemw(oldssbase,(oldsp&0xFFFF)+(count*2));
if (output) pclog("PUSH %04X\n",tempw);
PUSHW(tempw);
if (cpu_state.abrt)
{
pclog("ABRT COPYW\n");
SS = oldss;
ESP = oldsp2;
CS = oldcs;
return;
}
}
}
// if (output) pclog("Stack %04X\n",SP);
// if (count) x86abort("Call gate with count\n");
}
cycles -= timing_call_pm_gate_inner;
break;
}
else if (DPL > CPL)
{
pclog("Call gate DPL > CPL");
x86gpf(NULL,seg2&~3);
return;
}
case 0x1C00: case 0x1D00: case 0x1E00: case 0x1F00: /*Conforming*/
CS=seg2;
do_seg_load(&cpu_state.seg_cs, segdat);
if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
oldcpl = CPL;
set_use32(segdat[3]&0x40);
cpu_state.pc=newpc;
#ifdef CS_ACCESSED
cpl_override = 1;
writememw(0, addr+4, segdat[2] | 0x100); /*Set accessed bit*/
cpl_override = 0;
#endif
cycles -= timing_call_pm_gate;
break;
default:
pclog("Call gate bad segment type\n");
x86gpf(NULL,seg2&~3);
return;
}
break;
case 0x100: /*286 Task gate*/
case 0x900: /*386 Task gate*/
// pclog("Task gate\n");
cpu_state.pc = old_pc;
cpl_override=1;
taskswitch286(seg,segdat,segdat[2]&0x800);
cpl_override=0;
break;
default:
pclog("Bad CALL special descriptor %03X\n",segdat[2]&0xF00);
x86gpf(NULL,seg&~3);
return;
// dumpregs();
// exit(-1);
}
}
// pclog("CS = %04X base=%06X limit=%04X access=%02X %04X\n",CS,cs,_cs.limit,_cs.access,addr);
// dumpregs();
// exit(-1);
}
else
{
cpu_state.seg_cs.base=seg<<4;
cpu_state.seg_cs.limit=0xFFFF;
cpu_state.seg_cs.limit_low = 0;
cpu_state.seg_cs.limit_high = 0xffff;
CS=seg;
if (cpu_state.eflags&VM_FLAG) cpu_state.seg_cs.access=(3<<5) | 2;
else cpu_state.seg_cs.access=(0<<5) | 2;
if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
oldcpl = CPL;
}
}
/* Protected-mode far return (RETF / RETF imm16).
 *
 * is32 - operand size: nonzero pops 32-bit EIP/CS, zero pops 16-bit IP/CS.
 * off  - immediate byte count to discard from the stack after the pop
 *        (the imm16 of "RETF imm16"; 0 for plain RETF).
 *
 * Pops the return CS:IP, validates the CS selector and descriptor, and either
 * returns at the same privilege level or - when the return RPL is outer
 * (numerically greater) - additionally pops and validates a new SS:SP and
 * revalidates DS/ES/FS/GS.  On any fault the original ESP is restored before
 * raising the exception so the faulting instruction can be restarted.
 */
void pmoderetf(int is32, uint16_t off)
{
        uint32_t newpc;
        uint32_t newsp;
        uint32_t addr, oaddr;
        uint16_t segdat[4],segdat2[4],seg,newss;
        uint32_t oldsp=ESP;      /* saved so ESP can be rolled back on a fault */
        if (output) pclog("RETF %i %04X:%04X %08X %04X\n",is32,CS,cpu_state.pc,cr0,cpu_state.eflags);
        /* Pop return address and code selector (32- or 16-bit forms). */
        if (is32)
        {
                newpc=POPL();
                seg=POPL(); if (cpu_state.abrt) return;
        }
        else
        {
                if (output) pclog("PC read from %04X:%04X\n",SS,SP);
                newpc=POPW();
                if (output) pclog("CS read from %04X:%04X\n",SS,SP);
                seg=POPW(); if (cpu_state.abrt) return;
        }
        if (output) pclog("Return to %04X:%08X\n",seg,newpc);
        /* RETF may not return to an inner (more privileged) level: RPL >= CPL. */
        if ((seg&3)<CPL)
        {
                pclog("RETF RPL<CPL %04X %i %i %04X:%08X\n",seg,CPL,ins,CS,cpu_state.pc);
//                output=3;
//                timetolive=100;
/*                dumpregs();
                exit(-1);*/
                ESP=oldsp;
                x86gpf(NULL,seg&~3);
                return;
        }
        /* Null selector in CS is never legal. */
        if (!(seg&~3))
        {
                pclog("Trying to load CS with NULL selector! retf\n");
//                dumpregs();
//                exit(-1);
                x86gpf(NULL,0);
                return;
        }
        /* Locate the CS descriptor in the LDT or GDT; #GP if outside the table limit. */
        addr=seg&~7;
        if (seg&4)
        {
                if (addr>=ldt.limit)
                {
                        pclog("Bigger than LDT limit %04X %04X RETF\n",seg,ldt.limit);
                        x86gpf(NULL,seg&~3);
                        return;
                }
                addr+=ldt.base;
        }
        else
        {
                if (addr>=gdt.limit)
                {
                        pclog("Bigger than GDT limit %04X %04X RETF\n",seg,gdt.limit);
                        x86gpf(NULL,seg&~3);
//                        dumpregs();
//                        exit(-1);
                        return;
                }
                addr+=gdt.base;
        }
        /* Fetch the 8-byte CS descriptor with privilege checks suppressed. */
        cpl_override=1;
        segdat[0]=readmemw(0,addr);
        segdat[1]=readmemw(0,addr+2);
        segdat[2]=readmemw(0,addr+4);
        segdat[3]=readmemw(0,addr+6); cpl_override=0; if (cpu_state.abrt) { ESP=oldsp; return; }
        oaddr = addr;            /* remember CS descriptor address for the accessed-bit write */
        if (output) pclog("CPL %i RPL %i %i\n",CPL,seg&3,is32);
        /* Discard the imm16 bytes (RETF imm16) before any further stack use. */
        if (stack32) ESP+=off;
        else         SP+=off;
        if (CPL==(seg&3))
        {
                /* Same-privilege return: CS descriptor must be code, DPL rules per type. */
                if (output) pclog("RETF CPL = RPL %04X\n", segdat[2]);
                switch (segdat[2]&0x1F00)
                {
                        case 0x1800: case 0x1900: case 0x1A00: case 0x1B00: /*Non-conforming*/
                        if (CPL != DPL)
                        {
                                pclog("RETF non-conforming CPL != DPL\n");
                                ESP=oldsp;
                                x86gpf(NULL,seg&~3);
                                return;
                        }
                        break;
                        case 0x1C00: case 0x1D00: case 0x1E00: case 0x1F00: /*Conforming*/
                        if (CPL < DPL)
                        {
                                pclog("RETF non-conforming CPL < DPL\n");
                                ESP=oldsp;
                                x86gpf(NULL,seg&~3);
                                return;
                        }
                        break;
                        default:
                        pclog("RETF CS not code segment\n");
                        x86gpf(NULL,seg&~3);
                        return;
                }
                /* Segment-not-present raises #NP, not #GP. */
                if (!(segdat[2]&0x8000))
                {
                        pclog("RETF CS not present %i %04X %04X %04X\n",ins, segdat[0], segdat[1], segdat[2]);
                        ESP=oldsp;
                        x86np("RETF CS not present\n", seg & 0xfffc);
                        return;
                }
#ifdef CS_ACCESSED
                cpl_override = 1;
                writememw(0, addr+4, segdat[2] | 0x100); /*Set accessed bit*/
                cpl_override = 0;
#endif
                cpu_state.pc=newpc;
                /* Conforming segments don't change CPL: patch descriptor DPL to RPL
                   before loading so the cached DPL equals the new CPL. */
                if (segdat[2] & 0x400)
                   segdat[2] = (segdat[2] & ~(3 << (5+8))) | ((seg & 3) << (5+8));
                CS = seg;
                do_seg_load(&cpu_state.seg_cs, segdat);
                cpu_state.seg_cs.access = (cpu_state.seg_cs.access & ~(3 << 5)) | ((CS & 3) << 5);
                if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
                oldcpl = CPL;
                set_use32(segdat[3] & 0x40);
//                pclog("CPL=RPL return to %04X:%08X\n",CS,pc);
                cycles -= timing_retf_pm;
        }
        else
        {
                /* Return to an outer privilege level: validate CS against the RPL. */
                switch (segdat[2]&0x1F00)
                {
                        case 0x1800: case 0x1900: case 0x1A00: case 0x1B00: /*Non-conforming*/
                        if ((seg&3) != DPL)
                        {
                                pclog("RETF non-conforming RPL != DPL\n");
                                ESP=oldsp;
                                x86gpf(NULL,seg&~3);
                                return;
                        }
                        if (output) pclog("RETF non-conforming, %i %i\n",seg&3, DPL);
                        break;
                        case 0x1C00: case 0x1D00: case 0x1E00: case 0x1F00: /*Conforming*/
                        if ((seg&3) < DPL)
                        {
                                pclog("RETF non-conforming RPL < DPL\n");
                                ESP=oldsp;
                                x86gpf(NULL,seg&~3);
                                return;
                        }
                        if (output) pclog("RETF conforming, %i %i\n",seg&3, DPL);
                        break;
                        default:
                        pclog("RETF CS not code segment\n");
                        ESP=oldsp;
                        x86gpf(NULL,seg&~3);
                        return;
                }
                if (!(segdat[2]&0x8000))
                {
                        pclog("RETF CS not present! %i %04X %04X %04X\n",ins, segdat[0], segdat[1], segdat[2]);
                        ESP=oldsp;
                        x86np("RETF CS not present\n", seg & 0xfffc);
                        return;
                }
                /* Pop the outer level's SS:SP that the matching far CALL pushed. */
                if (is32)
                {
                        newsp=POPL();
                        newss=POPL(); if (cpu_state.abrt) return;
//                        pclog("is32 new stack %04X:%04X\n",newss,newsp);
                }
                else
                {
                        if (output) pclog("SP read from %04X:%04X\n",SS,SP);
                        newsp=POPW();
                        if (output) pclog("SS read from %04X:%04X\n",SS,SP);
                        newss=POPW(); if (cpu_state.abrt) return;
//                        pclog("!is32 new stack %04X:%04X\n",newss,newsp);
                }
                if (output) pclog("Read new stack : %04X:%04X (%08X)\n", newss, newsp, ldt.base);
                if (!(newss&~3))
                {
                        pclog("RETF loading null SS\n");
                        ESP=oldsp;
                        x86gpf(NULL,newss&~3);
                        return;
                }
                /* Locate and fetch the new SS descriptor. */
                addr=newss&~7;
                if (newss&4)
                {
                        if (addr>=ldt.limit)
                        {
                                pclog("Bigger than LDT limit %04X %04X RETF SS\n",newss,gdt.limit);
                                ESP=oldsp;
                                x86gpf(NULL,newss&~3);
                                return;
                        }
                        addr+=ldt.base;
                }
                else
                {
                        if (addr>=gdt.limit)
                        {
                                pclog("Bigger than GDT limit %04X %04X RETF SS\n",newss,gdt.limit);
                                ESP=oldsp;
                                x86gpf(NULL,newss&~3);
                                return;
                        }
                        addr+=gdt.base;
                }
                cpl_override=1;
                segdat2[0]=readmemw(0,addr);
                segdat2[1]=readmemw(0,addr+2);
                segdat2[2]=readmemw(0,addr+4);
                segdat2[3]=readmemw(0,addr+6); cpl_override=0; if (cpu_state.abrt) { ESP=oldsp; return; }
                if (output) pclog("Segment data %04X %04X %04X %04X\n", segdat2[0], segdat2[1], segdat2[2], segdat2[3]);
//                if (((newss & 3) != DPL) || (DPL2 != DPL))
                /* SS.RPL must equal the return RPL (the new CPL). */
                if ((newss & 3) != (seg & 3))
                {
                        pclog("RETF loading SS with wrong permissions %i %i %04X %04X\n", newss & 3, seg & 3, newss, seg);
                        ESP=oldsp;
//                        output = 3;
//                        dumpregs();
//                        exit(-1);
                        x86gpf(NULL,newss&~3);
                        return;
                }
                /* SS must be a writable data segment (type mask 0x1A00 -> 0x1200). */
                if ((segdat2[2]&0x1A00)!=0x1200)
                {
                        pclog("RETF loading SS wrong type\n");
                        ESP=oldsp;
//                        dumpregs();
//                        exit(-1);
                        x86gpf(NULL,newss&~3);
                        return;
                }
                if (!(segdat2[2]&0x8000))
                {
                        pclog("RETF loading SS not present\n");
                        ESP=oldsp;
                        x86np("RETF loading SS not present\n", newss & 0xfffc);
                        return;
                }
                /* SS.DPL must also equal the new CPL. */
                if (DPL2 != (seg & 3))
                {
                        pclog("RETF loading SS with wrong permissions2 %i %i %04X %04X\n", DPL2, seg & 3, newss, seg);
                        ESP=oldsp;
                        x86gpf(NULL,newss&~3);
                        return;
                }
                /* Commit the new stack, then the new CS:EIP. */
                SS=newss;
                set_stack32((segdat2[3] & 0x40) ? 1 : 0);
                if (stack32) ESP=newsp;
                else         SP=newsp;
                do_seg_load(&cpu_state.seg_ss, segdat2);
#ifdef SEL_ACCESSED
                cpl_override = 1;
                writememw(0, addr+4, segdat2[2] | 0x100); /*Set accessed bit*/
#ifdef CS_ACCESSED
                writememw(0, oaddr+4, segdat[2] | 0x100); /*Set accessed bit*/
#endif
                cpl_override = 0;
#endif
                /*Conforming segments don't change CPL, so CPL = RPL*/
                if (segdat[2]&0x400)
                   segdat[2] = (segdat[2] & ~(3 << (5+8))) | ((seg & 3) << (5+8));
                cpu_state.pc=newpc;
                CS=seg;
                do_seg_load(&cpu_state.seg_cs, segdat);
                if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
                oldcpl = CPL;
                set_use32(segdat[3] & 0x40);
                /* Discard the imm16 bytes again on the NEW (outer) stack. */
                if (stack32) ESP+=off;
                else         SP+=off;
                /* Data segments whose DPL is now inner than CPL must be nulled. */
                check_seg_valid(&cpu_state.seg_ds);
                check_seg_valid(&cpu_state.seg_es);
                check_seg_valid(&cpu_state.seg_fs);
                check_seg_valid(&cpu_state.seg_gs);
//                pclog("CPL<RPL return to %04X:%08X %04X:%08X\n",CS,pc,SS,ESP);
                cycles -= timing_retf_pm_outer;
        }
}
/* Deliver interrupt/exception vector 'num' in protected mode.
 *
 * num  - interrupt vector (index into the IDT).
 * soft - nonzero for a software INT (INT n / INT3 / INTO), zero for a
 *        hardware interrupt or processor exception.  Software INTs are
 *        subject to the gate-DPL >= CPL check and the V86 IOPL check,
 *        and do not set the EXT bit in fault error codes.
 *
 * Handles interrupt/trap gates (16- and 32-bit), including the stack
 * switch to the inner level's SS:ESP from the TSS, and task gates
 * (dispatched through taskswitch286).  Escalates a missing vector 0x0D
 * entry to a double fault and vector 8 to a triple fault (CPU reset).
 *
 * Fix vs. original: the out-of-range-vector error code used
 * "(num*8)+2+(soft)?0:1" - since ?: binds looser than +, the condition
 * was the whole sum (always nonzero) and the error code passed to
 * x86gpf was always 0.  The intent is (vector*8) | 2 (IDT bit) | EXT
 * (EXT=1 for non-software events); parenthesized accordingly below.
 */
void pmodeint(int num, int soft)
{
        uint16_t segdat[4],segdat2[4],segdat3[4];
        uint32_t addr, oaddr;
        uint16_t newss;
        uint32_t oldss,oldsp;
        int type;
        uint32_t newsp;
        uint16_t seg=0;
        int new_cpl;

        /* In V86 mode with IOPL<3, software INT n is IOPL-sensitive -> #GP(0). */
        if (cpu_state.eflags&VM_FLAG && IOPL!=3 && soft)
        {
                if (output) pclog("V86 banned int\n");
                pclog("V86 banned int!\n");
                x86gpf(NULL,0);
                return;
        }
        addr=(num<<3);
        if (addr>=idt.limit)
        {
                if (num==8)
                {
                        /*Triple fault - reset!*/
                        pclog("Triple fault!\n");
                        softresetx86();
                        cpu_set_edx();
                }
                else if (num==0xD)
                {
                        pclog("Double fault!\n");
                        pmodeint(8,0);
                }
                else
                {
                        pclog("INT out of range\n");
                        /* Error code = vector*8 + IDT bit (2) + EXT bit (1 when not
                           a software INT).  Parentheses around ?: are the fix. */
                        x86gpf(NULL,(num*8)+2+((soft)?0:1));
                }
                if (output) pclog("addr >= IDT.limit\n");
                return;
        }
        addr+=idt.base;
        /* Fetch the 8-byte gate descriptor with privilege checks suppressed.
           (readmemw(k,addr) reads linear address addr+k.) */
        cpl_override=1;
        segdat[0]=readmemw(0,addr);
        segdat[1]=readmemw(2,addr);
        segdat[2]=readmemw(4,addr);
        segdat[3]=readmemw(6,addr); cpl_override=0; if (cpu_state.abrt) { pclog("Abrt reading from %08X\n",addr); return; }
        oaddr = addr;
        if (output) pclog("Addr %08X seg %04X %04X %04X %04X\n",addr,segdat[0],segdat[1],segdat[2],segdat[3]);
        /* Empty descriptor -> #GP(vector*8+2). */
        if (!(segdat[2]&0x1F00))
        {
                if (cpu_state.eflags & VM_FLAG) /*This fires on all V86 interrupts in EMM386. Mark as expected to prevent code churn*/
                        x86gpf_expected(NULL,(num*8)+2);
                else
                        x86gpf(NULL,(num*8)+2);
                return;
        }
        /* Software INT through a gate with DPL < CPL -> #GP. */
        if (DPL<CPL && soft)
        {
                x86gpf(NULL,(num*8)+2);
                return;
        }
        type=segdat[2]&0x1F00;
        switch (type)
        {
                case 0x600: case 0x700: case 0xE00: case 0xF00: /*Interrupt and trap gates*/
                        intgatesize=(type>=0x800)?32:16;  /* 0xE00/0xF00 are the 386 32-bit gates */
                        if (!(segdat[2]&0x8000))
                        {
                                pclog("Int gate not present\n");
                                x86np("Int gate not present\n", (num << 3) | 2);
                                return;
                        }
                        seg=segdat[1];      /* target code selector from the gate */
                        new_cpl = seg & 3;
                        /* Locate the target CS descriptor in the LDT/GDT. */
                        addr=seg&~7;
                        if (seg&4)
                        {
                                if (addr>=ldt.limit)
                                {
                                        pclog("Bigger than LDT limit %04X %04X INT\n",seg,gdt.limit);
                                        x86gpf(NULL,seg&~3);
                                        return;
                                }
                                addr+=ldt.base;
                        }
                        else
                        {
                                if (addr>=gdt.limit)
                                {
                                        pclog("Bigger than GDT limit %04X %04X INT %i\n",seg,gdt.limit,ins);
                                        x86gpf(NULL,seg&~3);
                                        return;
                                }
                                addr+=gdt.base;
                        }
                        cpl_override=1;
                        segdat2[0]=readmemw(0,addr);
                        segdat2[1]=readmemw(0,addr+2);
                        segdat2[2]=readmemw(0,addr+4);
                        segdat2[3]=readmemw(0,addr+6); cpl_override=0; if (cpu_state.abrt) return;
                        oaddr = addr;       /* CS descriptor address, for the accessed-bit write */

                        /* Interrupts may only go to the same or an inner privilege level. */
                        if (DPL2 > CPL)
                        {
                                pclog("INT to higher level 2\n");
                                x86gpf(NULL,seg&~3);
                                return;
                        }
                        switch (segdat2[2]&0x1F00)
                        {
                                case 0x1800: case 0x1900: case 0x1A00: case 0x1B00: /*Non-conforming*/
                                if (DPL2<CPL)
                                {
                                        /* Inner-level transfer: switch to the stack given
                                           by the TSS for privilege level DPL2. */
                                        if (!(segdat2[2]&0x8000))
                                        {
                                                pclog("Int gate CS not present\n");
                                                x86np("Int gate CS not present\n", segdat[1] & 0xfffc);
                                                return;
                                        }
                                        if ((cpu_state.eflags & VM_FLAG) && DPL2)
                                        {
                                                pclog("V86 calling int gate, DPL != 0\n");
                                                x86gpf(NULL,segdat[1]&0xFFFC);
                                                return;
                                        }
                                        /*Load new stack*/
                                        oldss=SS;
                                        oldsp=ESP;
                                        cpl_override=1;
                                        if (tr.access&8)
                                        {
                                                /* 32-bit TSS: SS0/ESP0 pairs are 8 bytes each. */
                                                addr = 4 + tr.base + (DPL2 * 8);
                                                newss=readmemw(0,addr+4);
                                                newsp=readmeml(0,addr);
                                        }
                                        else
                                        {
                                                /* 16-bit TSS: SS/SP pairs are 4 bytes each. */
                                                addr = 2 + tr.base + (DPL2 * 4);
                                                newss=readmemw(0,addr+2);
                                                newsp=readmemw(0,addr);
                                        }
                                        cpl_override=0;
                                        /* Validate the new SS (null / limit / RPL+DPL / type
                                           faults are #SS, not-present is #NP). */
                                        if (!(newss&~3))
                                        {
                                                pclog("Int gate loading null SS\n");
                                                x86ss(NULL,newss&~3);
                                                return;
                                        }
                                        addr=newss&~7;
                                        if (newss&4)
                                        {
                                                if (addr>=ldt.limit)
                                                {
                                                        pclog("Bigger than LDT limit %04X %04X PMODEINT SS\n",newss,gdt.limit);
                                                        x86ss(NULL,newss&~3);
                                                        return;
                                                }
                                                addr+=ldt.base;
                                        }
                                        else
                                        {
                                                if (addr>=gdt.limit)
                                                {
                                                        pclog("Bigger than GDT limit %04X %04X CSC\n",newss,gdt.limit);
                                                        x86ss(NULL,newss&~3);
                                                        return;
                                                }
                                                addr+=gdt.base;
                                        }
                                        cpl_override=1;
                                        segdat3[0]=readmemw(0,addr);
                                        segdat3[1]=readmemw(0,addr+2);
                                        segdat3[2]=readmemw(0,addr+4);
                                        segdat3[3]=readmemw(0,addr+6); cpl_override=0; if (cpu_state.abrt) return;
                                        if (((newss & 3) != DPL2) || (DPL3 != DPL2))
                                        {
                                                pclog("Int gate loading SS with wrong permissions\n");
                                                x86ss(NULL,newss&~3);
                                                return;
                                        }
                                        if ((segdat3[2]&0x1A00)!=0x1200) /* must be writable data */
                                        {
                                                pclog("Int gate loading SS wrong type\n");
                                                x86ss(NULL,newss&~3);
                                                return;
                                        }
                                        if (!(segdat3[2]&0x8000))
                                        {
                                                pclog("Int gate loading SS not present\n");
                                                x86np("Int gate loading SS not present\n", newss & 0xfffc);
                                                return;
                                        }
                                        /* Commit the inner stack. */
                                        SS=newss;
                                        set_stack32((segdat3[3] & 0x40) ? 1 : 0);
                                        if (stack32) ESP=newsp;
                                        else         SP=newsp;
                                        do_seg_load(&cpu_state.seg_ss, segdat3);
#ifdef CS_ACCESSED
                                        cpl_override = 1;
                                        writememw(0, addr+4, segdat3[2] | 0x100); /*Set accessed bit*/
                                        cpl_override = 0;
#endif
                                        if (output) pclog("New stack %04X:%08X\n",SS,ESP);
                                        cpl_override=1;
                                        if (type>=0x800)
                                        {
                                                /* 32-bit gate: on V86 entry push the data segs
                                                   and null them, then SS:ESP, EFLAGS, CS, EIP. */
                                                if (cpu_state.eflags & VM_FLAG)
                                                {
                                                        PUSHL(GS);
                                                        PUSHL(FS);
                                                        PUSHL(DS);
                                                        PUSHL(ES); if (cpu_state.abrt) return;
                                                        loadseg(0,&cpu_state.seg_ds);
                                                        loadseg(0,&cpu_state.seg_es);
                                                        loadseg(0,&cpu_state.seg_fs);
                                                        loadseg(0,&cpu_state.seg_gs);
                                                }
                                                PUSHL(oldss);
                                                PUSHL(oldsp);
                                                PUSHL(cpu_state.flags | (cpu_state.eflags << 16));
                                                PUSHL(CS);
                                                PUSHL(cpu_state.pc); if (cpu_state.abrt) return;
                                        }
                                        else
                                        {
                                                /* 16-bit gate: SS:SP, FLAGS, CS, IP. */
                                                PUSHW(oldss);
                                                PUSHW(oldsp);
                                                PUSHW(cpu_state.flags);
                                                PUSHW(CS);
                                                PUSHW(cpu_state.pc); if (cpu_state.abrt) return;
                                        }
                                        cpl_override=0;
                                        cpu_state.seg_cs.access=0;  /* CPL 0 until CS reloaded below */
                                        cycles -= timing_int_pm_outer - timing_int_pm;
                                        break;
                                }
                                else if (DPL2!=CPL)
                                {
                                        pclog("Non-conforming int gate DPL != CPL\n");
                                        x86gpf(NULL,seg&~3);
                                        return;
                                }
                                /* DPL2 == CPL: same-level delivery, fall through. */
                                case 0x1C00: case 0x1D00: case 0x1E00: case 0x1F00: /*Conforming*/
                                if (!(segdat2[2]&0x8000))
                                {
                                        pclog("Int gate CS not present\n");
                                        x86np("Int gate CS not present\n", segdat[1] & 0xfffc);
                                        return;
                                }
                                if ((cpu_state.eflags & VM_FLAG) && DPL2<CPL)
                                {
                                        pclog("Int gate V86 mode DPL2<CPL\n");
                                        x86gpf(NULL,seg&~3);
                                        return;
                                }
                                /* Same-level delivery: push FLAGS, CS, IP on the current stack. */
                                if (type>0x800)
                                {
                                        PUSHL(cpu_state.flags | (cpu_state.eflags << 16));
                                        PUSHL(CS);
                                        PUSHL(cpu_state.pc); if (cpu_state.abrt) return;
                                }
                                else
                                {
                                        PUSHW(cpu_state.flags);
                                        PUSHW(CS);
                                        PUSHW(cpu_state.pc); if (cpu_state.abrt) return;
                                }
                                new_cpl = CS & 3;   /* CPL unchanged */
                                break;
                                default:
                                pclog("Int gate CS not code segment - %04X %04X %04X %04X\n",segdat2[0],segdat2[1],segdat2[2],segdat2[3]);
                                x86gpf(NULL,seg&~3);
                                return;
                        }
                        /* Load the handler's CS at the new privilege level. */
                        do_seg_load(&cpu_state.seg_cs, segdat2);
                        CS = (seg & ~3) | new_cpl;
                        cpu_state.seg_cs.access = (cpu_state.seg_cs.access & ~(3 << 5)) | (new_cpl << 5);
                        if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
                        oldcpl = CPL;
                        /* 32-bit gates carry a full 32-bit entry offset. */
                        if (type>0x800) cpu_state.pc=segdat[0]|(segdat[3]<<16);
                        else            cpu_state.pc=segdat[0];
                        set_use32(segdat2[3]&0x40);
#ifdef CS_ACCESSED
                        cpl_override = 1;
                        writememw(0, oaddr+4, segdat2[2] | 0x100); /*Set accessed bit*/
                        cpl_override = 0;
#endif
                        /* Entering the handler leaves V86 mode; interrupt gates (bit 8
                           clear in the type) also clear IF.  TF/NT always cleared. */
                        cpu_state.eflags &= ~VM_FLAG;
                        cpu_cur_status &= ~CPU_STATUS_V86;
                        if (!(type&0x100))
                        {
                                cpu_state.flags &= ~I_FLAG;
                        }
                        cpu_state.flags &= ~(T_FLAG|NT_FLAG);
                        cycles -= timing_int_pm;
                        break;

                case 0x500: /*Task gate*/
                        seg=segdat[1];
                        addr=seg&~7;
                        if (seg&4)
                        {
                                if (addr>=ldt.limit)
                                {
                                        pclog("Bigger than LDT limit %04X %04X INT\n",seg,gdt.limit);
                                        x86gpf(NULL,seg&~3);
                                        return;
                                }
                                addr+=ldt.base;
                        }
                        else
                        {
                                if (addr>=gdt.limit)
                                {
                                        pclog("Bigger than GDT limit %04X %04X INT %i\n",seg,gdt.limit,ins);
                                        x86gpf(NULL,seg&~3);
                                        return;
                                }
                                addr+=gdt.base;
                        }
                        cpl_override=1;
                        segdat2[0]=readmemw(0,addr);
                        segdat2[1]=readmemw(0,addr+2);
                        segdat2[2]=readmemw(0,addr+4);
                        segdat2[3]=readmemw(0,addr+6);
                        cpl_override=0; if (cpu_state.abrt) return;
                        if (!(segdat2[2]&0x8000))
                        {
                                pclog("Int task gate not present\n");
                                x86np("Int task gate not present\n", segdat[1] & 0xfffc);
                                return;
                        }
                        /* Dispatch via task switch; bit 0x800 selects a 386 TSS. */
                        optype=OPTYPE_INT;
                        cpl_override=1;
                        taskswitch286(seg,segdat2,segdat2[2]&0x800);
                        cpl_override=0;
                        break;

                default:
                        pclog("Bad int gate type %04X %04X %04X %04X %04X\n",segdat[2]&0x1F00,segdat[0],segdat[1],segdat[2],segdat[3]);
                        x86gpf(NULL,seg&~3);
                        return;
        }
}
/* Protected-mode IRET / IRETD.
 *
 * is32 - operand size: nonzero pops 32-bit EIP/CS/EFLAGS, zero 16-bit.
 *
 * Handles, in order: IRET inside V86 mode (IOPL must be 3), nested-task
 * return via the back-link in the TSS when NT is set, IRETD returning
 * INTO V86 mode (VM bit set in the popped EFLAGS), and the ordinary
 * same-level / outer-level protected-mode returns.  On a fault the
 * original ESP is restored before raising the exception.
 */
void pmodeiret(int is32)
{
        uint32_t newsp;
        uint16_t newss;
        uint32_t tempflags,flagmask;
        uint32_t newpc;
        uint16_t segdat[4],segdat2[4];
        uint16_t segs[4];
        uint16_t seg = 0;
        uint32_t addr, oaddr;
        uint32_t oldsp=ESP;      /* saved so ESP can be rolled back on a fault */
        /* IRET executed while already in V86 mode: IOPL-sensitive. */
        if (is386 && (cpu_state.eflags & VM_FLAG))
        {
//                if (output) pclog("V86 IRET\n");
                if (IOPL!=3)
                {
                        pclog("V86 IRET! IOPL!=3\n");
                        x86gpf(NULL,0);
                        return;
                }
                if (is32)
                {
                        newpc=POPL();
                        seg=POPL();
                        tempflags=POPL(); if (cpu_state.abrt) return;
                }
                else
                {
                        newpc=POPW();
                        seg=POPW();
                        tempflags=POPW(); if (cpu_state.abrt) return;
                }
                /* Real-mode style CS load: base = selector<<4, 64K limit. */
                cpu_state.pc=newpc;
                cpu_state.seg_cs.base=seg<<4;
                cpu_state.seg_cs.limit=0xFFFF;
                cpu_state.seg_cs.limit_low = 0;
                cpu_state.seg_cs.limit_high = 0xffff;
                CS=seg;
                /* IOPL (0x3000) is preserved; only the usual V86-writable bits change. */
                cpu_state.flags = (cpu_state.flags & 0x3000) | (tempflags & 0xCFD5) | 2;
                cycles -= timing_iret_rm;
                return;
        }
        /* NT set: return from a nested task via the back-link at TSS offset 0. */
        if (cpu_state.flags & NT_FLAG)
        {
                seg=readmemw(tr.base,0);
                addr=seg&~7;
                if (seg&4)
                {
                        /* Back-link TSS selector must reference the GDT. */
                        pclog("TS LDT %04X %04X IRET\n",seg,gdt.limit);
                        x86ts(NULL,seg&~3);
                        return;
                }
                else
                {
                        if (addr>=gdt.limit)
                        {
                                pclog("TS Bigger than GDT limit %04X %04X IRET\n",seg,gdt.limit);
                                x86ts(NULL,seg&~3);
                                return;
                        }
                        addr+=gdt.base;
                }
                cpl_override=1;
                segdat[0]=readmemw(0,addr);
                segdat[1]=readmemw(0,addr+2);
                segdat[2]=readmemw(0,addr+4);
                segdat[3]=readmemw(0,addr+6);
                /* NOTE(review): no abrt check between the reads and the task
                   switch here, unlike the other descriptor fetches - looks
                   deliberate in the original but worth confirming. */
                taskswitch286(seg,segdat,segdat[2] & 0x800);
                cpl_override=0;
                return;
        }
        /* Bits of FLAGS this IRET is allowed to change: IOPL only at CPL 0,
           IF only when CPL <= IOPL. */
        flagmask=0xFFFF;
        if (CPL)      flagmask&=~0x3000;
        if (IOPL<CPL) flagmask&=~0x200;
        if (is32)
        {
                newpc=POPL();
                seg=POPL();
                tempflags=POPL(); if (cpu_state.abrt) { ESP = oldsp; return; }
                /* VM bit set in popped EFLAGS: IRETD into V86 mode. */
                if (is386 && ((tempflags>>16)&VM_FLAG))
                {
                        /* Pop the V86 frame: ESP, SS, ES, DS, FS, GS. */
                        newsp=POPL();
                        newss=POPL();
                        segs[0]=POPL();
                        segs[1]=POPL();
                        segs[2]=POPL();
                        segs[3]=POPL(); if (cpu_state.abrt) { ESP = oldsp; return; }
                        cpu_state.eflags = tempflags>>16;
                        cpu_cur_status |= CPU_STATUS_V86;
                        /* Reload data segments with V86 (real-mode style) semantics. */
                        loadseg(segs[0],&cpu_state.seg_es);
                        do_seg_v86_init(&cpu_state.seg_es);
                        loadseg(segs[1],&cpu_state.seg_ds);
                        do_seg_v86_init(&cpu_state.seg_ds);
                        cpu_cur_status |= CPU_STATUS_NOTFLATDS;
                        loadseg(segs[2],&cpu_state.seg_fs);
                        do_seg_v86_init(&cpu_state.seg_fs);
                        loadseg(segs[3],&cpu_state.seg_gs);
                        do_seg_v86_init(&cpu_state.seg_gs);
                        cpu_state.pc = newpc & 0xffff;
                        cpu_state.seg_cs.base=seg<<4;
                        cpu_state.seg_cs.limit=0xFFFF;
                        cpu_state.seg_cs.limit_low = 0;
                        cpu_state.seg_cs.limit_high = 0xffff;
                        CS=seg;
                        cpu_state.seg_cs.access=(3<<5) | 2;   /* V86 code always runs at CPL 3 */
                        if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
                        oldcpl = CPL;
                        ESP=newsp;
                        loadseg(newss,&cpu_state.seg_ss);
                        do_seg_v86_init(&cpu_state.seg_ss);
                        cpu_cur_status |= CPU_STATUS_NOTFLATSS;
                        use32=0;
                        cpu_cur_status &= ~CPU_STATUS_USE32;
                        cpu_state.flags = (tempflags&0xFFD5)|2;
                        cycles -= timing_iret_v86;
                        return;
                }
        }
        else
        {
                newpc=POPW();
                seg=POPW();
                tempflags=POPW(); if (cpu_state.abrt) { ESP = oldsp; return; }
        }
        /* Ordinary protected-mode return: validate the popped CS selector. */
        if (!(seg&~3))
        {
                pclog("IRET CS=0\n");
                ESP = oldsp;
//                dumpregs();
//                exit(-1);
                x86gpf(NULL,0);
                return;
        }
        addr=seg&~7;
        if (seg&4)
        {
                if (addr>=ldt.limit)
                {
                        pclog("Bigger than LDT limit %04X %04X IRET\n",seg,gdt.limit);
                        ESP = oldsp;
                        x86gpf(NULL,seg&~3);
                        return;
                }
                addr+=ldt.base;
        }
        else
        {
                if (addr>=gdt.limit)
                {
                        pclog("Bigger than GDT limit %04X %04X IRET\n",seg,gdt.limit);
                        ESP = oldsp;
                        x86gpf(NULL,seg&~3);
                        return;
                }
                addr+=gdt.base;
        }
        /* IRET can't go to an inner (more privileged) level: RPL >= CPL. */
        if ((seg&3) < CPL)
        {
                pclog("IRET to lower level\n");
                ESP = oldsp;
                x86gpf(NULL,seg&~3);
                return;
        }
        cpl_override=1;
        segdat[0]=readmemw(0,addr);
        segdat[1]=readmemw(0,addr+2);
        segdat[2]=readmemw(0,addr+4);
        segdat[3]=readmemw(0,addr+6); cpl_override=0; if (cpu_state.abrt) { ESP = oldsp; return; }
        /* Descriptor must be a code segment, with the DPL rules per type. */
        switch (segdat[2]&0x1F00)
        {
                case 0x1800: case 0x1900: case 0x1A00: case 0x1B00: /*Non-conforming code*/
                if ((seg&3) != DPL)
                {
                        pclog("IRET NC DPL %04X %04X %04X %04X %04X\n", seg, segdat[0], segdat[1], segdat[2], segdat[3]);
                        ESP = oldsp;
//                        dumpregs();
//                        exit(-1);
                        x86gpf(NULL,seg&~3);
                        return;
                }
                break;
                case 0x1C00: case 0x1D00: case 0x1E00: case 0x1F00: /*Conforming code*/
                if ((seg&3) < DPL)
                {
                        pclog("IRET C DPL\n");
                        ESP = oldsp;
                        x86gpf(NULL,seg&~3);
                        return;
                }
                break;
                default:
                pclog("IRET CS != code seg\n");
                ESP = oldsp;
                x86gpf(NULL,seg&~3);
//                dumpregs();
//                exit(-1);
                return;
        }
        if (!(segdat[2]&0x8000))
        {
                pclog("IRET CS not present %i %04X %04X %04X\n",ins, segdat[0], segdat[1], segdat[2]);
                ESP = oldsp;
                x86np("IRET CS not present\n", seg & 0xfffc);
                return;
        }
        if ((seg&3) == CPL)
        {
                /* Same-level return: just reload CS. */
                CS=seg;
                do_seg_load(&cpu_state.seg_cs, segdat);
                cpu_state.seg_cs.access = (cpu_state.seg_cs.access & ~(3 << 5)) | ((CS & 3) << 5);
                if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
                oldcpl = CPL;
                set_use32(segdat[3]&0x40);
#ifdef CS_ACCESSED
                cpl_override = 1;
                writememw(0, addr+4, segdat[2] | 0x100); /*Set accessed bit*/
                cpl_override = 0;
#endif
                cycles -= timing_iret_pm;
        }
        else /*Return to outer level*/
        {
                oaddr = addr;    /* CS descriptor address, for the accessed-bit write */
                if (output) pclog("Outer level\n");
                /* Pop the outer level's SS:SP. */
                if (is32)
                {
                        newsp=POPL();
                        newss=POPL(); if (cpu_state.abrt) { ESP = oldsp; return; }
                }
                else
                {
                        newsp=POPW();
                        newss=POPW(); if (cpu_state.abrt) { ESP = oldsp; return; }
                }
                if (output) pclog("IRET load stack %04X:%04X\n",newss,newsp);
                if (!(newss&~3))
                {
                        pclog("IRET loading null SS\n");
                        ESP = oldsp;
                        x86gpf(NULL,newss&~3);
                        return;
                }
                /* Locate and fetch the new SS descriptor. */
                addr=newss&~7;
                if (newss&4)
                {
                        if (addr>=ldt.limit)
                        {
                                pclog("Bigger than LDT limit %04X %04X PMODEIRET SS\n",newss,gdt.limit);
                                ESP = oldsp;
                                x86gpf(NULL,newss&~3);
                                return;
                        }
                        addr+=ldt.base;
                }
                else
                {
                        if (addr>=gdt.limit)
                        {
                                pclog("Bigger than GDT limit %04X %04X PMODEIRET\n",newss,gdt.limit);
                                ESP = oldsp;
                                x86gpf(NULL,newss&~3);
                                return;
                        }
                        addr+=gdt.base;
                }
                cpl_override=1;
                segdat2[0]=readmemw(0,addr);
                segdat2[1]=readmemw(0,addr+2);
                segdat2[2]=readmemw(0,addr+4);
                segdat2[3]=readmemw(0,addr+6); cpl_override=0; if (cpu_state.abrt) { ESP = oldsp; return; }
//                if (((newss & 3) != DPL) || (DPL2 != DPL))
                /* SS.RPL must equal the return RPL (the new CPL). */
                if ((newss & 3) != (seg & 3))
                {
                        pclog("IRET loading SS with wrong permissions %04X %04X\n", newss, seg);
                        ESP = oldsp;
//                        dumpregs();
//                        exit(-1);
                        x86gpf(NULL,newss&~3);
                        return;
                }
                /* SS must be a writable data segment (type mask 0x1A00 -> 0x1200). */
                if ((segdat2[2]&0x1A00)!=0x1200)
                {
                        pclog("IRET loading SS wrong type\n");
                        ESP = oldsp;
                        x86gpf(NULL,newss&~3);
                        return;
                }
                /* SS.DPL must also equal the new CPL. */
                if (DPL2 != (seg & 3))
                {
                        pclog("IRET loading SS with wrong permissions2 %i %i %04X %04X\n", DPL2, seg & 3, newss, seg);
                        ESP = oldsp;
                        x86gpf(NULL,newss&~3);
                        return;
                }
                if (!(segdat2[2]&0x8000))
                {
                        pclog("IRET loading SS not present\n");
                        ESP = oldsp;
                        x86np("IRET loading SS not present\n", newss & 0xfffc);
                        return;
                }
                /* Commit the new stack, then the new CS. */
                SS=newss;
                set_stack32((segdat2[3] & 0x40) ? 1 : 0);
                if (stack32) ESP=newsp;
                else         SP=newsp;
                do_seg_load(&cpu_state.seg_ss, segdat2);
#ifdef SEL_ACCESSED
                cpl_override = 1;
                writememw(0, addr+4, segdat2[2] | 0x100); /*Set accessed bit*/
#ifdef CS_ACCESSED
                writememw(0, oaddr+4, segdat[2] | 0x100); /*Set accessed bit*/
#endif
                cpl_override = 0;
#endif
                /*Conforming segments don't change CPL, so CPL = RPL*/
                if (segdat[2]&0x400)
                   segdat[2] = (segdat[2] & ~(3 << (5+8))) | ((seg & 3) << (5+8));
                CS=seg;
                do_seg_load(&cpu_state.seg_cs, segdat);
                cpu_state.seg_cs.access = (cpu_state.seg_cs.access & ~(3 << 5)) | ((CS & 3) << 5);
                if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
                oldcpl = CPL;
                set_use32(segdat[3] & 0x40);
                /* Data segments whose DPL is now inner than CPL must be nulled. */
                check_seg_valid(&cpu_state.seg_ds);
                check_seg_valid(&cpu_state.seg_es);
                check_seg_valid(&cpu_state.seg_fs);
                check_seg_valid(&cpu_state.seg_gs);
                cycles -= timing_iret_pm_outer;
        }
        /* Finally commit EIP and the popped flags (masked by privilege). */
        cpu_state.pc=newpc;
        cpu_state.flags = (cpu_state.flags&~flagmask) | (tempflags&flagmask&0xFFD5)|2;
        if (is32) cpu_state.eflags = tempflags>>16;
}
void taskswitch286(uint16_t seg, uint16_t *segdat, int is32)
{
uint32_t base;
uint32_t limit;
uint32_t templ;
uint16_t tempw;
uint32_t new_cr3=0;
uint16_t new_es,new_cs,new_ss,new_ds,new_fs,new_gs;
uint16_t new_ldt;
uint32_t new_eax,new_ebx,new_ecx,new_edx,new_esp,new_ebp,new_esi,new_edi,new_pc,new_flags;
uint32_t addr;
uint16_t segdat2[4];
//output=3;
base=segdat[1]|((segdat[2]&0xFF)<<16);
limit=segdat[0];
if(is386)
{
base |= (segdat[3]>>8)<<24;
limit |= (segdat[3]&0xF)<<16;
}
// pclog("286 Task switch! %04X:%04X\n",CS,pc);
/// pclog("TSS %04X base %08X limit %04X old TSS %04X %08X %i\n",seg,base,limit,tr.seg,tr.base,ins);
// / pclog("%04X %04X %04X %04X\n",segdat[0],segdat[1],segdat[2],segdat[3]);
if (is32)
{
// if (output) pclog("32-bit TSS\n");
if (limit < 103)
{
pclog("32-bit TSS %04X limit less than 103.\n", seg);
x86ts(NULL, seg);
return;
}
if (optype==JMP || optype==CALL || optype==OPTYPE_INT)
{
if (tr.seg&4) tempw=readmemw(ldt.base,(seg&~7)+4);
else tempw=readmemw(gdt.base,(seg&~7)+4);
if (cpu_state.abrt) return;
tempw|=0x200;
if (tr.seg&4) writememw(ldt.base,(seg&~7)+4,tempw);
else writememw(gdt.base,(seg&~7)+4,tempw);
}
if (cpu_state.abrt) return;
if (optype==IRET) cpu_state.flags&=~NT_FLAG;
// if (output) pclog("Write PC %08X %08X\n",tr.base,pc);
cpu_386_flags_rebuild();
writememl(tr.base,0x1C,cr3);
writememl(tr.base,0x20,cpu_state.pc);
writememl(tr.base,0x24,cpu_state.flags | (cpu_state.eflags<<16));
writememl(tr.base,0x28,EAX);
writememl(tr.base,0x2C,ECX);
writememl(tr.base,0x30,EDX);
writememl(tr.base,0x34,EBX);
writememl(tr.base,0x38,ESP);
writememl(tr.base,0x3C,EBP);
writememl(tr.base,0x40,ESI);
writememl(tr.base,0x44,EDI);
writememl(tr.base,0x48,ES);
// if (output) pclog("Write CS %04X to %08X\n",CS,tr.base+0x4C);
writememl(tr.base,0x4C,CS);
writememl(tr.base,0x50,SS);
writememl(tr.base,0x54,DS);
writememl(tr.base,0x58,FS);
writememl(tr.base,0x5C,GS);
if (optype==JMP || optype==IRET)
{
if (tr.seg&4) tempw=readmemw(ldt.base,(tr.seg&~7)+4);
else tempw=readmemw(gdt.base,(tr.seg&~7)+4);
if (cpu_state.abrt) return;
tempw&=~0x200;
if (tr.seg&4) writememw(ldt.base,(tr.seg&~7)+4,tempw);
else writememw(gdt.base,(tr.seg&~7)+4,tempw);
}
if (cpu_state.abrt) return;
if (optype==OPTYPE_INT || optype==CALL)
{
writememl(base,0,tr.seg);
if (cpu_state.abrt)
return;
}
new_cr3=readmeml(base,0x1C);
new_pc=readmeml(base,0x20);
new_flags=readmeml(base,0x24);
if (optype == OPTYPE_INT || optype == CALL)
new_flags |= NT_FLAG;
new_eax=readmeml(base,0x28);
new_ecx=readmeml(base,0x2C);
new_edx=readmeml(base,0x30);
new_ebx=readmeml(base,0x34);
new_esp=readmeml(base,0x38);
new_ebp=readmeml(base,0x3C);
new_esi=readmeml(base,0x40);
new_edi=readmeml(base,0x44);
new_es=readmemw(base,0x48);
// if (output) pclog("Read CS from %08X\n",base+0x4C);
new_cs=readmemw(base,0x4C);
new_ss=readmemw(base,0x50);
new_ds=readmemw(base,0x54);
new_fs=readmemw(base,0x58);
new_gs=readmemw(base,0x5C);
new_ldt=readmemw(base,0x60);
cr0 |= 8;
cr3=new_cr3;
// pclog("TS New CR3 %08X\n",cr3);
flushmmucache();
cpu_state.pc=new_pc;
// if (output) pclog("New pc %08X\n",new_pc);
cpu_state.flags = new_flags;
cpu_state.eflags = new_flags>>16;
cpu_386_flags_extract();
// if (output) pclog("Load LDT %04X\n",new_ldt);
ldt.seg=new_ldt;
templ=(ldt.seg&~7)+gdt.base;
// if (output) pclog("Load from %08X %08X\n",templ,gdt.base);
ldt.limit=readmemw(0,templ);
if (readmemb(0,templ+6)&0x80)
{
ldt.limit<<=12;
ldt.limit|=0xFFF;
}
ldt.base=(readmemw(0,templ+2))|(readmemb(0,templ+4)<<16)|(readmemb(0,templ+7)<<24);
// if (output) pclog("Limit %04X Base %08X\n",ldt.limit,ldt.base);
if (cpu_state.eflags & VM_FLAG)
{
loadcs(new_cs);
set_use32(0);
cpu_cur_status |= CPU_STATUS_V86;
}
else
{
if (!(new_cs&~3))
{
pclog("TS loading null CS\n");
x86ts(NULL,0);
return;
}
addr=new_cs&~7;
if (new_cs&4)
{
if (addr>=ldt.limit)
{
pclog("Bigger than LDT limit %04X %04X %04X TS\n",new_cs,ldt.limit,addr);
x86ts(NULL,new_cs&~3);
return;
}
addr+=ldt.base;
}
else
{
if (addr>=gdt.limit)
{
pclog("Bigger than GDT limit %04X %04X TS\n",new_cs,gdt.limit);
x86ts(NULL,new_cs&~3);
return;
}
addr+=gdt.base;
}
segdat2[0]=readmemw(0,addr);
segdat2[1]=readmemw(0,addr+2);
segdat2[2]=readmemw(0,addr+4);
segdat2[3]=readmemw(0,addr+6);
if (!(segdat2[2]&0x8000))
{
pclog("TS loading CS not present\n");
x86np("TS loading CS not present\n", new_cs & 0xfffc);
return;
}
switch (segdat2[2]&0x1F00)
{
case 0x1800: case 0x1900: case 0x1A00: case 0x1B00: /*Non-conforming*/
if ((new_cs&3) != DPL2)
{
pclog("TS load CS non-conforming RPL != DPL");
x86ts(NULL,new_cs&~3);
return;
}
break;
case 0x1C00: case 0x1D00: case 0x1E00: case 0x1F00: /*Conforming*/
if ((new_cs&3) < DPL2)
{
pclog("TS load CS non-conforming RPL < DPL");
x86ts(NULL,new_cs&~3);
return;
}
break;
default:
pclog("TS load CS not code segment\n");
x86ts(NULL,new_cs&~3);
return;
}
// if (output) pclog("new_cs %04X\n",new_cs);
CS=new_cs;
do_seg_load(&cpu_state.seg_cs, segdat2);
if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
oldcpl = CPL;
set_use32(segdat2[3] & 0x40);
cpu_cur_status &= ~CPU_STATUS_V86;
}
EAX=new_eax;
ECX=new_ecx;
EDX=new_edx;
EBX=new_ebx;
ESP=new_esp;
EBP=new_ebp;
ESI=new_esi;
EDI=new_edi;
if (output) pclog("Load ES %04X\n",new_es);
loadseg(new_es,&cpu_state.seg_es);
if (output) pclog("Load SS %04X\n",new_ss);
loadseg(new_ss,&cpu_state.seg_ss);
if (output) pclog("Load DS %04X\n",new_ds);
loadseg(new_ds,&cpu_state.seg_ds);
if (output) pclog("Load FS %04X\n",new_fs);
loadseg(new_fs,&cpu_state.seg_fs);
if (output) pclog("Load GS %04X\n",new_gs);
loadseg(new_gs,&cpu_state.seg_gs);
if (output) pclog("Resuming at %04X:%08X\n",CS,cpu_state.pc);
}
else
{
// pclog("16-bit TSS\n");
// resetx86();
if (limit < 43)
{
pclog("16-bit TSS %04X limit less than 43.\n", seg);
x86ts(NULL, seg);
return;
}
if (optype==JMP || optype==CALL || optype==OPTYPE_INT)
{
if (tr.seg&4) tempw=readmemw(ldt.base,(seg&~7)+4);
else tempw=readmemw(gdt.base,(seg&~7)+4);
if (cpu_state.abrt) return;
tempw|=0x200;
if (tr.seg&4) writememw(ldt.base,(seg&~7)+4,tempw);
else writememw(gdt.base,(seg&~7)+4,tempw);
}
if (cpu_state.abrt) return;
if (optype == IRET)
cpu_state.flags &= ~NT_FLAG;
// if (output) pclog("Write PC %08X %08X\n",tr.base,pc);
cpu_386_flags_rebuild();
writememw(tr.base,0x0E,cpu_state.pc);
writememw(tr.base,0x10,cpu_state.flags);
writememw(tr.base,0x12,AX);
writememw(tr.base,0x14,CX);
writememw(tr.base,0x16,DX);
writememw(tr.base,0x18,BX);
writememw(tr.base,0x1A,SP);
writememw(tr.base,0x1C,BP);
writememw(tr.base,0x1E,SI);
writememw(tr.base,0x20,DI);
writememw(tr.base,0x22,ES);
// if (output) pclog("Write CS %04X to %08X\n",CS,tr.base+0x4C);
writememw(tr.base,0x24,CS);
writememw(tr.base,0x26,SS);
writememw(tr.base,0x28,DS);
if (optype==JMP || optype==IRET)
{
if (tr.seg&4) tempw=readmemw(ldt.base,(tr.seg&~7)+4);
else tempw=readmemw(gdt.base,(tr.seg&~7)+4);
if (cpu_state.abrt) return;
tempw&=~0x200;
if (tr.seg&4) writememw(ldt.base,(tr.seg&~7)+4,tempw);
else writememw(gdt.base,(tr.seg&~7)+4,tempw);
}
if (cpu_state.abrt) return;
if (optype==OPTYPE_INT || optype==CALL)
{
writememw(base,0,tr.seg);
if (cpu_state.abrt)
return;
}
new_pc=readmemw(base,0x0E);
new_flags=readmemw(base,0x10);
if (optype == OPTYPE_INT || optype == CALL)
new_flags |= NT_FLAG;
new_eax=readmemw(base,0x12);
new_ecx=readmemw(base,0x14);
new_edx=readmemw(base,0x16);
new_ebx=readmemw(base,0x18);
new_esp=readmemw(base,0x1A);
new_ebp=readmemw(base,0x1C);
new_esi=readmemw(base,0x1E);
new_edi=readmemw(base,0x20);
new_es=readmemw(base,0x22);
// if (output) pclog("Read CS from %08X\n",base+0x4C);
new_cs=readmemw(base,0x24);
new_ss=readmemw(base,0x26);
new_ds=readmemw(base,0x28);
new_ldt=readmemw(base,0x2A);
msw |= 8;
cpu_state.pc=new_pc;
// if (output) pclog("New pc %08X\n",new_pc);
cpu_state.flags = new_flags;
cpu_386_flags_extract();
// if (output) pclog("Load LDT %04X\n",new_ldt);
ldt.seg=new_ldt;
templ=(ldt.seg&~7)+gdt.base;
// if (output) pclog("Load from %08X %08X\n",templ,gdt.base);
ldt.limit=readmemw(0,templ);
ldt.base=(readmemw(0,templ+2))|(readmemb(0,templ+4)<<16);
if (is386)
{
if (readmemb(0,templ+6)&0x80)
{
ldt.limit<<=12;
ldt.limit|=0xFFF;
}
ldt.base|=(readmemb(0,templ+7)<<24);
}
// if (output) pclog("Limit %04X Base %08X\n",ldt.limit,ldt.base);
if (!(new_cs&~3))
{
pclog("TS loading null CS\n");
x86ts(NULL,0);
return;
}
addr=new_cs&~7;
if (new_cs&4)
{
if (addr>=ldt.limit)
{
pclog("Bigger than LDT limit %04X %04X %04X TS\n",new_cs,ldt.limit,addr);
x86ts(NULL,new_cs&~3);
return;
}
addr+=ldt.base;
}
else
{
if (addr>=gdt.limit)
{
pclog("Bigger than GDT limit %04X %04X TS\n",new_cs,gdt.limit);
x86ts(NULL,new_cs&~3);
return;
}
addr+=gdt.base;
}
segdat2[0]=readmemw(0,addr);
segdat2[1]=readmemw(0,addr+2);
segdat2[2]=readmemw(0,addr+4);
segdat2[3]=readmemw(0,addr+6);
if (!(segdat2[2]&0x8000))
{
pclog("TS loading CS not present\n");
x86np("TS loading CS not present\n", new_cs & 0xfffc);
return;
}
switch (segdat2[2]&0x1F00)
{
case 0x1800: case 0x1900: case 0x1A00: case 0x1B00: /*Non-conforming*/
if ((new_cs&3) != DPL2)
{
pclog("TS load CS non-conforming RPL != DPL");
x86ts(NULL,new_cs&~3);
return;
}
break;
case 0x1C00: case 0x1D00: case 0x1E00: case 0x1F00: /*Conforming*/
if ((new_cs&3) < DPL2)
{
pclog("TS load CS non-conforming RPL < DPL");
x86ts(NULL,new_cs&~3);
return;
}
break;
default:
pclog("TS load CS not code segment\n");
x86ts(NULL,new_cs&~3);
return;
}
// if (output) pclog("new_cs %04X\n",new_cs);
CS=new_cs;
do_seg_load(&cpu_state.seg_cs, segdat2);
if (CPL==3 && oldcpl!=3) flushmmucache_cr3();
oldcpl = CPL;
set_use32(0);
EAX=new_eax | 0xFFFF0000;
ECX=new_ecx | 0xFFFF0000;
EDX=new_edx | 0xFFFF0000;
EBX=new_ebx | 0xFFFF0000;
ESP=new_esp | 0xFFFF0000;
EBP=new_ebp | 0xFFFF0000;
ESI=new_esi | 0xFFFF0000;
EDI=new_edi | 0xFFFF0000;
if (output) pclog("Load ES %04X\n",new_es);
loadseg(new_es,&cpu_state.seg_es);
if (output) pclog("Load SS %04X\n",new_ss);
loadseg(new_ss,&cpu_state.seg_ss);
if (output) pclog("Load DS %04X\n",new_ds);
loadseg(new_ds,&cpu_state.seg_ds);
if (is386)
{
loadseg(0,&cpu_state.seg_fs);
loadseg(0,&cpu_state.seg_gs);
}
if (output) pclog("Resuming at %04X:%08X\n",CS,cpu_state.pc);
//exit(-1);
}
tr.seg=seg;
tr.base=base;
tr.limit=limit;
tr.access=segdat[2]>>8;
}
/* SYSENTER: fast ring-0 entry.  Target EIP/ESP come from the
 * sysenter_eip/sysenter_esp MSR shadows; CS and SS descriptor caches are
 * loaded directly with flat 4 GB 32-bit segments derived from sysenter_cs
 * (no descriptor-table fetch, matching the instruction's defined
 * behaviour).  V86 mode is left and interrupts are masked on entry. */
void sysenter(void)
{
    cpu_state.eflags &= ~VM_FLAG;       /* leave virtual-8086 mode */
    cpu_state.flags &= ~I_FLAG;         /* interrupts disabled in the handler */

    ESP = sysenter_esp;
    cpu_state.pc = sysenter_eip;
    /* CS = SYSENTER_CS with RPL forced to 0; flat 32-bit code segment. */
    cpu_state.seg_cs.seg = sysenter_cs & 0xfffc;
    cpu_state.seg_cs.base = 0;
    cpu_state.seg_cs.limit_low = 0;
    cpu_state.seg_cs.limit = 0xffffffff;
    cpu_state.seg_cs.limit_high = 0xffffffff;
    cpu_state.seg_cs.access = 0x1b;     /* present, DPL 0, code, readable, accessed */
    cpu_state.seg_cs.checked = 1;
    oldcpl = 0;

    /* SS = CS + 8 per the SYSENTER convention; flat 32-bit data segment. */
    cpu_state.seg_ss.seg = (sysenter_cs & 0xfffc) + 8;
    cpu_state.seg_ss.base = 0;
    cpu_state.seg_ss.limit_low = 0;
    cpu_state.seg_ss.limit = 0xffffffff;
    cpu_state.seg_ss.limit_high = 0xffffffff;
    cpu_state.seg_ss.access = 0x13;     /* present, DPL 0, data, writable, accessed */
    cpu_state.seg_ss.checked = 1;

    /* Both segments are flat, so clear the not-flat/V86 status bits and
     * mark 32-bit protected-mode execution for the emulator core. */
    cpu_cur_status &= ~(CPU_STATUS_NOTFLATSS | CPU_STATUS_V86);
    cpu_cur_status |= (CPU_STATUS_USE32 | CPU_STATUS_STACK32 | CPU_STATUS_PMODE);

    set_use32(1);
    set_stack32(1);

//        pclog("syscall to %04x:%08x %04x:%08x\n", CS, cpu_state.pc, SS, ESP);
}
/* SYSEXIT: fast return to ring 3.  ESP comes from ECX and EIP from EDX
 * (the instruction's register convention); CS/SS are loaded as flat
 * 4 GB ring-3 segments at SYSENTER_CS+16 / +24 with RPL 3, again without
 * touching the descriptor tables. */
void sysexit(void)
{
    ESP = ECX;
    cpu_state.pc = EDX;

    cpu_state.seg_cs.seg = (sysenter_cs | 3) + 16;
    cpu_state.seg_cs.base = 0;
    cpu_state.seg_cs.limit_low = 0;
    cpu_state.seg_cs.limit = 0xffffffff;
    cpu_state.seg_cs.limit_high = 0xffffffff;
    cpu_state.seg_cs.access = 0x7b;     /* present, DPL 3, code, readable, accessed */
    cpu_state.seg_cs.checked = 1;
    oldcpl = 3;

    cpu_state.seg_ss.seg = (sysenter_cs | 3) + 24;
    cpu_state.seg_ss.base = 0;
    cpu_state.seg_ss.limit_low = 0;
    cpu_state.seg_ss.limit = 0xffffffff;
    cpu_state.seg_ss.limit_high = 0xffffffff;
    cpu_state.seg_ss.access = 0x73;     /* present, DPL 3, data, writable, accessed */
    cpu_state.seg_ss.checked = 1;

    cpu_cur_status &= ~(CPU_STATUS_NOTFLATSS | CPU_STATUS_V86);
    cpu_cur_status |= (CPU_STATUS_USE32 | CPU_STATUS_STACK32 | CPU_STATUS_PMODE);

    /* CPL changed (0 -> 3), so translations cached under ring 0 are stale. */
    flushmmucache_cr3();
    set_use32(1);
    set_stack32(1);

//        pclog("sysexit to %04x:%08x %04x:%08x\n", CS, cpu_state.pc, SS, ESP);
}
/* Request a System Management Interrupt.  Only raises the pending flag;
 * presumably the main execution loop notices smi_pending at an
 * instruction boundary and calls x86_smi_enter() — confirm against the
 * core interpreter loop. */
void x86_smi_trigger(void)
{
    cpu_state.smi_pending = 1;
}
/* Dump one segment register's hidden descriptor cache into the SMM save
 * area: dword at addr+8 packs selector plus both access bytes, addr+4
 * holds the base, addr holds the limit (read back by
 * smi_load_descriptor_cache with the same layout). */
static void smi_write_descriptor_cache(uint32_t addr, x86seg *seg)
{
    uint32_t attr = seg->seg | (seg->access << 16) | (seg->access2 << 24);

    writememl(0, addr + 8, attr);
    writememl(0, addr + 4, seg->base);
    writememl(0, addr,     seg->limit);
}
/* Reload a segment register's hidden descriptor cache from the SMM save
 * area (inverse of smi_write_descriptor_cache): the dword at addr+8
 * packs selector/access/access2, addr+4 holds the base, addr the limit. */
static void smi_load_descriptor_cache(uint32_t addr, x86seg *seg)
{
    uint32_t temp;

    temp = readmeml(0, addr + 8);
    seg->base = readmeml(0, addr + 4);
    seg->limit = readmeml(0, addr);
    seg->seg = temp & 0xffff;
    seg->access = temp >> 16;
    seg->access2 = temp >> 24;
    /* Recompute the usable address window.  The first branch covers every
     * segment that is NOT an expand-down data segment ((access & 0x18) ==
     * 0x10 means a data segment; access bit 2 is its expand-down flag):
     * those are valid over [0, limit].  An expand-down segment is valid
     * from limit+1 up to 0xFFFF or 0xFFFFFFFF depending on the big/D bit
     * (access2 & 0x40). */
    if ((seg->access & 0x18) != 0x10 || !(seg->access & (1 << 2))) /*expand-down*/
    {
        seg->limit_high = seg->limit;
        seg->limit_low = 0;
    }
    else
    {
        seg->limit_high = (seg->access2 & 0x40) ? 0xffffffff : 0xffff;
        seg->limit_low = seg->limit + 1;
    }
}
/* Reset a segment register to the state SMM entry requires: null
 * selector, zero base, full 4 GB limit, and a present DPL-3 writable
 * data access byte (0x62 == (3 << 5) | 2). */
static void smi_load_smi_selector(x86seg *seg)
{
    seg->seg        = 0;
    seg->base       = 0;
    seg->access     = 0x62;
    seg->access2    = 0;
    seg->limit      = 0xffffffff;
    seg->limit_low  = 0;
    seg->limit_high = 0xffffffff;
}
/* Store a segment register into the Cyrix SMM save layout as two dwords.
 * The packing mirrors a GDT descriptor's low quadword: limit 15..0 and
 * base 15..0 in the first dword; base 23..16, access byte, limit 19..16,
 * flags byte (access2) and base 31..24 in the second.  Read back by
 * cyrix_load_seg_descriptor (which additionally reads a selector word at
 * addr+8 that this function does not write — the caller stores it). */
void cyrix_write_seg_descriptor(uint32_t addr, x86seg *seg)
{
    writememl(0, addr, (seg->limit_raw & 0xffff) | (seg->base << 16));
    writememl(0, addr+4, ((seg->base >> 16) & 0xff) |
                         (seg->access << 8) |
                         (seg->limit_raw & 0xf0000) |
                         (seg->access2 << 16) |
                         (seg->base & 0xff000000));
}
/* Reload a segment register from the Cyrix SMM save layout: four
 * descriptor words at addr (as written by cyrix_write_seg_descriptor)
 * followed by the selector word at addr+8.  On success the emulator's
 * flat-segment status bits are refreshed for DS/SS so the dynarec and
 * address-translation fast paths stay consistent. */
void cyrix_load_seg_descriptor(uint32_t addr, x86seg *seg)
{
    uint16_t segdat[4], selector;

    segdat[0] = readmemw(0, addr);
    segdat[1] = readmemw(0, addr+2);
    segdat[2] = readmemw(0, addr+4);
    segdat[3] = readmemw(0, addr+6);
    selector = readmemw(0, addr+8);

    /* A faulted read sets cpu_state.abrt; leave the register untouched then. */
    if (!cpu_state.abrt)
    {
        do_seg_load(seg, segdat);
        seg->seg = selector;
        seg->checked = 0;
        if (seg == &cpu_state.seg_ds)
        {
            if (seg->base == 0 && seg->limit_low == 0 && seg->limit_high == 0xffffffff)
                cpu_cur_status &= ~CPU_STATUS_NOTFLATDS;
            else
                cpu_cur_status |= CPU_STATUS_NOTFLATDS;
            /* Force the code generator to re-derive its flat-DS assumption. */
            codegen_flat_ds = 0;
        }
        if (seg == &cpu_state.seg_ss)
        {
            if (seg->base == 0 && seg->limit_low == 0 && seg->limit_high == 0xffffffff)
                cpu_cur_status &= ~CPU_STATUS_NOTFLATSS;
            else
                cpu_cur_status |= CPU_STATUS_NOTFLATSS;
            /* Stack width follows the descriptor's big/D bit. */
            set_stack32((segdat[3] & 0x40) ? 1 : 0);
            codegen_flat_ss = 0;
        }
    }
}
/* Enter System Management Mode: save CPU state to SMRAM and reinitialise
 * the core for the SMI handler.
 *
 * Cyrix parts use a compact save layout growing downward from the SMHR
 * header address (derived from ARR3 when software never programmed it);
 * all other CPUs write the conventional SMRAM state-save area at
 * smbase+0x8000+0x7xxx and resume at CS 0x3000, offset 0x8000 inside
 * SMRAM.  cpl_override is held around the stores so they bypass
 * protection checks, and cr0 is cleared first to disable paging. */
void x86_smi_enter(void)
{
    uint32_t old_cr0 = cr0;

    if (smram_enable)
        smram_enable();

    flushmmucache();
    cpu_386_flags_rebuild();

    cpl_override = 1;
    cr0 = 0; /*Disable MMU*/

    if (cpu_iscyrix)
    {
        uint32_t base;

        /* Derive the SMM header pointer from ARR3 if SMHR was never
         * programmed.  BUGFIX: the original condition was
         * "!cyrix.smhr & SMHR_VALID", which C precedence parses as
         * "(!cyrix.smhr) & SMHR_VALID" — it only fired when the whole
         * register was zero instead of testing the valid bit. */
        if (!(cyrix.smhr & SMHR_VALID))
            cyrix.smhr = (cyrix.arr[3].base + cyrix.arr[3].size) | SMHR_VALID;
        base = cyrix.smhr & SMHR_ADDR_MASK;

        /* Save slots grow downward from the header address. */
        writememl(0, base-4, dr[7]);
        writememl(0, base-8, cpu_state.flags | (cpu_state.eflags << 16));
        writememl(0, base-0xc, old_cr0);
        writememl(0, base-0x10, cpu_state.oldpc);
        writememl(0, base-0x14, cpu_state.pc);
        writememl(0, base-0x18, CS | (CPL << 21));
        cyrix_write_seg_descriptor(base-0x20, &cpu_state.seg_cs);
        writememl(0, base-0x24, 0);

        cpl_override = 0;

        /* Reset execution state for the SMI handler: real-mode-like,
         * 16-bit, executing out of the ARR3-mapped SMM region. */
        cpu_cur_status = CPU_STATUS_SMM;
        cpu_state.flags = 2;
        cpu_state.eflags = 0;

        cpu_state.pc = 0;
        cr0 &= ~((1 << 0) | (1 << 2) | (1 << 3) | (1 << 31));
        dr[7] = 0x400;

        cpu_state.seg_cs.seg = cyrix.arr[3].base >> 4; /*Guess*/
        cpu_state.seg_cs.base = cyrix.arr[3].base;
        cpu_state.seg_cs.limit = 0xffffffff;
        cpu_state.seg_cs.limit_low = 0;
        cpu_state.seg_cs.limit_high = 0xffffffff;
        cpu_state.seg_cs.access = (0 << 5) | 2;

        use32 = 0;
        stack32 = 0;
    }
    else
    {
        /* Classic SMRAM state-save area: control registers, flags, PC,
         * general registers, debug registers, task/segment selectors,
         * then the hidden descriptor caches. */
        writememl(0, cpu_state.smbase + 0x8000 + 0x7ffc, old_cr0);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7ff8, cr3);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7ff4, cpu_state.flags | (cpu_state.eflags << 16));
        writememl(0, cpu_state.smbase + 0x8000 + 0x7ff0, cpu_state.pc);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fec, EDI);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fe8, ESI);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fe4, EBP);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fe0, ESP);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fdc, EBX);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fd8, EDX);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fd4, ECX);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fd0, EAX);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fcc, dr[6]);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fc8, dr[7]);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fc4, tr.seg);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fc0, ldt.seg);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fbc, cpu_state.seg_gs.seg);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fb8, cpu_state.seg_fs.seg);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fb4, cpu_state.seg_ds.seg);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fb0, cpu_state.seg_ss.seg);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fac, cpu_state.seg_cs.seg);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7fa8, cpu_state.seg_es.seg);

        smi_write_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f9c, &tr);
        smi_write_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f90, &idt);
        smi_write_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f84, &gdt);
        smi_write_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f78, &ldt);
        smi_write_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f6c, &cpu_state.seg_gs);
        smi_write_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f60, &cpu_state.seg_fs);
        smi_write_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f54, &cpu_state.seg_ds);
        smi_write_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f48, &cpu_state.seg_ss);
        smi_write_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f3c, &cpu_state.seg_cs);
        smi_write_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f30, &cpu_state.seg_es);

        writememl(0, cpu_state.smbase + 0x8000 + 0x7f28, cr4);
        /* Current SMBASE plus the revision/feature identifier word. */
        writememl(0, cpu_state.smbase + 0x8000 + 0x7ef8, cpu_state.smbase);
        writememl(0, cpu_state.smbase + 0x8000 + 0x7efc, 0x00020000);

        cpl_override = 0;

        /* Reset execution state for the SMI handler at 3000:8000. */
        cpu_cur_status = CPU_STATUS_SMM;
        cpu_state.flags = 2;
        cpu_state.eflags = 0;

        cpu_state.pc = 0x8000;
        cr0 &= ~((1 << 0) | (1 << 2) | (1 << 3) | (1 << 31));
        dr[7] = 0x400;

        smi_load_smi_selector(&cpu_state.seg_ds);
        smi_load_smi_selector(&cpu_state.seg_es);
        smi_load_smi_selector(&cpu_state.seg_fs);
        smi_load_smi_selector(&cpu_state.seg_gs);
        smi_load_smi_selector(&cpu_state.seg_ss);

        cpu_state.seg_cs.seg = 0x3000;
        cpu_state.seg_cs.base = cpu_state.smbase;
        cpu_state.seg_cs.limit = 0xffffffff;
        cpu_state.seg_cs.limit_low = 0;
        cpu_state.seg_cs.limit_high = 0xffffffff;
        cpu_state.seg_cs.access = (0 << 5) | 2;

        use32 = 0;
        stack32 = 0;
    }

    oldcpl = 0;
//        pclog("x86_smi_enter\n");
}
/* Leave System Management Mode (RSM): restore the CPU state saved by
 * x86_smi_enter from SMRAM, then rebuild the emulator's derived mode
 * bits (protected mode / V86 / 32-bit code and stack / flat DS/SS)
 * from the restored cr0, eflags and descriptor caches. */
void x86_smi_leave(void)
{
    uint32_t temp;
    uint32_t new_cr0;

    if (cpu_iscyrix)
    {
        uint32_t base = cyrix.smhr & SMHR_ADDR_MASK;

        /* Read back the compact Cyrix layout (offsets match the saves in
         * x86_smi_enter; the oldpc slot at base-0x10 is not restored). */
        cpl_override = 1;
        dr[7] = readmeml(0, base-4);
        temp = readmeml(0, base-8);
        cpu_state.flags = temp & 0xffff;
        cpu_state.eflags = temp >> 16;
        new_cr0 = readmeml(0, base-0xc);
        cpu_state.pc = readmeml(0, base-0x14);
        cyrix_load_seg_descriptor(base-0x20, &cpu_state.seg_cs);
        cpl_override = 0;
        /* cr0 is restored last so the reads above ran with paging off. */
        cr0 = new_cr0;
    }
    else
    {
        /* Read back the conventional SMRAM state-save area in the
         * mirror order of the writes in x86_smi_enter. */
        cpl_override = 1;
        new_cr0 = readmeml(0, cpu_state.smbase + 0x8000 + 0x7ffc);
        cr3 = readmeml(0, cpu_state.smbase + 0x8000 + 0x7ff8);
        temp = readmeml(0, cpu_state.smbase + 0x8000 + 0x7ff4);
        cpu_state.flags = temp & 0xffff;
        cpu_state.eflags = temp >> 16;
        cpu_state.pc = readmeml(0, cpu_state.smbase + 0x8000 + 0x7ff0);
        EDI = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fec);
        ESI = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fe8);
        EBP = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fe4);
        ESP = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fe0);
        EBX = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fdc);
        EDX = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fd8);
        ECX = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fd4);
        EAX = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fd0);
        dr[6] = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fcc);
        dr[7] = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fc8);
        tr.seg = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fc4);
        ldt.seg = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fc0);
        cpu_state.seg_gs.seg = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fbc);
        cpu_state.seg_fs.seg = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fb8);
        cpu_state.seg_ds.seg = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fb4);
        cpu_state.seg_ss.seg = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fb0);
        cpu_state.seg_cs.seg = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fac);
        cpu_state.seg_es.seg = readmeml(0, cpu_state.smbase + 0x8000 + 0x7fa8);

        smi_load_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f9c, &tr);
        smi_load_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f90, &idt);
        smi_load_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f84, &gdt);
        smi_load_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f78, &ldt);
        smi_load_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f6c, &cpu_state.seg_gs);
        smi_load_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f60, &cpu_state.seg_fs);
        smi_load_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f54, &cpu_state.seg_ds);
        smi_load_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f48, &cpu_state.seg_ss);
        smi_load_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f3c, &cpu_state.seg_cs);
        smi_load_descriptor_cache(cpu_state.smbase + 0x8000 + 0x7f30, &cpu_state.seg_es);

        cr4 = readmeml(0, cpu_state.smbase + 0x8000 + 0x7f28);
        /* The SMI handler may have relocated SMBASE; read after all other
         * slots since the addresses above still use the old base. */
        cpu_state.smbase = readmeml(0, cpu_state.smbase + 0x8000 + 0x7ef8);
        cpl_override = 0;
        cr0 = new_cr0;
    }

    cpu_386_flags_extract();
    cpu_cur_status = 0;

    /* Re-derive mode status from the restored state. */
    use32 = stack32 = 0;
    if (cr0 & 1)
    {
        cpu_cur_status |= CPU_STATUS_PMODE;
        if (cpu_state.eflags & VM_FLAG)
            cpu_cur_status |= CPU_STATUS_V86;
        else
        {
            if (cpu_state.seg_cs.access2 & 0x40)
            {
                cpu_cur_status |= CPU_STATUS_USE32;
                use32 = 0x300;  /* non-zero sentinel used by the core for 32-bit code */
            }
            if (cpu_state.seg_ss.access2 & 0x40)
            {
                cpu_cur_status |= CPU_STATUS_STACK32;
                stack32 = 1;
            }
        }
    }

    if (!(cpu_state.seg_ds.base == 0 && cpu_state.seg_ds.limit_low == 0 && cpu_state.seg_ds.limit_high == 0xffffffff))
        cpu_cur_status |= CPU_STATUS_NOTFLATDS;
    if (!(cpu_state.seg_ss.base == 0 && cpu_state.seg_ss.limit_low == 0 && cpu_state.seg_ss.limit_high == 0xffffffff))
        cpu_cur_status |= CPU_STATUS_NOTFLATSS;

    if (smram_disable)
        smram_disable();
    flushmmucache();

    oldcpl = CPL;
//        pclog("x86_smi_leave\n");
}
|
372156.c | /*
* Support routine for configuring link layer address
*/
#include "syshead.h"
#include "error.h"
#include "misc.h"
/*
 * Set the link-layer (MAC) address of a network interface by spawning the
 * platform's ip/ifconfig utility.
 *
 * ifname - interface name (required)
 * lladdr - link-layer address string to assign (required)
 * es     - environment set passed to the spawned command
 *
 * Returns -1 if an argument is missing or the platform is unsupported;
 * otherwise the result of openvpn_execve_check() (non-zero indicates the
 * command succeeded, per the success message logged below).
 */
int set_lladdr(const char *ifname, const char *lladdr,
               const struct env_set *es)
{
    struct argv argv = argv_new ();
    int r;

    if (!ifname || !lladdr)
        return -1;

    /* Build the platform-specific command line. */
#if defined(TARGET_LINUX)
#ifdef CONFIG_FEATURE_IPROUTE
    /* iproute2: "ip link set addr <lladdr> dev <ifname>" */
    argv_printf (&argv,
                 "%s link set addr %s dev %s",
                 iproute_path, lladdr, ifname);
#else
    /* net-tools fallback: "ifconfig <ifname> hw ether <lladdr>" */
    argv_printf (&argv,
                 "%s %s hw ether %s",
                 IFCONFIG_PATH,
                 ifname, lladdr);
#endif
#elif defined(TARGET_SOLARIS)
    argv_printf (&argv,
                 "%s %s ether %s",
                 IFCONFIG_PATH,
                 ifname, lladdr);
#elif defined(TARGET_OPENBSD)
    argv_printf (&argv,
                 "%s %s lladdr %s",
                 IFCONFIG_PATH,
                 ifname, lladdr);
#elif defined(TARGET_DARWIN)
    argv_printf (&argv,
                 "%s %s lladdr %s",
                 IFCONFIG_PATH,
                 ifname, lladdr);
#elif defined(TARGET_FREEBSD)
    argv_printf (&argv,
                 "%s %s ether %s",
                 IFCONFIG_PATH,
                 ifname, lladdr);
#else
    msg (M_WARN, "Sorry, but I don't know how to configure link layer addresses on this operating system.");
    return -1;
#endif

    argv_msg (M_INFO, &argv);
    r = openvpn_execve_check (&argv, es, M_WARN, "ERROR: Unable to set link layer address.");
    if (r)
        msg (M_INFO, "TUN/TAP link layer address set to %s", lladdr);
    argv_reset (&argv);
    return r;
}
|
355610.c | /*
* CD Graphics Video Decoder
* Copyright (c) 2009 Michael Tison
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"
/**
* @file
* @brief CD Graphics Video Decoder
* @author Michael Tison
* @see http://wiki.multimedia.cx/index.php?title=CD_Graphics
* @see http://www.ccs.neu.edu/home/bchafy/cdb/info/cdg
*/
/// default screen sizes
#define CDG_FULL_WIDTH 300
#define CDG_FULL_HEIGHT 216
#define CDG_DISPLAY_WIDTH 294
#define CDG_DISPLAY_HEIGHT 204
#define CDG_BORDER_WIDTH 6
#define CDG_BORDER_HEIGHT 12
/// masks
#define CDG_COMMAND 0x09
#define CDG_MASK 0x3F
/// instruction codes
#define CDG_INST_MEMORY_PRESET 1
#define CDG_INST_BORDER_PRESET 2
#define CDG_INST_TILE_BLOCK 6
#define CDG_INST_SCROLL_PRESET 20
#define CDG_INST_SCROLL_COPY 24
#define CDG_INST_TRANSPARENT_COL 28
#define CDG_INST_LOAD_PAL_LO 30
#define CDG_INST_LOAD_PAL_HIGH 31
#define CDG_INST_TILE_BLOCK_XOR 38
/// data sizes
#define CDG_PACKET_SIZE 24
#define CDG_DATA_SIZE 16
#define CDG_TILE_HEIGHT 12
#define CDG_TILE_WIDTH 6
#define CDG_MINIMUM_PKT_SIZE 6
#define CDG_MINIMUM_SCROLL_SIZE 3
#define CDG_HEADER_SIZE 8
#define CDG_PALETTE_SIZE 16
typedef struct CDGraphicsContext {
AVFrame *frame;
int hscroll;
int vscroll;
int transparency;
} CDGraphicsContext;
/* Allocate the persistent frame and initialise decoder state.
 * Output is always a PAL8 frame at the fixed CDG screen size. */
static av_cold int cdg_decode_init(AVCodecContext *avctx)
{
    CDGraphicsContext *ctx = avctx->priv_data;

    ctx->frame = av_frame_alloc();
    if (!ctx->frame)
        return AVERROR(ENOMEM);
    ctx->transparency = -1;    /* no transparent palette entry yet */

    avctx->pix_fmt = AV_PIX_FMT_PAL8;
    return ff_set_dimensions(avctx, CDG_FULL_WIDTH, CDG_FULL_HEIGHT);
}
/* Paint the border region with the colour index from the low nibble of
 * data[0].  Only acts when the low nibble of data[1] is zero (same guard
 * as the original implementation). */
static void cdg_border_preset(CDGraphicsContext *cc, uint8_t *data)
{
    int lsize    = cc->frame->linesize[0];
    uint8_t *buf = cc->frame->data[0];
    int color    = data[0] & 0x0F;
    int y;

    if (data[1] & 0x0F)
        return;

    /* top and bottom bands */
    memset(buf, color, CDG_BORDER_HEIGHT * lsize);
    memset(buf + (CDG_FULL_HEIGHT - CDG_BORDER_HEIGHT) * lsize,
           color, CDG_BORDER_HEIGHT * lsize);

    /* left and right columns of the rows in between */
    for (y = CDG_BORDER_HEIGHT; y < CDG_FULL_HEIGHT - CDG_BORDER_HEIGHT; y++) {
        memset(buf + y * lsize, color, CDG_BORDER_WIDTH);
        memset(buf + CDG_FULL_WIDTH - CDG_BORDER_WIDTH + y * lsize,
               color, CDG_BORDER_WIDTH);
    }
}
/* Decode 8 packed 12-bit RGB palette entries into the frame's palette.
 * 'low' selects entries 0-7, otherwise 8-15.  Each 4-bit channel is
 * scaled to 8 bits (*17); the transparency entry, if set, is kept with
 * alpha cleared after every store, as in the original. */
static void cdg_load_palette(CDGraphicsContext *cc, uint8_t *data, int low)
{
    uint32_t *pal = (uint32_t *) cc->frame->data[1];
    int base = low ? 0 : 8;
    int n;

    for (n = 0; n < 8; n++) {
        uint16_t packed = (data[2 * n] << 6) | (data[2 * n + 1] & 0x3F);
        uint8_t red     = ((packed >> 8) & 0x000F) * 17;
        uint8_t green   = ((packed >> 4) & 0x000F) * 17;
        uint8_t blue    = ( packed       & 0x000F) * 17;

        pal[base + n] = 0xFFU << 24 | red << 16 | green << 8 | blue;
        if (cc->transparency >= 0)
            pal[cc->transparency] &= 0xFFFFFF;
    }
    cc->frame->palette_has_changed = 1;
}
/* Draw one 6x12 two-colour tile at the position encoded in data[2]/[3]
 * (plus the current scroll offsets).  'b' selects XOR compositing.
 * Returns AVERROR(EINVAL) when the tile would fall outside the frame. */
static int cdg_tile_block(CDGraphicsContext *cc, uint8_t *data, int b)
{
    int stride   = cc->frame->linesize[0];
    uint8_t *buf = cc->frame->data[0];
    unsigned row = (data[2] & 0x1F) * CDG_TILE_HEIGHT + cc->vscroll;
    unsigned col = (data[3] & 0x3F) * CDG_TILE_WIDTH  + cc->hscroll;
    int color0   = data[0] & 0x0F;   /* bit clear in the bitmap */
    int color1   = data[1] & 0x0F;   /* bit set in the bitmap */
    int x, y;

    if (row > (CDG_FULL_HEIGHT - CDG_TILE_HEIGHT) ||
        col > (CDG_FULL_WIDTH  - CDG_TILE_WIDTH))
        return AVERROR(EINVAL);

    for (y = 0; y < CDG_TILE_HEIGHT; y++) {
        uint8_t rowbits = data[4 + y];
        for (x = 0; x < CDG_TILE_WIDTH; x++) {
            int pix = ((rowbits >> (5 - x)) & 0x01) ? color1 : color0;
            int idx = col + x + stride * (row + y);

            buf[idx] = b ? (uint8_t)(pix ^ buf[idx]) : (uint8_t)pix;
        }
    }

    return 0;
}
#define UP 2
#define DOWN 1
#define LEFT 2
#define RIGHT 1
/* Copy a w x h pixel rectangle from (in_tl_x, in_tl_y) in 'in' to
 * (out_tl_x, out_tl_y) in 'out'; both buffers share the same stride.
 * Rows must not overlap between the two buffers (memcpy semantics). */
static void cdg_copy_rect_buf(int out_tl_x, int out_tl_y, uint8_t *out,
                              int in_tl_x, int in_tl_y, uint8_t *in,
                              int w, int h, int stride)
{
    uint8_t *src = in  + in_tl_x  + in_tl_y  * stride;
    uint8_t *dst = out + out_tl_x + out_tl_y * stride;
    int row;

    for (row = 0; row < h; row++) {
        memcpy(dst, src, w);
        src += stride;
        dst += stride;
    }
}
/* Fill the w x h rectangle whose top-left corner is (tl_x, tl_y) with
 * the given colour index. */
static void cdg_fill_rect_preset(int tl_x, int tl_y, uint8_t *out,
                                 int color, int w, int h, int stride)
{
    uint8_t *row = out + tl_x + tl_y * stride;
    int remaining;

    for (remaining = h; remaining > 0; remaining--) {
        memset(row, color, w);
        row += stride;
    }
}
/* Reveal a scrolled-in rectangle: either wrap pixels around from the
 * opposite edge (roll != 0, scroll-copy) or paint the preset colour
 * (scroll-preset). */
static void cdg_fill_wrapper(int out_tl_x, int out_tl_y, uint8_t *out,
                             int in_tl_x, int in_tl_y, uint8_t *in,
                             int color, int w, int h, int stride, int roll)
{
    if (!roll) {
        cdg_fill_rect_preset(out_tl_x, out_tl_y, out, color, w, h, stride);
        return;
    }
    cdg_copy_rect_buf(out_tl_x, out_tl_y, out, in_tl_x, in_tl_y,
                      in, w, h, stride);
}
/* Execute a scroll-preset / scroll-copy instruction: shift the old frame
 * into new_frame by the computed (hinc, vinc) pixel delta, then fill the
 * revealed strips either with the preset colour or with wrapped-around
 * pixels (roll_over selects scroll-copy behaviour). */
static void cdg_scroll(CDGraphicsContext *cc, uint8_t *data,
                       AVFrame *new_frame, int roll_over)
{
    int color;
    int hscmd, h_off, hinc, vscmd, v_off, vinc;
    int y;
    int stride   = cc->frame->linesize[0];
    uint8_t *in  = cc->frame->data[0];
    uint8_t *out = new_frame->data[0];

    color = data[0] & 0x0F;
    /* Scroll command nibbles: bits 4-5 select direction, low bits give
     * the fractional (sub-tile) offset, clamped to the border size. */
    hscmd = (data[1] & 0x30) >> 4;
    vscmd = (data[2] & 0x30) >> 4;

    h_off = FFMIN(data[1] & 0x07, CDG_BORDER_WIDTH  - 1);
    v_off = FFMIN(data[2] & 0x0F, CDG_BORDER_HEIGHT - 1);

    /// find the difference and save the offset for cdg_tile_block usage
    hinc = h_off - cc->hscroll;
    vinc = v_off - cc->vscroll;
    cc->hscroll = h_off;
    cc->vscroll = v_off;

    /* Directional commands add a whole tile (6x12) to the delta. */
    if (vscmd == UP)
        vinc -= 12;
    if (vscmd == DOWN)
        vinc += 12;
    if (hscmd == LEFT)
        hinc -= 6;
    if (hscmd == RIGHT)
        hinc += 6;

    if (!hinc && !vinc)
        return;

    memcpy(new_frame->data[1], cc->frame->data[1], CDG_PALETTE_SIZE * 4);

    /* Shift the surviving rows/columns of the old image into place. */
    for (y = FFMAX(0, vinc); y < FFMIN(CDG_FULL_HEIGHT + vinc, CDG_FULL_HEIGHT); y++)
        memcpy(out + FFMAX(0, hinc) + stride * y,
               in + FFMAX(0, hinc) - hinc + (y - vinc) * stride,
               FFMIN(stride + hinc, stride));

    /* Fill the strip revealed by the vertical shift... */
    if (vinc > 0)
        cdg_fill_wrapper(0, 0, out,
                         0, CDG_FULL_HEIGHT - vinc, in, color,
                         stride, vinc, stride, roll_over);
    else if (vinc < 0)
        cdg_fill_wrapper(0, CDG_FULL_HEIGHT + vinc, out,
                         0, 0, in, color,
                         stride, -1 * vinc, stride, roll_over);

    /* ...and the strip revealed by the horizontal shift. */
    if (hinc > 0)
        cdg_fill_wrapper(0, 0, out,
                         CDG_FULL_WIDTH - hinc, 0, in, color,
                         hinc, CDG_FULL_HEIGHT, stride, roll_over);
    else if (hinc < 0)
        cdg_fill_wrapper(CDG_FULL_WIDTH + hinc, 0, out,
                         0, 0, in, color,
                         -1 * hinc, CDG_FULL_HEIGHT, stride, roll_over);
}
/* Decode one 24-byte CDG packet.  Most instructions mutate the persistent
 * frame (cc->frame) in place; scroll instructions render into a freshly
 * acquired frame which then replaces the persistent one.  On output, a
 * reference to the current state of the image is returned via 'data'. */
static int cdg_decode_frame(AVCodecContext *avctx,
                            void *data, int *got_frame, AVPacket *avpkt)
{
    GetByteContext gb;
    int buf_size = avpkt->size;
    int ret;
    uint8_t command, inst;
    uint8_t cdg_data[CDG_DATA_SIZE] = {0};
    AVFrame *frame = data;
    CDGraphicsContext *cc = avctx->priv_data;

    if (buf_size < CDG_MINIMUM_PKT_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "buffer too small for decoder\n");
        return AVERROR(EINVAL);
    }
    if (buf_size > CDG_HEADER_SIZE + CDG_DATA_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "buffer too big for decoder\n");
        return AVERROR(EINVAL);
    }

    bytestream2_init(&gb, avpkt->data, avpkt->size);

    if ((ret = ff_reget_buffer(avctx, cc->frame)) < 0)
        return ret;
    /* First packet: clear the image and the palette. */
    if (!avctx->frame_number) {
        memset(cc->frame->data[0], 0, cc->frame->linesize[0] * avctx->height);
        memset(cc->frame->data[1], 0, AVPALETTE_SIZE);
    }

    /* Packet layout: command byte, instruction byte, 2 parity/skip bytes,
     * then up to CDG_DATA_SIZE payload bytes (zero-padded if short). */
    command = bytestream2_get_byte(&gb);
    inst    = bytestream2_get_byte(&gb);
    inst    &= CDG_MASK;
    bytestream2_skip(&gb, 2);
    bytestream2_get_buffer(&gb, cdg_data, sizeof(cdg_data));

    if ((command & CDG_MASK) == CDG_COMMAND) {
        switch (inst) {
        case CDG_INST_MEMORY_PRESET:
            /* Only the repeat-0 packet actually clears the screen. */
            if (!(cdg_data[1] & 0x0F))
                memset(cc->frame->data[0], cdg_data[0] & 0x0F,
                       cc->frame->linesize[0] * CDG_FULL_HEIGHT);
            break;
        case CDG_INST_LOAD_PAL_LO:
        case CDG_INST_LOAD_PAL_HIGH:
            if (buf_size - CDG_HEADER_SIZE < CDG_DATA_SIZE) {
                av_log(avctx, AV_LOG_ERROR, "buffer too small for loading palette\n");
                return AVERROR(EINVAL);
            }

            cdg_load_palette(cc, cdg_data, inst == CDG_INST_LOAD_PAL_LO);
            break;
        case CDG_INST_BORDER_PRESET:
            cdg_border_preset(cc, cdg_data);
            break;
        case CDG_INST_TILE_BLOCK_XOR:
        case CDG_INST_TILE_BLOCK:
            if (buf_size - CDG_HEADER_SIZE < CDG_DATA_SIZE) {
                av_log(avctx, AV_LOG_ERROR, "buffer too small for drawing tile\n");
                return AVERROR(EINVAL);
            }

            ret = cdg_tile_block(cc, cdg_data, inst == CDG_INST_TILE_BLOCK_XOR);
            if (ret) {
                av_log(avctx, AV_LOG_ERROR, "tile is out of range\n");
                return ret;
            }
            break;
        case CDG_INST_SCROLL_PRESET:
        case CDG_INST_SCROLL_COPY:
            if (buf_size - CDG_HEADER_SIZE < CDG_MINIMUM_SCROLL_SIZE) {
                av_log(avctx, AV_LOG_ERROR, "buffer too small for scrolling\n");
                return AVERROR(EINVAL);
            }

            /* Scroll renders into the caller's frame, which then becomes
             * the new persistent frame via unref + ref. */
            if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
                return ret;

            cdg_scroll(cc, cdg_data, frame, inst == CDG_INST_SCROLL_COPY);
            av_frame_unref(cc->frame);
            ret = av_frame_ref(cc->frame, frame);
            if (ret < 0)
                return ret;
            break;
        case CDG_INST_TRANSPARENT_COL:
            cc->transparency = cdg_data[0] & 0xF;
            break;
        default:
            break;
        }

        /* Non-scroll instructions: hand out a reference to the persistent
         * frame (scroll already populated 'frame' above). */
        if (!frame->data[0]) {
            ret = av_frame_ref(frame, cc->frame);
            if (ret < 0)
                return ret;
        }

        *got_frame = 1;
    } else {
        *got_frame = 0;
    }

    return avpkt->size;
}
/* Release the persistent frame on decoder close. */
static av_cold int cdg_decode_end(AVCodecContext *avctx)
{
    CDGraphicsContext *ctx = avctx->priv_data;

    av_frame_free(&ctx->frame);

    return 0;
}
/* Codec registration: CD+G decoder entry points and capabilities.
 * AV_CODEC_CAP_DR1 advertises support for user-supplied get_buffer. */
AVCodec ff_cdgraphics_decoder = {
    .name           = "cdgraphics",
    .long_name      = NULL_IF_CONFIG_SMALL("CD Graphics video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_CDGRAPHICS,
    .priv_data_size = sizeof(CDGraphicsContext),
    .init           = cdg_decode_init,
    .close          = cdg_decode_end,
    .decode         = cdg_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
};
|
115442.c | /* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE390_Error_Without_Action__realloc_07.c
Label Definition File: CWE390_Error_Without_Action.label.xml
Template File: point-flaw-07.tmpl.c
*/
/*
* @description
* CWE: 390 Detection of Error Condition Without Action
* Sinks: realloc
* GoodSink: Check to see if realloc() failed, and handle errors properly
* BadSink : Check to see if realloc() failed, but fail to handle errors
* Flow Variant: 07 Control flow: if(static_five==5) and if(static_five!=5)
*
* */
#include "std_testcase.h"
/* The variable below is not declared "const", but is never assigned
any other value so a tool should be able to identify that reads of
this will always give its initialized value. */
static int static_five = 5;
#ifndef OMITBAD
/* Bad variant (taken branch: static_five==5): realloc() failure is
 * detected but ignored, so the following strcpy() dereferences NULL when
 * allocation fails.  This is the intentional CWE-390 flaw the testcase
 * exists to exhibit — it must not be "fixed".  The else branch is dead
 * code carrying the corrected pattern for contrast. */
void CWE390_Error_Without_Action__realloc_07_bad()
{
    if(static_five==5)
    {
        {
            char * data = NULL;
            data = (char *)realloc(data, 100);
            /* FLAW: Check to see if realloc() failed, but do nothing about it */
            if (data == NULL)
            {
                /* do nothing */
            }
            strcpy(data, "BadSinkBody");
            printLine(data);
            if (data != NULL)
            {
                free(data);
            }
        }
    }
    else
    {
        /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
        {
            char * data = NULL;
            data = (char *)realloc(data, 100);
            /* FIX: Check to see if realloc() failed and handle errors properly */
            if (data == NULL)
            {
                printLine("realloc() failed");
                exit(1);
            }
            strcpy(data, "BadSinkBody");
            printLine(data);
            if (data != NULL)
            {
                free(data);
            }
        }
    }
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* good1() uses if(static_five!=5) instead of if(static_five==5) */
/* good1(): inverts the condition so the live branch (static_five==5
 * makes the condition false) runs the corrected pattern; the flawed
 * pattern is confined to dead code. */
static void good1()
{
    if(static_five!=5)
    {
        /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
        {
            char * data = NULL;
            data = (char *)realloc(data, 100);
            /* FLAW: Check to see if realloc() failed, but do nothing about it */
            if (data == NULL)
            {
                /* do nothing */
            }
            strcpy(data, "BadSinkBody");
            printLine(data);
            if (data != NULL)
            {
                free(data);
            }
        }
    }
    else
    {
        {
            char * data = NULL;
            data = (char *)realloc(data, 100);
            /* FIX: Check to see if realloc() failed and handle errors properly */
            if (data == NULL)
            {
                printLine("realloc() failed");
                exit(1);
            }
            strcpy(data, "BadSinkBody");
            printLine(data);
            if (data != NULL)
            {
                free(data);
            }
        }
    }
}
/* good2() reverses the bodies in the if statement */
/* good2(): keeps the original condition but swaps the branch bodies, so
 * the live branch (static_five==5) holds the corrected pattern and the
 * flawed pattern is dead code. */
static void good2()
{
    if(static_five==5)
    {
        {
            char * data = NULL;
            data = (char *)realloc(data, 100);
            /* FIX: Check to see if realloc() failed and handle errors properly */
            if (data == NULL)
            {
                printLine("realloc() failed");
                exit(1);
            }
            strcpy(data, "BadSinkBody");
            printLine(data);
            if (data != NULL)
            {
                free(data);
            }
        }
    }
    else
    {
        /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
        {
            char * data = NULL;
            data = (char *)realloc(data, 100);
            /* FLAW: Check to see if realloc() failed, but do nothing about it */
            if (data == NULL)
            {
                /* do nothing */
            }
            strcpy(data, "BadSinkBody");
            printLine(data);
            if (data != NULL)
            {
                free(data);
            }
        }
    }
}
/* Entry point for the good variants; runs both control-flow mirrors. */
void CWE390_Error_Without_Action__realloc_07_good()
{
    good1();
    good2();
}
#endif /* OMITGOOD */
/* Below is the main(). It is only used when building this testcase on
its own for testing or for building a binary to use in testing binary
analysis tools. It is not used when compiling all the testcases as one
application, which is how source code analysis tools are tested. */
#ifdef INCLUDEMAIN
/* Standalone driver, compiled only when INCLUDEMAIN is defined (i.e.
 * building this testcase on its own); the combined-suite build supplies
 * its own main(). */
int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    CWE390_Error_Without_Action__realloc_07_good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    CWE390_Error_Without_Action__realloc_07_bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}
#endif
|
393798.c | /*-------------------------------------------------------------------------
*
* nodeResult.c
* support for constant nodes needing special code.
*
* DESCRIPTION
*
* Result nodes are used in queries where no relations are scanned.
* Examples of such queries are:
*
* select 1 * 2
*
* insert into emp values ('mike', 15000)
*
* (Remember that in an INSERT or UPDATE, we need a plan tree that
* generates the new rows.)
*
* Result nodes are also used to optimise queries with constant
* qualifications (ie, quals that do not depend on the scanned data),
* such as:
*
* select * from emp where 2 > 1
*
* In this case, the plan generated is
*
* Result (with 2 > 1 qual)
* /
* SeqScan (emp.*)
*
* At runtime, the Result node evaluates the constant qual once,
* which is shown by EXPLAIN as a One-Time Filter. If it's
* false, we can return an empty result set without running the
* controlled plan at all. If it's true, we run the controlled
* plan normally and pass back the results.
*
*
* Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/backend/executor/nodeResult.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "executor/executor.h"
#include "executor/nodeResult.h"
#include "utils/memutils.h"
/* ----------------------------------------------------------------
* ExecResult(node)
*
* returns the tuples from the outer plan which satisfy the
* qualification clause. Since result nodes with right
* subtrees are never planned, we ignore the right subtree
* entirely (for now).. -cim 10/7/89
*
* The qualification containing only constant clauses are
* checked first before any processing is done. It always returns
* 'nil' if the constant qualification is not satisfied.
* ----------------------------------------------------------------
*/
TupleTableSlot *
ExecResult(ResultState *node)
{
    ExprContext *econtext = node->ps.ps_ExprContext;
    PlanState  *outerPlan;
    TupleTableSlot *outerTupleSlot;

    /*
     * Evaluate the one-time (constant) qualification the first time we are
     * called.  If it fails, mark the node done: no rows will ever be
     * produced and the child plan is never run.
     */
    if (node->rs_checkqual)
    {
        bool        qualResult = ExecQual((List *) node->resconstantqual,
                                          econtext,
                                          false);

        node->rs_checkqual = false;
        if (!qualResult)
        {
            node->rs_done = true;
            return NULL;
        }
    }

    /*
     * Free expression-evaluation storage left over from the previous tuple
     * cycle.
     */
    ResetExprContext(econtext);

    /*
     * rs_done means we either already returned the single constant tuple on
     * an earlier call, or the one-time qual failed; either way we are
     * finished.
     */
    if (node->rs_done)
        return NULL;

    outerPlan = outerPlanState(node);
    if (outerPlan != NULL)
    {
        /* Pull the next input row; stop once the child plan is exhausted. */
        outerTupleSlot = ExecProcNode(outerPlan);

        if (TupIsNull(outerTupleSlot))
            return NULL;

        /* Projection expressions read the input row as varno OUTER. */
        econtext->ecxt_outertuple = outerTupleSlot;
    }
    else
    {
        /*
         * No child plan: the result comes purely from a constant target
         * list, so emit it exactly once.
         */
        node->rs_done = true;
    }

    /* Form and return the projected result tuple. */
    return ExecProject(node->ps.ps_ProjInfo);
}
/* ----------------------------------------------------------------
 *		ExecResultMarkPos
 *
 *		Forward a mark-position request to the outer plan, if any;
 *		a Result node itself keeps no scan position.
 * ----------------------------------------------------------------
 */
void
ExecResultMarkPos(ResultState *node)
{
	PlanState  *child = outerPlanState(node);

	if (child == NULL)
		elog(DEBUG2, "Result nodes do not support mark/restore");
	else
		ExecMarkPos(child);
}
/* ----------------------------------------------------------------
 *		ExecResultRestrPos
 *
 *		Forward a restore-position request to the outer plan, if any;
 *		without a child there is nothing to restore, which is an error.
 * ----------------------------------------------------------------
 */
void
ExecResultRestrPos(ResultState *node)
{
	PlanState  *child = outerPlanState(node);

	if (child == NULL)
		elog(ERROR, "Result nodes do not support mark/restore");
	else
		ExecRestrPos(child);
}
/* ----------------------------------------------------------------
 *		ExecInitResult
 *
 *		Build the run-time state (ResultState) for a Result plan node
 *		and recursively initialize its outer child, if present.
 * ----------------------------------------------------------------
 */
ResultState *
ExecInitResult(Result *node, EState *estate, int eflags)
{
	ResultState *state;

	/*
	 * Mark/restore and backward scan are only supported when there is an
	 * outer subplan to delegate them to.
	 */
	Assert(!(eflags & (EXEC_FLAG_MARK | EXEC_FLAG_BACKWARD)) ||
		   outerPlan(node) != NULL);

	/* Allocate the state node and hook it to the plan and estate. */
	state = makeNode(ResultState);
	state->ps.plan = (Plan *) node;
	state->ps.state = estate;
	state->rs_done = false;
	/* The constant qual must be checked (once) iff one was supplied. */
	state->rs_checkqual = (node->resconstantqual != NULL);

	/* Create an expression context for per-tuple evaluation. */
	ExecAssignExprContext(estate, &state->ps);

	/* Set up the result tuple slot. */
	ExecInitResultTupleSlot(estate, &state->ps);

	/* Initialize target list, qual, and constant-qual expression trees. */
	state->ps.targetlist = (List *)
		ExecInitExpr((Expr *) node->plan.targetlist,
					 (PlanState *) state);
	state->ps.qual = (List *)
		ExecInitExpr((Expr *) node->plan.qual,
					 (PlanState *) state);
	state->resconstantqual = ExecInitExpr((Expr *) node->resconstantqual,
										  (PlanState *) state);

	/* Recurse into the outer subplan (it may be absent). */
	outerPlanState(state) = ExecInitNode(outerPlan(node), estate, eflags, NULL);

	/* Result nodes never carry an inner plan. */
	Assert(innerPlan(node) == NULL);

	/* Finally, fix the result tuple type and projection machinery. */
	ExecAssignResultTypeFromTL(&state->ps);
	ExecAssignProjectionInfo(&state->ps, NULL);

	return state;
}
/* ----------------------------------------------------------------
 *		ExecEndResult
 *
 *		Release resources held by a Result node and shut down its
 *		subplan tree.
 * ----------------------------------------------------------------
 */
void
ExecEndResult(ResultState *node)
{
	/* Release the expression context. */
	ExecFreeExprContext(&node->ps);

	/* Drop any tuple still held in the result slot. */
	ExecClearTuple(node->ps.ps_ResultTupleSlot);

	/* Recursively shut down the outer subplan. */
	ExecEndNode(outerPlanState(node));
}
/*
 * ExecReScanResult
 *
 * Reset the node so the next ExecResult() call starts a fresh scan;
 * the constant qual (if any) must be evaluated again.
 */
void
ExecReScanResult(ResultState *node)
{
	node->rs_done = false;
	node->rs_checkqual = (node->resconstantqual != NULL);

	/*
	 * Rescan the child now unless its chgParam is set, in which case the
	 * first ExecProcNode call will rescan it for us.
	 */
	if (node->ps.lefttree != NULL &&
		node->ps.lefttree->chgParam == NULL)
		ExecReScan(node->ps.lefttree);
}
|