342550.c
/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <config.h> #include "jsonrpc-server.h" #include <errno.h> #include "bitmap.h" #include "column.h" #include "openvswitch/dynamic-string.h" #include "monitor.h" #include "openvswitch/json.h" #include "jsonrpc.h" #include "ovsdb-error.h" #include "ovsdb-parser.h" #include "ovsdb.h" #include "condition.h" #include "openvswitch/poll-loop.h" #include "reconnect.h" #include "row.h" #include "server.h" #include "simap.h" #include "storage.h" #include "stream.h" #include "table.h" #include "timeval.h" #include "transaction.h" #include "trigger.h" #include "util.h" #include "openvswitch/vlog.h" VLOG_DEFINE_THIS_MODULE(ovsdb_jsonrpc_server); struct ovsdb_jsonrpc_remote; struct ovsdb_jsonrpc_session; /* Set false to defeature monitor_cond, causing jsonrpc to respond to * monitor_cond method with an error. */ static bool monitor_cond_enable__ = true; /* Message rate-limiting. */ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); /* Sessions. */ static struct ovsdb_jsonrpc_session *ovsdb_jsonrpc_session_create( struct ovsdb_jsonrpc_remote *, struct jsonrpc_session *, bool); static void ovsdb_jsonrpc_session_preremove_db(struct ovsdb_jsonrpc_remote *, struct ovsdb *); static void ovsdb_jsonrpc_session_run_all(struct ovsdb_jsonrpc_remote *); static void ovsdb_jsonrpc_session_wait_all(struct ovsdb_jsonrpc_remote *); static void ovsdb_jsonrpc_session_get_memory_usage_all( const struct ovsdb_jsonrpc_remote *, struct simap *usage); static void ovsdb_jsonrpc_session_close_all(struct ovsdb_jsonrpc_remote *); static void ovsdb_jsonrpc_session_reconnect_all(struct ovsdb_jsonrpc_remote *, bool force, const char *comment); static void ovsdb_jsonrpc_session_set_all_options( struct ovsdb_jsonrpc_remote *, const struct ovsdb_jsonrpc_options *); static bool ovsdb_jsonrpc_active_session_get_status( const struct ovsdb_jsonrpc_remote *, struct ovsdb_jsonrpc_remote_status *); static void ovsdb_jsonrpc_session_get_status( const struct ovsdb_jsonrpc_session *, struct ovsdb_jsonrpc_remote_status *); static void ovsdb_jsonrpc_session_unlock_all(struct ovsdb_jsonrpc_session *); static void ovsdb_jsonrpc_session_unlock__(struct ovsdb_lock_waiter *); static void ovsdb_jsonrpc_session_send(struct ovsdb_jsonrpc_session *, struct jsonrpc_msg *); /* Triggers. */ static void ovsdb_jsonrpc_trigger_create(struct ovsdb_jsonrpc_session *, struct ovsdb *, struct jsonrpc_msg *request); static struct ovsdb_jsonrpc_trigger *ovsdb_jsonrpc_trigger_find( struct ovsdb_jsonrpc_session *, const struct json *id, size_t hash); static void ovsdb_jsonrpc_trigger_complete(struct ovsdb_jsonrpc_trigger *); static void ovsdb_jsonrpc_trigger_preremove_db(struct ovsdb_jsonrpc_session *, struct ovsdb *); static void ovsdb_jsonrpc_trigger_complete_all(struct ovsdb_jsonrpc_session *); static void ovsdb_jsonrpc_trigger_complete_done( struct ovsdb_jsonrpc_session *); /* Monitors. 
*/ static struct jsonrpc_msg *ovsdb_jsonrpc_monitor_create( struct ovsdb_jsonrpc_session *, struct ovsdb *, struct json *params, enum ovsdb_monitor_version, const struct json *request_id); static struct jsonrpc_msg *ovsdb_jsonrpc_monitor_cond_change( struct ovsdb_jsonrpc_session *s, struct json *params, const struct json *request_id); static struct jsonrpc_msg *ovsdb_jsonrpc_monitor_cancel( struct ovsdb_jsonrpc_session *, struct json_array *params, const struct json *request_id); static void ovsdb_jsonrpc_monitor_preremove_db(struct ovsdb_jsonrpc_session *, struct ovsdb *); static void ovsdb_jsonrpc_monitor_remove_all(struct ovsdb_jsonrpc_session *); static void ovsdb_jsonrpc_monitor_flush_all(struct ovsdb_jsonrpc_session *); static bool ovsdb_jsonrpc_monitor_needs_flush(struct ovsdb_jsonrpc_session *); static struct json *ovsdb_jsonrpc_monitor_compose_update( struct ovsdb_jsonrpc_monitor *monitor, bool initial); static struct jsonrpc_msg * ovsdb_jsonrpc_create_notify( const struct ovsdb_jsonrpc_monitor *m, struct json *params); /* JSON-RPC database server. */ struct ovsdb_jsonrpc_server { struct ovsdb_server up; unsigned int n_sessions; bool read_only; /* This server is does not accept any transactions that can modify the database. */ struct shash remotes; /* Contains "struct ovsdb_jsonrpc_remote *"s. */ }; /* A configured remote. This is either a passive stream listener plus a list * of the currently connected sessions, or a list of exactly one active * session. */ struct ovsdb_jsonrpc_remote { struct ovsdb_jsonrpc_server *server; struct pstream *listener; /* Listener, if passive. */ struct ovs_list sessions; /* List of "struct ovsdb_jsonrpc_session"s. */ uint8_t dscp; bool read_only; char *role; }; static struct ovsdb_jsonrpc_remote *ovsdb_jsonrpc_server_add_remote( struct ovsdb_jsonrpc_server *, const char *name, const struct ovsdb_jsonrpc_options *options ); static void ovsdb_jsonrpc_server_del_remote(struct shash_node *); /* Creates and returns a new server to provide JSON-RPC access to an OVSDB. * * The caller must call ovsdb_jsonrpc_server_add_db() for each database to * which 'server' should provide access. */ struct ovsdb_jsonrpc_server * ovsdb_jsonrpc_server_create(bool read_only) { struct ovsdb_jsonrpc_server *server = xzalloc(sizeof *server); ovsdb_server_init(&server->up); shash_init(&server->remotes); server->read_only = read_only; return server; } /* Adds 'db' to the set of databases served out by 'svr'. Returns true if * successful, false if 'db''s name is the same as some database already in * 'server'. */ bool ovsdb_jsonrpc_server_add_db(struct ovsdb_jsonrpc_server *svr, struct ovsdb *db) { ovsdb_jsonrpc_server_reconnect( svr, false, xasprintf("adding %s database", db->name)); return ovsdb_server_add_db(&svr->up, db); } /* Removes 'db' from the set of databases served out by 'svr'. * * 'comment' should be a human-readable reason for removing the database, for * use in log messages, or NULL to suppress logging. This function frees * it. 
*/ void ovsdb_jsonrpc_server_remove_db(struct ovsdb_jsonrpc_server *svr, struct ovsdb *db, char *comment) { struct shash_node *node; SHASH_FOR_EACH (node, &svr->remotes) { struct ovsdb_jsonrpc_remote *remote = node->data; ovsdb_jsonrpc_session_preremove_db(remote, db); } ovsdb_jsonrpc_server_reconnect(svr, false, comment); ovsdb_server_remove_db(&svr->up, db); } void ovsdb_jsonrpc_server_destroy(struct ovsdb_jsonrpc_server *svr) { struct shash_node *node, *next; SHASH_FOR_EACH_SAFE (node, next, &svr->remotes) { ovsdb_jsonrpc_server_del_remote(node); } shash_destroy(&svr->remotes); ovsdb_server_destroy(&svr->up); free(svr); } struct ovsdb_jsonrpc_options * ovsdb_jsonrpc_default_options(const char *target) { struct ovsdb_jsonrpc_options *options = xzalloc(sizeof *options); options->max_backoff = RECONNECT_DEFAULT_MAX_BACKOFF; options->probe_interval = (stream_or_pstream_needs_probes(target) ? RECONNECT_DEFAULT_PROBE_INTERVAL : 0); return options; } /* Sets 'svr''s current set of remotes to the names in 'new_remotes', with * options in the struct ovsdb_jsonrpc_options supplied as the data values. * * A remote is an active or passive stream connection method, e.g. "pssl:" or * "tcp:1.2.3.4". */ void ovsdb_jsonrpc_server_set_remotes(struct ovsdb_jsonrpc_server *svr, const struct shash *new_remotes) { struct shash_node *node, *next; SHASH_FOR_EACH_SAFE (node, next, &svr->remotes) { struct ovsdb_jsonrpc_remote *remote = node->data; struct ovsdb_jsonrpc_options *options = shash_find_data(new_remotes, node->name); if (!options) { VLOG_INFO("%s: remote deconfigured", node->name); ovsdb_jsonrpc_server_del_remote(node); } else if (options->dscp != remote->dscp) { ovsdb_jsonrpc_server_del_remote(node); } } SHASH_FOR_EACH (node, new_remotes) { const struct ovsdb_jsonrpc_options *options = node->data; struct ovsdb_jsonrpc_remote *remote; remote = shash_find_data(&svr->remotes, node->name); if (!remote) { remote = ovsdb_jsonrpc_server_add_remote(svr, node->name, options); if (!remote) { continue; } } ovsdb_jsonrpc_session_set_all_options(remote, options); } } static struct ovsdb_jsonrpc_remote * ovsdb_jsonrpc_server_add_remote(struct ovsdb_jsonrpc_server *svr, const char *name, const struct ovsdb_jsonrpc_options *options) { struct ovsdb_jsonrpc_remote *remote; struct pstream *listener; int error; error = jsonrpc_pstream_open(name, &listener, options->dscp); if (error && error != EAFNOSUPPORT) { VLOG_ERR_RL(&rl, "%s: listen failed: %s", name, ovs_strerror(error)); return NULL; } remote = xmalloc(sizeof *remote); remote->server = svr; remote->listener = listener; ovs_list_init(&remote->sessions); remote->dscp = options->dscp; remote->read_only = options->read_only; remote->role = nullable_xstrdup(options->role); shash_add(&svr->remotes, name, remote); if (!listener) { ovsdb_jsonrpc_session_create(remote, jsonrpc_session_open(name, true), svr->read_only || remote->read_only); } return remote; } static void ovsdb_jsonrpc_server_del_remote(struct shash_node *node) { struct ovsdb_jsonrpc_remote *remote = node->data; ovsdb_jsonrpc_session_close_all(remote); pstream_close(remote->listener); shash_delete(&remote->server->remotes, node); free(remote->role); free(remote); } /* Stores status information for the remote named 'target', which should have * been configured on 'svr' with a call to ovsdb_jsonrpc_server_set_remotes(), * into '*status'. On success returns true, on failure (if 'svr' doesn't have * a remote named 'target' or if that remote is an outbound remote that has no * active connections) returns false. 
On failure, 'status' will be zeroed. */ bool ovsdb_jsonrpc_server_get_remote_status( const struct ovsdb_jsonrpc_server *svr, const char *target, struct ovsdb_jsonrpc_remote_status *status) { const struct ovsdb_jsonrpc_remote *remote; memset(status, 0, sizeof *status); remote = shash_find_data(&svr->remotes, target); if (!remote) { return false; } if (remote->listener) { status->bound_port = pstream_get_bound_port(remote->listener); status->is_connected = !ovs_list_is_empty(&remote->sessions); status->n_connections = ovs_list_size(&remote->sessions); return true; } return ovsdb_jsonrpc_active_session_get_status(remote, status); } void ovsdb_jsonrpc_server_free_remote_status( struct ovsdb_jsonrpc_remote_status *status) { free(status->locks_held); free(status->locks_waiting); free(status->locks_lost); } /* Makes all of the JSON-RPC sessions managed by 'svr' disconnect. (They * will then generally reconnect.). Uses 'comment' as a human-readable comment * for logging (it may be NULL to suppress logging). Frees 'comment'. * * If 'force' is true, disconnects all sessions. Otherwise, disconnects only * sesions that aren't database change aware. */ void ovsdb_jsonrpc_server_reconnect(struct ovsdb_jsonrpc_server *svr, bool force, char *comment) { struct shash_node *node; SHASH_FOR_EACH (node, &svr->remotes) { struct ovsdb_jsonrpc_remote *remote = node->data; ovsdb_jsonrpc_session_reconnect_all(remote, force, comment); } free(comment); } void ovsdb_jsonrpc_server_set_read_only(struct ovsdb_jsonrpc_server *svr, bool read_only) { if (svr->read_only != read_only) { svr->read_only = read_only; ovsdb_jsonrpc_server_reconnect(svr, true, xstrdup(read_only ? "making server read-only" : "making server read/write")); } } void ovsdb_jsonrpc_server_run(struct ovsdb_jsonrpc_server *svr) { struct shash_node *node; SHASH_FOR_EACH (node, &svr->remotes) { struct ovsdb_jsonrpc_remote *remote = node->data; if (remote->listener) { struct stream *stream; int error; error = pstream_accept(remote->listener, &stream); if (!error) { struct jsonrpc_session *js; js = jsonrpc_session_open_unreliably(jsonrpc_open(stream), remote->dscp); ovsdb_jsonrpc_session_create(remote, js, svr->read_only || remote->read_only); } else if (error != EAGAIN) { VLOG_WARN_RL(&rl, "%s: accept failed: %s", pstream_get_name(remote->listener), ovs_strerror(error)); } } ovsdb_jsonrpc_session_run_all(remote); } } void ovsdb_jsonrpc_server_wait(struct ovsdb_jsonrpc_server *svr) { struct shash_node *node; SHASH_FOR_EACH (node, &svr->remotes) { struct ovsdb_jsonrpc_remote *remote = node->data; if (remote->listener) { pstream_wait(remote->listener); } ovsdb_jsonrpc_session_wait_all(remote); } } /* Adds some memory usage statistics for 'svr' into 'usage', for use with * memory_report(). */ void ovsdb_jsonrpc_server_get_memory_usage(const struct ovsdb_jsonrpc_server *svr, struct simap *usage) { struct shash_node *node; simap_increase(usage, "sessions", svr->n_sessions); SHASH_FOR_EACH (node, &svr->remotes) { struct ovsdb_jsonrpc_remote *remote = node->data; ovsdb_jsonrpc_session_get_memory_usage_all(remote, usage); } } /* JSON-RPC database server session. */ struct ovsdb_jsonrpc_session { struct ovs_list node; /* Element in remote's sessions list. */ struct ovsdb_session up; struct ovsdb_jsonrpc_remote *remote; /* RFC 7047 does not contemplate how to alert clients to changes to the set * of databases, e.g. databases that are added or removed while the * database server is running. 
Traditionally, ovsdb-server disconnects all * of its clients when this happens; a well-written client will reassess * what is available from the server upon reconnection. * * OVS 2.9 introduces a way for clients to monitor changes to the databases * being served, through the Database table in the _Server database that * OVSDB adds in this version. ovsdb-server suppresses the connection * close for clients that identify themselves as taking advantage of this * mechanism. When this member is true, it indicates that the client * requested such suppression. */ bool db_change_aware; /* Triggers. */ struct hmap triggers; /* Hmap of "struct ovsdb_jsonrpc_trigger"s. */ /* Monitors. */ struct hmap monitors; /* Hmap of "struct ovsdb_jsonrpc_monitor"s. */ /* Network connectivity. */ struct jsonrpc_session *js; /* JSON-RPC session. */ unsigned int js_seqno; /* Last jsonrpc_session_get_seqno() value. */ /* Read only. */ bool read_only; /* When true, not allow to modify the database. */ }; static void ovsdb_jsonrpc_session_close(struct ovsdb_jsonrpc_session *); static int ovsdb_jsonrpc_session_run(struct ovsdb_jsonrpc_session *); static void ovsdb_jsonrpc_session_wait(struct ovsdb_jsonrpc_session *); static void ovsdb_jsonrpc_session_get_memory_usage( const struct ovsdb_jsonrpc_session *, struct simap *usage); static void ovsdb_jsonrpc_session_got_request(struct ovsdb_jsonrpc_session *, struct jsonrpc_msg *); static void ovsdb_jsonrpc_session_got_notify(struct ovsdb_jsonrpc_session *, struct jsonrpc_msg *); static struct ovsdb_jsonrpc_session * ovsdb_jsonrpc_session_create(struct ovsdb_jsonrpc_remote *remote, struct jsonrpc_session *js, bool read_only) { struct ovsdb_jsonrpc_session *s; s = xzalloc(sizeof *s); ovsdb_session_init(&s->up, &remote->server->up); s->remote = remote; ovs_list_push_back(&remote->sessions, &s->node); hmap_init(&s->triggers); hmap_init(&s->monitors); s->js = js; s->js_seqno = jsonrpc_session_get_seqno(js); s->read_only = read_only; remote->server->n_sessions++; return s; } /* Database 'db' is about to be removed from the database server. To prepare, * this function removes all references to 'db' from 'remote'. 
*/ static void ovsdb_jsonrpc_session_preremove_db(struct ovsdb_jsonrpc_remote *remote, struct ovsdb *db) { struct ovsdb_jsonrpc_session *s; LIST_FOR_EACH (s, node, &remote->sessions) { ovsdb_jsonrpc_monitor_preremove_db(s, db); ovsdb_jsonrpc_trigger_preremove_db(s, db); } } static void ovsdb_jsonrpc_session_close(struct ovsdb_jsonrpc_session *s) { ovsdb_jsonrpc_monitor_remove_all(s); ovsdb_jsonrpc_session_unlock_all(s); ovsdb_jsonrpc_trigger_complete_all(s); hmap_destroy(&s->monitors); hmap_destroy(&s->triggers); jsonrpc_session_close(s->js); ovs_list_remove(&s->node); s->remote->server->n_sessions--; ovsdb_session_destroy(&s->up); free(s); } static int ovsdb_jsonrpc_session_run(struct ovsdb_jsonrpc_session *s) { jsonrpc_session_run(s->js); if (s->js_seqno != jsonrpc_session_get_seqno(s->js)) { s->js_seqno = jsonrpc_session_get_seqno(s->js); ovsdb_jsonrpc_trigger_complete_all(s); ovsdb_jsonrpc_monitor_remove_all(s); ovsdb_jsonrpc_session_unlock_all(s); } ovsdb_jsonrpc_trigger_complete_done(s); if (!jsonrpc_session_get_backlog(s->js)) { struct jsonrpc_msg *msg; ovsdb_jsonrpc_monitor_flush_all(s); msg = jsonrpc_session_recv(s->js); if (msg) { if (msg->type == JSONRPC_REQUEST) { ovsdb_jsonrpc_session_got_request(s, msg); } else if (msg->type == JSONRPC_NOTIFY) { ovsdb_jsonrpc_session_got_notify(s, msg); } else { VLOG_WARN("%s: received unexpected %s message", jsonrpc_session_get_name(s->js), jsonrpc_msg_type_to_string(msg->type)); jsonrpc_session_force_reconnect(s->js); jsonrpc_msg_destroy(msg); } } } return jsonrpc_session_is_alive(s->js) ? 0 : ETIMEDOUT; } static void ovsdb_jsonrpc_session_set_options(struct ovsdb_jsonrpc_session *session, const struct ovsdb_jsonrpc_options *options) { jsonrpc_session_set_max_backoff(session->js, options->max_backoff); jsonrpc_session_set_probe_interval(session->js, options->probe_interval); jsonrpc_session_set_dscp(session->js, options->dscp); } static void ovsdb_jsonrpc_session_run_all(struct ovsdb_jsonrpc_remote *remote) { struct ovsdb_jsonrpc_session *s, *next; LIST_FOR_EACH_SAFE (s, next, node, &remote->sessions) { int error = ovsdb_jsonrpc_session_run(s); if (error) { ovsdb_jsonrpc_session_close(s); } } } static void ovsdb_jsonrpc_session_wait(struct ovsdb_jsonrpc_session *s) { jsonrpc_session_wait(s->js); if (!jsonrpc_session_get_backlog(s->js)) { if (ovsdb_jsonrpc_monitor_needs_flush(s)) { poll_immediate_wake(); } else { jsonrpc_session_recv_wait(s->js); } } } static void ovsdb_jsonrpc_session_wait_all(struct ovsdb_jsonrpc_remote *remote) { struct ovsdb_jsonrpc_session *s; LIST_FOR_EACH (s, node, &remote->sessions) { ovsdb_jsonrpc_session_wait(s); } } static void ovsdb_jsonrpc_session_get_memory_usage(const struct ovsdb_jsonrpc_session *s, struct simap *usage) { simap_increase(usage, "triggers", hmap_count(&s->triggers)); simap_increase(usage, "backlog", jsonrpc_session_get_backlog(s->js)); } static void ovsdb_jsonrpc_session_get_memory_usage_all( const struct ovsdb_jsonrpc_remote *remote, struct simap *usage) { struct ovsdb_jsonrpc_session *s; LIST_FOR_EACH (s, node, &remote->sessions) { ovsdb_jsonrpc_session_get_memory_usage(s, usage); } } static void ovsdb_jsonrpc_session_close_all(struct ovsdb_jsonrpc_remote *remote) { struct ovsdb_jsonrpc_session *s, *next; LIST_FOR_EACH_SAFE (s, next, node, &remote->sessions) { ovsdb_jsonrpc_session_close(s); } } /* Makes all of the JSON-RPC sessions managed by 'remote' disconnect. (They * will then generally reconnect.). 
'comment' should be a human-readable * explanation of the reason for disconnection, for use in log messages, or * NULL to suppress logging. * * If 'force' is true, disconnects all sessions. Otherwise, disconnects only * sesions that aren't database change aware. */ static void ovsdb_jsonrpc_session_reconnect_all(struct ovsdb_jsonrpc_remote *remote, bool force, const char *comment) { struct ovsdb_jsonrpc_session *s, *next; LIST_FOR_EACH_SAFE (s, next, node, &remote->sessions) { if (force || !s->db_change_aware) { jsonrpc_session_force_reconnect(s->js); if (comment && jsonrpc_session_is_connected(s->js)) { VLOG_INFO("%s: disconnecting (%s)", jsonrpc_session_get_name(s->js), comment); } if (!jsonrpc_session_is_alive(s->js)) { ovsdb_jsonrpc_session_close(s); } } } } /* Sets the options for all of the JSON-RPC sessions managed by 'remote' to * 'options'. * * (The dscp value can't be changed directly; the caller must instead close and * re-open the session.) */ static void ovsdb_jsonrpc_session_set_all_options( struct ovsdb_jsonrpc_remote *remote, const struct ovsdb_jsonrpc_options *options) { struct ovsdb_jsonrpc_session *s; LIST_FOR_EACH (s, node, &remote->sessions) { ovsdb_jsonrpc_session_set_options(s, options); } } /* Sets the 'status' of for the 'remote' with an outgoing connection. */ static bool ovsdb_jsonrpc_active_session_get_status( const struct ovsdb_jsonrpc_remote *remote, struct ovsdb_jsonrpc_remote_status *status) { const struct ovs_list *sessions = &remote->sessions; const struct ovsdb_jsonrpc_session *s; if (ovs_list_is_empty(sessions)) { return false; } ovs_assert(ovs_list_is_singleton(sessions)); s = CONTAINER_OF(ovs_list_front(sessions), struct ovsdb_jsonrpc_session, node); ovsdb_jsonrpc_session_get_status(s, status); status->n_connections = 1; return true; } static void ovsdb_jsonrpc_session_get_status(const struct ovsdb_jsonrpc_session *session, struct ovsdb_jsonrpc_remote_status *status) { const struct ovsdb_jsonrpc_session *s = session; const struct jsonrpc_session *js; struct ovsdb_lock_waiter *waiter; struct reconnect_stats rstats; struct ds locks_held, locks_waiting, locks_lost; js = s->js; status->is_connected = jsonrpc_session_is_connected(js); status->last_error = jsonrpc_session_get_status(js); jsonrpc_session_get_reconnect_stats(js, &rstats); status->state = rstats.state; status->sec_since_connect = rstats.msec_since_connect == UINT_MAX ? UINT_MAX : rstats.msec_since_connect / 1000; status->sec_since_disconnect = rstats.msec_since_disconnect == UINT_MAX ? UINT_MAX : rstats.msec_since_disconnect / 1000; ds_init(&locks_held); ds_init(&locks_waiting); ds_init(&locks_lost); HMAP_FOR_EACH (waiter, session_node, &s->up.waiters) { struct ds *string; string = (ovsdb_lock_waiter_is_owner(waiter) ? &locks_held : waiter->mode == OVSDB_LOCK_WAIT ? &locks_waiting : &locks_lost); if (string->length) { ds_put_char(string, ' '); } ds_put_cstr(string, waiter->lock_name); } status->locks_held = ds_steal_cstr(&locks_held); status->locks_waiting = ds_steal_cstr(&locks_waiting); status->locks_lost = ds_steal_cstr(&locks_lost); } /* Examines 'request' to determine the database to which it relates, and then * searches 's' to find that database: * * - If successful, returns the database and sets '*replyp' to NULL. * * - If no such database exists, returns NULL and sets '*replyp' to an * appropriate JSON-RPC error reply, owned by the caller. 
*/ static struct ovsdb * ovsdb_jsonrpc_lookup_db(const struct ovsdb_jsonrpc_session *s, const struct jsonrpc_msg *request, struct jsonrpc_msg **replyp) { struct json_array *params; struct ovsdb_error *error; const char *db_name; struct ovsdb *db; params = json_array(request->params); if (!params->n || params->elems[0]->type != JSON_STRING) { error = ovsdb_syntax_error( request->params, NULL, "%s request params must begin with <db-name>", request->method); goto error; } db_name = params->elems[0]->string; db = shash_find_data(&s->up.server->dbs, db_name); if (!db) { error = ovsdb_syntax_error( request->params, "unknown database", "%s request specifies unknown database %s", request->method, db_name); goto error; } if (!db->schema) { error = ovsdb_error("database not available", "%s request specifies database %s which is not " "yet available because it has not completed " "joining its cluster", request->method, db_name); goto error; } *replyp = NULL; return db; error: *replyp = jsonrpc_create_error(ovsdb_error_to_json_free(error), request->id); return NULL; } static struct ovsdb_error * ovsdb_jsonrpc_session_parse_lock_name(const struct jsonrpc_msg *request, const char **lock_namep) { const struct json_array *params; params = json_array(request->params); if (params->n != 1 || params->elems[0]->type != JSON_STRING || !ovsdb_parser_is_id(json_string(params->elems[0]))) { *lock_namep = NULL; return ovsdb_syntax_error(request->params, NULL, "%s request params must be <id>", request->method); } *lock_namep = json_string(params->elems[0]); return NULL; } static void ovsdb_jsonrpc_session_notify(struct ovsdb_session *session, const char *lock_name, const char *method) { struct ovsdb_jsonrpc_session *s; struct json *params; s = CONTAINER_OF(session, struct ovsdb_jsonrpc_session, up); params = json_array_create_1(json_string_create(lock_name)); ovsdb_jsonrpc_session_send(s, jsonrpc_create_notify(method, params)); } static struct jsonrpc_msg * jsonrpc_create_readonly_lock_error(const struct json *id) { return jsonrpc_create_error(json_string_create( "lock and unlock methods not allowed," " DB server is read only."), id); } static struct jsonrpc_msg * ovsdb_jsonrpc_session_lock(struct ovsdb_jsonrpc_session *s, struct jsonrpc_msg *request, enum ovsdb_lock_mode mode) { struct ovsdb_lock_waiter *waiter; struct ovsdb_error *error; struct ovsdb_session *victim; const char *lock_name; struct json *result; if (s->read_only) { return jsonrpc_create_readonly_lock_error(request->id); } error = ovsdb_jsonrpc_session_parse_lock_name(request, &lock_name); if (error) { goto error; } /* Report error if this session has issued a "lock" or "steal" without a * matching "unlock" for this lock. */ waiter = ovsdb_session_get_lock_waiter(&s->up, lock_name); if (waiter) { error = ovsdb_syntax_error( request->params, NULL, "must issue \"unlock\" before new \"%s\"", request->method); goto error; } /* Get the lock, add us as a waiter. 
*/ waiter = ovsdb_server_lock(&s->remote->server->up, &s->up, lock_name, mode, &victim); if (victim) { ovsdb_jsonrpc_session_notify(victim, lock_name, "stolen"); } result = json_object_create(); json_object_put(result, "locked", json_boolean_create(ovsdb_lock_waiter_is_owner(waiter))); return jsonrpc_create_reply(result, request->id); error: return jsonrpc_create_error(ovsdb_error_to_json_free(error), request->id); } static void ovsdb_jsonrpc_session_unlock_all(struct ovsdb_jsonrpc_session *s) { struct ovsdb_lock_waiter *waiter, *next; HMAP_FOR_EACH_SAFE (waiter, next, session_node, &s->up.waiters) { ovsdb_jsonrpc_session_unlock__(waiter); } } static void ovsdb_jsonrpc_session_unlock__(struct ovsdb_lock_waiter *waiter) { struct ovsdb_lock *lock = waiter->lock; if (lock) { struct ovsdb_session *new_owner = ovsdb_lock_waiter_remove(waiter); if (new_owner) { ovsdb_jsonrpc_session_notify(new_owner, lock->name, "locked"); } else { /* ovsdb_server_lock() might have freed 'lock'. */ } } ovsdb_lock_waiter_destroy(waiter); } static struct jsonrpc_msg * syntax_error_reply(const struct jsonrpc_msg *request, const char *details) { struct ovsdb_error *error = ovsdb_syntax_error( request->params, NULL, "%s: %s", request->method, details); struct jsonrpc_msg *msg = jsonrpc_create_error(ovsdb_error_to_json(error), request->id); ovsdb_error_destroy(error); return msg; } static struct jsonrpc_msg * ovsdb_jsonrpc_session_unlock(struct ovsdb_jsonrpc_session *s, struct jsonrpc_msg *request) { struct ovsdb_lock_waiter *waiter; struct ovsdb_error *error; const char *lock_name; if (s->read_only) { return jsonrpc_create_readonly_lock_error(request->id); } error = ovsdb_jsonrpc_session_parse_lock_name(request, &lock_name); if (error) { return jsonrpc_create_error(ovsdb_error_to_json_free(error), request->id); } /* Report error if this session has not issued a "lock" or "steal" for this * lock. 
*/ waiter = ovsdb_session_get_lock_waiter(&s->up, lock_name); if (!waiter) { return syntax_error_reply(request, "\"unlock\" without \"lock\" or \"steal\""); } ovsdb_jsonrpc_session_unlock__(waiter); return jsonrpc_create_reply(json_object_create(), request->id); } static struct jsonrpc_msg * ovsdb_jsonrpc_session_set_db_change_aware(struct ovsdb_jsonrpc_session *s, const struct jsonrpc_msg *request) { const struct json_array *params = json_array(request->params); if (params->n != 1 || (params->elems[0]->type != JSON_TRUE && params->elems[0]->type != JSON_FALSE)) { return syntax_error_reply(request, "true or false parameter expected"); } s->db_change_aware = json_boolean(params->elems[0]); return jsonrpc_create_reply(json_object_create(), request->id); } static void ovsdb_jsonrpc_session_got_request(struct ovsdb_jsonrpc_session *s, struct jsonrpc_msg *request) { struct jsonrpc_msg *reply; if (!strcmp(request->method, "transact") || !strcmp(request->method, "convert")) { struct ovsdb *db = ovsdb_jsonrpc_lookup_db(s, request, &reply); if (!reply) { ovsdb_jsonrpc_trigger_create(s, db, request); } } else if (!strcmp(request->method, "monitor") || (monitor_cond_enable__ && (!strcmp(request->method, "monitor_cond") || !strcmp(request->method, "monitor_cond_since")))) { struct ovsdb *db = ovsdb_jsonrpc_lookup_db(s, request, &reply); if (!reply) { enum ovsdb_monitor_version version; if (!strcmp(request->method, "monitor")) { version = OVSDB_MONITOR_V1; } else if (!strcmp(request->method, "monitor_cond")) { version = OVSDB_MONITOR_V2; } else { version = OVSDB_MONITOR_V3; } reply = ovsdb_jsonrpc_monitor_create(s, db, request->params, version, request->id); } } else if (!strcmp(request->method, "monitor_cond_change")) { reply = ovsdb_jsonrpc_monitor_cond_change(s, request->params, request->id); } else if (!strcmp(request->method, "monitor_cancel")) { reply = ovsdb_jsonrpc_monitor_cancel(s, json_array(request->params), request->id); } else if (!strcmp(request->method, "get_schema")) { struct ovsdb *db = ovsdb_jsonrpc_lookup_db(s, request, &reply); if (!reply) { reply = jsonrpc_create_reply(ovsdb_schema_to_json(db->schema), request->id); } } else if (!strcmp(request->method, "list_dbs")) { size_t n_dbs = shash_count(&s->up.server->dbs); struct shash_node *node; struct json **dbs; size_t i; dbs = xmalloc(n_dbs * sizeof *dbs); i = 0; SHASH_FOR_EACH (node, &s->up.server->dbs) { dbs[i++] = json_string_create(node->name); } reply = jsonrpc_create_reply(json_array_create(dbs, n_dbs), request->id); } else if (!strcmp(request->method, "get_server_id")) { const struct uuid *uuid = &s->up.server->uuid; struct json *result; result = json_string_create_nocopy(xasprintf(UUID_FMT, UUID_ARGS(uuid))); reply = jsonrpc_create_reply(result, request->id); } else if (!strcmp(request->method, "lock")) { reply = ovsdb_jsonrpc_session_lock(s, request, OVSDB_LOCK_WAIT); } else if (!strcmp(request->method, "steal")) { reply = ovsdb_jsonrpc_session_lock(s, request, OVSDB_LOCK_STEAL); } else if (!strcmp(request->method, "unlock")) { reply = ovsdb_jsonrpc_session_unlock(s, request); } else if (!strcmp(request->method, "set_db_change_aware")) { reply = ovsdb_jsonrpc_session_set_db_change_aware(s, request); } else if (!strcmp(request->method, "echo")) { reply = jsonrpc_create_reply(json_clone(request->params), request->id); } else { reply = jsonrpc_create_error(json_string_create("unknown method"), request->id); } if (reply) { jsonrpc_msg_destroy(request); ovsdb_jsonrpc_session_send(s, reply); } } static void execute_cancel(struct 
ovsdb_jsonrpc_session *s, struct jsonrpc_msg *request) { if (json_array(request->params)->n == 1) { struct ovsdb_jsonrpc_trigger *t; struct json *id; id = request->params->array.elems[0]; t = ovsdb_jsonrpc_trigger_find(s, id, json_hash(id, 0)); if (t) { ovsdb_jsonrpc_trigger_complete(t); } } } static void ovsdb_jsonrpc_session_got_notify(struct ovsdb_jsonrpc_session *s, struct jsonrpc_msg *request) { if (!strcmp(request->method, "cancel")) { execute_cancel(s, request); } jsonrpc_msg_destroy(request); } static void ovsdb_jsonrpc_session_send(struct ovsdb_jsonrpc_session *s, struct jsonrpc_msg *msg) { ovsdb_jsonrpc_monitor_flush_all(s); jsonrpc_session_send(s->js, msg); } /* JSON-RPC database server triggers. * * (Every transaction is treated as a trigger even if it doesn't actually have * any "wait" operations.) */ struct ovsdb_jsonrpc_trigger { struct ovsdb_trigger trigger; struct hmap_node hmap_node; /* In session's "triggers" hmap. */ struct json *id; }; static void ovsdb_jsonrpc_trigger_create(struct ovsdb_jsonrpc_session *s, struct ovsdb *db, struct jsonrpc_msg *request) { /* Check for duplicate ID. */ size_t hash = json_hash(request->id, 0); struct ovsdb_jsonrpc_trigger *t = ovsdb_jsonrpc_trigger_find(s, request->id, hash); if (t) { ovsdb_jsonrpc_session_send( s, syntax_error_reply(request, "duplicate request ID")); jsonrpc_msg_destroy(request); return; } /* Insert into trigger table. */ t = xmalloc(sizeof *t); bool disconnect_all = ovsdb_trigger_init( &s->up, db, &t->trigger, request, time_msec(), s->read_only, s->remote->role, jsonrpc_session_get_id(s->js)); t->id = json_clone(request->id); hmap_insert(&s->triggers, &t->hmap_node, hash); /* Complete early if possible. */ if (ovsdb_trigger_is_complete(&t->trigger)) { ovsdb_jsonrpc_trigger_complete(t); } if (disconnect_all) { /* The message below is currently the only reason to disconnect all * clients. */ ovsdb_jsonrpc_server_reconnect(s->remote->server, false, xasprintf("committed %s database " "schema conversion", db->name)); } } static struct ovsdb_jsonrpc_trigger * ovsdb_jsonrpc_trigger_find(struct ovsdb_jsonrpc_session *s, const struct json *id, size_t hash) { struct ovsdb_jsonrpc_trigger *t; HMAP_FOR_EACH_WITH_HASH (t, hmap_node, hash, &s->triggers) { if (json_equal(t->id, id)) { return t; } } return NULL; } static void ovsdb_jsonrpc_trigger_complete(struct ovsdb_jsonrpc_trigger *t) { struct ovsdb_jsonrpc_session *s; s = CONTAINER_OF(t->trigger.session, struct ovsdb_jsonrpc_session, up); if (jsonrpc_session_is_connected(s->js)) { bool complete = ovsdb_trigger_is_complete(&t->trigger); if (s->db_change_aware && !complete) { ovsdb_trigger_cancel(&t->trigger, "closing JSON-RPC session"); complete = true; } if (complete) { struct jsonrpc_msg *reply = ovsdb_trigger_steal_reply(&t->trigger); ovsdb_jsonrpc_session_send(s, reply); } } json_destroy(t->id); ovsdb_trigger_destroy(&t->trigger); hmap_remove(&s->triggers, &t->hmap_node); free(t); } static void ovsdb_jsonrpc_trigger_remove__(struct ovsdb_jsonrpc_session *s, struct ovsdb *db) { struct ovsdb_jsonrpc_trigger *t, *next; HMAP_FOR_EACH_SAFE (t, next, hmap_node, &s->triggers) { if (!db || t->trigger.db == db) { ovsdb_jsonrpc_trigger_complete(t); } } } /* Database 'db' is about to be removed from the database server. To prepare, * this function removes all references from triggers in 's' to 'db'. 
*/ static void ovsdb_jsonrpc_trigger_preremove_db(struct ovsdb_jsonrpc_session *s, struct ovsdb *db) { ovs_assert(db); ovsdb_jsonrpc_trigger_remove__(s, db); } /* Removes all triggers from 's'. */ static void ovsdb_jsonrpc_trigger_complete_all(struct ovsdb_jsonrpc_session *s) { ovsdb_jsonrpc_trigger_remove__(s, NULL); } static void ovsdb_jsonrpc_trigger_complete_done(struct ovsdb_jsonrpc_session *s) { struct ovsdb_jsonrpc_trigger *trigger, *next; LIST_FOR_EACH_SAFE (trigger, next, trigger.node, &s->up.completions) { ovsdb_jsonrpc_trigger_complete(trigger); } } /* Jsonrpc front end monitor. */ struct ovsdb_jsonrpc_monitor { struct hmap_node node; /* In ovsdb_jsonrpc_session's "monitors". */ struct ovsdb_jsonrpc_session *session; struct ovsdb *db; struct json *monitor_id; struct ovsdb_monitor *dbmon; struct ovsdb_monitor_change_set *change_set; enum ovsdb_monitor_version version; struct ovsdb_monitor_session_condition *condition;/* Session's condition */ }; static struct ovsdb_jsonrpc_monitor * ovsdb_jsonrpc_monitor_find(struct ovsdb_jsonrpc_session *s, const struct json *monitor_id) { struct ovsdb_jsonrpc_monitor *m; HMAP_FOR_EACH_WITH_HASH (m, node, json_hash(monitor_id, 0), &s->monitors) { if (json_equal(m->monitor_id, monitor_id)) { return m; } } return NULL; } static bool parse_bool(struct ovsdb_parser *parser, const char *name, bool default_value) { const struct json *json; json = ovsdb_parser_member(parser, name, OP_BOOLEAN | OP_OPTIONAL); return json ? json_boolean(json) : default_value; } static struct ovsdb_error * OVS_WARN_UNUSED_RESULT ovsdb_jsonrpc_parse_monitor_request( struct ovsdb_monitor *dbmon, const struct ovsdb_table *table, struct ovsdb_monitor_session_condition *cond, const struct json *monitor_request) { const struct ovsdb_table_schema *ts = table->schema; enum ovsdb_monitor_selection select; const struct json *columns, *select_json, *where = NULL; struct ovsdb_parser parser; struct ovsdb_error *error; ovsdb_parser_init(&parser, monitor_request, "table %s", ts->name); if (cond) { where = ovsdb_parser_member(&parser, "where", OP_ARRAY | OP_OPTIONAL); } columns = ovsdb_parser_member(&parser, "columns", OP_ARRAY | OP_OPTIONAL); select_json = ovsdb_parser_member(&parser, "select", OP_OBJECT | OP_OPTIONAL); error = ovsdb_parser_finish(&parser); if (error) { return error; } if (select_json) { select = 0; ovsdb_parser_init(&parser, select_json, "table %s select", ts->name); if (parse_bool(&parser, "initial", true)) { select |= OJMS_INITIAL; } if (parse_bool(&parser, "insert", true)) { select |= OJMS_INSERT; } if (parse_bool(&parser, "delete", true)) { select |= OJMS_DELETE; } if (parse_bool(&parser, "modify", true)) { select |= OJMS_MODIFY; } error = ovsdb_parser_finish(&parser); if (error) { return error; } } else { select = OJMS_INITIAL | OJMS_INSERT | OJMS_DELETE | OJMS_MODIFY; } ovsdb_monitor_table_add_select(dbmon, table, select); if (columns) { size_t i; if (columns->type != JSON_ARRAY) { return ovsdb_syntax_error(columns, NULL, "array of column names expected"); } for (i = 0; i < columns->array.n; i++) { const struct ovsdb_column *column; const char *s; if (columns->array.elems[i]->type != JSON_STRING) { return ovsdb_syntax_error(columns, NULL, "array of column names expected"); } s = columns->array.elems[i]->string; column = shash_find_data(&table->schema->columns, s); if (!column) { return ovsdb_syntax_error(columns, NULL, "%s is not a valid " "column name", s); } if (ovsdb_monitor_add_column(dbmon, table, column, select, true)) { return ovsdb_syntax_error(columns, 
NULL, "column %s " "mentioned more than once", column->name); } } } else { struct shash_node *node; SHASH_FOR_EACH (node, &ts->columns) { const struct ovsdb_column *column = node->data; if (column->index != OVSDB_COL_UUID) { if (ovsdb_monitor_add_column(dbmon, table, column, select, true)) { return ovsdb_syntax_error(columns, NULL, "column %s " "mentioned more than once", column->name); } } } } if (cond) { error = ovsdb_monitor_table_condition_create(cond, table, where); if (error) { return error; } } return NULL; } static struct jsonrpc_msg * ovsdb_jsonrpc_monitor_create(struct ovsdb_jsonrpc_session *s, struct ovsdb *db, struct json *params, enum ovsdb_monitor_version version, const struct json *request_id) { struct ovsdb_jsonrpc_monitor *m = NULL; struct ovsdb_monitor *dbmon = NULL; struct json *monitor_id, *monitor_requests; struct ovsdb_error *error = NULL; struct shash_node *node; struct json *json; if ((version == OVSDB_MONITOR_V2 && json_array(params)->n != 3) || (version == OVSDB_MONITOR_V3 && json_array(params)->n != 4)) { error = ovsdb_syntax_error(params, NULL, "invalid parameters"); goto error; } monitor_id = params->array.elems[1]; monitor_requests = params->array.elems[2]; if (monitor_requests->type != JSON_OBJECT) { error = ovsdb_syntax_error(monitor_requests, NULL, "monitor-requests must be object"); goto error; } if (ovsdb_jsonrpc_monitor_find(s, monitor_id)) { error = ovsdb_syntax_error(monitor_id, NULL, "duplicate monitor ID"); goto error; } m = xzalloc(sizeof *m); m->session = s; m->db = db; m->dbmon = ovsdb_monitor_create(db, m); if (version == OVSDB_MONITOR_V2 || version == OVSDB_MONITOR_V3) { m->condition = ovsdb_monitor_session_condition_create(); } m->version = version; hmap_insert(&s->monitors, &m->node, json_hash(monitor_id, 0)); m->monitor_id = json_clone(monitor_id); SHASH_FOR_EACH (node, json_object(monitor_requests)) { const struct ovsdb_table *table; const struct json *mr_value; size_t i; table = ovsdb_get_table(m->db, node->name); if (!table) { error = ovsdb_syntax_error(NULL, NULL, "no table named %s", node->name); goto error; } ovsdb_monitor_add_table(m->dbmon, table); /* Parse columns. */ mr_value = node->data; if (mr_value->type == JSON_ARRAY) { const struct json_array *array = &mr_value->array; for (i = 0; i < array->n; i++) { error = ovsdb_jsonrpc_parse_monitor_request(m->dbmon, table, m->condition, array->elems[i]); if (error) { goto error; } } } else { error = ovsdb_jsonrpc_parse_monitor_request(m->dbmon, table, m->condition, mr_value); if (error) { goto error; } } } dbmon = ovsdb_monitor_add(m->dbmon); if (dbmon != m->dbmon) { /* Found an exisiting dbmon, reuse the current one. 
*/ ovsdb_monitor_remove_jsonrpc_monitor(m->dbmon, m, NULL); ovsdb_monitor_add_jsonrpc_monitor(dbmon, m); m->dbmon = dbmon; } /* Only now we can bind session's condition to ovsdb_monitor */ if (m->condition) { ovsdb_monitor_condition_bind(m->dbmon, m->condition); } bool initial = false; if (version == OVSDB_MONITOR_V3) { struct json *last_id = params->array.elems[3]; if (last_id->type != JSON_STRING) { error = ovsdb_syntax_error(last_id, NULL, "last-txn-id must be string"); goto error; } struct uuid txn_uuid; if (!uuid_from_string(&txn_uuid, last_id->string)) { error = ovsdb_syntax_error(last_id, NULL, "last-txn-id must be UUID format."); goto error; } if (!uuid_is_zero(&txn_uuid)) { ovsdb_monitor_get_changes_after(&txn_uuid, m->dbmon, &m->change_set); } } if (!m->change_set) { ovsdb_monitor_get_initial(m->dbmon, &m->change_set); initial = true; } json = ovsdb_jsonrpc_monitor_compose_update(m, initial); json = json ? json : json_object_create(); if (m->version == OVSDB_MONITOR_V3) { struct json *json_last_id = json_string_create_nocopy( xasprintf(UUID_FMT, UUID_ARGS(ovsdb_monitor_get_last_txnid( m->dbmon)))); struct json *json_found = json_boolean_create(!initial); json = json_array_create_3(json_found, json_last_id, json); } return jsonrpc_create_reply(json, request_id); error: if (m) { ovsdb_jsonrpc_monitor_destroy(m, false); } return jsonrpc_create_error(ovsdb_error_to_json_free(error), request_id); } static struct ovsdb_error * ovsdb_jsonrpc_parse_monitor_cond_change_request( struct ovsdb_jsonrpc_monitor *m, const struct ovsdb_table *table, const struct json *cond_change_req) { const struct ovsdb_table_schema *ts = table->schema; const struct json *condition, *columns; struct ovsdb_parser parser; struct ovsdb_error *error; ovsdb_parser_init(&parser, cond_change_req, "table %s", ts->name); columns = ovsdb_parser_member(&parser, "columns", OP_ARRAY | OP_OPTIONAL); condition = ovsdb_parser_member(&parser, "where", OP_ARRAY | OP_OPTIONAL); error = ovsdb_parser_finish(&parser); if (error) { return error; } if (columns) { error = ovsdb_syntax_error(cond_change_req, NULL, "changing columns " "is unsupported"); return error; } error = ovsdb_monitor_table_condition_update(m->dbmon, m->condition, table, condition); return error; } static struct jsonrpc_msg * ovsdb_jsonrpc_monitor_cond_change(struct ovsdb_jsonrpc_session *s, struct json *params, const struct json *request_id) { struct ovsdb_error *error; struct ovsdb_jsonrpc_monitor *m; struct json *monitor_cond_change_reqs; struct shash_node *node; if (json_array(params)->n != 3) { error = ovsdb_syntax_error(params, NULL, "invalid parameters"); goto error; } m = ovsdb_jsonrpc_monitor_find(s, params->array.elems[0]); if (!m) { error = ovsdb_syntax_error(params->array.elems[0], NULL, "unknown monitor session"); goto error; } const struct json *new_monitor_id = params->array.elems[1]; bool changing_id = !json_equal(m->monitor_id, new_monitor_id); if (changing_id && ovsdb_jsonrpc_monitor_find(s, new_monitor_id)) { error = ovsdb_syntax_error(new_monitor_id, NULL, "duplicate monitor ID"); goto error; } monitor_cond_change_reqs = params->array.elems[2]; if (monitor_cond_change_reqs->type != JSON_OBJECT) { error = ovsdb_syntax_error(NULL, NULL, "monitor-cond-change-requests must be object"); goto error; } SHASH_FOR_EACH (node, json_object(monitor_cond_change_reqs)) { const struct ovsdb_table *table; const struct json *mr_value; size_t i; table = ovsdb_get_table(m->db, node->name); if (!table) { error = ovsdb_syntax_error(NULL, NULL, "no table named %s", 
node->name); goto error; } if (!ovsdb_monitor_table_exists(m->dbmon, table)) { error = ovsdb_syntax_error(NULL, NULL, "no table named %s in monitor session", node->name); goto error; } mr_value = node->data; if (mr_value->type == JSON_ARRAY) { const struct json_array *array = &mr_value->array; for (i = 0; i < array->n; i++) { error = ovsdb_jsonrpc_parse_monitor_cond_change_request( m, table, array->elems[i]); if (error) { goto error; } } } else { error = ovsdb_syntax_error( NULL, NULL, "table %s no monitor-cond-change JSON array", node->name); goto error; } } if (changing_id) { hmap_remove(&s->monitors, &m->node); json_destroy(m->monitor_id); m->monitor_id = json_clone(new_monitor_id); hmap_insert(&s->monitors, &m->node, json_hash(m->monitor_id, 0)); } /* Send the new update, if any, represents the difference from the old * condition and the new one. */ struct json *update_json; update_json = ovsdb_monitor_get_update(m->dbmon, false, true, m->condition, m->version, &m->change_set); if (update_json) { struct jsonrpc_msg *msg; struct json *p; if (m->version == OVSDB_MONITOR_V3) { struct json *json_last_id = json_string_create_nocopy( xasprintf(UUID_FMT, UUID_ARGS(ovsdb_monitor_get_last_txnid( m->dbmon)))); p = json_array_create_3(json_clone(m->monitor_id), json_last_id, update_json); } else { p = json_array_create_2(json_clone(m->monitor_id), update_json); } msg = ovsdb_jsonrpc_create_notify(m, p); jsonrpc_session_send(s->js, msg); } return jsonrpc_create_reply(json_object_create(), request_id); error: return jsonrpc_create_error(ovsdb_error_to_json_free(error), request_id); } static struct jsonrpc_msg * ovsdb_jsonrpc_monitor_cancel(struct ovsdb_jsonrpc_session *s, struct json_array *params, const struct json *request_id) { if (params->n != 1) { return jsonrpc_create_error(json_string_create("invalid parameters"), request_id); } else { struct ovsdb_jsonrpc_monitor *m; m = ovsdb_jsonrpc_monitor_find(s, params->elems[0]); if (!m) { return jsonrpc_create_error(json_string_create("unknown monitor"), request_id); } else { ovsdb_jsonrpc_monitor_destroy(m, false); return jsonrpc_create_reply(json_object_create(), request_id); } } } /* Database 'db' is about to be removed from the database server. To prepare, * this function removes all references from monitors in 's' to 'db'. */ static void ovsdb_jsonrpc_monitor_preremove_db(struct ovsdb_jsonrpc_session *s, struct ovsdb *db) { ovs_assert(db); struct ovsdb_jsonrpc_monitor *m, *next; HMAP_FOR_EACH_SAFE (m, next, node, &s->monitors) { if (m->db == db) { ovsdb_jsonrpc_monitor_destroy(m, true); } } } /* Cancels all monitors in 's'. 
*/ static void ovsdb_jsonrpc_monitor_remove_all(struct ovsdb_jsonrpc_session *s) { struct ovsdb_jsonrpc_monitor *m, *next; HMAP_FOR_EACH_SAFE (m, next, node, &s->monitors) { ovsdb_jsonrpc_monitor_destroy(m, false); } } static struct json * ovsdb_jsonrpc_monitor_compose_update(struct ovsdb_jsonrpc_monitor *m, bool initial) { if (!ovsdb_monitor_needs_flush(m->dbmon, m->change_set)) { return NULL; } return ovsdb_monitor_get_update(m->dbmon, initial, false, m->condition, m->version, &m->change_set); } static bool ovsdb_jsonrpc_monitor_needs_flush(struct ovsdb_jsonrpc_session *s) { struct ovsdb_jsonrpc_monitor *m; HMAP_FOR_EACH (m, node, &s->monitors) { if (ovsdb_monitor_needs_flush(m->dbmon, m->change_set)) { return true; } } return false; } void ovsdb_jsonrpc_monitor_destroy(struct ovsdb_jsonrpc_monitor *m, bool notify_cancellation) { if (notify_cancellation) { struct ovsdb_jsonrpc_session *s = m->session; if (jsonrpc_session_is_connected(s->js) && s->db_change_aware) { struct jsonrpc_msg *notify = jsonrpc_create_notify( "monitor_canceled", json_array_create_1(json_clone(m->monitor_id))); ovsdb_jsonrpc_session_send(s, notify); } } json_destroy(m->monitor_id); hmap_remove(&m->session->monitors, &m->node); ovsdb_monitor_remove_jsonrpc_monitor(m->dbmon, m, m->change_set); ovsdb_monitor_session_condition_destroy(m->condition); free(m); } static struct jsonrpc_msg * ovsdb_jsonrpc_create_notify(const struct ovsdb_jsonrpc_monitor *m, struct json *params) { const char *method; switch(m->version) { case OVSDB_MONITOR_V1: method = "update"; break; case OVSDB_MONITOR_V2: method = "update2"; break; case OVSDB_MONITOR_V3: method = "update3"; break; case OVSDB_MONITOR_VERSION_MAX: default: OVS_NOT_REACHED(); } return jsonrpc_create_notify(method, params); } const struct uuid * ovsdb_jsonrpc_server_get_uuid(const struct ovsdb_jsonrpc_server *s) { return &s->up.uuid; } static void ovsdb_jsonrpc_monitor_flush_all(struct ovsdb_jsonrpc_session *s) { struct ovsdb_jsonrpc_monitor *m; HMAP_FOR_EACH (m, node, &s->monitors) { struct json *json; json = ovsdb_jsonrpc_monitor_compose_update(m, false); if (json) { struct jsonrpc_msg *msg; struct json *params; if (m->version == OVSDB_MONITOR_V3) { struct json *json_last_id = json_string_create_nocopy( xasprintf(UUID_FMT, UUID_ARGS(ovsdb_monitor_get_last_txnid( m->dbmon)))); params = json_array_create_3(json_clone(m->monitor_id), json_last_id, json); } else { params = json_array_create_2(json_clone(m->monitor_id), json); } msg = ovsdb_jsonrpc_create_notify(m, params); jsonrpc_session_send(s->js, msg); } } } void ovsdb_jsonrpc_disable_monitor_cond(void) { /* Once disabled, it is not possible to re-enable it. */ monitor_cond_enable__ = false; }
950927.c
/* * File: ms_task.c * Author: Mingqiang Zhuang * * Created on February 10, 2009 * * (c) Copyright 2009, Schooner Information Technology, Inc. * http://www.schoonerinfotech.com/ * */ #include "config.h" #include <inttypes.h> #if TIME_WITH_SYS_TIME # include <sys/time.h> # include <time.h> #else # if HAVE_SYS_TIME_H # include <sys/time.h> # else # include <time.h> # endif #endif #include "ms_thread.h" #include "ms_setting.h" #include "ms_atomic.h" /* command distribution adjustment cycle */ #define CMD_DISTR_ADJUST_CYCLE 1000 #define DISADJUST_FACTOR 0.03 /** * In one adjustment cycle, if undo set or get * operations proportion is more than 3% , means * there are too many new item or need more new * item in the window. This factor shows it. */ /* get item from task window */ static ms_task_item_t *ms_get_cur_opt_item(ms_conn_t *c); static ms_task_item_t *ms_get_next_get_item(ms_conn_t *c); static ms_task_item_t *ms_get_next_set_item(ms_conn_t *c); static ms_task_item_t *ms_get_random_overwrite_item(ms_conn_t *c); /* select next operation to do */ static void ms_select_opt(ms_conn_t *c, ms_task_t *task); /* set and get speed estimate for controlling and adjustment */ static bool ms_is_set_too_fast(ms_task_t *task); static bool ms_is_get_too_fast(ms_task_t *task); static void ms_kick_out_item(ms_task_item_t *item); /* miss rate adjustment */ static bool ms_need_overwrite_item(ms_task_t *task); static bool ms_adjust_opt(ms_conn_t *c, ms_task_t *task); /* deal with data verification initialization */ static void ms_task_data_verify_init(ms_task_t *task); static void ms_task_expire_verify_init(ms_task_t *task); /* select a new task to do */ static ms_task_t *ms_get_task(ms_conn_t *c, bool warmup); /* run the selected task */ static void ms_update_set_result(ms_conn_t *c, ms_task_item_t *item); static void ms_update_stat_result(ms_conn_t *c); static void ms_update_multi_get_result(ms_conn_t *c); static void ms_update_single_get_result(ms_conn_t *c, ms_task_item_t *item); static void ms_update_task_result(ms_conn_t *c); static void ms_single_getset_task_sch(ms_conn_t *c); static void ms_multi_getset_task_sch(ms_conn_t *c); static void ms_send_signal(ms_sync_lock_t *sync_lock); static void ms_warmup_server(ms_conn_t *c); static int ms_run_getset_task(ms_conn_t *c); /** * used to get the current operation item(object) * * @param c, pointer of the concurrency * * @return ms_task_item_t*, current operating item */ static ms_task_item_t *ms_get_cur_opt_item(ms_conn_t *c) { return c->curr_task.item; } /** * used to get the next item to do get operation * * @param c, pointer of the concurrency * * @return ms_task_item_t*, the pointer of the next item to do * get operation */ static ms_task_item_t *ms_get_next_get_item(ms_conn_t *c) { ms_task_item_t *item= NULL; if (c->set_cursor <= 0) { /* the first item in the window */ item= &c->item_win[0]; } else if (c->set_cursor > 0 && c->set_cursor < (uint32_t)c->win_size) { /* random get one item set before */ item= &c->item_win[random() % (int64_t)c->set_cursor]; } else { /* random get one item from the window */ item= &c->item_win[random() % c->win_size]; } return item; } /* ms_get_next_get_item */ /** * used to get the next item to do set operation * * @param c, pointer of the concurrency * * @return ms_task_item_t*, the pointer of the next item to do * set operation */ static ms_task_item_t *ms_get_next_set_item(ms_conn_t *c) { /** * when a set command successes, the cursor will plus 1. If set * fails, the cursor doesn't change. 
it isn't necessary to * increase the cursor here. */ return &c->item_win[(int64_t)c->set_cursor % c->win_size]; } /** * If we need do overwrite, we could select a item set before. * This function is used to get a item set before to do * overwrite. * * @param c, pointer of the concurrency * * @return ms_task_item_t*, the pointer of the previous item of * set operation */ static ms_task_item_t *ms_get_random_overwrite_item(ms_conn_t *c) { return ms_get_next_get_item(c); } /* ms_get_random_overwrite_item */ /** * According to the proportion of operations(get or set), select * an operation to do. * * @param c, pointer of the concurrency * @param task, pointer of current task in the concurrency */ static void ms_select_opt(ms_conn_t *c, ms_task_t *task) { double get_prop= ms_setting.cmd_distr[CMD_GET].cmd_prop; double set_prop= ms_setting.cmd_distr[CMD_SET].cmd_prop; /* update cycle operation number if necessary */ if ((task->cycle_undo_get == 0) || (task->cycle_undo_set == 0)) { task->cycle_undo_get+= (int)(CMD_DISTR_ADJUST_CYCLE * get_prop); task->cycle_undo_set+= (int)(CMD_DISTR_ADJUST_CYCLE * set_prop); } /** * According to operation distribution to choose doing which * operation. If it can't set new object to sever, just change * to do get operation. */ if ((set_prop > PROP_ERROR) && ((double)task->get_opt * set_prop >= (double)task->set_opt * get_prop)) { task->cmd= CMD_SET; task->item= ms_get_next_set_item(c); } else { task->cmd= CMD_GET; task->item= ms_get_next_get_item(c); } } /* ms_select_opt */ /** * used to judge whether the number of get operations done is * more than expected number of get operations to do right now. * * @param task, pointer of current task in the concurrency * * @return bool, if get too fast, return true, else return false */ static bool ms_is_get_too_fast(ms_task_t *task) { double get_prop= ms_setting.cmd_distr[CMD_GET].cmd_prop; double set_prop= ms_setting.cmd_distr[CMD_SET].cmd_prop; /* no get operation */ if (get_prop < PROP_ERROR) { return false; } int max_undo_set= (int)(set_prop / get_prop * (1.0 + DISADJUST_FACTOR)) * task->cycle_undo_get; if (((double)task->get_opt * set_prop > (double)task->set_opt * get_prop) && (task->cycle_undo_set > max_undo_set)) { return true; } return false; } /* ms_is_get_too_fast */ /** * used to judge whether the number of set operations done is * more than expected number of set operations to do right now. * * @param task, pointer of current task in the concurrency * * @return bool, if set too fast, return true, else return false */ static bool ms_is_set_too_fast(ms_task_t *task) { double get_prop= ms_setting.cmd_distr[CMD_GET].cmd_prop; double set_prop= ms_setting.cmd_distr[CMD_SET].cmd_prop; /* no set operation */ if (set_prop < PROP_ERROR) { return false; } /* If it does set operation too fast, skip some */ int max_undo_get= (int)((get_prop / set_prop * (1.0 + DISADJUST_FACTOR)) * (double)task->cycle_undo_set); if (((double)task->get_opt * set_prop < (double)task->set_opt * get_prop) && (task->cycle_undo_get > max_undo_get)) { return true; } return false; } /* ms_is_set_too_fast */ /** * kick out the old item in the window, and add a new item to * overwrite the old item. When we don't want to do overwrite * object, and the current item to do set operation is an old * item, we could kick out the old item and add a new item. Then * we can ensure we set new object every time. 
* * @param item, pointer of task item which includes the object * information */ static void ms_kick_out_item(ms_task_item_t *item) { /* allocate a new item */ item->key_prefix= ms_get_key_prefix(); item->key_suffix_offset++; item->value_offset= INVALID_OFFSET; /* new item use invalid value offset */ item->client_time= 0; } /* ms_kick_out_item */ /** * used to judge whether we need overwrite object based on the * options user specified * * @param task, pointer of current task in the concurrency * * @return bool, if need overwrite, return true, else return * false */ static bool ms_need_overwrite_item(ms_task_t *task) { ms_task_item_t *item= task->item; assert(item != NULL); assert(task->cmd == CMD_SET); /** * according to data overwrite percent to determine if do data * overwrite. */ if (task->overwrite_set < (double)task->set_opt * ms_setting.overwrite_percent) { return true; } return false; } /* ms_need_overwirte_item */ /** * used to adjust operation. the function must be called after * select operation. the function change get operation to set * operation, or set operation to get operation based on the * current case. * * @param c, pointer of the concurrency * @param task, pointer of current task in the concurrency * * @return bool, if success, return true, else return false */ static bool ms_adjust_opt(ms_conn_t *c, ms_task_t *task) { ms_task_item_t *item= task->item; assert(item != NULL); if (task->cmd == CMD_SET) { /* If did set operation too fast, skip some */ if (ms_is_set_too_fast(task)) { /* get the item instead */ if (item->value_offset != INVALID_OFFSET) { task->cmd= CMD_GET; return true; } } /* If the current item is not a new item, kick it out */ if (item->value_offset != INVALID_OFFSET) { if (ms_need_overwrite_item(task)) { /* overwrite */ task->overwrite_set++; } else { /* kick out the current item to do set operation */ ms_kick_out_item(item); } } else /* it's a new item */ { /* need overwrite */ if (ms_need_overwrite_item(task)) { /** * overwrite not use the item with current set cursor, revert * set cursor. */ c->set_cursor--; item= ms_get_random_overwrite_item(c); if (item->value_offset != INVALID_OFFSET) { task->item= item; task->overwrite_set++; } else /* item is a new item */ { /* select the item to run, and cancel overwrite */ task->item= item; } } } task->cmd= CMD_SET; return true; } else { if (item->value_offset == INVALID_OFFSET) { task->cmd= CMD_SET; return true; } /** * If It does get operation too fast, it will change the * operation to set. */ if (ms_is_get_too_fast(task)) { /* don't kick out the first item in the window */ if (! ms_is_set_too_fast(task)) { ms_kick_out_item(item); task->cmd= CMD_SET; return true; } else { return false; } } assert(item->value_offset != INVALID_OFFSET); task->cmd= CMD_GET; return true; } } /* ms_adjust_opt */ /** * used to initialize the task which need verify data. * * @param task, pointer of current task in the concurrency */ static void ms_task_data_verify_init(ms_task_t *task) { ms_task_item_t *item= task->item; assert(item != NULL); assert(task->cmd == CMD_GET); /** * according to data verification percent to determine if do * data verification. 
*/ if (task->verified_get < (double)task->get_opt * ms_setting.verify_percent) { /** * currently it doesn't do verify, just increase the counter, * and do verification next proper get command */ if ((task->item->value_offset != INVALID_OFFSET) && (item->exp_time == 0)) { task->verify= true; task->finish_verify= false; task->verified_get++; } } } /* ms_task_data_verify_init */ /** * used to initialize the task which need verify expire time. * * @param task, pointer of current task in the concurrency */ static void ms_task_expire_verify_init(ms_task_t *task) { ms_task_item_t *item= task->item; assert(item != NULL); assert(task->cmd == CMD_GET); assert(item->exp_time > 0); task->verify= true; task->finish_verify= false; } /* ms_task_expire_verify_init */ /** * used to get one task, the function initializes the task * structure. * * @param c, pointer of the concurrency * @param warmup, whether it need warmup * * @return ms_task_t*, pointer of current task in the * concurrency */ static ms_task_t *ms_get_task(ms_conn_t *c, bool warmup) { ms_task_t *task= &c->curr_task; while (1) { task->verify= false; task->finish_verify= true; task->get_miss= true; if (warmup) { task->cmd= CMD_SET; task->item= ms_get_next_set_item(c); return task; } /* according to operation distribution to choose doing which operation */ ms_select_opt(c, task); if (! ms_adjust_opt(c, task)) { continue; } if ((ms_setting.verify_percent > 0) && (task->cmd == CMD_GET)) { ms_task_data_verify_init(task); } if ((ms_setting.exp_ver_per > 0) && (task->cmd == CMD_GET) && (task->item->exp_time > 0)) { ms_task_expire_verify_init(task); } break; } /** * Only update get and delete counter, set counter will be * updated after set operation successes. */ if (task->cmd == CMD_GET) { task->get_opt++; task->cycle_undo_get--; } return task; } /* ms_get_task */ /** * send a signal to the main monitor thread * * @param sync_lock, pointer of the lock */ static void ms_send_signal(ms_sync_lock_t *sync_lock) { pthread_mutex_lock(&sync_lock->lock); sync_lock->count++; pthread_cond_signal(&sync_lock->cond); pthread_mutex_unlock(&sync_lock->lock); } /* ms_send_signal */ /** * If user only want to do get operation, but there is no object * in server , so we use this function to warmup the server, and * set some objects to server. It runs at the beginning of task. * * @param c, pointer of the concurrency */ static void ms_warmup_server(ms_conn_t *c) { ms_task_t *task; ms_task_item_t *item; /** * Extra one loop to get the last command returned state. * Normally it gets the previous command returned state. */ if ((c->remain_warmup_num >= 0) && (c->remain_warmup_num != c->warmup_num)) { item= ms_get_cur_opt_item(c); /* only update the set command result state for data verification */ if ((c->precmd.cmd == CMD_SET) && (c->precmd.retstat == MCD_STORED)) { item->value_offset= item->key_suffix_offset; /* set success, update counter */ c->set_cursor++; } else if (c->precmd.cmd == CMD_SET && c->precmd.retstat != MCD_STORED) { printf("key: %" PRIx64 " didn't set success\n", item->key_prefix); } } /* the last time don't run a task */ if (c->remain_warmup_num-- > 0) { /* operate next task item */ task= ms_get_task(c, true); item= task->item; ms_mcd_set(c, item); } /** * finish warming up server, wait all connects initialize * complete. Then all connects can start do task at the same * time. 
*/ if (c->remain_warmup_num == -1) { ms_send_signal(&ms_global.warmup_lock); c->remain_warmup_num--; /* never run the if branch */ } } /* ms_warmup_server */ /** * dispatch single get and set task * * @param c, pointer of the concurrency */ static void ms_single_getset_task_sch(ms_conn_t *c) { ms_task_t *task; ms_task_item_t *item; /* the last time don't run a task */ if (c->remain_exec_num-- > 0) { task= ms_get_task(c, false); item= task->item; if (task->cmd == CMD_SET) { ms_mcd_set(c, item); } else if (task->cmd == CMD_GET) { assert(task->cmd == CMD_GET); ms_mcd_get(c, item); } } } /* ms_single_getset_task_sch */ /** * dispatch multi-get and set task * * @param c, pointer of the concurrency */ static void ms_multi_getset_task_sch(ms_conn_t *c) { ms_task_t *task; ms_mlget_task_item_t *mlget_item; while (1) { if (c->remain_exec_num-- > 0) { task= ms_get_task(c, false); if (task->cmd == CMD_SET) /* just do it */ { ms_mcd_set(c, task->item); break; } else { assert(task->cmd == CMD_GET); mlget_item= &c->mlget_task.mlget_item[c->mlget_task.mlget_num]; mlget_item->item= task->item; mlget_item->verify= task->verify; mlget_item->finish_verify= task->finish_verify; mlget_item->get_miss= task->get_miss; c->mlget_task.mlget_num++; /* enough multi-get task items can be done */ if ((c->mlget_task.mlget_num >= ms_setting.mult_key_num) || ((c->remain_exec_num == 0) && (c->mlget_task.mlget_num > 0))) { ms_mcd_mlget(c); break; } } } else { if ((c->remain_exec_num <= 0) && (c->mlget_task.mlget_num > 0)) { ms_mcd_mlget(c); } break; } } } /* ms_multi_getset_task_sch */ /** * calculate the difference value of two time points * * @param start_time, the start time * @param end_time, the end time * * @return uint64_t, the difference value between start_time and end_time in us */ int64_t ms_time_diff(struct timeval *start_time, struct timeval *end_time) { int64_t endtime= end_time->tv_sec * 1000000 + end_time->tv_usec; int64_t starttime= start_time->tv_sec * 1000000 + start_time->tv_usec; assert(endtime >= starttime); return endtime - starttime; } /* ms_time_diff */ /** * after get the response from server for multi-get, the * function update the state of the task and do data verify if * necessary. * * @param c, pointer of the concurrency */ static void ms_update_multi_get_result(ms_conn_t *c) { ms_mlget_task_item_t *mlget_item; ms_task_item_t *item; char *orignval= NULL; char *orignkey= NULL; if (c == NULL) { return; } assert(c != NULL); for (int i= 0; i < c->mlget_task.mlget_num; i++) { mlget_item= &c->mlget_task.mlget_item[i]; item= mlget_item->item; orignval= &ms_setting.char_block[item->value_offset]; orignkey= &ms_setting.char_block[item->key_suffix_offset]; /* update get miss counter */ if (mlget_item->get_miss) { atomic_add_size(&ms_stats.get_misses, 1); } /* get nothing from server for this task item */ if (mlget_item->verify && ! 
mlget_item->finish_verify) { /* verify expire time if necessary */ if (item->exp_time > 0) { struct timeval curr_time; gettimeofday(&curr_time, NULL); /* object doesn't expire but can't get it now */ if (curr_time.tv_sec - item->client_time < item->exp_time - EXPIRE_TIME_ERROR) { atomic_add_size(&ms_stats.unexp_unget, 1); if (ms_setting.verbose) { char set_time[64]; char cur_time[64]; strftime(set_time, 64, "%Y-%m-%d %H:%M:%S", localtime(&item->client_time)); strftime(cur_time, 64, "%Y-%m-%d %H:%M:%S", localtime(&curr_time.tv_sec)); fprintf(stderr, "\n\t<%d expire time verification failed, object " "doesn't expire but can't get it now\n" "\tkey len: %d\n" "\tkey: %" PRIx64 " %.*s\n" "\tset time: %s current time: %s " "diff time: %d expire time: %d\n" "\texpected data len: %d\n" "\texpected data: %.*s\n" "\treceived data: \n", c->sfd, item->key_size, item->key_prefix, item->key_size - (int)KEY_PREFIX_SIZE, orignkey, set_time, cur_time, (int)(curr_time.tv_sec - item->client_time), item->exp_time, item->value_size, item->value_size, orignval); fflush(stderr); } } } else { atomic_add_size(&ms_stats.vef_miss, 1); if (ms_setting.verbose) { fprintf(stderr, "\n<%d data verification failed\n" "\tkey len: %d\n" "\tkey: %" PRIx64 " %.*s\n" "\texpected data len: %d\n" "\texpected data: %.*s\n" "\treceived data: \n", c->sfd, item->key_size, item->key_prefix, item->key_size - (int)KEY_PREFIX_SIZE, orignkey, item->value_size, item->value_size, orignval); fflush(stderr); } } } } c->mlget_task.mlget_num= 0; c->mlget_task.value_index= INVALID_OFFSET; } /* ms_update_multi_get_result */ /** * after get the response from server for single get, the * function update the state of the task and do data verify if * necessary. * * @param c, pointer of the concurrency * @param item, pointer of task item which includes the object * information */ static void ms_update_single_get_result(ms_conn_t *c, ms_task_item_t *item) { char *orignval= NULL; char *orignkey= NULL; if ((c == NULL) || (item == NULL)) { return; } assert(c != NULL); assert(item != NULL); orignval= &ms_setting.char_block[item->value_offset]; orignkey= &ms_setting.char_block[item->key_suffix_offset]; /* update get miss counter */ if ((c->precmd.cmd == CMD_GET) && c->curr_task.get_miss) { atomic_add_size(&ms_stats.get_misses, 1); } /* get nothing from server for this task item */ if ((c->precmd.cmd == CMD_GET) && c->curr_task.verify && ! 
c->curr_task.finish_verify) { /* verify expire time if necessary */ if (item->exp_time > 0) { struct timeval curr_time; gettimeofday(&curr_time, NULL); /* object doesn't expire but can't get it now */ if (curr_time.tv_sec - item->client_time < item->exp_time - EXPIRE_TIME_ERROR) { atomic_add_size(&ms_stats.unexp_unget, 1); if (ms_setting.verbose) { char set_time[64]; char cur_time[64]; strftime(set_time, 64, "%Y-%m-%d %H:%M:%S", localtime(&item->client_time)); strftime(cur_time, 64, "%Y-%m-%d %H:%M:%S", localtime(&curr_time.tv_sec)); fprintf(stderr, "\n\t<%d expire time verification failed, object " "doesn't expire but can't get it now\n" "\tkey len: %d\n" "\tkey: %" PRIx64 " %.*s\n" "\tset time: %s current time: %s " "diff time: %d expire time: %d\n" "\texpected data len: %d\n" "\texpected data: %.*s\n" "\treceived data: \n", c->sfd, item->key_size, item->key_prefix, item->key_size - (int)KEY_PREFIX_SIZE, orignkey, set_time, cur_time, (int)(curr_time.tv_sec - item->client_time), item->exp_time, item->value_size, item->value_size, orignval); fflush(stderr); } } } else { atomic_add_size(&ms_stats.vef_miss, 1); if (ms_setting.verbose) { fprintf(stderr, "\n<%d data verification failed\n" "\tkey len: %d\n" "\tkey: %" PRIx64 " %.*s\n" "\texpected data len: %d\n" "\texpected data: %.*s\n" "\treceived data: \n", c->sfd, item->key_size, item->key_prefix, item->key_size - (int)KEY_PREFIX_SIZE, orignkey, item->value_size, item->value_size, orignval); fflush(stderr); } } } } /* ms_update_single_get_result */ /** * after get the response from server for set the function * update the state of the task and do data verify if necessary. * * @param c, pointer of the concurrency * @param item, pointer of task item which includes the object * information */ static void ms_update_set_result(ms_conn_t *c, ms_task_item_t *item) { if ((c == NULL) || (item == NULL)) { return; } assert(c != NULL); assert(item != NULL); if (c->precmd.cmd == CMD_SET) { switch (c->precmd.retstat) { case MCD_STORED: if (item->value_offset == INVALID_OFFSET) { /* first set with the same offset of key suffix */ item->value_offset= item->key_suffix_offset; } else { /* not first set, just increase the value offset */ item->value_offset+= 1; } /* set successes, update counter */ c->set_cursor++; c->curr_task.set_opt++; c->curr_task.cycle_undo_set--; break; case MCD_SERVER_ERROR: default: break; } /* switch */ } } /* ms_update_set_result */ /** * update the response time result * * @param c, pointer of the concurrency */ static void ms_update_stat_result(ms_conn_t *c) { bool get_miss= false; if (c == NULL) { return; } assert(c != NULL); gettimeofday(&c->end_time, NULL); uint64_t time_diff= (uint64_t)ms_time_diff(&c->start_time, &c->end_time); pthread_mutex_lock(&ms_statistic.stat_mutex); switch (c->precmd.cmd) { case CMD_SET: ms_record_event(&ms_statistic.set_stat, time_diff, false); break; case CMD_GET: if (c->curr_task.get_miss) { get_miss= true; } ms_record_event(&ms_statistic.get_stat, time_diff, get_miss); break; default: break; } /* switch */ ms_record_event(&ms_statistic.total_stat, time_diff, get_miss); pthread_mutex_unlock(&ms_statistic.stat_mutex); } /* ms_update_stat_result */ /** * after get response from server for the current operation, and * before doing the next operation, update the state of the * current operation. 
* * @param c, pointer of the concurrency */ static void ms_update_task_result(ms_conn_t *c) { ms_task_item_t *item; if (c == NULL) { return; } assert(c != NULL); item= ms_get_cur_opt_item(c); if (item == NULL) { return; } assert(item != NULL); ms_update_set_result(c, item); if ((ms_setting.stat_freq > 0) && ((c->precmd.cmd == CMD_SET) || (c->precmd.cmd == CMD_GET))) { ms_update_stat_result(c); } /* update multi-get task item */ if (((ms_setting.mult_key_num > 1) && (c->mlget_task.mlget_num >= ms_setting.mult_key_num)) || ((c->remain_exec_num == 0) && (c->mlget_task.mlget_num > 0))) { ms_update_multi_get_result(c); } else { ms_update_single_get_result(c, item); } } /* ms_update_task_result */ /** * run get and set operation * * @param c, pointer of the concurrency * * @return int, if success, return EXIT_SUCCESS, else return -1 */ static int ms_run_getset_task(ms_conn_t *c) { /** * extra one loop to get the last command return state. get the * last command return state. */ if ((c->remain_exec_num >= 0) && (c->remain_exec_num != c->exec_num)) { ms_update_task_result(c); } /* multi-get */ if (ms_setting.mult_key_num > 1) { /* operate next task item */ ms_multi_getset_task_sch(c); } else { /* operate next task item */ ms_single_getset_task_sch(c); } /* no task to do, exit */ if ((c->remain_exec_num == -1) || ms_global.time_out) { return -1; } return EXIT_SUCCESS; } /* ms_run_getset_task */ /** * the state machine call the function to execute task. * * @param c, pointer of the concurrency * * @return int, if success, return EXIT_SUCCESS, else return -1 */ int ms_exec_task(struct conn *c) { if (! ms_global.finish_warmup) { ms_warmup_server(c); } else { if (ms_run_getset_task(c) != 0) { return -1; } } return EXIT_SUCCESS; } /* ms_exec_task */
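/*
 * Illustrative sketch -- not part of the original memaslap source.
 * It isolates the proportional get/set selection rule that
 * ms_select_opt() applies: issue a SET whenever
 *     get_opt * set_prop >= set_opt * get_prop,
 * otherwise issue a GET. The demo_* names and the 90%/10% split are
 * hypothetical; only the comparison itself mirrors the code above.
 */
#include <stdio.h>

static const double demo_get_prop= 0.9;   /* assumed 90% gets */
static const double demo_set_prop= 0.1;   /* assumed 10% sets */

/* returns nonzero when the next operation should be a SET */
static int demo_select_set(long get_opt, long set_opt)
{
  return (double)get_opt * demo_set_prop >= (double)set_opt * demo_get_prop;
}

int main(void)
{
  long get_opt= 0;
  long set_opt= 0;

  for (int i= 0; i < 1000; i++)
  {
    if (demo_select_set(get_opt, set_opt))
    {
      set_opt++;      /* pretend the SET succeeded */
    }
    else
    {
      get_opt++;
    }
  }

  /* with a 90/10 target this prints roughly gets=900 sets=100 */
  printf("gets=%ld sets=%ld\n", get_opt, set_opt);
  return 0;
} /* demo_select_set sketch */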
365216.c
/* * Copyright 2003, 2004, 2005, 2006 PathScale, Inc. All Rights Reserved. */ /* Copyright (C) 2000, 2001 Silicon Graphics, Inc. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of version 2.1 of the GNU Lesser General Public License as published by the Free Software Foundation. This program is distributed in the hope that it would be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. Further, this software is distributed without any warranty that it is free of the rightful claim of any third person regarding infringement or the like. Any license provided herein, whether implied or otherwise, applies only to this software file. Patent licenses, if any, provided herein do not apply to combinations of this program with other software, or any other product whatsoever. You should have received a copy of the GNU Lesser General Public License along with this program; if not, write the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pky, Mountain View, CA 94043, or: http://www.sgi.com For further information regarding this notice, see: http://oss.sgi.com/projects/GenInfo/NoticeExplan */ /* ==================================================================== * ==================================================================== * * Module: atan2.c * $Revision: 1.5 $ * $Date: 04/12/21 14:58:21-08:00 $ * $Author: [email protected] $ * $Source: /home/bos/bk/kpro64-pending/libm/mips/SCCS/s.atan2.c $ * * Revision history: * 09-Jun-93 - Original Version * * Description: source code for atan2 function * * ==================================================================== * ==================================================================== */ static char *rcs_id = "$Source: /home/bos/bk/kpro64-pending/libm/mips/SCCS/s.atan2.c $ $Revision: 1.5 $"; #ifdef _CALL_MATHERR #include <stdio.h> #include <math.h> #include <errno.h> #endif #include "libm.h" #if defined(mips) && !defined(__GNUC__) extern double atan2(double, double); #pragma weak atan2 = __atan2 #endif #if defined(BUILD_OS_DARWIN) /* Mach-O doesn't support aliases */ extern double __atan2(double, double); #pragma weak atan2 double atan2(double y, double x) { return __atan2( y, x ); } #elif defined(__GNUC__) extern double __atan2(double, double); double atan2() __attribute__ ((weak, alias ("__atan2"))); #endif extern const du _atan2res0[2][2]; extern const du _atan2res1[2][2]; extern const du _atan2res2[2][2]; extern const du _atan2res4[4][4]; static const du Qnan = {D(QNANHI, QNANLO)}; static const du twop60 = {D(0x43b00000, 0x00000000)}; static const du twopm28 = {D(0x3e300000, 0x00000000)}; static const du limit1 = {D(0x3fc445f0, 0xfbb1cf92)}; /* tan(pi/20) */ static const du limit2 = {D(0x3fe04e08, 0x50c1dd5c)}; /* tan(3*pi/20) */ static const du rlimit4 = {D(0x3fe04e08, 0x50c1dd5c)}; /* 1/tan(7*pi/20) */ static const du rlimit5 = {D(0x3fc445f0, 0xfbb1cf92)}; /* 1/tan(9*pi/20) */ static const du piby2 = {D(0x3ff921fb, 0x54442d18)}; static const du m_piby2 = {D(0xbff921fb, 0x54442d18)}; /* the angles below have been chosen very carefully to minimize the ulps error of the entries in tantbl */ static const du angletbl[] = { {D(0x00000000, 0x00000000)}, {D(0x80000000, 0x00000000)}, {D(0x400921fb, 0x54442d18)}, {D(0xc00921fb, 0x54442d18)}, {D(0x3fd41b2f, 0x769ddfb2)}, /* ~pi/10 */ {D(0xbfd41b2f, 0x769ddfb2)}, /* 
~-pi/10 */ {D(0x40069e95, 0x65707122)}, /* ~pi - pi/10 */ {D(0xc0069e95, 0x65707122)}, /* ~-(pi - pi/10) */ {D(0x3fe41b2f, 0x769cf2b1)}, /* ~2*pi/10 */ {D(0xbfe41b2f, 0x769cf2b1)}, {D(0x40041b2f, 0x769cf06c)}, {D(0xc0041b2f, 0x769cf06c)}, {D(0x3fee28c7, 0x31ec183d)}, /* ~3*pi/10 */ {D(0xbfee28c7, 0x31ec183d)}, {D(0x400197c9, 0x87c92709)}, {D(0xc00197c9, 0x87c92709)}, {D(0x3ff41b2f, 0x769cfe1f)}, /* ~4*pi/10 */ {D(0xbff41b2f, 0x769cfe1f)}, {D(0x3ffe28c7, 0x31eb5c12)}, {D(0xbffe28c7, 0x31eb5c12)}, {D(0x3ff921fb, 0x54442d18)}, /* pi/2 */ {D(0xbff921fb, 0x54442d18)}, {D(0x3ff921fb, 0x54442d18)}, {D(0xbff921fb, 0x54442d18)}, }; /* tangents of the angles in angletbl */ static const du tantbl[] = { {D(0x00000000, 0x00000000)}, {D(0x80000000, 0x00000000)}, {D(0x80000000, 0x00000000)}, {D(0x00000000, 0x00000000)}, {D(0x3fd4cb7b, 0xfb4a69b7)}, {D(0xbfd4cb7b, 0xfb4a69b7)}, {D(0xbfd4cb7b, 0xfb4a69b7)}, {D(0x3fd4cb7b, 0xfb4a69b7)}, {D(0x3fe73fd6, 0x1d9df809)}, {D(0xbfe73fd6, 0x1d9df809)}, {D(0xbfe73fd6, 0x1d9df809)}, {D(0x3fe73fd6, 0x1d9df809)}, {D(0x3ff605a9, 0x0c74a8a0)}, {D(0xbff605a9, 0x0c74a8a0)}, {D(0xbff605a9, 0x0c74a8a0)}, {D(0x3ff605a9, 0x0c74a8a0)}, {D(0x40089f18, 0x8bdd1d09)}, {D(0xc0089f18, 0x8bdd1d09)}, {D(0xc0089f18, 0x8bdd1d09)}, {D(0x40089f18, 0x8bdd1d09)}, }; /* coefficients for a 15th degree polynomial approximation of atan on the interval +/- tan(pi/20) */ static const du P[] = { {D(0x3ff00000, 0x00000000)}, {D(0xbfd55555, 0x55555547)}, {D(0x3fc99999, 0x999924e3)}, {D(0xbfc24924, 0x91a937fe)}, {D(0x3fbc71c6, 0x4c76a27a)}, {D(0xbfb74589, 0x00fd881c)}, {D(0x3fb3a350, 0x167cd5be)}, {D(0xbfaf5682, 0x2746dfc3)}, }; /* ==================================================================== * * FunctionName atan2 * * Description computes arctangent of arg1/arg2 * * ==================================================================== */ double __atan2( y, x ) double y, x; { #ifdef _32BIT_MACHINE int ix, iy; int xptx, xpty, xpts; int signx, signy; int l; #else long long ix, iy; long long xptx, xpty, xpts; long long signx, signy; long long l; #endif int i, j, k; double absx, absy; double tk, zk; double poly; double u, v, s, ss; double result; #ifdef _CALL_MATHERR struct exception exstruct; #endif /* extract exponents of y and x for some quick screening */ #ifdef _32BIT_MACHINE DBLHI2INT(y, iy); /* copy MSW of y to iy */ DBLHI2INT(x, ix); /* copy MSW of x to ix */ #else DBL2LL(y, iy); /* copy y to iy */ DBL2LL(x, ix); /* copy x to ix */ #endif xpty = (iy >> DMANTWIDTH); xpty &= 0x7ff; xptx = (ix >> DMANTWIDTH); xptx &= 0x7ff; signy = (iy >> (DMANTWIDTH + DEXPWIDTH)); signy = (signy & 1); signx = (ix >> (DMANTWIDTH + DEXPWIDTH)); signx = (signx & 1); /* filter out Nans */ if ( (xpty == 0x7ff) || (xptx == 0x7ff) ) { if ( (y != y) || (x != x) ) { /* y or x is a NaN; return a quiet NaN */ #ifdef _CALL_MATHERR exstruct.type = DOMAIN; exstruct.name = "atan2"; exstruct.arg1 = y; exstruct.arg2 = x; exstruct.retval = Qnan.d; if ( matherr( &exstruct ) == 0 ) { fprintf(stderr, "domain error in atan2\n"); SETERRNO(EDOM); } return ( exstruct.retval ); #else NAN_SETERRNO(EDOM); return ( Qnan.d ); #endif } } /* filter out zero arguments */ if ( x == 0.0 ) { if ( y == 0.0 ) { #ifdef _CALL_MATHERR exstruct.type = DOMAIN; exstruct.name = "atan2"; exstruct.arg1 = y; exstruct.arg2 = x; exstruct.retval = _atan2res0[signx][signy].d; if ( matherr( &exstruct ) == 0 ) { fprintf(stderr, "domain error in atan2\n"); SETERRNO(EDOM); } return ( exstruct.retval ); #else SETERRNO(EDOM); return ( _atan2res0[signx][signy].d ); 
#endif } return ( _atan2res1[signx][signy].d ); } else if ( (xpty == 0) && (y == 0.0) ) { result = _atan2res2[signx][signy].d; return ( result ); } /* a crude check to avoid underflow of x/y */ if ( xpty > xptx + 54 ) return ( (signy == 0) ? piby2.d : m_piby2.d ); /* a crude check for underflow of y/x */ if ( xpty < xptx - 1075 ) return ( _atan2res2[signx][signy].d ); /* Get rid of denorms by scaling; the preceding tests guarantee that no infinities will be generated by the scaling. */ if ( (xpty == 0) || (xptx == 0) ) { y = twop60.d*y; x = twop60.d*x; } /* filter out infinities */ i = (xptx == 0x7ff); j = (xpty == 0x7ff); /* if either x or y is very large, scale them both down to avoid overflow in the computation of s below */ if ( (xpty >= 0x7fc) || (xptx >= 0x7fc) ) { y = 0.25*y; x = 0.25*x; } if ( (i + j) == 0 ) { absx = fabs(x); absy = fabs(y); /* Note that the products in the next several lines will neither underflow nor overflow due to the earlier screening and scaling of arguments. */ if ( absy < absx ) { j = (absy >= absx*limit1.d); k = (absy >= absx*limit2.d); } else { j = (absx <= absy*rlimit4.d); k = (absx <= absy*rlimit5.d); j = j + 3; } k = j + k; k = 4*k + 2*signx + signy; /* compute reduced arg between +/- tan(pi/20) */ if ( k < 4 ) { if ( xpty < (xptx - 1074) ) { /* possible underflow of y/x */ v = x; #ifdef _32BIT_MACHINE DBLHI2INT(v, l); l &= DEXPMASK; l |= (0x3ff << DMANTWIDTH); #else DBL2LL(v, l); l &= DEXPMASK; l |= (0x3ffll << DMANTWIDTH); #endif s = y/v; #ifdef _32BIT_MACHINE DBLHI2INT(s, xpts); /* copy MSW of s to xpts */ #else DBL2LL(s, xpts); /* copy s to xpts */ #endif xpts >>= DMANTWIDTH; xpts &= 0x7ff; if ( (xpts + xptx - DEXPBIAS) < -1075 ) { /* y/x underflows; set s to +/- 0 */ s = 0.0*s; } else { s = y/x; } } else { s = y/x; } } else if ( k > 19 ) { s = -x/y; } else { tk = tantbl[k].d; u = y - tk*x; v = tk*y + x; s = u/v; } zk = angletbl[k].d; /* s = (y - tan(zk)*x)/(tan(zk)*y + x) Note that atan(y/x) is zk + atan(s); this is a standard trigonometric identity: Let tk = tan(zk). Using the formula for the tangent of the difference of two angles, we have tan(atan(y/x) - zk) = (y/x - tk)/(1 + y/x*tk) = s, so atan(y/x) - zk = atan(s). */ if ( fabs(s) < twopm28.d ) return ( s + zk ); ss = s*s; poly = (((((P[7].d*ss + P[6].d)*ss + P[5].d)*ss + P[4].d)*ss + P[3].d)*ss + P[2].d)*ss + P[1].d; result = poly*(ss*s) + s + zk; return ( result ); } else { i = i + i; i = i + signx; j = j + j; j = j + signy; return ( _atan2res4[i][j].d ); } } #if defined(BUILD_OS_DARWIN) /* Mach-O doesn't support aliases */ #pragma weak atan2l long double atan2l( long double y, long double x ) { return ( (long double)__atan2((double)y, (double)x) ); } #elif defined(__GNUC__) extern long double __atan2l(long double, long double); long double atan2l() __attribute__ ((weak, alias ("__atan2l"))); #endif long double __atan2l( y, x ) long double y, x; { return ( (long double)__atan2((double)y, (double)x) ); }
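/*
 * Illustrative sketch -- not part of the original libm source.
 * It checks numerically the reduction used above: with tk = tan(zk) and
 *     s = (y - tk*x)/(tk*y + x),
 * the identity atan2(y, x) == zk + atan(s) holds whenever the true angle
 * lies in the sector handled by zk. The sample angle 3*pi/10 and the test
 * points are hypothetical; only the standard <math.h> is used.
 */
#include <stdio.h>
#include <math.h>

int main(void)
{
	double pi = 3.14159265358979323846;
	double zk = 3.0*pi/10.0;            /* one of the table angles */
	double tk = tan(zk);
	double pts[3][2] = { {1.3, 1.0}, {1.4, 1.0}, {1.2, 0.9} };
	int i;

	for ( i = 0; i < 3; i++ ) {
		double y = pts[i][0];
		double x = pts[i][1];
		double s = (y - tk*x)/(tk*y + x);   /* reduced argument */
		printf("atan2 = %.17g   zk + atan(s) = %.17g\n",
		       atan2(y, x), zk + atan(s));
	}
	return 0;
}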
563568.c
/* * IPMI ACPI firmware handling * * Copyright (c) 2015,2016 Corey Minyard, MontaVista Software, LLC * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" #include "hw/acpi/ipmi.h" void build_acpi_ipmi_devices(Aml *table, BusState *bus) { }
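/*
 * Note added for clarity (not part of the original file): this appears to
 * be the stub flavour of build_acpi_ipmi_devices(), left intentionally
 * empty so ACPI table-building code can call it unconditionally in
 * configurations where no IPMI/ACPI support is compiled in.
 */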
859952.c
/*************************************************************************** * nearname.c - search objects by its name with given radius, ^D to exit * * History * * 24-Aug-2004: include <stlib.h> to define atof() instead of local declaration * 23-Jul-2001: * -Added double atof(char []); to insure main() knows that atof() * returns a double. Joe Mazzarella * -Changed gets() to fgets() for compiler warnings about security. J.M. * * 1996: * Original. Xiuqin Wu * ***************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include "ned_client.h" extern int ned_errno; void main(argc, argv) int argc; char *argv[]; { int st; int no_obj; int i, j; char objname[101]; char sradius[101]; double radius; CrossID *cp, *tmpcp; ObjInfo *op, *tmpop; MoreData *mdatap; st = ned_connect(); if (st < 0) { fprintf(stderr, "connection failed \n"); exit(1); } fprintf(stdout, "input the objname:"); fgets(objname, 101, stdin); fprintf(stdout, "input the radius(arcmin):"); while(fgets(sradius, 101, stdin) != (char*)NULL) { radius = atof(sradius); fprintf(stdout, "DEBUG: You input sradius=%s, which atof translates into radius=%f", sradius, radius); st = ned_obj_nearname(objname, radius, &no_obj, &op, &cp); if (st < 0) { /* for simple error message */ fprintf(stderr, "%s\n", ned_get_errmsg()); switch (ned_errno) { case NE_NAME: fprintf(stderr, "name %s can't be recognized by NED name interpreter\n", objname); break; case NE_RADIUS: fprintf(stderr, "radius is out of range\n"); break; case NE_AMBN: fprintf(stderr, "%d ambiguous name: \n", no_obj); for (i=0, tmpcp = cp; i<no_obj; i++, tmpcp++) fprintf(stderr, "%s \n", tmpcp->objname); break; case NE_NOBJ: fprintf(stderr, "object %s is not in NED database\n", cp->objname); break; case NE_NOSPACE: fprintf(stderr, "memory allocation error happened \n"); break; case NE_QUERY: fprintf(stderr, "Can't send query to the NED server\n"); break; case NE_BROKENC: fprintf(stderr, "The connection to server is broken\n"); break; } } /* -1 return code */ else { fprintf(stdout, "%d object(s) found in NED: \n", no_obj); for (i=0, tmpop = op; i<no_obj; i++, tmpop++) { fprintf(stdout, "\n\n%d crossid for object No. %d: \n\n", tmpop->no_crossid, i+1); for (j=0, tmpcp = tmpop->cp; j<tmpop->no_crossid; j++, tmpcp++) fprintf(stdout, "%s, %s \n", tmpcp->objname, tmpcp->objtype); fprintf(stdout, "distance : %f\n", tmpop->dist); fprintf(stdout, "no_ref : %d\n", tmpop->no_ref); fprintf(stdout, "no_note : %d\n", tmpop->no_note); fprintf(stdout, "no_photom: %d\n", tmpop->no_photom); fprintf(stdout, "obj type : %s\n", tmpop->objtype); fprintf(stdout, "ra : %f\n", tmpop->ra); fprintf(stdout, "dec : %f\n", tmpop->dec); fprintf(stdout, "unc_maj : %f\n", tmpop->unc_maj); fprintf(stdout, "unc_min : %f\n", tmpop->unc_min); fprintf(stdout, "unc_ang : %f\n", tmpop->unc_ang); fprintf(stdout, "refcode : %s\n", tmpop->refcode); mdatap = tmpop->mdp; while (mdatap) { fprintf(stdout, "%-10s: %s\n", mdatap->data_typec, mdatap->data); mdatap = mdatap->next; } } } if (cp) ned_free_cp(cp); if (op) ned_free_objp(op, no_obj); fprintf(stdout, "input objname:"); fgets(objname, 101, stdin); fprintf(stdout, "input the radius(arcmin):"); } ned_disconnect(); }
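/*
 * Illustrative sketch -- not part of the original NED example.
 * A minimal, error-checked call sequence distilled from nearname.c above,
 * using only the ned_client.h entry points that the example itself calls
 * (ned_connect, ned_obj_nearname, ned_free_cp, ned_free_objp,
 * ned_get_errmsg, ned_disconnect). The object name "m31" and the
 * 2.0 arcmin radius are made-up test values.
 */
#include <stdio.h>
#include "ned_client.h"

int nearname_demo(void)
{
   int      no_obj = 0;
   CrossID *cp = NULL;
   ObjInfo *op = NULL;

   if (ned_connect() < 0) {
      fprintf(stderr, "connection failed\n");
      return -1;
   }

   if (ned_obj_nearname("m31", 2.0, &no_obj, &op, &cp) < 0) {
      fprintf(stderr, "%s\n", ned_get_errmsg());
   }
   else {
      fprintf(stdout, "%d object(s) found in NED\n", no_obj);
   }

   if (cp)
      ned_free_cp(cp);
   if (op)
      ned_free_objp(op, no_obj);
   ned_disconnect();
   return 0;
}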
14518.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * net/sched/cls_flower.c Flower classifier * * Copyright (c) 2015 Jiri Pirko <[email protected]> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/rhashtable.h> #include <linux/workqueue.h> #include <linux/refcount.h> #include <linux/if_ether.h> #include <linux/in6.h> #include <linux/ip.h> #include <linux/mpls.h> #include <net/sch_generic.h> #include <net/pkt_cls.h> #include <net/ip.h> #include <net/flow_dissector.h> #include <net/geneve.h> #include <net/vxlan.h> #include <net/erspan.h> #include <net/dst.h> #include <net/dst_metadata.h> #include <uapi/linux/netfilter/nf_conntrack_common.h> #define TCA_FLOWER_KEY_CT_FLAGS_MAX \ ((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1) #define TCA_FLOWER_KEY_CT_FLAGS_MASK \ (TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) struct fl_flow_key { struct flow_dissector_key_meta meta; struct flow_dissector_key_control control; struct flow_dissector_key_control enc_control; struct flow_dissector_key_basic basic; struct flow_dissector_key_eth_addrs eth; struct flow_dissector_key_vlan vlan; struct flow_dissector_key_vlan cvlan; union { struct flow_dissector_key_ipv4_addrs ipv4; struct flow_dissector_key_ipv6_addrs ipv6; }; struct flow_dissector_key_ports tp; struct flow_dissector_key_icmp icmp; struct flow_dissector_key_arp arp; struct flow_dissector_key_keyid enc_key_id; union { struct flow_dissector_key_ipv4_addrs enc_ipv4; struct flow_dissector_key_ipv6_addrs enc_ipv6; }; struct flow_dissector_key_ports enc_tp; struct flow_dissector_key_mpls mpls; struct flow_dissector_key_tcp tcp; struct flow_dissector_key_ip ip; struct flow_dissector_key_ip enc_ip; struct flow_dissector_key_enc_opts enc_opts; union { struct flow_dissector_key_ports tp; struct { struct flow_dissector_key_ports tp_min; struct flow_dissector_key_ports tp_max; }; } tp_range; struct flow_dissector_key_ct ct; struct flow_dissector_key_hash hash; } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */ struct fl_flow_mask_range { unsigned short int start; unsigned short int end; }; struct fl_flow_mask { struct fl_flow_key key; struct fl_flow_mask_range range; u32 flags; struct rhash_head ht_node; struct rhashtable ht; struct rhashtable_params filter_ht_params; struct flow_dissector dissector; struct list_head filters; struct rcu_work rwork; struct list_head list; refcount_t refcnt; }; struct fl_flow_tmplt { struct fl_flow_key dummy_key; struct fl_flow_key mask; struct flow_dissector dissector; struct tcf_chain *chain; }; struct cls_fl_head { struct rhashtable ht; spinlock_t masks_lock; /* Protect masks list */ struct list_head masks; struct list_head hw_filters; struct rcu_work rwork; struct idr handle_idr; }; struct cls_fl_filter { struct fl_flow_mask *mask; struct rhash_head ht_node; struct fl_flow_key mkey; struct tcf_exts exts; struct tcf_result res; struct fl_flow_key key; struct list_head list; struct list_head hw_list; u32 handle; u32 flags; u32 in_hw_count; struct rcu_work rwork; struct net_device *hw_dev; /* Flower classifier is unlocked, which means that its reference counter * can be changed concurrently without any kind of external * synchronization. Use atomic reference counter to be concurrency-safe. 
*/ refcount_t refcnt; bool deleted; }; static const struct rhashtable_params mask_ht_params = { .key_offset = offsetof(struct fl_flow_mask, key), .key_len = sizeof(struct fl_flow_key), .head_offset = offsetof(struct fl_flow_mask, ht_node), .automatic_shrinking = true, }; static unsigned short int fl_mask_range(const struct fl_flow_mask *mask) { return mask->range.end - mask->range.start; } static void fl_mask_update_range(struct fl_flow_mask *mask) { const u8 *bytes = (const u8 *) &mask->key; size_t size = sizeof(mask->key); size_t i, first = 0, last; for (i = 0; i < size; i++) { if (bytes[i]) { first = i; break; } } last = first; for (i = size - 1; i != first; i--) { if (bytes[i]) { last = i; break; } } mask->range.start = rounddown(first, sizeof(long)); mask->range.end = roundup(last + 1, sizeof(long)); } static void *fl_key_get_start(struct fl_flow_key *key, const struct fl_flow_mask *mask) { return (u8 *) key + mask->range.start; } static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key, struct fl_flow_mask *mask) { const long *lkey = fl_key_get_start(key, mask); const long *lmask = fl_key_get_start(&mask->key, mask); long *lmkey = fl_key_get_start(mkey, mask); int i; for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) *lmkey++ = *lkey++ & *lmask++; } static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt, struct fl_flow_mask *mask) { const long *lmask = fl_key_get_start(&mask->key, mask); const long *ltmplt; int i; if (!tmplt) return true; ltmplt = fl_key_get_start(&tmplt->mask, mask); for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) { if (~*ltmplt++ & *lmask++) return false; } return true; } static void fl_clear_masked_range(struct fl_flow_key *key, struct fl_flow_mask *mask) { memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask)); } static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter, struct fl_flow_key *key, struct fl_flow_key *mkey) { u16 min_mask, max_mask, min_val, max_val; min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst); max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst); min_val = ntohs(filter->key.tp_range.tp_min.dst); max_val = ntohs(filter->key.tp_range.tp_max.dst); if (min_mask && max_mask) { if (ntohs(key->tp_range.tp.dst) < min_val || ntohs(key->tp_range.tp.dst) > max_val) return false; /* skb does not have min and max values */ mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst; mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst; } return true; } static bool fl_range_port_src_cmp(struct cls_fl_filter *filter, struct fl_flow_key *key, struct fl_flow_key *mkey) { u16 min_mask, max_mask, min_val, max_val; min_mask = ntohs(filter->mask->key.tp_range.tp_min.src); max_mask = ntohs(filter->mask->key.tp_range.tp_max.src); min_val = ntohs(filter->key.tp_range.tp_min.src); max_val = ntohs(filter->key.tp_range.tp_max.src); if (min_mask && max_mask) { if (ntohs(key->tp_range.tp.src) < min_val || ntohs(key->tp_range.tp.src) > max_val) return false; /* skb does not have min and max values */ mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src; mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src; } return true; } static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask, struct fl_flow_key *mkey) { return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask), mask->filter_ht_params); } static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask, struct fl_flow_key *mkey, struct fl_flow_key *key) { struct cls_fl_filter *filter, *f; 
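	/* Filters with port-range keys cannot be found through the masked-key
	 * hash lookup alone: walk every filter attached to this mask, check
	 * the packet's ports against each filter's [min, max] window (the
	 * cmp helpers also copy the filter's min/max ports into mkey), and
	 * only then attempt the exact hash lookup.
	 */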
list_for_each_entry_rcu(filter, &mask->filters, list) { if (!fl_range_port_dst_cmp(filter, key, mkey)) continue; if (!fl_range_port_src_cmp(filter, key, mkey)) continue; f = __fl_lookup(mask, mkey); if (f) return f; } return NULL; } static noinline_for_stack struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key) { struct fl_flow_key mkey; fl_set_masked_key(&mkey, key, mask); if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE)) return fl_lookup_range(mask, &mkey, key); return __fl_lookup(mask, &mkey); } static u16 fl_ct_info_to_flower_map[] = { [IP_CT_ESTABLISHED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED, [IP_CT_RELATED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | TCA_FLOWER_KEY_CT_FLAGS_RELATED, [IP_CT_ESTABLISHED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED | TCA_FLOWER_KEY_CT_FLAGS_REPLY, [IP_CT_RELATED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | TCA_FLOWER_KEY_CT_FLAGS_RELATED | TCA_FLOWER_KEY_CT_FLAGS_REPLY, [IP_CT_NEW] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | TCA_FLOWER_KEY_CT_FLAGS_NEW, }; static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { struct cls_fl_head *head = rcu_dereference_bh(tp->root); bool post_ct = qdisc_skb_cb(skb)->post_ct; struct fl_flow_key skb_key; struct fl_flow_mask *mask; struct cls_fl_filter *f; list_for_each_entry_rcu(mask, &head->masks, list) { flow_dissector_init_keys(&skb_key.control, &skb_key.basic); fl_clear_masked_range(&skb_key, mask); skb_flow_dissect_meta(skb, &mask->dissector, &skb_key); /* skb_flow_dissect() does not set n_proto in case an unknown * protocol, so do it rather here. */ skb_key.basic.n_proto = skb_protocol(skb, false); skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key); skb_flow_dissect_ct(skb, &mask->dissector, &skb_key, fl_ct_info_to_flower_map, ARRAY_SIZE(fl_ct_info_to_flower_map), post_ct); skb_flow_dissect_hash(skb, &mask->dissector, &skb_key); skb_flow_dissect(skb, &mask->dissector, &skb_key, 0); f = fl_mask_lookup(mask, &skb_key); if (f && !tc_skip_sw(f->flags)) { *res = f->res; return tcf_exts_exec(skb, &f->exts, res); } } return -1; } static int fl_init(struct tcf_proto *tp) { struct cls_fl_head *head; head = kzalloc(sizeof(*head), GFP_KERNEL); if (!head) return -ENOBUFS; spin_lock_init(&head->masks_lock); INIT_LIST_HEAD_RCU(&head->masks); INIT_LIST_HEAD(&head->hw_filters); rcu_assign_pointer(tp->root, head); idr_init(&head->handle_idr); return rhashtable_init(&head->ht, &mask_ht_params); } static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done) { /* temporary masks don't have their filters list and ht initialized */ if (mask_init_done) { WARN_ON(!list_empty(&mask->filters)); rhashtable_destroy(&mask->ht); } kfree(mask); } static void fl_mask_free_work(struct work_struct *work) { struct fl_flow_mask *mask = container_of(to_rcu_work(work), struct fl_flow_mask, rwork); fl_mask_free(mask, true); } static void fl_uninit_mask_free_work(struct work_struct *work) { struct fl_flow_mask *mask = container_of(to_rcu_work(work), struct fl_flow_mask, rwork); fl_mask_free(mask, false); } static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask) { if (!refcount_dec_and_test(&mask->refcnt)) return false; rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params); spin_lock(&head->masks_lock); list_del_rcu(&mask->list); spin_unlock(&head->masks_lock); tcf_queue_work(&mask->rwork, fl_mask_free_work); return true; } static struct cls_fl_head *fl_head_dereference(struct 
tcf_proto *tp) { /* Flower classifier only changes root pointer during init and destroy. * Users must obtain reference to tcf_proto instance before calling its * API, so tp->root pointer is protected from concurrent call to * fl_destroy() by reference counting. */ return rcu_dereference_raw(tp->root); } static void __fl_destroy_filter(struct cls_fl_filter *f) { tcf_exts_destroy(&f->exts); tcf_exts_put_net(&f->exts); kfree(f); } static void fl_destroy_filter_work(struct work_struct *work) { struct cls_fl_filter *f = container_of(to_rcu_work(work), struct cls_fl_filter, rwork); __fl_destroy_filter(f); } static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool rtnl_held, struct netlink_ext_ack *extack) { struct tcf_block *block = tp->chain->block; struct flow_cls_offload cls_flower = {}; tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack); cls_flower.command = FLOW_CLS_DESTROY; cls_flower.cookie = (unsigned long) f; tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false, &f->flags, &f->in_hw_count, rtnl_held); } static int fl_hw_replace_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool rtnl_held, struct netlink_ext_ack *extack) { struct tcf_block *block = tp->chain->block; struct flow_cls_offload cls_flower = {}; bool skip_sw = tc_skip_sw(f->flags); int err = 0; cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts)); if (!cls_flower.rule) return -ENOMEM; tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack); cls_flower.command = FLOW_CLS_REPLACE; cls_flower.cookie = (unsigned long) f; cls_flower.rule->match.dissector = &f->mask->dissector; cls_flower.rule->match.mask = &f->mask->key; cls_flower.rule->match.key = &f->mkey; cls_flower.classid = f->res.classid; err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts); if (err) { kfree(cls_flower.rule); if (skip_sw) { NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action"); return err; } return 0; } err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw, &f->flags, &f->in_hw_count, rtnl_held); tc_cleanup_flow_action(&cls_flower.rule->action); kfree(cls_flower.rule); if (err) { fl_hw_destroy_filter(tp, f, rtnl_held, NULL); return err; } if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) return -EINVAL; return 0; } static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f, bool rtnl_held) { struct tcf_block *block = tp->chain->block; struct flow_cls_offload cls_flower = {}; tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL); cls_flower.command = FLOW_CLS_STATS; cls_flower.cookie = (unsigned long) f; cls_flower.classid = f->res.classid; tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, rtnl_held); tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes, cls_flower.stats.pkts, cls_flower.stats.drops, cls_flower.stats.lastused, cls_flower.stats.used_hw_stats, cls_flower.stats.used_hw_stats_valid); } static void __fl_put(struct cls_fl_filter *f) { if (!refcount_dec_and_test(&f->refcnt)) return; if (tcf_exts_get_net(&f->exts)) tcf_queue_work(&f->rwork, fl_destroy_filter_work); else __fl_destroy_filter(f); } static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle) { struct cls_fl_filter *f; rcu_read_lock(); f = idr_find(&head->handle_idr, handle); if (f && !refcount_inc_not_zero(&f->refcnt)) f = NULL; rcu_read_unlock(); return f; } static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f, bool *last, bool rtnl_held, struct netlink_ext_ack *extack) { struct 
cls_fl_head *head = fl_head_dereference(tp); *last = false; spin_lock(&tp->lock); if (f->deleted) { spin_unlock(&tp->lock); return -ENOENT; } f->deleted = true; rhashtable_remove_fast(&f->mask->ht, &f->ht_node, f->mask->filter_ht_params); idr_remove(&head->handle_idr, f->handle); list_del_rcu(&f->list); spin_unlock(&tp->lock); *last = fl_mask_put(head, f->mask); if (!tc_skip_hw(f->flags)) fl_hw_destroy_filter(tp, f, rtnl_held, extack); tcf_unbind_filter(tp, &f->res); __fl_put(f); return 0; } static void fl_destroy_sleepable(struct work_struct *work) { struct cls_fl_head *head = container_of(to_rcu_work(work), struct cls_fl_head, rwork); rhashtable_destroy(&head->ht); kfree(head); module_put(THIS_MODULE); } static void fl_destroy(struct tcf_proto *tp, bool rtnl_held, struct netlink_ext_ack *extack) { struct cls_fl_head *head = fl_head_dereference(tp); struct fl_flow_mask *mask, *next_mask; struct cls_fl_filter *f, *next; bool last; list_for_each_entry_safe(mask, next_mask, &head->masks, list) { list_for_each_entry_safe(f, next, &mask->filters, list) { __fl_delete(tp, f, &last, rtnl_held, extack); if (last) break; } } idr_destroy(&head->handle_idr); __module_get(THIS_MODULE); tcf_queue_work(&head->rwork, fl_destroy_sleepable); } static void fl_put(struct tcf_proto *tp, void *arg) { struct cls_fl_filter *f = arg; __fl_put(f); } static void *fl_get(struct tcf_proto *tp, u32 handle) { struct cls_fl_head *head = fl_head_dereference(tp); return __fl_get(head, handle); } static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_UNSPEC] = { .type = NLA_UNSPEC }, [TCA_FLOWER_CLASSID] = { .type = NLA_U32 }, [TCA_FLOWER_INDEV] = { .type = NLA_STRING, .len = IFNAMSIZ }, [TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN }, [TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN }, [TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN }, [TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN }, [TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) }, [TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) }, [TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) }, [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) }, [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) }, [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) }, [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) }, [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) }, [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 }, 
[TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN }, [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN }, [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN }, [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN }, [TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_MPLS_OPTS] = { .type = NLA_NESTED }, [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED }, [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED }, [TCA_FLOWER_KEY_CT_STATE] = NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK), [TCA_FLOWER_KEY_CT_STATE_MASK] = NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK), [TCA_FLOWER_KEY_CT_ZONE] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_CT_ZONE_MASK] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_CT_MARK] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_CT_MARK_MASK] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_CT_LABELS] = { .type = NLA_BINARY, .len = 128 / BITS_PER_BYTE }, [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY, .len = 128 / BITS_PER_BYTE }, [TCA_FLOWER_FLAGS] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 }, }; static const struct nla_policy enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = { [TCA_FLOWER_KEY_ENC_OPTS_UNSPEC] = { .strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN }, [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED }, [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED }, [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { 
.type = NLA_NESTED }, }; static const struct nla_policy geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = { [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 }, }; static const struct nla_policy vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = { [TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 }, }; static const struct nla_policy erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = { [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 }, }; static const struct nla_policy mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = { [TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_MPLS_OPT_LSE_TC] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL] = { .type = NLA_U32 }, }; static void fl_set_key_val(struct nlattr **tb, void *val, int val_type, void *mask, int mask_type, int len) { if (!tb[val_type]) return; nla_memcpy(val, tb[val_type], len); if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type]) memset(mask, 0xff, len); else nla_memcpy(mask, tb[mask_type], len); } static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key, struct fl_flow_key *mask, struct netlink_ext_ack *extack) { fl_set_key_val(tb, &key->tp_range.tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst)); fl_set_key_val(tb, &key->tp_range.tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst)); fl_set_key_val(tb, &key->tp_range.tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src)); fl_set_key_val(tb, &key->tp_range.tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src)); if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst && ntohs(key->tp_range.tp_max.dst) <= ntohs(key->tp_range.tp_min.dst)) { NL_SET_ERR_MSG_ATTR(extack, tb[TCA_FLOWER_KEY_PORT_DST_MIN], "Invalid destination port range (min must be strictly smaller than max)"); return -EINVAL; } if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src && ntohs(key->tp_range.tp_max.src) <= ntohs(key->tp_range.tp_min.src)) { NL_SET_ERR_MSG_ATTR(extack, tb[TCA_FLOWER_KEY_PORT_SRC_MIN], "Invalid source port range (min must be strictly smaller than max)"); return -EINVAL; } return 0; } static int fl_set_key_mpls_lse(const struct nlattr *nla_lse, struct flow_dissector_key_mpls *key_val, struct flow_dissector_key_mpls *key_mask, struct netlink_ext_ack *extack) { struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1]; struct flow_dissector_mpls_lse *lse_mask; struct flow_dissector_mpls_lse *lse_val; u8 lse_index; u8 depth; int err; err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse, mpls_stack_entry_policy, extack); if (err < 0) return err; if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) { NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\""); return -EINVAL; } depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]); /* LSE depth starts at 1, for consistency with terminology used by * RFC 
3031 (section 3.9), where depth 0 refers to unlabeled packets. */ if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) { NL_SET_ERR_MSG_ATTR(extack, tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH], "Invalid MPLS depth"); return -EINVAL; } lse_index = depth - 1; dissector_set_mpls_lse(key_val, lse_index); dissector_set_mpls_lse(key_mask, lse_index); lse_val = &key_val->ls[lse_index]; lse_mask = &key_mask->ls[lse_index]; if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) { lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]); lse_mask->mpls_ttl = MPLS_TTL_MASK; } if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) { u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]); if (bos & ~MPLS_BOS_MASK) { NL_SET_ERR_MSG_ATTR(extack, tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS], "Bottom Of Stack (BOS) must be 0 or 1"); return -EINVAL; } lse_val->mpls_bos = bos; lse_mask->mpls_bos = MPLS_BOS_MASK; } if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) { u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]); if (tc & ~MPLS_TC_MASK) { NL_SET_ERR_MSG_ATTR(extack, tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC], "Traffic Class (TC) must be between 0 and 7"); return -EINVAL; } lse_val->mpls_tc = tc; lse_mask->mpls_tc = MPLS_TC_MASK; } if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) { u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]); if (label & ~MPLS_LABEL_MASK) { NL_SET_ERR_MSG_ATTR(extack, tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL], "Label must be between 0 and 1048575"); return -EINVAL; } lse_val->mpls_label = label; lse_mask->mpls_label = MPLS_LABEL_MASK; } return 0; } static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts, struct flow_dissector_key_mpls *key_val, struct flow_dissector_key_mpls *key_mask, struct netlink_ext_ack *extack) { struct nlattr *nla_lse; int rem; int err; if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) { NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts, "NLA_F_NESTED is missing"); return -EINVAL; } nla_for_each_nested(nla_lse, nla_mpls_opts, rem) { if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) { NL_SET_ERR_MSG_ATTR(extack, nla_lse, "Invalid MPLS option type"); return -EINVAL; } err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack); if (err < 0) return err; } if (rem) { NL_SET_ERR_MSG(extack, "Bytes leftover after parsing MPLS options"); return -EINVAL; } return 0; } static int fl_set_key_mpls(struct nlattr **tb, struct flow_dissector_key_mpls *key_val, struct flow_dissector_key_mpls *key_mask, struct netlink_ext_ack *extack) { struct flow_dissector_mpls_lse *lse_mask; struct flow_dissector_mpls_lse *lse_val; if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) { if (tb[TCA_FLOWER_KEY_MPLS_TTL] || tb[TCA_FLOWER_KEY_MPLS_BOS] || tb[TCA_FLOWER_KEY_MPLS_TC] || tb[TCA_FLOWER_KEY_MPLS_LABEL]) { NL_SET_ERR_MSG_ATTR(extack, tb[TCA_FLOWER_KEY_MPLS_OPTS], "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute"); return -EBADMSG; } return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS], key_val, key_mask, extack); } lse_val = &key_val->ls[0]; lse_mask = &key_mask->ls[0]; if (tb[TCA_FLOWER_KEY_MPLS_TTL]) { lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]); lse_mask->mpls_ttl = MPLS_TTL_MASK; dissector_set_mpls_lse(key_val, 0); dissector_set_mpls_lse(key_mask, 0); } if (tb[TCA_FLOWER_KEY_MPLS_BOS]) { u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]); if (bos & ~MPLS_BOS_MASK) { NL_SET_ERR_MSG_ATTR(extack, tb[TCA_FLOWER_KEY_MPLS_BOS], "Bottom Of Stack (BOS) must be 0 or 1"); return -EINVAL; } lse_val->mpls_bos = bos; lse_mask->mpls_bos = MPLS_BOS_MASK; 
dissector_set_mpls_lse(key_val, 0); dissector_set_mpls_lse(key_mask, 0); } if (tb[TCA_FLOWER_KEY_MPLS_TC]) { u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]); if (tc & ~MPLS_TC_MASK) { NL_SET_ERR_MSG_ATTR(extack, tb[TCA_FLOWER_KEY_MPLS_TC], "Traffic Class (TC) must be between 0 and 7"); return -EINVAL; } lse_val->mpls_tc = tc; lse_mask->mpls_tc = MPLS_TC_MASK; dissector_set_mpls_lse(key_val, 0); dissector_set_mpls_lse(key_mask, 0); } if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) { u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]); if (label & ~MPLS_LABEL_MASK) { NL_SET_ERR_MSG_ATTR(extack, tb[TCA_FLOWER_KEY_MPLS_LABEL], "Label must be between 0 and 1048575"); return -EINVAL; } lse_val->mpls_label = label; lse_mask->mpls_label = MPLS_LABEL_MASK; dissector_set_mpls_lse(key_val, 0); dissector_set_mpls_lse(key_mask, 0); } return 0; } static void fl_set_key_vlan(struct nlattr **tb, __be16 ethertype, int vlan_id_key, int vlan_prio_key, struct flow_dissector_key_vlan *key_val, struct flow_dissector_key_vlan *key_mask) { #define VLAN_PRIORITY_MASK 0x7 if (tb[vlan_id_key]) { key_val->vlan_id = nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK; key_mask->vlan_id = VLAN_VID_MASK; } if (tb[vlan_prio_key]) { key_val->vlan_priority = nla_get_u8(tb[vlan_prio_key]) & VLAN_PRIORITY_MASK; key_mask->vlan_priority = VLAN_PRIORITY_MASK; } key_val->vlan_tpid = ethertype; key_mask->vlan_tpid = cpu_to_be16(~0); } static void fl_set_key_flag(u32 flower_key, u32 flower_mask, u32 *dissector_key, u32 *dissector_mask, u32 flower_flag_bit, u32 dissector_flag_bit) { if (flower_mask & flower_flag_bit) { *dissector_mask |= dissector_flag_bit; if (flower_key & flower_flag_bit) *dissector_key |= dissector_flag_bit; } } static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key, u32 *flags_mask, struct netlink_ext_ack *extack) { u32 key, mask; /* mask is mandatory for flags */ if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) { NL_SET_ERR_MSG(extack, "Missing flags mask"); return -EINVAL; } key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS])); mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK])); *flags_key = 0; *flags_mask = 0; fl_set_key_flag(key, mask, flags_key, flags_mask, TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT); fl_set_key_flag(key, mask, flags_key, flags_mask, TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST, FLOW_DIS_FIRST_FRAG); return 0; } static void fl_set_key_ip(struct nlattr **tb, bool encap, struct flow_dissector_key_ip *key, struct flow_dissector_key_ip *mask) { int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS; int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL; int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK; int ttl_mask = encap ? 
TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK; fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)); fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)); } static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key, int depth, int option_len, struct netlink_ext_ack *extack) { struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1]; struct nlattr *class = NULL, *type = NULL, *data = NULL; struct geneve_opt *opt; int err, data_len = 0; if (option_len > sizeof(struct geneve_opt)) data_len = option_len - sizeof(struct geneve_opt); opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len]; memset(opt, 0xff, option_len); opt->length = data_len / 4; opt->r1 = 0; opt->r2 = 0; opt->r3 = 0; /* If no mask has been prodived we assume an exact match. */ if (!depth) return sizeof(struct geneve_opt) + data_len; if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) { NL_SET_ERR_MSG(extack, "Non-geneve option type for mask"); return -EINVAL; } err = nla_parse_nested_deprecated(tb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX, nla, geneve_opt_policy, extack); if (err < 0) return err; /* We are not allowed to omit any of CLASS, TYPE or DATA * fields from the key. */ if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] || !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] || !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) { NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data"); return -EINVAL; } /* Omitting any of CLASS, TYPE or DATA fields is allowed * for the mask. */ if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) { int new_len = key->enc_opts.len; data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]; data_len = nla_len(data); if (data_len < 4) { NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long"); return -ERANGE; } if (data_len % 4) { NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long"); return -ERANGE; } new_len += sizeof(struct geneve_opt) + data_len; BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX); if (new_len > FLOW_DIS_TUN_OPTS_MAX) { NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size"); return -ERANGE; } opt->length = data_len / 4; memcpy(opt->opt_data, nla_data(data), data_len); } if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) { class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]; opt->opt_class = nla_get_be16(class); } if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) { type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]; opt->type = nla_get_u8(type); } return sizeof(struct geneve_opt) + data_len; } static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key, int depth, int option_len, struct netlink_ext_ack *extack) { struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1]; struct vxlan_metadata *md; int err; md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len]; memset(md, 0xff, sizeof(*md)); if (!depth) return sizeof(*md); if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) { NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask"); return -EINVAL; } err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla, vxlan_opt_policy, extack); if (err < 0) return err; if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) { NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp"); return -EINVAL; } if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) { md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]); md->gbp &= VXLAN_GBP_MASK; } return sizeof(*md); } static int fl_set_erspan_opt(const struct nlattr *nla, 
struct fl_flow_key *key, int depth, int option_len, struct netlink_ext_ack *extack) { struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1]; struct erspan_metadata *md; int err; md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len]; memset(md, 0xff, sizeof(*md)); md->version = 1; if (!depth) return sizeof(*md); if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) { NL_SET_ERR_MSG(extack, "Non-erspan option type for mask"); return -EINVAL; } err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla, erspan_opt_policy, extack); if (err < 0) return err; if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) { NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver"); return -EINVAL; } if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]); if (md->version == 1) { if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) { NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index"); return -EINVAL; } if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) { nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]; memset(&md->u, 0x00, sizeof(md->u)); md->u.index = nla_get_be32(nla); } } else if (md->version == 2) { if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] || !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) { NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid"); return -EINVAL; } if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) { nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]; md->u.md2.dir = nla_get_u8(nla); } if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) { nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]; set_hwid(&md->u.md2, nla_get_u8(nla)); } } else { NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect"); return -EINVAL; } return sizeof(*md); } static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key, struct fl_flow_key *mask, struct netlink_ext_ack *extack) { const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL; int err, option_len, key_depth, msk_depth = 0; err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS], TCA_FLOWER_KEY_ENC_OPTS_MAX, enc_opts_policy, extack); if (err) return err; nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]); if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) { err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK], TCA_FLOWER_KEY_ENC_OPTS_MAX, enc_opts_policy, extack); if (err) return err; nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]); msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]); if (!nla_ok(nla_opt_msk, msk_depth)) { NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks"); return -EINVAL; } } nla_for_each_attr(nla_opt_key, nla_enc_key, nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) { switch (nla_type(nla_opt_key)) { case TCA_FLOWER_KEY_ENC_OPTS_GENEVE: if (key->enc_opts.dst_opt_type && key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) { NL_SET_ERR_MSG(extack, "Duplicate type for geneve options"); return -EINVAL; } option_len = 0; key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT; option_len = fl_set_geneve_opt(nla_opt_key, key, key_depth, option_len, extack); if (option_len < 0) return option_len; key->enc_opts.len += option_len; /* At the same time we need to parse through the mask * in order to verify exact and mask attribute lengths. 
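* The same fl_set_geneve_opt() helper is run a second time over the mask attribute, and any key/mask length mismatch is rejected immediately below.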
*/ mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT; option_len = fl_set_geneve_opt(nla_opt_msk, mask, msk_depth, option_len, extack); if (option_len < 0) return option_len; mask->enc_opts.len += option_len; if (key->enc_opts.len != mask->enc_opts.len) { NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); return -EINVAL; } break; case TCA_FLOWER_KEY_ENC_OPTS_VXLAN: if (key->enc_opts.dst_opt_type) { NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options"); return -EINVAL; } option_len = 0; key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT; option_len = fl_set_vxlan_opt(nla_opt_key, key, key_depth, option_len, extack); if (option_len < 0) return option_len; key->enc_opts.len += option_len; /* At the same time we need to parse through the mask * in order to verify exact and mask attribute lengths. */ mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT; option_len = fl_set_vxlan_opt(nla_opt_msk, mask, msk_depth, option_len, extack); if (option_len < 0) return option_len; mask->enc_opts.len += option_len; if (key->enc_opts.len != mask->enc_opts.len) { NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); return -EINVAL; } break; case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN: if (key->enc_opts.dst_opt_type) { NL_SET_ERR_MSG(extack, "Duplicate type for erspan options"); return -EINVAL; } option_len = 0; key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT; option_len = fl_set_erspan_opt(nla_opt_key, key, key_depth, option_len, extack); if (option_len < 0) return option_len; key->enc_opts.len += option_len; /* At the same time we need to parse through the mask * in order to verify exact and mask attribute lengths. */ mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT; option_len = fl_set_erspan_opt(nla_opt_msk, mask, msk_depth, option_len, extack); if (option_len < 0) return option_len; mask->enc_opts.len += option_len; if (key->enc_opts.len != mask->enc_opts.len) { NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); return -EINVAL; } break; default: NL_SET_ERR_MSG(extack, "Unknown tunnel option type"); return -EINVAL; } if (!msk_depth) continue; if (!nla_ok(nla_opt_msk, msk_depth)) { NL_SET_ERR_MSG(extack, "A mask attribute is invalid"); return -EINVAL; } nla_opt_msk = nla_next(nla_opt_msk, &msk_depth); } return 0; } static int fl_validate_ct_state(u16 state, struct nlattr *tb, struct netlink_ext_ack *extack) { if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) { NL_SET_ERR_MSG_ATTR(extack, tb, "no trk, so no other flag can be set"); return -EINVAL; } if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW && state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) { NL_SET_ERR_MSG_ATTR(extack, tb, "new and est are mutually exclusive"); return -EINVAL; } if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID && state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED | TCA_FLOWER_KEY_CT_FLAGS_INVALID)) { NL_SET_ERR_MSG_ATTR(extack, tb, "when inv is set, only trk may be set"); return -EINVAL; } if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW && state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) { NL_SET_ERR_MSG_ATTR(extack, tb, "new and rpl are mutually exclusive"); return -EINVAL; } return 0; } static int fl_set_key_ct(struct nlattr **tb, struct flow_dissector_key_ct *key, struct flow_dissector_key_ct *mask, struct netlink_ext_ack *extack) { if (tb[TCA_FLOWER_KEY_CT_STATE]) { int err; if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) { NL_SET_ERR_MSG(extack, "Conntrack isn't enabled"); return -EOPNOTSUPP; } fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE, &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK, sizeof(key->ct_state)); err = fl_validate_ct_state(key->ct_state & mask->ct_state, 
tb[TCA_FLOWER_KEY_CT_STATE_MASK], extack); if (err) return err; } if (tb[TCA_FLOWER_KEY_CT_ZONE]) { if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) { NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled"); return -EOPNOTSUPP; } fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE, &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK, sizeof(key->ct_zone)); } if (tb[TCA_FLOWER_KEY_CT_MARK]) { if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) { NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled"); return -EOPNOTSUPP; } fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK, &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK, sizeof(key->ct_mark)); } if (tb[TCA_FLOWER_KEY_CT_LABELS]) { if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) { NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled"); return -EOPNOTSUPP; } fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS, mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK, sizeof(key->ct_labels)); } return 0; } static int fl_set_key(struct net *net, struct nlattr **tb, struct fl_flow_key *key, struct fl_flow_key *mask, struct netlink_ext_ack *extack) { __be16 ethertype; int ret = 0; if (tb[TCA_FLOWER_INDEV]) { int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack); if (err < 0) return err; key->meta.ingress_ifindex = err; mask->meta.ingress_ifindex = 0xffffffff; } fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST, mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK, sizeof(key->eth.dst)); fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC, mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK, sizeof(key->eth.src)); if (tb[TCA_FLOWER_KEY_ETH_TYPE]) { ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]); if (eth_type_vlan(ethertype)) { fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID, TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan); if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) { ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]); if (eth_type_vlan(ethertype)) { fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_CVLAN_ID, TCA_FLOWER_KEY_CVLAN_PRIO, &key->cvlan, &mask->cvlan); fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_CVLAN_ETH_TYPE, &mask->basic.n_proto, TCA_FLOWER_UNSPEC, sizeof(key->basic.n_proto)); } else { key->basic.n_proto = ethertype; mask->basic.n_proto = cpu_to_be16(~0); } } } else { key->basic.n_proto = ethertype; mask->basic.n_proto = cpu_to_be16(~0); } } if (key->basic.n_proto == htons(ETH_P_IP) || key->basic.n_proto == htons(ETH_P_IPV6)) { fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, sizeof(key->basic.ip_proto)); fl_set_key_ip(tb, false, &key->ip, &mask->ip); } if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) { key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; mask->control.addr_type = ~0; fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, sizeof(key->ipv4.src)); fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST, &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, sizeof(key->ipv4.dst)); } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) { key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; mask->control.addr_type = ~0; fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, sizeof(key->ipv6.src)); fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST, &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK, sizeof(key->ipv6.dst)); } if (key->basic.ip_proto == IPPROTO_TCP) { fl_set_key_val(tb, &key->tp.src, 
TCA_FLOWER_KEY_TCP_SRC, &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK, sizeof(key->tp.src)); fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST, &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK, sizeof(key->tp.dst)); fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS, &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK, sizeof(key->tcp.flags)); } else if (key->basic.ip_proto == IPPROTO_UDP) { fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC, &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK, sizeof(key->tp.src)); fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST, &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK, sizeof(key->tp.dst)); } else if (key->basic.ip_proto == IPPROTO_SCTP) { fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC, &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK, sizeof(key->tp.src)); fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST, &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK, sizeof(key->tp.dst)); } else if (key->basic.n_proto == htons(ETH_P_IP) && key->basic.ip_proto == IPPROTO_ICMP) { fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE_MASK, sizeof(key->icmp.type)); fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE_MASK, sizeof(key->icmp.code)); } else if (key->basic.n_proto == htons(ETH_P_IPV6) && key->basic.ip_proto == IPPROTO_ICMPV6) { fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, sizeof(key->icmp.type)); fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE_MASK, sizeof(key->icmp.code)); } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) || key->basic.n_proto == htons(ETH_P_MPLS_MC)) { ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack); if (ret) return ret; } else if (key->basic.n_proto == htons(ETH_P_ARP) || key->basic.n_proto == htons(ETH_P_RARP)) { fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK, sizeof(key->arp.sip)); fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK, sizeof(key->arp.tip)); fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP, &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK, sizeof(key->arp.op)); fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA, mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK, sizeof(key->arp.sha)); fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA, mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK, sizeof(key->arp.tha)); } if (key->basic.ip_proto == IPPROTO_TCP || key->basic.ip_proto == IPPROTO_UDP || key->basic.ip_proto == IPPROTO_SCTP) { ret = fl_set_key_port_range(tb, key, mask, extack); if (ret) return ret; } if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] || tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) { key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; mask->enc_control.addr_type = ~0; fl_set_key_val(tb, &key->enc_ipv4.src, TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src, TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, sizeof(key->enc_ipv4.src)); fl_set_key_val(tb, &key->enc_ipv4.dst, TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst, TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, sizeof(key->enc_ipv4.dst)); } if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] || tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) { key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; mask->enc_control.addr_type = ~0; fl_set_key_val(tb, &key->enc_ipv6.src, TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src, TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK, 
sizeof(key->enc_ipv6.src)); fl_set_key_val(tb, &key->enc_ipv6.dst, TCA_FLOWER_KEY_ENC_IPV6_DST, &mask->enc_ipv6.dst, TCA_FLOWER_KEY_ENC_IPV6_DST_MASK, sizeof(key->enc_ipv6.dst)); } fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID, &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC, sizeof(key->enc_key_id.keyid)); fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, sizeof(key->enc_tp.src)); fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT, &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, sizeof(key->enc_tp.dst)); fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip); fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH, &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, sizeof(key->hash.hash)); if (tb[TCA_FLOWER_KEY_ENC_OPTS]) { ret = fl_set_enc_opt(tb, key, mask, extack); if (ret) return ret; } ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack); if (ret) return ret; if (tb[TCA_FLOWER_KEY_FLAGS]) ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags, extack); return ret; } static void fl_mask_copy(struct fl_flow_mask *dst, struct fl_flow_mask *src) { const void *psrc = fl_key_get_start(&src->key, src); void *pdst = fl_key_get_start(&dst->key, src); memcpy(pdst, psrc, fl_mask_range(src)); dst->range = src->range; } static const struct rhashtable_params fl_ht_params = { .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */ .head_offset = offsetof(struct cls_fl_filter, ht_node), .automatic_shrinking = true, }; static int fl_init_mask_hashtable(struct fl_flow_mask *mask) { mask->filter_ht_params = fl_ht_params; mask->filter_ht_params.key_len = fl_mask_range(mask); mask->filter_ht_params.key_offset += mask->range.start; return rhashtable_init(&mask->ht, &mask->filter_ht_params); } #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member) #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member) #define FL_KEY_IS_MASKED(mask, member) \ memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \ 0, FL_KEY_MEMBER_SIZE(member)) \ #define FL_KEY_SET(keys, cnt, id, member) \ do { \ keys[cnt].key_id = id; \ keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \ cnt++; \ } while(0); #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \ do { \ if (FL_KEY_IS_MASKED(mask, member)) \ FL_KEY_SET(keys, cnt, id, member); \ } while(0); static void fl_init_dissector(struct flow_dissector *dissector, struct fl_flow_key *mask) { struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX]; size_t cnt = 0; FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_META, meta); FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control); FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ETH_ADDRS, eth); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_IP, ip); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_TCP, tcp); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ICMP, icmp); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ARP, arp); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_MPLS, mpls); FL_KEY_SET_IF_MASKED(mask, keys, cnt, 
FLOW_DISSECTOR_KEY_VLAN, vlan); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_CVLAN, cvlan); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6); if (FL_KEY_IS_MASKED(mask, enc_ipv4) || FL_KEY_IS_MASKED(mask, enc_ipv6)) FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL, enc_control); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ENC_IP, enc_ip); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_CT, ct); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_HASH, hash); skb_flow_dissector_init(dissector, keys, cnt); } static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head, struct fl_flow_mask *mask) { struct fl_flow_mask *newmask; int err; newmask = kzalloc(sizeof(*newmask), GFP_KERNEL); if (!newmask) return ERR_PTR(-ENOMEM); fl_mask_copy(newmask, mask); if ((newmask->key.tp_range.tp_min.dst && newmask->key.tp_range.tp_max.dst) || (newmask->key.tp_range.tp_min.src && newmask->key.tp_range.tp_max.src)) newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE; err = fl_init_mask_hashtable(newmask); if (err) goto errout_free; fl_init_dissector(&newmask->dissector, &newmask->key); INIT_LIST_HEAD_RCU(&newmask->filters); refcount_set(&newmask->refcnt, 1); err = rhashtable_replace_fast(&head->ht, &mask->ht_node, &newmask->ht_node, mask_ht_params); if (err) goto errout_destroy; spin_lock(&head->masks_lock); list_add_tail_rcu(&newmask->list, &head->masks); spin_unlock(&head->masks_lock); return newmask; errout_destroy: rhashtable_destroy(&newmask->ht); errout_free: kfree(newmask); return ERR_PTR(err); } static int fl_check_assign_mask(struct cls_fl_head *head, struct cls_fl_filter *fnew, struct cls_fl_filter *fold, struct fl_flow_mask *mask) { struct fl_flow_mask *newmask; int ret = 0; rcu_read_lock(); /* Insert mask as temporary node to prevent concurrent creation of mask * with same key. Any concurrent lookups with same key will return * -EAGAIN because mask's refcnt is zero. 
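* If an existing mask with the same key is found instead, it is reused by taking a reference below; refcount_inc_not_zero() failing means that mask is concurrently being freed, so the caller retries with -EAGAIN.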
*/ fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht, &mask->ht_node, mask_ht_params); if (!fnew->mask) { rcu_read_unlock(); if (fold) { ret = -EINVAL; goto errout_cleanup; } newmask = fl_create_new_mask(head, mask); if (IS_ERR(newmask)) { ret = PTR_ERR(newmask); goto errout_cleanup; } fnew->mask = newmask; return 0; } else if (IS_ERR(fnew->mask)) { ret = PTR_ERR(fnew->mask); } else if (fold && fold->mask != fnew->mask) { ret = -EINVAL; } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) { /* Mask was deleted concurrently, try again */ ret = -EAGAIN; } rcu_read_unlock(); return ret; errout_cleanup: rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params); return ret; } static int fl_set_parms(struct net *net, struct tcf_proto *tp, struct cls_fl_filter *f, struct fl_flow_mask *mask, unsigned long base, struct nlattr **tb, struct nlattr *est, struct fl_flow_tmplt *tmplt, u32 flags, struct netlink_ext_ack *extack) { int err; err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack); if (err < 0) return err; if (tb[TCA_FLOWER_CLASSID]) { f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]); if (flags & TCA_ACT_FLAGS_NO_RTNL) rtnl_lock(); tcf_bind_filter(tp, &f->res, base); if (flags & TCA_ACT_FLAGS_NO_RTNL) rtnl_unlock(); } err = fl_set_key(net, tb, &f->key, &mask->key, extack); if (err) return err; fl_mask_update_range(mask); fl_set_masked_key(&f->mkey, &f->key, mask); if (!fl_mask_fits_tmplt(tmplt, mask)) { NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template"); return -EINVAL; } return 0; } static int fl_ht_insert_unique(struct cls_fl_filter *fnew, struct cls_fl_filter *fold, bool *in_ht) { struct fl_flow_mask *mask = fnew->mask; int err; err = rhashtable_lookup_insert_fast(&mask->ht, &fnew->ht_node, mask->filter_ht_params); if (err) { *in_ht = false; /* It is okay if filter with same key exists when * overwriting. */ return fold && err == -EEXIST ? 
0 : err; } *in_ht = true; return 0; } static int fl_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, void **arg, u32 flags, struct netlink_ext_ack *extack) { struct cls_fl_head *head = fl_head_dereference(tp); bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL); struct cls_fl_filter *fold = *arg; struct cls_fl_filter *fnew; struct fl_flow_mask *mask; struct nlattr **tb; bool in_ht; int err; if (!tca[TCA_OPTIONS]) { err = -EINVAL; goto errout_fold; } mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL); if (!mask) { err = -ENOBUFS; goto errout_fold; } tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); if (!tb) { err = -ENOBUFS; goto errout_mask_alloc; } err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy, NULL); if (err < 0) goto errout_tb; if (fold && handle && fold->handle != handle) { err = -EINVAL; goto errout_tb; } fnew = kzalloc(sizeof(*fnew), GFP_KERNEL); if (!fnew) { err = -ENOBUFS; goto errout_tb; } INIT_LIST_HEAD(&fnew->hw_list); refcount_set(&fnew->refcnt, 1); err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0); if (err < 0) goto errout; if (tb[TCA_FLOWER_FLAGS]) { fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]); if (!tc_flags_valid(fnew->flags)) { err = -EINVAL; goto errout; } } err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], tp->chain->tmplt_priv, flags, extack); if (err) goto errout; err = fl_check_assign_mask(head, fnew, fold, mask); if (err) goto errout; err = fl_ht_insert_unique(fnew, fold, &in_ht); if (err) goto errout_mask; if (!tc_skip_hw(fnew->flags)) { err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack); if (err) goto errout_ht; } if (!tc_in_hw(fnew->flags)) fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW; spin_lock(&tp->lock); /* tp was deleted concurrently. -EAGAIN will cause caller to lookup * proto again or create new one, if necessary. */ if (tp->deleting) { err = -EAGAIN; goto errout_hw; } if (fold) { /* Fold filter was deleted concurrently. Retry lookup. */ if (fold->deleted) { err = -EAGAIN; goto errout_hw; } fnew->handle = handle; if (!in_ht) { struct rhashtable_params params = fnew->mask->filter_ht_params; err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node, params); if (err) goto errout_hw; in_ht = true; } refcount_inc(&fnew->refcnt); rhashtable_remove_fast(&fold->mask->ht, &fold->ht_node, fold->mask->filter_ht_params); idr_replace(&head->handle_idr, fnew, fnew->handle); list_replace_rcu(&fold->list, &fnew->list); fold->deleted = true; spin_unlock(&tp->lock); fl_mask_put(head, fold->mask); if (!tc_skip_hw(fold->flags)) fl_hw_destroy_filter(tp, fold, rtnl_held, NULL); tcf_unbind_filter(tp, &fold->res); /* Caller holds reference to fold, so refcnt is always > 0 * after this. */ refcount_dec(&fold->refcnt); __fl_put(fold); } else { if (handle) { /* user specifies a handle and it doesn't exist */ err = idr_alloc_u32(&head->handle_idr, fnew, &handle, handle, GFP_ATOMIC); /* Filter with specified handle was concurrently * inserted after initial check in cls_api. This is not * necessarily an error if NLM_F_EXCL is not set in * message flags. Returning EAGAIN will cause cls_api to * try to update concurrently inserted rule. 
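* idr_alloc_u32() reports the already-taken handle as -ENOSPC, which is translated to -EAGAIN right below.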
*/ if (err == -ENOSPC) err = -EAGAIN; } else { handle = 1; err = idr_alloc_u32(&head->handle_idr, fnew, &handle, INT_MAX, GFP_ATOMIC); } if (err) goto errout_hw; refcount_inc(&fnew->refcnt); fnew->handle = handle; list_add_tail_rcu(&fnew->list, &fnew->mask->filters); spin_unlock(&tp->lock); } *arg = fnew; kfree(tb); tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work); return 0; errout_ht: spin_lock(&tp->lock); errout_hw: fnew->deleted = true; spin_unlock(&tp->lock); if (!tc_skip_hw(fnew->flags)) fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL); if (in_ht) rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node, fnew->mask->filter_ht_params); errout_mask: fl_mask_put(head, fnew->mask); errout: __fl_put(fnew); errout_tb: kfree(tb); errout_mask_alloc: tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work); errout_fold: if (fold) __fl_put(fold); return err; } static int fl_delete(struct tcf_proto *tp, void *arg, bool *last, bool rtnl_held, struct netlink_ext_ack *extack) { struct cls_fl_head *head = fl_head_dereference(tp); struct cls_fl_filter *f = arg; bool last_on_mask; int err = 0; err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack); *last = list_empty(&head->masks); __fl_put(f); return err; } static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg, bool rtnl_held) { struct cls_fl_head *head = fl_head_dereference(tp); unsigned long id = arg->cookie, tmp; struct cls_fl_filter *f; arg->count = arg->skip; rcu_read_lock(); idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) { /* don't return filters that are being deleted */ if (!refcount_inc_not_zero(&f->refcnt)) continue; rcu_read_unlock(); if (arg->fn(tp, f, arg) < 0) { __fl_put(f); arg->stop = 1; rcu_read_lock(); break; } __fl_put(f); arg->count++; rcu_read_lock(); } rcu_read_unlock(); arg->cookie = id; } static struct cls_fl_filter * fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add) { struct cls_fl_head *head = fl_head_dereference(tp); spin_lock(&tp->lock); if (list_empty(&head->hw_filters)) { spin_unlock(&tp->lock); return NULL; } if (!f) f = list_entry(&head->hw_filters, struct cls_fl_filter, hw_list); list_for_each_entry_continue(f, &head->hw_filters, hw_list) { if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) { spin_unlock(&tp->lock); return f; } } spin_unlock(&tp->lock); return NULL; } static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, void *cb_priv, struct netlink_ext_ack *extack) { struct tcf_block *block = tp->chain->block; struct flow_cls_offload cls_flower = {}; struct cls_fl_filter *f = NULL; int err; /* hw_filters list can only be changed by hw offload functions after * obtaining rtnl lock. Make sure it is not changed while reoffload is * iterating it. */ ASSERT_RTNL(); while ((f = fl_get_next_hw_filter(tp, f, add))) { cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts)); if (!cls_flower.rule) { __fl_put(f); return -ENOMEM; } tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack); cls_flower.command = add ? 
FLOW_CLS_REPLACE : FLOW_CLS_DESTROY; cls_flower.cookie = (unsigned long)f; cls_flower.rule->match.dissector = &f->mask->dissector; cls_flower.rule->match.mask = &f->mask->key; cls_flower.rule->match.key = &f->mkey; err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts); if (err) { kfree(cls_flower.rule); if (tc_skip_sw(f->flags)) { NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action"); __fl_put(f); return err; } goto next_flow; } cls_flower.classid = f->res.classid; err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSFLOWER, &cls_flower, cb_priv, &f->flags, &f->in_hw_count); tc_cleanup_flow_action(&cls_flower.rule->action); kfree(cls_flower.rule); if (err) { __fl_put(f); return err; } next_flow: __fl_put(f); } return 0; } static void fl_hw_add(struct tcf_proto *tp, void *type_data) { struct flow_cls_offload *cls_flower = type_data; struct cls_fl_filter *f = (struct cls_fl_filter *) cls_flower->cookie; struct cls_fl_head *head = fl_head_dereference(tp); spin_lock(&tp->lock); list_add(&f->hw_list, &head->hw_filters); spin_unlock(&tp->lock); } static void fl_hw_del(struct tcf_proto *tp, void *type_data) { struct flow_cls_offload *cls_flower = type_data; struct cls_fl_filter *f = (struct cls_fl_filter *) cls_flower->cookie; spin_lock(&tp->lock); if (!list_empty(&f->hw_list)) list_del_init(&f->hw_list); spin_unlock(&tp->lock); } static int fl_hw_create_tmplt(struct tcf_chain *chain, struct fl_flow_tmplt *tmplt) { struct flow_cls_offload cls_flower = {}; struct tcf_block *block = chain->block; cls_flower.rule = flow_rule_alloc(0); if (!cls_flower.rule) return -ENOMEM; cls_flower.common.chain_index = chain->index; cls_flower.command = FLOW_CLS_TMPLT_CREATE; cls_flower.cookie = (unsigned long) tmplt; cls_flower.rule->match.dissector = &tmplt->dissector; cls_flower.rule->match.mask = &tmplt->mask; cls_flower.rule->match.key = &tmplt->dummy_key; /* We don't care if driver (any of them) fails to handle this * call. It serves just as a hint for it. 
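* The return value of tc_setup_cb_call() is therefore ignored and the template create path reports success as long as the rule allocation above worked.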
*/ tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true); kfree(cls_flower.rule); return 0; } static void fl_hw_destroy_tmplt(struct tcf_chain *chain, struct fl_flow_tmplt *tmplt) { struct flow_cls_offload cls_flower = {}; struct tcf_block *block = chain->block; cls_flower.common.chain_index = chain->index; cls_flower.command = FLOW_CLS_TMPLT_DESTROY; cls_flower.cookie = (unsigned long) tmplt; tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true); } static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain, struct nlattr **tca, struct netlink_ext_ack *extack) { struct fl_flow_tmplt *tmplt; struct nlattr **tb; int err; if (!tca[TCA_OPTIONS]) return ERR_PTR(-EINVAL); tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); if (!tb) return ERR_PTR(-ENOBUFS); err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy, NULL); if (err) goto errout_tb; tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL); if (!tmplt) { err = -ENOMEM; goto errout_tb; } tmplt->chain = chain; err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack); if (err) goto errout_tmplt; fl_init_dissector(&tmplt->dissector, &tmplt->mask); err = fl_hw_create_tmplt(chain, tmplt); if (err) goto errout_tmplt; kfree(tb); return tmplt; errout_tmplt: kfree(tmplt); errout_tb: kfree(tb); return ERR_PTR(err); } static void fl_tmplt_destroy(void *tmplt_priv) { struct fl_flow_tmplt *tmplt = tmplt_priv; fl_hw_destroy_tmplt(tmplt->chain, tmplt); kfree(tmplt); } static int fl_dump_key_val(struct sk_buff *skb, void *val, int val_type, void *mask, int mask_type, int len) { int err; if (!memchr_inv(mask, 0, len)) return 0; err = nla_put(skb, val_type, len, val); if (err) return err; if (mask_type != TCA_FLOWER_UNSPEC) { err = nla_put(skb, mask_type, len, mask); if (err) return err; } return 0; } static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key, struct fl_flow_key *mask) { if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst)) || fl_dump_key_val(skb, &key->tp_range.tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst)) || fl_dump_key_val(skb, &key->tp_range.tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src)) || fl_dump_key_val(skb, &key->tp_range.tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src))) return -1; return 0; } static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb, struct flow_dissector_key_mpls *mpls_key, struct flow_dissector_key_mpls *mpls_mask, u8 lse_index) { struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index]; struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index]; int err; err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH, lse_index + 1); if (err) return err; if (lse_mask->mpls_ttl) { err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL, lse_key->mpls_ttl); if (err) return err; } if (lse_mask->mpls_bos) { err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS, lse_key->mpls_bos); if (err) return err; } if (lse_mask->mpls_tc) { err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC, lse_key->mpls_tc); if (err) return err; } if (lse_mask->mpls_label) { err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL, lse_key->mpls_label); if (err) return err; } return 0; } static int 
fl_dump_key_mpls_opts(struct sk_buff *skb, struct flow_dissector_key_mpls *mpls_key, struct flow_dissector_key_mpls *mpls_mask) { struct nlattr *opts; struct nlattr *lse; u8 lse_index; int err; opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS); if (!opts) return -EMSGSIZE; for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) { if (!(mpls_mask->used_lses & 1 << lse_index)) continue; lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE); if (!lse) { err = -EMSGSIZE; goto err_opts; } err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask, lse_index); if (err) goto err_opts_lse; nla_nest_end(skb, lse); } nla_nest_end(skb, opts); return 0; err_opts_lse: nla_nest_cancel(skb, lse); err_opts: nla_nest_cancel(skb, opts); return err; } static int fl_dump_key_mpls(struct sk_buff *skb, struct flow_dissector_key_mpls *mpls_key, struct flow_dissector_key_mpls *mpls_mask) { struct flow_dissector_mpls_lse *lse_mask; struct flow_dissector_mpls_lse *lse_key; int err; if (!mpls_mask->used_lses) return 0; lse_mask = &mpls_mask->ls[0]; lse_key = &mpls_key->ls[0]; /* For backward compatibility, don't use the MPLS nested attributes if * the rule can be expressed using the old attributes. */ if (mpls_mask->used_lses & ~1 || (!lse_mask->mpls_ttl && !lse_mask->mpls_bos && !lse_mask->mpls_tc && !lse_mask->mpls_label)) return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask); if (lse_mask->mpls_ttl) { err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL, lse_key->mpls_ttl); if (err) return err; } if (lse_mask->mpls_tc) { err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC, lse_key->mpls_tc); if (err) return err; } if (lse_mask->mpls_label) { err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL, lse_key->mpls_label); if (err) return err; } if (lse_mask->mpls_bos) { err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS, lse_key->mpls_bos); if (err) return err; } return 0; } static int fl_dump_key_ip(struct sk_buff *skb, bool encap, struct flow_dissector_key_ip *key, struct flow_dissector_key_ip *mask) { int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS; int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL; int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK; int ttl_mask = encap ? 
TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK; if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) || fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl))) return -1; return 0; } static int fl_dump_key_vlan(struct sk_buff *skb, int vlan_id_key, int vlan_prio_key, struct flow_dissector_key_vlan *vlan_key, struct flow_dissector_key_vlan *vlan_mask) { int err; if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask))) return 0; if (vlan_mask->vlan_id) { err = nla_put_u16(skb, vlan_id_key, vlan_key->vlan_id); if (err) return err; } if (vlan_mask->vlan_priority) { err = nla_put_u8(skb, vlan_prio_key, vlan_key->vlan_priority); if (err) return err; } return 0; } static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask, u32 *flower_key, u32 *flower_mask, u32 flower_flag_bit, u32 dissector_flag_bit) { if (dissector_mask & dissector_flag_bit) { *flower_mask |= flower_flag_bit; if (dissector_key & dissector_flag_bit) *flower_key |= flower_flag_bit; } } static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask) { u32 key, mask; __be32 _key, _mask; int err; if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask))) return 0; key = 0; mask = 0; fl_get_key_flag(flags_key, flags_mask, &key, &mask, TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT); fl_get_key_flag(flags_key, flags_mask, &key, &mask, TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST, FLOW_DIS_FIRST_FRAG); _key = cpu_to_be32(key); _mask = cpu_to_be32(mask); err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key); if (err) return err; return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask); } static int fl_dump_key_geneve_opt(struct sk_buff *skb, struct flow_dissector_key_enc_opts *enc_opts) { struct geneve_opt *opt; struct nlattr *nest; int opt_off = 0; nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE); if (!nest) goto nla_put_failure; while (enc_opts->len > opt_off) { opt = (struct geneve_opt *)&enc_opts->data[opt_off]; if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, opt->opt_class)) goto nla_put_failure; if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, opt->type)) goto nla_put_failure; if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, opt->length * 4, opt->opt_data)) goto nla_put_failure; opt_off += sizeof(struct geneve_opt) + opt->length * 4; } nla_nest_end(skb, nest); return 0; nla_put_failure: nla_nest_cancel(skb, nest); return -EMSGSIZE; } static int fl_dump_key_vxlan_opt(struct sk_buff *skb, struct flow_dissector_key_enc_opts *enc_opts) { struct vxlan_metadata *md; struct nlattr *nest; nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN); if (!nest) goto nla_put_failure; md = (struct vxlan_metadata *)&enc_opts->data[0]; if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) goto nla_put_failure; nla_nest_end(skb, nest); return 0; nla_put_failure: nla_nest_cancel(skb, nest); return -EMSGSIZE; } static int fl_dump_key_erspan_opt(struct sk_buff *skb, struct flow_dissector_key_enc_opts *enc_opts) { struct erspan_metadata *md; struct nlattr *nest; nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN); if (!nest) goto nla_put_failure; md = (struct erspan_metadata *)&enc_opts->data[0]; if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version)) goto nla_put_failure; if (md->version == 1 && nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index)) goto nla_put_failure; if (md->version == 2 && (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR, md->u.md2.dir) || 
nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID, get_hwid(&md->u.md2)))) goto nla_put_failure; nla_nest_end(skb, nest); return 0; nla_put_failure: nla_nest_cancel(skb, nest); return -EMSGSIZE; } static int fl_dump_key_ct(struct sk_buff *skb, struct flow_dissector_key_ct *key, struct flow_dissector_key_ct *mask) { if (IS_ENABLED(CONFIG_NF_CONNTRACK) && fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE, &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK, sizeof(key->ct_state))) goto nla_put_failure; if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE, &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK, sizeof(key->ct_zone))) goto nla_put_failure; if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK, &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK, sizeof(key->ct_mark))) goto nla_put_failure; if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS, &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK, sizeof(key->ct_labels))) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type, struct flow_dissector_key_enc_opts *enc_opts) { struct nlattr *nest; int err; if (!enc_opts->len) return 0; nest = nla_nest_start_noflag(skb, enc_opt_type); if (!nest) goto nla_put_failure; switch (enc_opts->dst_opt_type) { case TUNNEL_GENEVE_OPT: err = fl_dump_key_geneve_opt(skb, enc_opts); if (err) goto nla_put_failure; break; case TUNNEL_VXLAN_OPT: err = fl_dump_key_vxlan_opt(skb, enc_opts); if (err) goto nla_put_failure; break; case TUNNEL_ERSPAN_OPT: err = fl_dump_key_erspan_opt(skb, enc_opts); if (err) goto nla_put_failure; break; default: goto nla_put_failure; } nla_nest_end(skb, nest); return 0; nla_put_failure: nla_nest_cancel(skb, nest); return -EMSGSIZE; } static int fl_dump_key_enc_opt(struct sk_buff *skb, struct flow_dissector_key_enc_opts *key_opts, struct flow_dissector_key_enc_opts *msk_opts) { int err; err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts); if (err) return err; return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts); } static int fl_dump_key(struct sk_buff *skb, struct net *net, struct fl_flow_key *key, struct fl_flow_key *mask) { if (mask->meta.ingress_ifindex) { struct net_device *dev; dev = __dev_get_by_index(net, key->meta.ingress_ifindex); if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name)) goto nla_put_failure; } if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST, mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK, sizeof(key->eth.dst)) || fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC, mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK, sizeof(key->eth.src)) || fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE, &mask->basic.n_proto, TCA_FLOWER_UNSPEC, sizeof(key->basic.n_proto))) goto nla_put_failure; if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls)) goto nla_put_failure; if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID, TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan)) goto nla_put_failure; if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID, TCA_FLOWER_KEY_CVLAN_PRIO, &key->cvlan, &mask->cvlan) || (mask->cvlan.vlan_tpid && nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, key->cvlan.vlan_tpid))) goto nla_put_failure; if (mask->basic.n_proto) { if (mask->cvlan.vlan_tpid) { if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE, key->basic.n_proto)) goto nla_put_failure; } else if 
(mask->vlan.vlan_tpid) { if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, key->basic.n_proto)) goto nla_put_failure; } } if ((key->basic.n_proto == htons(ETH_P_IP) || key->basic.n_proto == htons(ETH_P_IPV6)) && (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, sizeof(key->basic.ip_proto)) || fl_dump_key_ip(skb, false, &key->ip, &mask->ip))) goto nla_put_failure; if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, sizeof(key->ipv4.src)) || fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST, &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, sizeof(key->ipv4.dst)))) goto nla_put_failure; else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS && (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, sizeof(key->ipv6.src)) || fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST, &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK, sizeof(key->ipv6.dst)))) goto nla_put_failure; if (key->basic.ip_proto == IPPROTO_TCP && (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC, &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK, sizeof(key->tp.src)) || fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST, &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK, sizeof(key->tp.dst)) || fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS, &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK, sizeof(key->tcp.flags)))) goto nla_put_failure; else if (key->basic.ip_proto == IPPROTO_UDP && (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC, &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK, sizeof(key->tp.src)) || fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST, &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK, sizeof(key->tp.dst)))) goto nla_put_failure; else if (key->basic.ip_proto == IPPROTO_SCTP && (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC, &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK, sizeof(key->tp.src)) || fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST, &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK, sizeof(key->tp.dst)))) goto nla_put_failure; else if (key->basic.n_proto == htons(ETH_P_IP) && key->basic.ip_proto == IPPROTO_ICMP && (fl_dump_key_val(skb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE_MASK, sizeof(key->icmp.type)) || fl_dump_key_val(skb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE_MASK, sizeof(key->icmp.code)))) goto nla_put_failure; else if (key->basic.n_proto == htons(ETH_P_IPV6) && key->basic.ip_proto == IPPROTO_ICMPV6 && (fl_dump_key_val(skb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, sizeof(key->icmp.type)) || fl_dump_key_val(skb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE_MASK, sizeof(key->icmp.code)))) goto nla_put_failure; else if ((key->basic.n_proto == htons(ETH_P_ARP) || key->basic.n_proto == htons(ETH_P_RARP)) && (fl_dump_key_val(skb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK, sizeof(key->arp.sip)) || fl_dump_key_val(skb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK, sizeof(key->arp.tip)) || fl_dump_key_val(skb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP, &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK, sizeof(key->arp.op)) || 
fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA, mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK, sizeof(key->arp.sha)) || fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA, mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK, sizeof(key->arp.tha)))) goto nla_put_failure; if ((key->basic.ip_proto == IPPROTO_TCP || key->basic.ip_proto == IPPROTO_UDP || key->basic.ip_proto == IPPROTO_SCTP) && fl_dump_key_port_range(skb, key, mask)) goto nla_put_failure; if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && (fl_dump_key_val(skb, &key->enc_ipv4.src, TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src, TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, sizeof(key->enc_ipv4.src)) || fl_dump_key_val(skb, &key->enc_ipv4.dst, TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst, TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, sizeof(key->enc_ipv4.dst)))) goto nla_put_failure; else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS && (fl_dump_key_val(skb, &key->enc_ipv6.src, TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src, TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK, sizeof(key->enc_ipv6.src)) || fl_dump_key_val(skb, &key->enc_ipv6.dst, TCA_FLOWER_KEY_ENC_IPV6_DST, &mask->enc_ipv6.dst, TCA_FLOWER_KEY_ENC_IPV6_DST_MASK, sizeof(key->enc_ipv6.dst)))) goto nla_put_failure; if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID, &mask->enc_key_id, TCA_FLOWER_UNSPEC, sizeof(key->enc_key_id)) || fl_dump_key_val(skb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, sizeof(key->enc_tp.src)) || fl_dump_key_val(skb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT, &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, sizeof(key->enc_tp.dst)) || fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) || fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts)) goto nla_put_failure; if (fl_dump_key_ct(skb, &key->ct, &mask->ct)) goto nla_put_failure; if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags)) goto nla_put_failure; if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH, &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, sizeof(key->hash.hash))) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) { struct cls_fl_filter *f = fh; struct nlattr *nest; struct fl_flow_key *key, *mask; bool skip_hw; if (!f) return skb->len; t->tcm_handle = f->handle; nest = nla_nest_start_noflag(skb, TCA_OPTIONS); if (!nest) goto nla_put_failure; spin_lock(&tp->lock); if (f->res.classid && nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid)) goto nla_put_failure_locked; key = &f->key; mask = &f->mask->key; skip_hw = tc_skip_hw(f->flags); if (fl_dump_key(skb, net, key, mask)) goto nla_put_failure_locked; if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) goto nla_put_failure_locked; spin_unlock(&tp->lock); if (!skip_hw) fl_hw_update_stats(tp, f, rtnl_held); if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count)) goto nla_put_failure; if (tcf_exts_dump(skb, &f->exts)) goto nla_put_failure; nla_nest_end(skb, nest); if (tcf_exts_dump_stats(skb, &f->exts) < 0) goto nla_put_failure; return skb->len; nla_put_failure_locked: spin_unlock(&tp->lock); nla_put_failure: nla_nest_cancel(skb, nest); return -1; } static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) { struct cls_fl_filter *f = fh; struct nlattr *nest; bool skip_hw; if (!f) 
return skb->len; t->tcm_handle = f->handle; nest = nla_nest_start_noflag(skb, TCA_OPTIONS); if (!nest) goto nla_put_failure; spin_lock(&tp->lock); skip_hw = tc_skip_hw(f->flags); if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) goto nla_put_failure_locked; spin_unlock(&tp->lock); if (!skip_hw) fl_hw_update_stats(tp, f, rtnl_held); if (tcf_exts_terse_dump(skb, &f->exts)) goto nla_put_failure; nla_nest_end(skb, nest); return skb->len; nla_put_failure_locked: spin_unlock(&tp->lock); nla_put_failure: nla_nest_cancel(skb, nest); return -1; } static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv) { struct fl_flow_tmplt *tmplt = tmplt_priv; struct fl_flow_key *key, *mask; struct nlattr *nest; nest = nla_nest_start_noflag(skb, TCA_OPTIONS); if (!nest) goto nla_put_failure; key = &tmplt->dummy_key; mask = &tmplt->mask; if (fl_dump_key(skb, net, key, mask)) goto nla_put_failure; nla_nest_end(skb, nest); return skb->len; nla_put_failure: nla_nest_cancel(skb, nest); return -EMSGSIZE; } static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q, unsigned long base) { struct cls_fl_filter *f = fh; if (f && f->res.classid == classid) { if (cl) __tcf_bind_filter(q, &f->res, base); else __tcf_unbind_filter(q, &f->res); } } static bool fl_delete_empty(struct tcf_proto *tp) { struct cls_fl_head *head = fl_head_dereference(tp); spin_lock(&tp->lock); tp->deleting = idr_is_empty(&head->handle_idr); spin_unlock(&tp->lock); return tp->deleting; } static struct tcf_proto_ops cls_fl_ops __read_mostly = { .kind = "flower", .classify = fl_classify, .init = fl_init, .destroy = fl_destroy, .get = fl_get, .put = fl_put, .change = fl_change, .delete = fl_delete, .delete_empty = fl_delete_empty, .walk = fl_walk, .reoffload = fl_reoffload, .hw_add = fl_hw_add, .hw_del = fl_hw_del, .dump = fl_dump, .terse_dump = fl_terse_dump, .bind_class = fl_bind_class, .tmplt_create = fl_tmplt_create, .tmplt_destroy = fl_tmplt_destroy, .tmplt_dump = fl_tmplt_dump, .owner = THIS_MODULE, .flags = TCF_PROTO_OPS_DOIT_UNLOCKED, }; static int __init cls_fl_init(void) { return register_tcf_proto_ops(&cls_fl_ops); } static void __exit cls_fl_exit(void) { unregister_tcf_proto_ops(&cls_fl_ops); } module_init(cls_fl_init); module_exit(cls_fl_exit); MODULE_AUTHOR("Jiri Pirko <[email protected]>"); MODULE_DESCRIPTION("Flower classifier"); MODULE_LICENSE("GPL v2");
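/*
 * Illustrative sketch, not part of cls_flower.c: the classifier above
 * stores each filter hashed by its masked key (the mkey field used as
 * the rhashtable key) and matches packets by AND-ing the dissected
 * packet key with the filter's mask before the hash lookup.  The
 * standalone helpers below use made-up fl_example_* names and plain
 * userspace C to show that masked-match idea in isolation; they are an
 * assumption-level demonstration, not kernel code.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define FL_EXAMPLE_KEY_LEN 4	/* toy key size; real flower keys are larger */

/* AND a key with a mask byte by byte, in the spirit of fl_set_masked_key(). */
static void fl_example_mask_key(uint8_t *mkey, const uint8_t *key,
				const uint8_t *mask)
{
	int i;

	for (i = 0; i < FL_EXAMPLE_KEY_LEN; i++)
		mkey[i] = key[i] & mask[i];
}

/* A packet matches a filter when its masked key equals the filter's mkey. */
static int fl_example_match(const uint8_t *pkt_key, const uint8_t *mask,
			    const uint8_t *filter_mkey)
{
	uint8_t masked[FL_EXAMPLE_KEY_LEN];

	fl_example_mask_key(masked, pkt_key, mask);
	return memcmp(masked, filter_mkey, sizeof(masked)) == 0;
}

int main(void)
{
	/* Only the first two bytes are masked in; the rest are wildcarded. */
	uint8_t mask[FL_EXAMPLE_KEY_LEN]       = { 0xff, 0xff, 0x00, 0x00 };
	uint8_t filter_key[FL_EXAMPLE_KEY_LEN] = { 0x0a, 0x01, 0x00, 0x00 };
	uint8_t pkt_key[FL_EXAMPLE_KEY_LEN]    = { 0x0a, 0x01, 0x7f, 0x23 };
	uint8_t filter_mkey[FL_EXAMPLE_KEY_LEN];

	fl_example_mask_key(filter_mkey, filter_key, mask);
	printf("match: %d\n", fl_example_match(pkt_key, mask, filter_mkey));
	return 0;
}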
830098.c
/* MIT License Copyright (c) 2018 Viviano Riccardo Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files ((the "LICENSE")), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "llab.h" char* get_full_path(char* directory, char* filename){ char* temp = (char*)malloc(sizeof(char)*256);// temp = (256) temp[0] = '\0'; strcat(temp,directory); strcat(temp,filename); return temp; } /* This function set the output from a given mask already set * * Input: * @ int size:= the size of input and output and mask * * @ float* mask:= the vector of the mask (0s and 1s) * dimensions: size * * @ float* input:= the vector of the input before the dropout * dimensions: size * @ float* output:= the vector of the input after the dropout * dimensions: size * */ void get_dropout_array(int size, float* mask, float* input, float* output){ int i; for(i = 0; i < size; i++){ output[i] = mask[i]*input[i]; } } /* This function set the mask for dropout for a layer * * Input: * @ int size:= the size of input,mask * @ float* mask:= the mask that must be set * @ float threshold:= the dropout threshold * * */ void set_dropout_mask(int size, float* mask, float threshold){ int i; for(i = 0; i < size; i++){ if(r2() < threshold) mask[i] = 0; } } /* This function add the l2regularization noise to a single weight derivative * * Input: * @ float* dw:= the derivative of the weight * @ float w:= the weight * @ float lambda:= an hyperparameter * @ int n:= the number of total weights in the network * */ void ridge_regression(float *dw, float w, float lambda_value, int n){ (*dw) = (*dw) + (float)((((double)(lambda_value))/((double)(n)))*((double)(w))); } /* Function used to read all the files in a directory * Input: * @char** name:= a matrix of char with * dimensione: n_files*longest_length__of_files * @char* directory:= the name of the directory where the files are * */ int read_files(char** name, char* directory){ DIR *d; struct dirent *dir; int count = 0; int index = 0; char* temp = "."; char* temp2 = ".."; char* temp3 = (char*)malloc(sizeof(char*)*256); temp3[0] = '\0'; strcat(temp3,directory); d = opendir(directory); if(d == NULL) return 1; if (d) { while ((dir = readdir(d)) != NULL) { if((strcmp(dir->d_name, temp) && strcmp(dir->d_name, temp2))){ strcat(temp3,dir->d_name); strcpy(name[count],temp3); temp3[0] = '\0'; strcat(temp3,directory); fprintf(stderr,"%s\n", name[count]); count++; } } closedir(d); } free(temp3); return(count); } /* Function used to convert a number in a string * * Input: * @int i:= the number that want to be converted in string * @char b[]:= an array where the string will be stored * */ char* itoa(int i, char b[]){ char 
    char const digit[] = "0123456789"; char* p = b;
    if(i<0){ *p++ = '-'; i *= -1; }
    int shifter = i;
    do{ ++p; shifter = shifter/10; }while(shifter);
    *p = '\0';
    do{ *--p = digit[i%10]; i = i/10; }while(i);
    return b;
}

/* Function used to shuffle randomly the pointers of the matrix m * Input: * @ char** m:= a matrix, dimensions: n*k * @ int n:= number of char* pointers of m */
int shuffle_char_matrix(char** m,int n){ if (n > 1) { size_t i; for (i = 0; i < n - 1; i++) { size_t j = i + rand() / (RAND_MAX / (n - i) + 1); char* t = m[j]; m[j] = m[i]; m[i] = t; } } return 0; }

/* Function used to shuffle randomly the pointers of the 2 matrices m and m1 * Input: * @ char** m:= a matrix, dimensions: n*k * @ char** m1:= a matrix, dimensions: n*k * @ int n:= number of char* pointers of m */
int shuffle_char_matrices(char** m,char** m1,int n){ if (n > 1) { size_t i; for (i = 0; i < n - 1; i++) { size_t j = i + rand() / (RAND_MAX / (n - i) + 1); char* t = m[j]; char* t1 = m1[j]; m[j] = m[i]; m[i] = t; m1[j] = m1[i]; m1[i] = t1; } } return 0; }

/* Function used to shuffle randomly the pointers of the 2 matrices m and m1 * Input: * @ float** m:= a matrix, dimensions: n*k * @ float** m1:= a matrix, dimensions: n*k * @ int n:= number of float* pointers of m */
int shuffle_float_matrices(float** m,float** m1,int n){ if (n > 1) { size_t i; for (i = 0; i < n - 1; i++) { size_t j = i + rand() / (RAND_MAX / (n - i) + 1); float* t = m[j]; float* t1 = m1[j]; m[j] = m[i]; m[i] = t; m1[j] = m1[i]; m1[i] = t1; } } return 0; }

/* Function used to shuffle randomly the pointers of the 2 matrices m and m1 and the elements of 2 vectors, float and int * Input: * @ char** m:= a matrix, dimensions: n*k * @ char** m1:= a matrix, dimensions: n*k * @ float* f:= the float vector * @ int* v:= the int vector * @ int n:= number of char* pointers of m */
int shuffle_char_matrices_float_int_vectors(char** m,char** m1,float* f, int* v,int n){ if (n > 1) { size_t i; for (i = 0; i < n - 1; i++) { size_t j = i + rand() / (RAND_MAX / (n - i) + 1); char* t = m[j]; char* t1 = m1[j]; float t2 = f[j]; int t3 = v[j]; m[j] = m[i]; m[i] = t; m1[j] = m1[i]; m1[i] = t1; f[j] = f[i]; f[i] = t2; v[j] = v[i]; v[i] = t3; } } return 0; }

/* Function used to shuffle randomly the pointers of the 2 matrices m and m1 and the elements of 2 vectors, float and int * Input: * @ float** m:= a matrix, dimensions: n*k * @ float** m1:= a matrix, dimensions: n*k * @ float* f:= the float vector * @ int* v:= the int vector * @ int n:= number of float* pointers of m */
int shuffle_float_matrices_float_int_vectors(float** m,float** m1,float* f, int* v,int n){ if (n > 1) { size_t i; for (i = 0; i < n - 1; i++) { size_t j = i + rand() / (RAND_MAX / (n - i) + 1); float* t = m[j]; float* t1 = m1[j]; float t2 = f[j]; int t3 = v[j]; m[j] = m[i]; m[i] = t; m1[j] = m1[i]; m1[i] = t1; f[j] = f[i]; f[i] = t2; v[j] = v[i]; v[i] = t3; } } return 0; }

/* Function used to shuffle randomly the pointers of the 2 matrices m and m1 and the elements of 3 vectors, one float and two int * Input: * @ char** m:= a matrix, dimensions: n*k * @ char** m1:= a matrix, dimensions: n*k * @ float* f:= the float vector * @ int* v:= the int vector * @ int* v2:= the int vector * @ int n:= number of char* pointers of m */
int shuffle_char_matrices_float_int_int_vectors(char** m,char** m1,float* f, int* v, int* v2, int n){ if (n > 1) { size_t i; for (i = 0; i < n - 1; i++) { size_t j = i + rand() / (RAND_MAX / (n - i) + 1); char* t = m[j]; char* t1 = m1[j]; float t2 = f[j]; int t3 = v[j]; int t4 = v2[j]; m[j] = m[i]; m[i] = t; m1[j] = m1[i]; m1[i] = t1; f[j] = f[i]; f[i] = t2; v[j] = v[i]; v[i] = t3; v2[j] = v2[i]; v2[i] = t4; } } return 0; }

/* Function used to shuffle randomly the pointers of the 2 matrices m and m1 and the elements of 3 vectors, one float and two int * Input: * @ float** m:= a matrix, dimensions: n*k * @ float** m1:= a matrix, dimensions: n*k * @ float* f:= the float vector * @ int* v:= the int vector * @ int* v2:= the int vector * @ int n:= number of float* pointers of m */
int shuffle_float_matrices_float_int_int_vectors(float** m,float** m1,float* f, int* v, int* v2, int n){ if (n > 1) { size_t i; for (i = 0; i < n - 1; i++) { size_t j = i + rand() / (RAND_MAX / (n - i) + 1); float* t = m[j]; float* t1 = m1[j]; float t2 = f[j]; int t3 = v[j]; int t4 = v2[j]; m[j] = m[i]; m[i] = t; m1[j] = m1[i]; m1[i] = t1; f[j] = f[i]; f[i] = t2; v[j] = v[i]; v[i] = t3; v2[j] = v2[i]; v2[i] = t4; } } return 0; }

/* Function used to shuffle randomly the pointers of the matrix m * Input: * @ float** m:= a matrix, dimensions: n*k * @ int n:= number of float* pointers of m */
int shuffle_float_matrix(float** m,int n){ if (n > 1) { size_t i; for (i = 0; i < n - 1; i++) { size_t j = i + rand() / (RAND_MAX / (n - i) + 1); float* t = m[j]; m[j] = m[i]; m[i] = t; } } return 0; }

/* Function used to shuffle randomly, with the same permutation, the pointers of the matrix m and of the tensor t */
int shuffle_float_matrix_float_tensor(float** m,float*** t,int n){ if (n > 1) { size_t i; for (i = 0; i < n - 1; i++) { size_t j = i + rand() / (RAND_MAX / (n - i) + 1); float* temp = m[j]; float** temp2 = t[j]; m[j] = m[i]; m[i] = temp; t[j] = t[i]; t[i] = temp2; } } return 0; }

/* Function used to shuffle randomly the pointers of the matrix m * Input: * @ int** m:= a matrix, dimensions: n*k * @ int n:= number of int* pointers of m */
int shuffle_int_matrix(int** m,int n){ if (n > 1) { size_t i; for (i = 0; i < n - 1; i++) { size_t j = i + rand() / (RAND_MAX / (n - i) + 1); int* t = m[j]; m[j] = m[i]; m[i] = t; } } return 0; }

/* Function used to shuffle randomly the elements of the array m * Input: * @ int* m:= an array, dimension: n * @ int n:= number of elements of m */
int shuffle_int_array(int* m,int n){ if (n > 1) { size_t i; for (i = 0; i < n - 1; i++) { size_t j = i + rand() / (RAND_MAX / (n - i) + 1); int t = m[j]; m[j] = m[i]; m[i] = t; } } return 0; }

/* Function used to shuffle randomly only the first length elements of the array m (values are drawn from the whole array) * Input: * @ int* m:= an array, dimension: n * @ int n:= number of elements of m * @ int length:= number of leading positions to shuffle */
int shuffle_int_array_until_length(int* m,int n, int length){ if (n > 1 && length > 1) { size_t i; for (i = 0; i < length - 1; i++) { size_t j = i + rand() / (RAND_MAX / (n - i) + 1); int t = m[j]; m[j] = m[i]; m[i] = t; } } return 0; }

/* Function used to shuffle randomly the pointers of the 2 matrices m and m1 * Input: * @ int** m:= a matrix, dimensions: n*k * @ int** m1:= a matrix, dimensions: n*k * @ int n:= number of int* pointers of m */
int shuffle_int_matrices(int** m,int** m1,int n){ if (n > 1) { size_t i; for (i = 0; i < n - 1; i++) { size_t j = i + rand() / (RAND_MAX / (n - i) + 1); int* t = m[j]; int* t1 = m1[j]; m[j] = m[i]; m[i] = t; m1[j] = m1[i]; m1[i] = t1; } } return 0; }

/* function to check if a number is a nan.
If the input is not real then it returns false else returns true*/ int bool_is_real(float d){ return !(d != d); } /* this function read a file and store the content in a char* vector * * Input: * @char** ksource:= the space where must be stored the file * @char* fname:= the name of the file * @int* size:= the size of the file that will be filled * * */ int read_file_in_char_vector(char** ksource, char* fname, int* size){ int i; FILE *kfile; size_t kfilesize; kfile = fopen(fname, "r" ); if(kfile == NULL){ fprintf(stderr,"Error opening file %s\n",fname); return 1; } fseek( kfile, 0, SEEK_END ); kfilesize = ((size_t)ftell(kfile)); rewind( kfile ); (*ksource) = (char*)malloc(kfilesize*sizeof(char)); i = fread((*ksource), 1, kfilesize, kfile ); fclose( kfile ); (*size) = kfilesize; return 0; } /* given a float* input array this function copies it in float* output array * * Input: * * @ float* input:= the array that must be copied * @ float* output:= the copied array * @ int size:= the dimensions of input and output * * */ void copy_array(float* input, float* output, int size){ if(input == NULL || output == NULL || !size) return; memcpy(output,input,(sizeof(float)*size)); } /* given a int* input array this function copies it in float* output array * * Input: * * @ int* input:= the array that must be copied * @ int* output:= the copied array * @ int size:= the dimensions of input and output * * */ void copy_int_array(int* input, int* output, int size){ if(input == NULL || output == NULL || !size) return; memcpy(output,input,(sizeof(int)*size)); } /* given a char* input array this function copies it in char* output array * * Input: * * @ char* input:= the array that must be copied * @ char* output:= the copied array * @ int size:= the dimensions of input and output * * */ void copy_char_array(char* input, char* output, int size){ if(input == NULL || output == NULL || !size) return; memcpy(output,input,(sizeof(char)*size)); } /* This function frees a space allocated by a matrix * * Input: * * @ void** m:= the matrix m * @ int n:= number of rows of m * * */ void free_matrix(void** m, int n){ int i; for(i = 0; i < n; i++){ if(m[i] != NULL) free(m[i]); } free(m); } /* This function returns a matrix that can be associated with the confusion matrix * where the rows rapresent the model output*2 with real yes and no and the cols rapresent predicted model yes and predicted model no * label i: * * label i: true positive true negative * * * model positive tp(correct) fp(incorrect) * * * model negative fn(incorrect) tn(correct) * * Inputs: * * @ float* model_output:= the output from the model * @ float* real_output:= the real output from data * @ long long unsigned int** cm:= a confusion matrix already computed for others output arrays, in this case * the correct/incorrect responses will be summed with the previous computed, dimensions = size*2xsize*2 * @ int size:= the length of model_output and real output arrays * @ float threshold:= the arbitrary threshold chosen to classify as output 1, for example: * if threshold = 0.5 and model_output[i] >= threshold then model_output[i] is classified as 1 */ long long unsigned int** confusion_matrix(float* model_output, float* real_output, long long unsigned int** cm, int size, float threshold){ long long unsigned int** conf_mat; int i; if(cm == NULL){ conf_mat = (long long unsigned int**)malloc(sizeof(long long unsigned int*)*size*2); for(i = 0; i < 2*size; i++){ conf_mat[i] = (long long unsigned int*)calloc(size*2,sizeof(long long unsigned int)); } } else conf_mat = 
cm; for(i = 0; i < size; i++){ if(real_output[i] >= threshold && model_output[i] >= threshold) conf_mat[i*2+1][i*2+1]++; else if(real_output[i] < threshold && model_output[i] < threshold) conf_mat[i*2][i*2]++; else if(real_output[i] >= threshold && model_output[i] < threshold) conf_mat[i*2+1][i*2]++; else if(real_output[i] < threshold && model_output[i] >= threshold) conf_mat[i*2][i*2+1]++; } return conf_mat; } /* this function returns an array with the accuracy for each label i * * Inputs: * * @ long long unsigned int** cm:= a confusion matrix, dimensions = size*2xsize*2 * @ int size:= confusion materix dimensions * */ double* accuracy_array(long long unsigned int** cm, int size){ double* accuracy_arr = (double*)calloc(size,sizeof(double)); int i; for(i = 0; i < size; i++){ accuracy_arr[i] = (double)100*((double)cm[i*2][i*2]+cm[i*2+1][i*2+1])/((double)((cm[i*2][i*2]+cm[i*2+1][i*2+1]+cm[i*2+1][i*2]+cm[i*2][i*2+1]))); if(accuracy_arr[i]!=accuracy_arr[i]) accuracy_arr[i] = 0; } return accuracy_arr; } /* this function returns an array with the precision for each label i * * Inputs: * * @ long long unsigned int** cm:= a confusion matrix, dimensions = size*2xsize*2 * @ int size:= confusion materix dimensions * */ double* precision_array(long long unsigned int** cm, int size){ double* accuracy_arr = (double*)calloc(size,sizeof(double)); int i; for(i = 0; i < size; i++){ accuracy_arr[i] = (double)100*((double)cm[i*2+1][i*2+1])/((double)((cm[i*2+1][i*2+1]+cm[i*2][i*2+1]))); if(accuracy_arr[i]!=accuracy_arr[i]) accuracy_arr[i] = 0; } return accuracy_arr; } /* this function returns an array with the sensitivity for each label i * * Inputs: * * @ long long unsigned int** cm:= a confusion matrix, dimensions = size*2xsize*2 * @ int size:= confusion materix dimensions * */ double* sensitivity_array(long long unsigned int** cm, int size){ double* accuracy_arr = (double*)calloc(size,sizeof(double)); int i; for(i = 0; i < size; i++){ accuracy_arr[i] = (double)100*((double)cm[i*2+1][i*2+1])/((double)((cm[i*2+1][i*2+1]+cm[i*2+1][i*2]))); if(accuracy_arr[i]!=accuracy_arr[i]) accuracy_arr[i] = 0; } return accuracy_arr; } /* this function returns an array with the specificity for each label i * * Inputs: * * @ long long unsigned int** cm:= a confusion matrix, dimensions = size*2xsize*2 * @ int size:= confusion materix dimensions * */ double* specificity_array(long long unsigned int** cm, int size){ double* accuracy_arr = (double*)calloc(size,sizeof(double)); int i; for(i = 0; i < size; i++){ accuracy_arr[i] = (double)100*((double)cm[i*2][i*2])/((double)((cm[i*2][i*2]+cm[i*2][i*2+1]))); if(accuracy_arr[i]!=accuracy_arr[i]) accuracy_arr[i] = 0; } return accuracy_arr; } void print_accuracy(long long unsigned int** cm, int size){ int i; double* aa = accuracy_array(cm,size); for(i = 0; i < size; i++){ printf("%lf ",aa[i]); } printf("\n"); free(aa); } void print_precision(long long unsigned int** cm, int size){ int i; double* aa = precision_array(cm,size); for(i = 0; i < size; i++){ printf("%lf ",aa[i]); } printf("\n"); free(aa); } void print_sensitivity(long long unsigned int** cm, int size){ int i; double* aa = sensitivity_array(cm,size); for(i = 0; i < size; i++){ printf("%lf ",aa[i]); } printf("\n"); free(aa); } void print_specificity(long long unsigned int** cm, int size){ int i; double* aa = specificity_array(cm,size); for(i = 0; i < size; i++){ printf("%lf ",aa[i]); } printf("\n"); free(aa); } /* this function given a float array and a int array of indices from 0 to hi * already sorted will sort the array of 
indices based on the float a * * input: * * @ float A[]:= the array of values * @ int I[]:= the array of indices * @ int lo:= 0 * int hi:= len-1 * */ void quick_sort(float A[], int I[], int lo, int hi){ if (lo < hi) { float pivot = A[I[lo + (hi - lo) / 2]]; int t; int i = lo - 1; int j = hi + 1; while (1) { while (A[I[++i]] < pivot); while (A[I[--j]] > pivot); if (i >= j) break; t = I[i]; I[i] = I[j]; I[j] = t; } quick_sort(A, I, lo, j); quick_sort(A, I, j + 1, hi); } } /* this function given a float array and a int array of indices from 0 to hi * already sorted will sort the array of indices based on the float a * * input: * * @ float A[]:= the array of values * @ int I[]:= the array of indices * @ int lo:= 0 * int hi:= len-1 * */ void quick_sort_int(int A[], int I[], int lo, int hi){ if (lo < hi) { int pivot = A[I[lo + (hi - lo) / 2]]; int t; int i = lo - 1; int j = hi + 1; while (1) { while (A[I[++i]] < pivot); while (A[I[--j]] > pivot); if (i >= j) break; t = I[i]; I[i] = I[j]; I[j] = t; } quick_sort_int(A, I, lo, j); quick_sort_int(A, I, j + 1, hi); } } char** get_files(int index1, int n_files){ char** files = (char**)malloc(sizeof(char*)*n_files); int i; char* temp = ".bin"; for(i = 0; i < n_files; i++){ files[i] = (char*)malloc(sizeof(char*)*256); files[i][0] = '.'; files[i][1] = '/'; files[i][2] = '\0'; char* b = (char*)malloc(sizeof(char)*256); b = itoa((i+index1),b); strcat(files[i],b); strcat(files[i],temp); free(b); } return files; } /* this function checks if there is some nan in a matrix, returns 1 if there is at least 1 nan, 0 otherwise * * Inputs: * * @ float** m:= the matrix that must be checked * @ int rows:= the rows of matrix that must be checked * @ int cols:= the cols matrix that must be checked * */ int check_nans_matrix(float** m, int rows, int cols){ int i,j; for(i = 0; i < rows; i++){ for(j = 0; j < cols; j++){ if(!bool_is_real(m[i][j])) return 1; } } return 0; } void merge(float* values, int* indices, int temp[], int from_index, int mid, int to, int length){ int k = from_index, i = from_index, j = mid + 1; while (i <= mid && j <= to){ if (values[indices[i]] < values[indices[j]]) { temp[k++] = indices[i++]; } else { temp[k++] = indices[j++]; } } while (i < length && i <= mid) { temp[k++] = indices[i++]; } for (i = from_index; i <= to; i++) { indices[i] = temp[i]; } } void mergesort(float* values, int* indices, int low, int high){ int i,m,from,mid,to,length = high-low + 1; int* temp = (int*)calloc(length,sizeof(int)); for(i = 0; i < length; i++){ temp[i] = indices[i]; } for (m = 1; m <= high - low; m = 2*m){ for (i = low; i < high; i += 2*m){ from = i; mid = i + m - 1; to = min(i + 2*m - 1, high); merge(values,indices, temp, from, mid, to,length); } } free(temp); } void sort(float* values, int* indices, int low, int high){ if(high-low > SORT_SWITCH_THRESHOLD) mergesort(values,indices,low,high); else quick_sort(values,indices,low,high); } void free_tensor(float*** t, int dim1, int dim2){ int i,j; for(i = 0; i < dim1; i++){ for(j = 0; j < dim2; j++){ free(t[i][j]); } free(t[i]); } free(t); } void free_4D_tensor(float**** t, int dim1, int dim2, int dim3){ int i,j,k; for(i = 0; i < dim1; i++){ for(j = 0; j < dim2; j++){ for(k = 0; k < dim3; k++){ free(t[i][j][k]); } free(t[i][j]); } free(t[i]); } free(t); } void set_vector_with_value(float value, float* v, int dimension){ int i; for(i = 0; i < dimension; i++){ v[i] = value; } } void set_int_vector_with_value(int value, int* v, int dimension){ int i; for(i = 0; i < dimension; i++){ v[i] = value; } } // this function 
gives me the file of all the data files. each line in the file is a filename // that filename contains all the filenames of the data the client must work with // furthermore each line at the end of the file name has a ; to tell: that filename is currently being used or not // this function checks this file and return an array of package size length within the first free filename it meets // then it sets the ; to that filename char* read_files_from_file(char* file, int package_size){ // checking files of the subset int size = 0,count,i; char* ksource; read_file_in_char_vector(&ksource,file,&size);// reading the files char* temp = (char*)calloc(package_size,sizeof(char)); char* temp2 = NULL; FILE* f = fopen(file,"w"); for(i = 0,count = 0; i < size; i++){ if(ksource[i] != '\n' && temp2 == NULL){ fprintf(f,"%c",ksource[i]); temp[count] = ksource[i]; count++; } else if (ksource[i] == '\n' && temp2 == NULL){ if(temp[count-1] != ';'){ fprintf(f,";"); temp[count] = '\0'; temp2 = temp; } else{ free(temp); temp = (char*)calloc(package_size,sizeof(char)); } fprintf(f,"\n"); count = 0; } else{ fprintf(f,"%c",ksource[i]); } } fclose(f); if(temp2 == NULL) free(temp); free(ksource); return temp2; } // read above. this function open that file and remove the ; from the line of file_to_free void set_files_free_from_file(char* file_to_free, char* file){ // checking files of the subset int size = 0,count,i; char* ksource; read_file_in_char_vector(&ksource,file,&size);// reading the files char* temp = (char*)calloc(1024,sizeof(char)); FILE* f = fopen(file,"w"); for(i = 0,count = 0; i < size; i++){ if(ksource[i] != '\n' && ksource[i] != ';'){ fprintf(f,"%c",ksource[i]); temp[count] = ksource[i]; count++; } else{ if(ksource[i] == ';'){ temp[count] = '\0'; printf("%s\n%s\n",temp,file_to_free); printf("%d\n",strcmp(temp,file_to_free)); if(strcmp(temp,file_to_free)) fprintf(f,";"); } else fprintf(f,"\n"); free(temp); temp = (char*)calloc(1024,sizeof(char)); count = 0; } } fclose(f); free(temp); free(ksource); } // this function removes all the ; from file void remove_occupied_sets(char* file){ // checking files of the subset int size = 0,i; char* ksource; read_file_in_char_vector(&ksource,file,&size);// reading the files FILE* f = fopen(file,"w"); for(i = 0; i < size; i++){ if(ksource[i] != ';') fprintf(f,"%c",ksource[i]); } fclose(f); free(ksource); } /* msleep(): Sleep for the requested number of milliseconds. 
*/ int msleep(long msec) { struct timespec ts; int res; if (msec < 0) { errno = EINVAL; return -1; } ts.tv_sec = msec / 1000; ts.tv_nsec = (msec % 1000) * 1000000; do { res = nanosleep(&ts, &ts); } while (res && errno == EINTR); return res; } /* get a copy of the input array in a new allocated array * */ int* get_new_copy_int_array(int* array, int size){ if(array == NULL) return NULL; int* new = (int*)malloc(sizeof(int)*size); copy_int_array(array,new,size); return new; } int argmax(float* vector, int dimension){ if(vector == NULL || dimension <= 0) return -1; int i,index = 0; float max = vector[0]; for(i = 0; i < dimension; i++){ if(vector[i] > max){ max = vector[i]; index = i; } } return index; } // flaot are the values, indeces are the indices of the values // the indeces array is sorted in this way the first is the greatest // reverse_indices[i] tells us where the values[i] is in the array indices void max_heapify(float* values, uint* indices,uint* reverse_indices, uint n, uint i) { // Find largest among root, left child and right child uint largest = i; uint left = 2 * i + 1; uint right = 2 * i + 2; if (left < n && values[indices[left]] >= values[indices[largest]]) largest = left; if (right < n && values[indices[right]] >= values[indices[largest]]) largest = right; // Swap and continue heapifying if root is not largest if (largest != i) { uint x = indices[i]; indices[i] = indices[largest]; reverse_indices[indices[largest]] = i; reverse_indices[x] = largest; indices[largest] = x; max_heapify(values,indices, reverse_indices, n,largest); } } // flaot are the values, indeces are the indices of the values // the indeces array is sorted in this way the first is the greatest // reverse_indices[i] tells us where the values[i] is in the array indices void max_heapify_up(float* values, uint* indices,uint* reverse_indices, uint n, uint i) { // Find largest among root, left child and right child if(i == 0) return; uint smallest = i; uint parent; if((i%2)) parent = (i-1)/2; else parent = (i-2)/2; if (values[indices[parent]] <= values[indices[smallest]]) smallest = parent; if (smallest != i) { uint x = indices[i]; indices[i] = indices[smallest]; reverse_indices[indices[smallest]] = i; reverse_indices[x] = smallest; indices[smallest] = x; max_heapify_up(values,indices, reverse_indices, n,smallest); } } void remove_ith_element_from_max_heap(float* values, uint* indices,uint* reverse_indices, uint n, uint i){ if(i >= n) return; float value1 = values[indices[i]]; float value2 = values[indices[n-1]]; reverse_indices[indices[i]] = n-1; reverse_indices[indices[n-1]] = i; uint x = indices[i]; indices[i] = indices[n-1]; indices[n-1] = x; if(value2 < value1) max_heapify(values,indices, reverse_indices, n-1,i); else if(value2>value1) max_heapify_up(values,indices, reverse_indices, n-1,i); } void update_recursive_cumulative_heap_up(float* values, uint index, uint started_index, uint n, float value){ uint parent; if(index){ if(!(index%2)) parent = (index-2)/2; else parent = (index-1)/2; if(!started_index){ values[index]+=value; } } else{ if(!started_index){ values[index]+=value; } return; } update_recursive_cumulative_heap_up(values,parent,0,n,value); } int index_is_inside_buffer(uint* buffer, uint length, uint index){ uint i; for(i = 0; i < length; i++){ if(buffer[i] == index){ return 1; } } return 0; } int value_is_child(uint child, uint parent){ if(child <= parent) return 0; do{ if(child%2) child = (child-1)/2; else child = (child-2)/2; }while(child > parent); if(child == parent) return 1; return 0; } float 
subtracted_value(uint index, float* current_values, uint* taken_values, uint taken_values_length){
    float ret = 0; uint i;
    for(i = 0; i < taken_values_length; i++){
        if(!index_is_inside_buffer(taken_values,i,taken_values[i])){
            if(value_is_child(taken_values[i],index)) ret+=current_values[taken_values[i]];
        }
    }
    return ret;
}

float subtracted_value_rewards(uint index, int* current_values, uint* taken_values, uint taken_values_length,float alpha){
    float ret = 0; uint i;
    for(i = 0; i < taken_values_length; i++){
        if(!index_is_inside_buffer(taken_values,i,taken_values[i]) && current_values[taken_values[i]] > 0){
            if(value_is_child(taken_values[i],index)) ret+=pow(((double)1/((double)current_values[taken_values[i]])),alpha);
            //printf("i, ret: %d, %f\n",taken_values[i],ret);
        }
    }
    return ret;
}

// log(n) sample
uint weighted_random_sample(float* cumulative_values, float* current_values, uint index, uint size, float random_value, double sum, uint* taken_values, uint taken_values_length){
    if(index >= size){
        //printf("M ");
        return index-1;
    }
    float v = 0, v_left = 0;
    if(!index_is_inside_buffer(taken_values,taken_values_length,index)){
        v = current_values[index]/sum;
        if(random_value <= v){
            //printf("K: %d, %d. ", index, index_is_inside_buffer(taken_values,taken_values_length,index));
            return index;
        }
    }
    uint left = index*2+1;
    uint right = index*2+2;
    if(left >= size){
        //printf("L ");
        uint i;
        for(i = index+1; i < size; i++){ if(!index_is_inside_buffer(taken_values,taken_values_length,i)) return i; }
        return index;
    }
    if(right >= size){
        uint i;
        //printf("R");
        if(!index_is_inside_buffer(taken_values,taken_values_length,left)) return left;
        for(i = index+1; i < size; i++){ if(!index_is_inside_buffer(taken_values,taken_values_length,i)) return i; }
        return left;
    }
    if(!index_is_inside_buffer(taken_values,taken_values_length,left)){ v_left = current_values[left]; }
    random_value-=v;
    float sub = subtracted_value(left,current_values,taken_values,taken_values_length);
    if(random_value <= ((v_left+cumulative_values[left]-sub)/sum)) return weighted_random_sample(cumulative_values, current_values, left, size, random_value, sum,taken_values,taken_values_length);
    else return weighted_random_sample(cumulative_values, current_values, right, size, random_value-((v_left+cumulative_values[left]-sub)/sum), sum,taken_values,taken_values_length);
}

uint weighted_random_sample_rewards(float* cumulative_values, int* current_values, uint index, uint size, float random_value, double sum, uint* taken_values, uint taken_values_length, float alpha){
    if(index >= size){
        //printf("M ");
        return index-1;
    }
    float v = 0;
    float v_left = 0;
    if(current_values[index] > 0 && !index_is_inside_buffer(taken_values,taken_values_length,index)){
        v = pow(((double)1/((double)current_values[index])),alpha)/sum;
        if(random_value <= v){
            //printf("random, v, sum, index: %f, %f, %lf, %d\n",random_value,v,sum, index);
            //printf("K: %d, %d. ", index, index_is_inside_buffer(taken_values,taken_values_length,index));
            return index;
        }
    }
    //printf("random, v, sum, index: %f, %f, %lf, %d\n",random_value,v,sum, index);
    uint left = index*2+1;
    uint right = index*2+2;
    random_value-=v;
    if(left >= size){
        if(current_values[index]>0) return index;
        else return size;
    }
    if(right >= size){
        if(current_values[left] > 0) return left;
        else return size;
    }
    if(current_values[left] > 0 && !index_is_inside_buffer(taken_values,taken_values_length,left)){
        v_left = pow(((double)1/((double)current_values[left])),alpha);
    }
    float sub = subtracted_value_rewards(left,current_values,taken_values,taken_values_length,alpha);
    //printf("sub, rv: %f %f\n",sub,random_value);
    uint returned_index = size;
    if(random_value <= ((v_left+cumulative_values[left]-sub)/sum)){
        returned_index = weighted_random_sample_rewards(cumulative_values, current_values, left, size, random_value, sum,taken_values,taken_values_length,alpha);
        if(returned_index == size) returned_index = weighted_random_sample_rewards(cumulative_values, current_values, right, size, random_value-((v_left+cumulative_values[left]-sub)/sum), sum,taken_values,taken_values_length,alpha);
        if(current_values[index] > 0 && returned_index == size) return index;
        return returned_index;
    }
    else{
        returned_index = weighted_random_sample_rewards(cumulative_values, current_values, right, size, random_value-((v_left+cumulative_values[left]-sub)/sum), sum,taken_values,taken_values_length,alpha);
        if(returned_index == size) returned_index = weighted_random_sample_rewards(cumulative_values, current_values, left, size, random_value, sum,taken_values,taken_values_length,alpha);
        if(current_values[index] > 0 && returned_index == size) return index;
        return returned_index;
    }
}

int is_little_endian(){ unsigned int x = 1; return ((int) (((char *)&x)[0]) == 1); }

void reverse_ptr(void* ptr, uint64_t size){
    if(size <= 1) return;
    char* array = (char*) ptr;
    uint64_t i, len = size/2;
    for(i = 0; i < len; i++){ char temp = array[i]; array[i] = array[size-1-i]; array[size-1-i] = temp; }
    return;
}

void swap_array_bytes_order(void* ptr, uint64_t size, uint64_t len){
    if(size <= 1 || !len) return;
    char* array = (char*) ptr; /* was (char*) &ptr, which reversed the bytes of the local pointer variable instead of the pointed-to data */
    uint64_t i;
    for(i = 0; i < len; i++){ reverse_ptr(array + i*size,size); }
}

void convert_data(void* ptr, uint64_t size, uint64_t len){ if(!is_little_endian()) swap_array_bytes_order(ptr,size,len); }
1315.c
#include "mcu_init.h" /* Defines ------------------------------------------------------------------*/ /* Typedefs -----------------------------------------------------------------*/ /* Macros -------------------------------------------------------------------*/ /* Local variables ----------------------------------------------------------*/ /* Extern variables ---------------------------------------------------------*/ /* Global variables ---------------------------------------------------------*/ /* Private function prototypes ----------------------------------------------*/ /* Public functions ---------------------------------------------------------*/ void SystemClock_Config(void) { BOARD_BootClockPll24M(); } /* Private functions --------------------------------------------------------*/
509279.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE122_Heap_Based_Buffer_Overflow__c_CWE805_struct_memmove_51b.c Label Definition File: CWE122_Heap_Based_Buffer_Overflow__c_CWE805.label.xml Template File: sources-sink-51b.tmpl.c */ /* * @description * CWE: 122 Heap Based Buffer Overflow * BadSource: Allocate using malloc() and set data pointer to a small buffer * GoodSource: Allocate using malloc() and set data pointer to a large buffer * Sink: memmove * BadSink : Copy twoIntsStruct array to data using memmove * Flow Variant: 51 Data flow: data passed as an argument from one function to another in different source files * * */ #include "std_testcase.h" /* all the sinks are the same, we just want to know where the hit originated if a tool flags one */ #ifndef OMITBAD void CWE122_Heap_Based_Buffer_Overflow__c_CWE805_struct_memmove_51b_badSink(twoIntsStruct * data) { { twoIntsStruct source[100]; { size_t i; /* Initialize array */ for (i = 0; i < 100; i++) { source[i].intOne = 0; source[i].intTwo = 0; } } /* POTENTIAL FLAW: Possible buffer overflow if data < 100 */ memmove(data, source, 100*sizeof(twoIntsStruct)); printStructLine(&data[0]); free(data); } } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodG2B uses the GoodSource with the BadSink */ void CWE122_Heap_Based_Buffer_Overflow__c_CWE805_struct_memmove_51b_goodG2BSink(twoIntsStruct * data) { { twoIntsStruct source[100]; { size_t i; /* Initialize array */ for (i = 0; i < 100; i++) { source[i].intOne = 0; source[i].intTwo = 0; } } /* POTENTIAL FLAW: Possible buffer overflow if data < 100 */ memmove(data, source, 100*sizeof(twoIntsStruct)); printStructLine(&data[0]); free(data); } } #endif /* OMITGOOD */
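
/* Editor-added sketch (hypothetical, hedged): the companion *_51a.c driver file
 * is not part of this listing. Under the CWE805 "bad source" pattern described
 * in the header comment, the bad caller allocates `data` smaller than the
 * 100-element source array that the sink memmove()s over it, roughly as below.
 * This is an illustration only, not the actual 51a code. */
#ifndef OMITBAD
void hypothetical_badSource_caller(void)
{
    twoIntsStruct * data;
    /* FLAW (illustrative): only 50 elements allocated, while the sink copies 100 */
    data = (twoIntsStruct *)malloc(50*sizeof(twoIntsStruct));
    if (data == NULL) {exit(-1);}
    CWE122_Heap_Based_Buffer_Overflow__c_CWE805_struct_memmove_51b_badSink(data);
}
#endif /* OMITBAD */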
206572.c
/* * %CopyrightBegin% * * Copyright Ericsson AB 2009-2018. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * %CopyrightEnd% */ /* Erlang Native InterFace */ /* * Environment contains a pointer to currently executing process. * In the dirty case this pointer do however not point to the * actual process structure of the executing process, but instead * a "shadow process structure". This in order to be able to handle * heap allocation without the need to acquire the main lock on * the process. * * The dirty process is allowed to allocate on the heap without * the main lock, i.e., incrementing htop, but is not allowed to * modify mbuf, offheap, etc without the main lock. The dirty * process moves mbuf list and offheap list of the shadow process * structure into the real structure when the dirty nif call * completes. */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "erl_nif.h" #include "sys.h" #include "global.h" #include "erl_binary.h" #include "bif.h" #include "error.h" #include "big.h" #include "erl_map.h" #include "beam_bp.h" #include "erl_thr_progress.h" #include "dtrace-wrapper.h" #include "erl_process.h" #include "erl_bif_unique.h" #include "erl_utils.h" #include "erl_io_queue.h" #include "erl_proc_sig_queue.h" #undef ERTS_WANT_NFUNC_SCHED_INTERNALS__ #define ERTS_WANT_NFUNC_SCHED_INTERNALS__ #include "erl_nfunc_sched.h" #if defined(USE_DYNAMIC_TRACE) && (defined(USE_DTRACE) || defined(USE_SYSTEMTAP)) #define HAVE_USE_DTRACE 1 #endif #include <limits.h> #include <stddef.h> /* offsetof */ /* Information about a loaded nif library. * Each successful call to erlang:load_nif will allocate an instance of * erl_module_nif. Two calls opening the same library will thus have the same * 'handle'. 
*/ struct erl_module_nif { void* priv_data; void* handle; /* "dlopen" */ struct enif_entry_t entry; erts_refc_t rt_cnt; /* number of resource types */ erts_refc_t rt_dtor_cnt; /* number of resource types with destructors */ Module* mod; /* Can be NULL if orphan with dtor-resources left */ ErlNifFunc _funcs_copy_[1]; /* only used for old libs */ }; typedef ERL_NIF_TERM (*NativeFunPtr)(ErlNifEnv*, int, const ERL_NIF_TERM[]); #ifdef DEBUG # define READONLY_CHECK # define ERTS_DBG_NIF_NOT_SCHED_MARKER ((void *) (UWord) 1) #endif #ifdef READONLY_CHECK # define ADD_READONLY_CHECK(ENV,PTR,SIZE) add_readonly_check(ENV,PTR,SIZE) static void add_readonly_check(ErlNifEnv*, unsigned char* ptr, unsigned sz); #else # define ADD_READONLY_CHECK(ENV,PTR,SIZE) ((void)0) #endif #ifdef ERTS_NIF_ASSERT_IN_ENV # define ASSERT_IN_ENV(ENV, TERM, NR, TYPE) dbg_assert_in_env(ENV, TERM, NR, TYPE, __func__) static void dbg_assert_in_env(ErlNifEnv*, Eterm term, int nr, const char* type, const char* func); # include "erl_gc.h" #else # define ASSERT_IN_ENV(ENV, TERM, NR, TYPE) #endif #ifdef DEBUG static int is_offheap(const ErlOffHeap* off_heap); #endif #ifdef USE_VM_PROBES void dtrace_nifenv_str(ErlNifEnv *, char *); #endif #define MIN_HEAP_FRAG_SZ 200 static Eterm* alloc_heap_heavy(ErlNifEnv* env, size_t need, Eterm* hp); static ERTS_INLINE int is_scheduler(void) { ErtsSchedulerData *esdp = erts_get_scheduler_data(); if (!esdp) return 0; if (ERTS_SCHEDULER_IS_DIRTY(esdp)) return -1; return 1; } static ERTS_INLINE void execution_state(ErlNifEnv *env, Process **c_pp, int *schedp) { if (schedp) *schedp = is_scheduler(); if (c_pp) { if (!env || env->proc->common.id == ERTS_INVALID_PID) *c_pp = NULL; else { Process *c_p = env->proc; if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)) { ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); } else { c_p = env->proc->next; ASSERT(is_scheduler() < 0); ASSERT(c_p && env->proc->common.id == c_p->common.id); } *c_pp = c_p; ASSERT(!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)); } } } static ERTS_INLINE Eterm* alloc_heap(ErlNifEnv* env, size_t need) { Eterm* hp = env->hp; env->hp += need; if (env->hp <= env->hp_end) { return hp; } return alloc_heap_heavy(env, need, hp); } static Eterm* alloc_heap_heavy(ErlNifEnv* env, size_t need, Eterm* hp) { env->hp = hp; if (env->heap_frag == NULL) { ASSERT(HEAP_LIMIT(env->proc) == env->hp_end); ASSERT(env->hp + need > env->hp_end); HEAP_TOP(env->proc) = env->hp; } else { Uint usz = env->hp - env->heap_frag->mem; env->proc->mbuf_sz += usz - env->heap_frag->used_size; env->heap_frag->used_size = usz; ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size); } hp = erts_heap_alloc(env->proc, need, MIN_HEAP_FRAG_SZ); env->heap_frag = MBUF(env->proc); env->hp = hp + need; env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size; return hp; } #if SIZEOF_LONG != ERTS_SIZEOF_ETERM static ERTS_INLINE void ensure_heap(ErlNifEnv* env, size_t may_need) { if (env->hp + may_need > env->hp_end) { alloc_heap_heavy(env, may_need, env->hp); env->hp -= may_need; } } #endif void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif, Process* tracee) { env->mod_nif = mod_nif; env->proc = p; env->hp = HEAP_TOP(p); env->hp_end = HEAP_LIMIT(p); env->heap_frag = NULL; env->fpe_was_unmasked = erts_block_fpe(); env->tmp_obj_list = NULL; env->exception_thrown = 0; env->tracee = tracee; ASSERT(p->common.id != ERTS_INVALID_PID); #ifdef ERTS_NIF_ASSERT_IN_ENV env->dbg_disable_assert_in_env = 0; #endif #if defined(DEBUG) && 
defined(ERTS_DIRTY_SCHEDULERS) { ErtsSchedulerData *esdp = erts_get_scheduler_data(); ASSERT(esdp); if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { erts_aint32_t state = erts_atomic32_read_nob(&p->state); ASSERT(p->scheduler_data == esdp); ASSERT((state & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS)) && !(state & (ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS))); } } #endif } static void full_cache_env(ErlNifEnv *env); static void cache_env(ErlNifEnv* env); static void full_flush_env(ErlNifEnv *env); static void flush_env(ErlNifEnv* env); /* Temporary object header, auto-deallocated when NIF returns or when * independent environment is cleared. * * The payload can be accessed with &tmp_obj_ptr[1] but keep in mind that its * first element must not require greater alignment than `next`. */ struct enif_tmp_obj_t { struct enif_tmp_obj_t* next; void (*dtor)(struct enif_tmp_obj_t*); ErtsAlcType_t allocator; /*char data[];*/ }; static ERTS_INLINE void free_tmp_objs(ErlNifEnv* env) { while (env->tmp_obj_list != NULL) { struct enif_tmp_obj_t* free_me = env->tmp_obj_list; env->tmp_obj_list = free_me->next; free_me->dtor(free_me); } } /* Whether the given environment is bound to a process and will be cleaned up * when the NIF returns. It's safe to use temp_alloc for objects in * env->tmp_obj_list when this is true. */ static ERTS_INLINE int is_proc_bound(ErlNifEnv *env) { return env->mod_nif != NULL; } /* Allocates and attaches an object to the given environment, running its * destructor when the environment is cleared. To avoid temporary variables the * address of the allocated object is returned instead of the enif_tmp_obj_t. * * The destructor *must* call `erts_free(tmp_obj->allocator, tmp_obj)` to free * the object. If the destructor needs to refer to the allocated object its * address will be &tmp_obj[1]. */ static ERTS_INLINE void *alloc_tmp_obj(ErlNifEnv *env, size_t size, void (*dtor)(struct enif_tmp_obj_t*)) { struct enif_tmp_obj_t *tmp_obj; ErtsAlcType_t allocator; allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF; tmp_obj = erts_alloc(allocator, sizeof(struct enif_tmp_obj_t) + MAX(1, size)); tmp_obj->next = env->tmp_obj_list; tmp_obj->allocator = allocator; tmp_obj->dtor = dtor; env->tmp_obj_list = tmp_obj; return (void*)&tmp_obj[1]; } /* Generic destructor for objects allocated through alloc_tmp_obj that don't * care about their payload. */ static void tmp_alloc_dtor(struct enif_tmp_obj_t *tmp_obj) { erts_free(tmp_obj->allocator, tmp_obj); } void erts_post_nif(ErlNifEnv* env) { erts_unblock_fpe(env->fpe_was_unmasked); full_flush_env(env); free_tmp_objs(env); env->exiting = ERTS_PROC_IS_EXITING(env->proc); } /* * Initialize a NifExport struct. Create it if needed and store it in the * proc. The direct_fp function is what will be invoked by op_call_nif, and * the indirect_fp function, if not NULL, is what the direct_fp function * will call. If the allocated NifExport isn't enough to hold all of argv, * allocate a larger one. Save 'current' and registers if first time this * call is scheduled. 
*/ static ERTS_INLINE ERL_NIF_TERM schedule(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp, Eterm mod, Eterm func_name, int argc, const ERL_NIF_TERM argv[]) { NifExport *ep; Process *c_p, *dirty_shadow_proc; execution_state(env, &c_p, NULL); if (c_p == env->proc) dirty_shadow_proc = NULL; else dirty_shadow_proc = env->proc; ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p)); ep = erts_nif_export_schedule(c_p, dirty_shadow_proc, c_p->current, c_p->cp, BeamOpCodeAddr(op_call_nif), direct_fp, indirect_fp, mod, func_name, argc, (const Eterm *) argv); if (!ep->m) { /* First time this call is scheduled... */ erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1); ep->m = env->mod_nif; } return (ERL_NIF_TERM) THE_NON_VALUE; } static ERL_NIF_TERM dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]); static ERL_NIF_TERM dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]); int erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *reg) { int exiting; ERL_NIF_TERM *argv = (ERL_NIF_TERM *) reg; NifExport *nep = ERTS_I_BEAM_OP_TO_NIF_EXPORT(I); ErtsCodeMFA *codemfa = erts_code_to_codemfa(I); NativeFunPtr dirty_nif = (NativeFunPtr) I[1]; ErlNifEnv env; ERL_NIF_TERM result; #ifdef DEBUG erts_aint32_t state = erts_atomic32_read_nob(&c_p->state); ASSERT(nep == ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p)); ASSERT(!c_p->scheduler_data); ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING) && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))); ASSERT(esdp); nep->func = ERTS_DBG_NIF_NOT_SCHED_MARKER; #endif erts_pre_nif(&env, c_p, nep->m, NULL); env.proc = erts_make_dirty_shadow_proc(esdp, c_p); env.proc->freason = EXC_NULL; env.proc->fvalue = NIL; env.proc->ftrace = NIL; env.proc->i = c_p->i; ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p))); erts_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC)); ASSERT(esdp->current_nif == NULL); esdp->current_nif = &env; erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); result = (*dirty_nif)(&env, codemfa->arity, argv); /* Call dirty NIF */ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); ASSERT(esdp->current_nif == &env); esdp->current_nif = NULL; ASSERT(env.proc->static_flags & ERTS_STC_FLG_SHADOW_PROC); ASSERT(env.proc->next == c_p); exiting = ERTS_PROC_IS_EXITING(c_p); if (!exiting) { if (env.exception_thrown) { schedule_exception: schedule(&env, dirty_nif_exception, NULL, am_erts_internal, am_dirty_nif_exception, 1, &env.proc->fvalue); } else if (is_value(result)) { schedule(&env, dirty_nif_finalizer, NULL, am_erts_internal, am_dirty_nif_finalizer, 1, &result); } else if (env.proc->freason != TRAP) { /* user returned garbage... */ ERTS_DECL_AM(badreturn); (void) enif_raise_exception(&env, AM_badreturn); goto schedule_exception; } else { /* Rescheduled by dirty NIF call... 
*/ ASSERT(nep->func != ERTS_DBG_NIF_NOT_SCHED_MARKER); } c_p->i = env.proc->i; c_p->arity = env.proc->arity; } #ifdef DEBUG if (nep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER) nep->func = NULL; #endif erts_unblock_fpe(env.fpe_was_unmasked); full_flush_env(&env); free_tmp_objs(&env); return exiting; } static void full_flush_env(ErlNifEnv* env) { flush_env(env); if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) /* Dirty nif call using shadow process struct */ erts_flush_dirty_shadow_proc(env->proc); } static void full_cache_env(ErlNifEnv* env) { if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) { erts_cache_dirty_shadow_proc(env->proc); /* * If shadow proc had heap fragments when flushed * those have now been moved to the real proc. * Ensure heap pointers do not point into a heap * fragment on real proc... */ ASSERT(!env->proc->mbuf); env->hp_end = HEAP_LIMIT(env->proc); env->hp = HEAP_TOP(env->proc); } cache_env(env); } /* Flush out our cached heap pointers to allow an ordinary HAlloc */ static void flush_env(ErlNifEnv* env) { if (env->heap_frag == NULL) { ASSERT(env->hp_end == HEAP_LIMIT(env->proc)); ASSERT(env->hp >= HEAP_TOP(env->proc)); ASSERT(env->hp <= HEAP_LIMIT(env->proc)); HEAP_TOP(env->proc) = env->hp; } else { Uint usz; ASSERT(env->hp_end != HEAP_LIMIT(env->proc)); ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size); usz = env->hp - env->heap_frag->mem; env->proc->mbuf_sz += usz - env->heap_frag->used_size; env->heap_frag->used_size = usz; ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size); } } /* Restore cached heap pointers to allow alloc_heap again. */ static void cache_env(ErlNifEnv* env) { env->heap_frag = MBUF(env->proc); if (env->heap_frag == NULL) { ASSERT(env->hp_end == HEAP_LIMIT(env->proc)); ASSERT(env->hp <= HEAP_TOP(env->proc)); ASSERT(env->hp <= HEAP_LIMIT(env->proc)); env->hp = HEAP_TOP(env->proc); } else { env->hp = env->heap_frag->mem + env->heap_frag->used_size; env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size; } } void* enif_priv_data(ErlNifEnv* env) { return env->mod_nif->priv_data; } void* enif_alloc(size_t size) { return erts_alloc_fnf(ERTS_ALC_T_NIF, (Uint) size); } void* enif_realloc(void* ptr, size_t size) { return erts_realloc_fnf(ERTS_ALC_T_NIF, ptr, size); } void enif_free(void* ptr) { erts_free(ERTS_ALC_T_NIF, ptr); } struct enif_msg_environment_t { ErlNifEnv env; Process phony_proc; }; static ERTS_INLINE void setup_nif_env(struct enif_msg_environment_t* msg_env, struct erl_module_nif* mod, Process* tracee) { Eterm* phony_heap = (Eterm*) msg_env; /* dummy non-NULL ptr */ msg_env->env.hp = phony_heap; msg_env->env.hp_end = phony_heap; msg_env->env.heap_frag = NULL; msg_env->env.mod_nif = mod; msg_env->env.tmp_obj_list = NULL; msg_env->env.proc = &msg_env->phony_proc; msg_env->env.exception_thrown = 0; sys_memset(&msg_env->phony_proc, 0, sizeof(Process)); HEAP_START(&msg_env->phony_proc) = phony_heap; HEAP_TOP(&msg_env->phony_proc) = phony_heap; HEAP_LIMIT(&msg_env->phony_proc) = phony_heap; HEAP_END(&msg_env->phony_proc) = phony_heap; MBUF(&msg_env->phony_proc) = NULL; msg_env->phony_proc.common.id = ERTS_INVALID_PID; msg_env->env.tracee = tracee; #ifdef FORCE_HEAP_FRAGS msg_env->phony_proc.space_verified = 0; msg_env->phony_proc.space_verified_from = NULL; #endif #ifdef ERTS_NIF_ASSERT_IN_ENV msg_env->env.dbg_disable_assert_in_env = 0; #endif } ErlNifEnv* enif_alloc_env(void) { struct enif_msg_environment_t* msg_env = erts_alloc_fnf(ERTS_ALC_T_NIF, sizeof(struct enif_msg_environment_t)); 
setup_nif_env(msg_env, NULL, NULL); return &msg_env->env; } void enif_free_env(ErlNifEnv* env) { enif_clear_env(env); erts_free(ERTS_ALC_T_NIF, env); } static ERTS_INLINE void pre_nif_noproc(struct enif_msg_environment_t* msg_env, struct erl_module_nif* mod, Process* tracee) { setup_nif_env(msg_env, mod, tracee); msg_env->env.fpe_was_unmasked = erts_block_fpe(); } static ERTS_INLINE void post_nif_noproc(struct enif_msg_environment_t* msg_env) { erts_unblock_fpe(msg_env->env.fpe_was_unmasked); enif_clear_env(&msg_env->env); } static ERTS_INLINE void clear_offheap(ErlOffHeap* oh) { oh->first = NULL; oh->overhead = 0; } void enif_clear_env(ErlNifEnv* env) { struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)env; Process* p = &menv->phony_proc; ASSERT(p == menv->env.proc); ASSERT(p->common.id == ERTS_INVALID_PID); ASSERT(MBUF(p) == menv->env.heap_frag); free_tmp_objs(env); if (MBUF(p) != NULL) { erts_cleanup_offheap(&MSO(p)); clear_offheap(&MSO(p)); free_message_buffer(MBUF(p)); MBUF(p) = NULL; menv->env.heap_frag = NULL; } ASSERT(HEAP_TOP(p) == HEAP_END(p)); menv->env.hp = menv->env.hp_end = HEAP_TOP(p); ASSERT(!is_offheap(&MSO(p))); } #ifdef DEBUG static int enif_send_delay = 0; #define ERTS_FORCE_ENIF_SEND_DELAY() (enif_send_delay++ % 32 == 0) #else #ifdef ERTS_PROC_LOCK_OWN_IMPL #define ERTS_FORCE_ENIF_SEND_DELAY() 0 #else /* * We always schedule messages if we do not use our own * process lock implementation, as if we try to do a trylock on * a lock that might already be locked by the same thread. * And what happens then with different mutex implementations * is not always guaranteed. */ #define ERTS_FORCE_ENIF_SEND_DELAY() 1 #endif #endif int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks) { ErlTraceMessageQueue *msgq, **last_msgq; int reds = 0; /* Only one thread at a time is allowed to flush trace messages, so we require the main lock to be held when doing the flush */ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p); erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE); msgq = c_p->trace_msg_q; if (!msgq) goto error; do { Process* rp; ErtsProcLocks rp_locks; ErtsMessage *first, **last; Uint len; first = msgq->first; last = msgq->last; len = msgq->len; msgq->first = NULL; msgq->last = &msgq->first; msgq->len = 0; erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE); ASSERT(len != 0); rp = erts_proc_lookup(msgq->receiver); if (rp) { rp_locks = 0; if (rp->common.id == c_p->common.id) rp_locks = c_p_locks; erts_queue_proc_messages(c_p, rp, rp_locks, first, last, len); if (rp->common.id == c_p->common.id) rp_locks &= ~c_p_locks; if (rp_locks) erts_proc_unlock(rp, rp_locks); reds += len; } else { erts_cleanup_messages(first); } reds += 1; erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE); msgq = msgq->next; } while (msgq); last_msgq = &c_p->trace_msg_q; while (*last_msgq) { msgq = *last_msgq; if (msgq->len == 0) { *last_msgq = msgq->next; erts_free(ERTS_ALC_T_TRACE_MSG_QUEUE, msgq); } else { last_msgq = &msgq->next; } } error: erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE); return reds; } int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, ErlNifEnv* msg_env, ERL_NIF_TERM msg) { struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env; ErtsProcLocks rp_locks = 0; ErtsProcLocks lc_locks = 0; Process* rp; Process* c_p; ErtsMessage *mp; Eterm from; Eterm receiver = to_pid->pid; int scheduler; execution_state(env, &c_p, &scheduler); if (scheduler > 0) { /* Normal scheduler */ rp = erts_proc_lookup(receiver); if (!rp) return 0; } else { if (c_p) { ASSERT(scheduler < 0); /* 
Dirty scheduler */ if (ERTS_PROC_IS_EXITING(c_p)) return 0; if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) { erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } } rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN, receiver, rp_locks, ERTS_P2P_FLG_INC_REFC); if (!rp) { if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)) erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); return 0; } } if (c_p == rp) rp_locks = ERTS_PROC_LOCK_MAIN; if (menv) { Eterm token = c_p ? SEQ_TRACE_TOKEN(c_p) : am_undefined; if (token != NIL && token != am_undefined) { /* This code is copied from erts_send_message */ Eterm stoken = SEQ_TRACE_TOKEN(c_p); #ifdef USE_VM_PROBES DTRACE_CHARBUF(sender_name, 64); DTRACE_CHARBUF(receiver_name, 64); Sint tok_label = 0; Sint tok_lastcnt = 0; Sint tok_serial = 0; Eterm utag = NIL; *sender_name = *receiver_name = '\0'; if (DTRACE_ENABLED(message_send)) { erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)), "%T", c_p->common.id); erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)), "%T", rp->common.id); } #endif if (have_seqtrace(stoken)) { seq_trace_update_send(c_p); seq_trace_output(stoken, msg, SEQ_TRACE_SEND, rp->common.id, c_p); } #ifdef USE_VM_PROBES if (!(DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING)) { stoken = NIL; } #endif token = enif_make_copy(msg_env, stoken); #ifdef USE_VM_PROBES if (DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING) { if (is_immed(DT_UTAG(c_p))) utag = DT_UTAG(c_p); else utag = enif_make_copy(msg_env, DT_UTAG(c_p)); } if (DTRACE_ENABLED(message_send)) { if (have_seqtrace(stoken)) { tok_label = SEQ_TRACE_T_DTRACE_LABEL(stoken); tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(stoken)); tok_serial = signed_val(SEQ_TRACE_T_SERIAL(stoken)); } DTRACE6(message_send, sender_name, receiver_name, size_object(msg), tok_label, tok_lastcnt, tok_serial); } #endif } flush_env(msg_env); mp = erts_alloc_message(0, NULL); ERL_MESSAGE_TOKEN(mp) = token; mp->data.heap_frag = menv->env.heap_frag; ASSERT(mp->data.heap_frag == MBUF(&menv->phony_proc)); if (mp->data.heap_frag != NULL) { /* Move all offheap's from phony proc to the first fragment. Quick and dirty... */ ASSERT(!is_offheap(&mp->data.heap_frag->off_heap)); mp->data.heap_frag->off_heap = MSO(&menv->phony_proc); clear_offheap(&MSO(&menv->phony_proc)); menv->env.heap_frag = NULL; MBUF(&menv->phony_proc) = NULL; } } else { erts_literal_area_t litarea; ErlOffHeap *ohp; Eterm *hp; Uint sz; INITIALIZE_LITERAL_PURGE_AREA(litarea); sz = size_object_litopt(msg, &litarea); if (c_p && !env->tracee) { full_flush_env(env); mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp); full_cache_env(env); } else { erts_aint_t state = erts_atomic32_read_nob(&rp->state); if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) { mp = erts_alloc_message(sz, &hp); ohp = sz == 0 ? NULL : &mp->hfrag.off_heap; } else { ErlHeapFragment *bp = new_message_buffer(sz); mp = erts_alloc_message(0, NULL); mp->data.heap_frag = bp; hp = bp->mem; ohp = &bp->off_heap; } } ERL_MESSAGE_TOKEN(mp) = am_undefined; msg = copy_struct_litopt(msg, sz, &hp, ohp, &litarea); } from = c_p ? c_p->common.id : am_undefined; if (!env || !env->tracee) { if (c_p && IS_TRACED_FL(c_p, F_TRACE_SEND)) { full_flush_env(env); trace_send(c_p, receiver, msg); full_cache_env(env); } } else { /* This clause is taken when the nif is called in the context of a traced process. 
We do not know which locks we have so we have to do a try lock and if that fails we enqueue the message in a special trace message output queue of the tracee */ ErlTraceMessageQueue *msgq; Process *t_p = env->tracee; erts_proc_lock(t_p, ERTS_PROC_LOCK_TRACE); msgq = t_p->trace_msg_q; while (msgq != NULL) { if (msgq->receiver == receiver) { break; } msgq = msgq->next; } #ifdef ERTS_ENABLE_LOCK_CHECK lc_locks = erts_proc_lc_my_proc_locks(rp); rp_locks |= lc_locks; #endif if (ERTS_FORCE_ENIF_SEND_DELAY() || msgq || rp_locks & ERTS_PROC_LOCK_MSGQ || erts_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) { ERL_MESSAGE_TERM(mp) = msg; ERL_MESSAGE_FROM(mp) = from; ERL_MESSAGE_TOKEN(mp) = am_undefined; if (!msgq) { msgq = erts_alloc(ERTS_ALC_T_TRACE_MSG_QUEUE, sizeof(ErlTraceMessageQueue)); msgq->receiver = receiver; msgq->first = mp; msgq->last = &mp->next; msgq->len = 1; /* Insert in linked list */ msgq->next = t_p->trace_msg_q; t_p->trace_msg_q = msgq; erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); erts_schedule_flush_trace_messages(t_p, 0); } else { msgq->len++; *msgq->last = mp; msgq->last = &mp->next; erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); } goto done; } else { erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); rp_locks &= ~ERTS_PROC_LOCK_TRACE; rp_locks |= ERTS_PROC_LOCK_MSGQ; } } if (c_p) erts_queue_proc_message(c_p, rp, rp_locks, mp, msg); else erts_queue_message(rp, rp_locks, mp, msg, from); done: if (c_p == rp) rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks & ~lc_locks) erts_proc_unlock(rp, rp_locks & ~lc_locks); if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)) erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); if (scheduler <= 0) erts_proc_dec_refc(rp); return 1; } int enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port, ErlNifEnv *msg_env, ERL_NIF_TERM msg) { int iflags = (erts_port_synchronous_ops ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP : ERTS_PORT_SFLGS_INVALID_LOOKUP); int scheduler; Process *c_p; Port *prt; int res; if (!env) erts_exit(ERTS_ABORT_EXIT, "enif_port_command: env == NULL"); execution_state(env, &c_p, &scheduler); if (!c_p) c_p = env->proc; if (scheduler > 0) prt = erts_port_lookup(to_port->port_id, iflags); else { if (ERTS_PROC_IS_EXITING(c_p)) return 0; prt = erts_thr_port_lookup(to_port->port_id, iflags); } if (!prt) res = 0; else res = erts_port_output_async(prt, c_p->common.id, msg); if (scheduler <= 0) erts_port_dec_refc(prt); return res; } /* * env must be the caller's environment in a scheduler or NULL in a * non-scheduler thread. * name must be an atom - anything else will just waste time. 
*/ static Eterm call_whereis(ErlNifEnv *env, Eterm name) { Process *c_p; Eterm res; int scheduler; execution_state(env, &c_p, &scheduler); ASSERT((c_p && scheduler) || (!c_p && !scheduler)); if (scheduler < 0) { /* dirty scheduler */ if (ERTS_PROC_IS_EXITING(c_p)) return 0; if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) c_p = NULL; /* as we don't have main lock */ } if (c_p) { /* main lock may be released below and c_p->htop updated by others */ flush_env(env); } res = erts_whereis_name_to_id(c_p, name); if (c_p) cache_env(env); return res; } int enif_whereis_pid(ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPid *pid) { Eterm res; if (is_not_atom(name)) return 0; res = call_whereis(env, name); /* enif_get_local_ functions check the type */ return enif_get_local_pid(env, res, pid); } int enif_whereis_port(ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPort *port) { Eterm res; if (is_not_atom(name)) return 0; res = call_whereis(env, name); /* enif_get_local_ functions check the type */ return enif_get_local_port(env, res, port); } ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term) { Uint sz; Eterm* hp; /* * No preserved sharing allowed as long as literals are also preserved. * Process independent environment can not be reached by purge. */ sz = size_object(src_term); hp = alloc_heap(dst_env, sz); return copy_struct(src_term, sz, &hp, &MSO(dst_env->proc)); } #ifdef DEBUG static int is_offheap(const ErlOffHeap* oh) { return oh->first != NULL; } #endif ErlNifPid* enif_self(ErlNifEnv* caller_env, ErlNifPid* pid) { if (caller_env->proc->common.id == ERTS_INVALID_PID) return NULL; pid->pid = caller_env->proc->common.id; return pid; } int enif_get_local_pid(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPid* pid) { if (is_internal_pid(term)) { pid->pid=term; return 1; } return 0; } int enif_get_local_port(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPort* port) { if (is_internal_port(term)) { port->port_id=term; return 1; } return 0; } int enif_is_atom(ErlNifEnv* env, ERL_NIF_TERM term) { return is_atom(term); } int enif_is_binary(ErlNifEnv* env, ERL_NIF_TERM term) { return is_binary(term) && (binary_bitsize(term) % 8 == 0); } int enif_is_empty_list(ErlNifEnv* env, ERL_NIF_TERM term) { return is_nil(term); } int enif_is_fun(ErlNifEnv* env, ERL_NIF_TERM term) { return is_fun(term); } int enif_is_pid(ErlNifEnv* env, ERL_NIF_TERM term) { return is_pid(term); } int enif_is_port(ErlNifEnv* env, ERL_NIF_TERM term) { return is_port(term); } int enif_is_ref(ErlNifEnv* env, ERL_NIF_TERM term) { return is_ref(term); } int enif_is_tuple(ErlNifEnv* env, ERL_NIF_TERM term) { return is_tuple(term); } int enif_is_list(ErlNifEnv* env, ERL_NIF_TERM term) { return is_list(term) || is_nil(term); } int enif_is_exception(ErlNifEnv* env, ERL_NIF_TERM term) { return env->exception_thrown && term == THE_NON_VALUE; } int enif_is_number(ErlNifEnv* env, ERL_NIF_TERM term) { return is_number(term); } static void aligned_binary_dtor(struct enif_tmp_obj_t* obj) { erts_free_aligned_binary_bytes_extra((byte*)obj, obj->allocator); } int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin) { ErtsAlcType_t allocator = is_proc_bound(env) ? 
ERTS_ALC_T_TMP : ERTS_ALC_T_NIF; union { struct enif_tmp_obj_t* tmp; byte* raw_ptr; }u; if (is_binary(bin_term)) { ProcBin *pb = (ProcBin*) binary_val(bin_term); if (pb->thing_word == HEADER_SUB_BIN) { ErlSubBin* sb = (ErlSubBin*) pb; pb = (ProcBin*) binary_val(sb->orig); } if (pb->thing_word == HEADER_PROC_BIN && pb->flags) erts_emasculate_writable_binary(pb); } u.tmp = NULL; bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, allocator, sizeof(struct enif_tmp_obj_t)); if (bin->data == NULL) { return 0; } if (u.tmp != NULL) { u.tmp->allocator = allocator; u.tmp->next = env->tmp_obj_list; u.tmp->dtor = &aligned_binary_dtor; env->tmp_obj_list = u.tmp; } bin->size = binary_size(bin_term); bin->ref_bin = NULL; ADD_READONLY_CHECK(env, bin->data, bin->size); return 1; } int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin) { ErlDrvSizeT sz; if (is_binary(term)) { return enif_inspect_binary(env,term,bin); } if (is_nil(term)) { bin->data = (unsigned char*) &bin->data; /* dummy non-NULL */ bin->size = 0; bin->ref_bin = NULL; return 1; } if (erts_iolist_size(term, &sz)) { return 0; } bin->data = alloc_tmp_obj(env, sz, &tmp_alloc_dtor); bin->size = sz; bin->ref_bin = NULL; erts_iolist_to_buf(term, (char*) bin->data, sz); ADD_READONLY_CHECK(env, bin->data, bin->size); return 1; } int enif_alloc_binary(size_t size, ErlNifBinary* bin) { Binary* refbin; refbin = erts_bin_drv_alloc_fnf(size); /* BUGBUG: alloc type? */ if (refbin == NULL) { return 0; /* The NIF must take action */ } bin->size = size; bin->data = (unsigned char*) refbin->orig_bytes; bin->ref_bin = refbin; return 1; } int enif_realloc_binary(ErlNifBinary* bin, size_t size) { if (bin->ref_bin != NULL) { Binary* oldbin; Binary* newbin; oldbin = (Binary*) bin->ref_bin; newbin = (Binary *) erts_bin_realloc_fnf(oldbin, size); if (!newbin) { return 0; } bin->ref_bin = newbin; bin->data = (unsigned char*) newbin->orig_bytes; bin->size = size; } else { unsigned char* old_data = bin->data; size_t cpy_sz = (size < bin->size ? 
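/*
 * Illustrative sketch (not part of the implementation): the usual ownership
 * pattern around enif_alloc_binary(). Once enif_make_binary() has been called
 * the resulting term owns the data; enif_release_binary() is only needed on
 * error paths. "make_zeros_nif" is a hypothetical example NIF.
 *
 *   static ERL_NIF_TERM make_zeros_nif(ErlNifEnv* env, int argc,
 *                                      const ERL_NIF_TERM argv[])
 *   {
 *       unsigned len;
 *       size_t i;
 *       ErlNifBinary bin;
 *       if (argc != 1 || !enif_get_uint(env, argv[0], &len))
 *           return enif_make_badarg(env);
 *       if (!enif_alloc_binary(len, &bin))
 *           return enif_raise_exception(env, enif_make_atom(env, "enomem"));
 *       for (i = 0; i < bin.size; i++)
 *           bin.data[i] = 0;
 *       return enif_make_binary(env, &bin);
 *   }
 */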
size : bin->size); enif_alloc_binary(size, bin); sys_memcpy(bin->data, old_data, cpy_sz); } return 1; } void enif_release_binary(ErlNifBinary* bin) { if (bin->ref_bin != NULL) { Binary* refbin = bin->ref_bin; erts_bin_release(refbin); } #ifdef DEBUG bin->data = NULL; bin->ref_bin = NULL; #endif } unsigned char* enif_make_new_binary(ErlNifEnv* env, size_t size, ERL_NIF_TERM* termp) { flush_env(env); *termp = new_binary(env->proc, NULL, size); cache_env(env); return binary_bytes(*termp); } int enif_term_to_binary(ErlNifEnv *dst_env, ERL_NIF_TERM term, ErlNifBinary *bin) { Sint size; byte *bp; Binary* refbin; size = erts_encode_ext_size(term); if (!enif_alloc_binary(size, bin)) return 0; refbin = bin->ref_bin; bp = bin->data; erts_encode_ext(term, &bp); bin->size = bp - bin->data; refbin->orig_size = bin->size; ASSERT(bin->data + bin->size == bp); return 1; } size_t enif_binary_to_term(ErlNifEnv *dst_env, const unsigned char* data, size_t data_sz, ERL_NIF_TERM *term, ErlNifBinaryToTerm opts) { Sint size; ErtsHeapFactory factory; byte *bp = (byte*) data; Uint32 flags = 0; switch ((Uint32)opts) { case 0: break; case ERL_NIF_BIN2TERM_SAFE: flags = ERTS_DIST_EXT_BTT_SAFE; break; default: return 0; } if ((size = erts_decode_ext_size(bp, data_sz)) < 0) return 0; if (size > 0) { flush_env(dst_env); erts_factory_proc_prealloc_init(&factory, dst_env->proc, size); } else { erts_factory_dummy_init(&factory); } *term = erts_decode_ext(&factory, &bp, flags); if (is_non_value(*term)) { return 0; } if (size > 0) { erts_factory_close(&factory); cache_env(dst_env); } ASSERT(bp > data); return bp - data; } int enif_is_identical(Eterm lhs, Eterm rhs) { return EQ(lhs,rhs); } int enif_compare(Eterm lhs, Eterm rhs) { Sint result = CMP(lhs,rhs); if (result < 0) { return -1; } else if (result > 0) { return 1; } return result; } ErlNifUInt64 enif_hash(ErlNifHash type, Eterm term, ErlNifUInt64 salt) { switch (type) { case ERL_NIF_INTERNAL_HASH: return make_internal_hash(term, (Uint32) salt); case ERL_NIF_PHASH2: /* It appears that make_hash2 doesn't always react to seasoning * as well as it should. Therefore, let's make it ignore the salt * value and declare salted uses of phash2 as unsupported. */ return make_hash2(term) & ((1 << 27) - 1); default: return 0; } } int enif_get_tuple(ErlNifEnv* env, Eterm tpl, int* arity, const Eterm** array) { Eterm* ptr; if (is_not_tuple(tpl)) { return 0; } ptr = tuple_val(tpl); *arity = arityval(*ptr); *array = ptr+1; return 1; } int enif_get_string(ErlNifEnv *env, ERL_NIF_TERM list, char* buf, unsigned len, ErlNifCharEncoding encoding) { Eterm* listptr; int n = 0; ASSERT(encoding == ERL_NIF_LATIN1); if (len < 1) { return 0; } while (is_not_nil(list)) { if (is_not_list(list)) { buf[n] = '\0'; return 0; } listptr = list_val(list); if (!is_byte(*listptr)) { buf[n] = '\0'; return 0; } buf[n++] = unsigned_val(*listptr); if (n >= len) { buf[n-1] = '\0'; /* truncate */ return -len; } list = CDR(listptr); } buf[n] = '\0'; return n + 1; } Eterm enif_make_binary(ErlNifEnv* env, ErlNifBinary* bin) { Eterm bin_term; if (bin->ref_bin != NULL) { Binary* binary = bin->ref_bin; /* If the binary is smaller than the heap binary limit we'll return a * heap binary to reduce the number of small refc binaries in the * system. We can't simply release the refc binary right away however; * the documentation states that the binary should be considered * read-only from this point on, which implies that it should still be * readable. 
* * We could keep it alive until we return by adding it to the temporary * object list, but that requires an off-heap allocation which is * potentially quite slow, so we create a dummy ProcBin instead and * rely on the next minor GC to get rid of it. */ if (bin->size <= ERL_ONHEAP_BIN_LIMIT) { ErlHeapBin* hb; hb = (ErlHeapBin*)alloc_heap(env, heap_bin_size(bin->size)); hb->thing_word = header_heap_bin(bin->size); hb->size = bin->size; sys_memcpy(hb->data, bin->data, bin->size); erts_build_proc_bin(&MSO(env->proc), alloc_heap(env, PROC_BIN_SIZE), binary); bin_term = make_binary(hb); } else { bin_term = erts_build_proc_bin(&MSO(env->proc), alloc_heap(env, PROC_BIN_SIZE), binary); } /* Our (possibly shared) ownership has been transferred to the term. */ bin->ref_bin = NULL; } else { flush_env(env); bin_term = new_binary(env->proc, bin->data, bin->size); cache_env(env); } return bin_term; } Eterm enif_make_sub_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term, size_t pos, size_t size) { ErlSubBin* sb; Eterm orig; Uint offset, bit_offset, bit_size; #ifdef DEBUG size_t src_size; ASSERT(is_binary(bin_term)); src_size = binary_size(bin_term); ASSERT(pos <= src_size); ASSERT(size <= src_size); ASSERT(pos + size <= src_size); #endif sb = (ErlSubBin*) alloc_heap(env, ERL_SUB_BIN_SIZE); ERTS_GET_REAL_BIN(bin_term, orig, offset, bit_offset, bit_size); sb->thing_word = HEADER_SUB_BIN; sb->size = size; sb->offs = offset + pos; sb->orig = orig; sb->bitoffs = bit_offset; sb->bitsize = 0; sb->is_writable = 0; return make_binary(sb); } Eterm enif_make_badarg(ErlNifEnv* env) { return enif_raise_exception(env, am_badarg); } Eterm enif_raise_exception(ErlNifEnv* env, ERL_NIF_TERM reason) { env->exception_thrown = 1; env->proc->fvalue = reason; BIF_ERROR(env->proc, EXC_ERROR); } int enif_has_pending_exception(ErlNifEnv* env, ERL_NIF_TERM* reason) { if (env->exception_thrown && reason != NULL) *reason = env->proc->fvalue; return env->exception_thrown; } int enif_get_atom(ErlNifEnv* env, Eterm atom, char* buf, unsigned len, ErlNifCharEncoding encoding) { Atom* ap; ASSERT(encoding == ERL_NIF_LATIN1); if (is_not_atom(atom) || len==0) { return 0; } ap = atom_tab(atom_val(atom)); if (ap->latin1_chars < 0 || ap->latin1_chars >= len) { return 0; } if (ap->latin1_chars == ap->len) { sys_memcpy(buf, ap->name, ap->len); } else { int dlen = erts_utf8_to_latin1((byte*)buf, ap->name, ap->len); ASSERT(dlen == ap->latin1_chars); (void)dlen; } buf[ap->latin1_chars] = '\0'; return ap->latin1_chars + 1; } int enif_get_int(ErlNifEnv* env, Eterm term, int* ip) { #if SIZEOF_INT == ERTS_SIZEOF_ETERM return term_to_Sint(term, (Sint*)ip); #elif (SIZEOF_LONG == ERTS_SIZEOF_ETERM) || \ (SIZEOF_LONG_LONG == ERTS_SIZEOF_ETERM) Sint i; if (!term_to_Sint(term, &i) || i < INT_MIN || i > INT_MAX) { return 0; } *ip = (int) i; return 1; #else # error Unknown word size #endif } int enif_get_uint(ErlNifEnv* env, Eterm term, unsigned* ip) { #if SIZEOF_INT == ERTS_SIZEOF_ETERM return term_to_Uint(term, (Uint*)ip); #elif (SIZEOF_LONG == ERTS_SIZEOF_ETERM) || \ (SIZEOF_LONG_LONG == ERTS_SIZEOF_ETERM) Uint i; if (!term_to_Uint(term, &i) || i > UINT_MAX) { return 0; } *ip = (unsigned) i; return 1; #endif } int enif_get_long(ErlNifEnv* env, Eterm term, long* ip) { #if SIZEOF_LONG == ERTS_SIZEOF_ETERM return term_to_Sint(term, ip); #elif SIZEOF_LONG == 8 return term_to_Sint64(term, ip); #elif SIZEOF_LONG == SIZEOF_INT int tmp,ret; ret = enif_get_int(env,term,&tmp); if (ret) { *ip = (long) tmp; } return ret; #else # error Unknown long word size #endif } int 
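/*
 * Illustrative sketch (not part of the implementation): the integer getters
 * above are the usual way to validate NIF arguments, with enif_make_badarg()
 * as the failure path. "add_nif" is a hypothetical example.
 *
 *   static ERL_NIF_TERM add_nif(ErlNifEnv* env, int argc,
 *                               const ERL_NIF_TERM argv[])
 *   {
 *       int a, b;
 *       if (argc != 2 ||
 *           !enif_get_int(env, argv[0], &a) ||
 *           !enif_get_int(env, argv[1], &b))
 *           return enif_make_badarg(env);
 *       return enif_make_int(env, a + b);
 *   }
 */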
enif_get_ulong(ErlNifEnv* env, Eterm term, unsigned long* ip) { #if SIZEOF_LONG == ERTS_SIZEOF_ETERM return term_to_Uint(term, ip); #elif SIZEOF_LONG == 8 return term_to_Uint64(term, ip); #elif SIZEOF_LONG == SIZEOF_INT int ret; unsigned int tmp; ret = enif_get_uint(env,term,&tmp); if (ret) { *ip = (unsigned long) tmp; } return ret; #else # error Unknown long word size #endif } #if HAVE_INT64 && SIZEOF_LONG != 8 int enif_get_int64(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifSInt64* ip) { return term_to_Sint64(term, ip); } int enif_get_uint64(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifUInt64* ip) { return term_to_Uint64(term, ip); } #endif /* HAVE_INT64 && SIZEOF_LONG != 8 */ int enif_get_double(ErlNifEnv* env, ERL_NIF_TERM term, double* dp) { FloatDef f; if (is_not_float(term)) { return 0; } GET_DOUBLE(term, f); *dp = f.fd; return 1; } int enif_get_atom_length(ErlNifEnv* env, Eterm atom, unsigned* len, ErlNifCharEncoding enc) { Atom* ap; ASSERT(enc == ERL_NIF_LATIN1); if (is_not_atom(atom)) return 0; ap = atom_tab(atom_val(atom)); if (ap->latin1_chars < 0) { return 0; } *len = ap->latin1_chars; return 1; } int enif_get_list_cell(ErlNifEnv* env, Eterm term, Eterm* head, Eterm* tail) { Eterm* val; if (is_not_list(term)) return 0; val = list_val(term); *head = CAR(val); *tail = CDR(val); return 1; } int enif_get_list_length(ErlNifEnv* env, Eterm term, unsigned* len) { Sint i; Uint u; if ((i = erts_list_length(term)) < 0) return 0; u = (Uint)i; if ((unsigned)u != u) return 0; *len = u; return 1; } ERL_NIF_TERM enif_make_int(ErlNifEnv* env, int i) { #if SIZEOF_INT == ERTS_SIZEOF_ETERM return IS_SSMALL(i) ? make_small(i) : small_to_big(i,alloc_heap(env,2)); #elif (SIZEOF_LONG == ERTS_SIZEOF_ETERM) || \ (SIZEOF_LONG_LONG == ERTS_SIZEOF_ETERM) return make_small(i); #endif } ERL_NIF_TERM enif_make_uint(ErlNifEnv* env, unsigned i) { #if SIZEOF_INT == ERTS_SIZEOF_ETERM return IS_USMALL(0,i) ? 
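/*
 * Illustrative sketch (not part of the implementation): enif_get_list_cell()
 * is typically used to walk a proper list term element by element; the loop
 * below sums a list of floats ("sum_list_nif" is a hypothetical name).
 *
 *   static ERL_NIF_TERM sum_list_nif(ErlNifEnv* env, int argc,
 *                                    const ERL_NIF_TERM argv[])
 *   {
 *       ERL_NIF_TERM list, head, tail;
 *       double acc = 0.0, d;
 *       if (argc != 1)
 *           return enif_make_badarg(env);
 *       list = argv[0];
 *       while (enif_get_list_cell(env, list, &head, &tail)) {
 *           if (!enif_get_double(env, head, &d))
 *               return enif_make_badarg(env);
 *           acc += d;
 *           list = tail;
 *       }
 *       if (!enif_is_empty_list(env, list))
 *           return enif_make_badarg(env);
 *       return enif_make_double(env, acc);
 *   }
 */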
make_small(i) : uint_to_big(i,alloc_heap(env,2)); #elif (SIZEOF_LONG == ERTS_SIZEOF_ETERM) || \ (SIZEOF_LONG_LONG == ERTS_SIZEOF_ETERM) return make_small(i); #endif } ERL_NIF_TERM enif_make_long(ErlNifEnv* env, long i) { if (IS_SSMALL(i)) { return make_small(i); } #if SIZEOF_LONG == ERTS_SIZEOF_ETERM return small_to_big(i, alloc_heap(env,2)); #elif SIZEOF_LONG_LONG == ERTS_SIZEOF_ETERM return make_small(i); #elif SIZEOF_LONG == 8 ensure_heap(env,3); return erts_sint64_to_big(i, &env->hp); #endif } ERL_NIF_TERM enif_make_ulong(ErlNifEnv* env, unsigned long i) { if (IS_USMALL(0,i)) { return make_small(i); } #if SIZEOF_LONG == ERTS_SIZEOF_ETERM return uint_to_big(i,alloc_heap(env,2)); #elif SIZEOF_LONG_LONG == ERTS_SIZEOF_ETERM return make_small(i); #elif SIZEOF_LONG == 8 ensure_heap(env,3); return erts_uint64_to_big(i, &env->hp); #endif } #if HAVE_INT64 && SIZEOF_LONG != 8 ERL_NIF_TERM enif_make_int64(ErlNifEnv* env, ErlNifSInt64 i) { Uint* hp; Uint need = 0; erts_bld_sint64(NULL, &need, i); hp = alloc_heap(env, need); return erts_bld_sint64(&hp, NULL, i); } ERL_NIF_TERM enif_make_uint64(ErlNifEnv* env, ErlNifUInt64 i) { Uint* hp; Uint need = 0; erts_bld_uint64(NULL, &need, i); hp = alloc_heap(env, need); return erts_bld_uint64(&hp, NULL, i); } #endif /* HAVE_INT64 && SIZEOF_LONG != 8 */ ERL_NIF_TERM enif_make_double(ErlNifEnv* env, double d) { Eterm* hp; FloatDef f; if (!erts_isfinite(d)) return enif_make_badarg(env); hp = alloc_heap(env,FLOAT_SIZE_OBJECT); f.fd = d; PUT_DOUBLE(f, hp); return make_float(hp); } ERL_NIF_TERM enif_make_atom(ErlNifEnv* env, const char* name) { return enif_make_atom_len(env, name, sys_strlen(name)); } ERL_NIF_TERM enif_make_atom_len(ErlNifEnv* env, const char* name, size_t len) { if (len > MAX_ATOM_CHARACTERS) return enif_make_badarg(env); return erts_atom_put((byte*)name, len, ERTS_ATOM_ENC_LATIN1, 1); } int enif_make_existing_atom(ErlNifEnv* env, const char* name, ERL_NIF_TERM* atom, ErlNifCharEncoding enc) { return enif_make_existing_atom_len(env, name, sys_strlen(name), atom, enc); } int enif_make_existing_atom_len(ErlNifEnv* env, const char* name, size_t len, ERL_NIF_TERM* atom, ErlNifCharEncoding encoding) { ASSERT(encoding == ERL_NIF_LATIN1); if (len > MAX_ATOM_CHARACTERS) return 0; return erts_atom_get(name, len, atom, ERTS_ATOM_ENC_LATIN1); } ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...) { #ifdef ERTS_NIF_ASSERT_IN_ENV int nr = 0; #endif Eterm* hp = alloc_heap(env,cnt+1); Eterm ret = make_tuple(hp); va_list ap; *hp++ = make_arityval(cnt); va_start(ap,cnt); while (cnt--) { Eterm elem = va_arg(ap,Eterm); ASSERT_IN_ENV(env, elem, ++nr, "tuple"); *hp++ = elem; } va_end(ap); return ret; } ERL_NIF_TERM enif_make_tuple_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[], unsigned cnt) { #ifdef ERTS_NIF_ASSERT_IN_ENV int nr = 0; #endif Eterm* hp = alloc_heap(env,cnt+1); Eterm ret = make_tuple(hp); const Eterm* src = arr; *hp++ = make_arityval(cnt); while (cnt--) { ASSERT_IN_ENV(env, *src, ++nr, "tuple"); *hp++ = *src++; } return ret; } ERL_NIF_TERM enif_make_list_cell(ErlNifEnv* env, Eterm car, Eterm cdr) { Eterm* hp = alloc_heap(env,2); Eterm ret = make_list(hp); ASSERT_IN_ENV(env, car, 0, "head of list cell"); ASSERT_IN_ENV(env, cdr, 0, "tail of list cell"); CAR(hp) = car; CDR(hp) = cdr; return ret; } ERL_NIF_TERM enif_make_list(ErlNifEnv* env, unsigned cnt, ...) 
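/*
 * Illustrative sketch (not part of the implementation): the term constructors
 * above are usually combined to build tagged results such as {ok, [1,2,3]}.
 * "ok_list_nif" is a hypothetical example name.
 *
 *   static ERL_NIF_TERM ok_list_nif(ErlNifEnv* env, int argc,
 *                                   const ERL_NIF_TERM argv[])
 *   {
 *       ERL_NIF_TERM elems[3];
 *       elems[0] = enif_make_int(env, 1);
 *       elems[1] = enif_make_int(env, 2);
 *       elems[2] = enif_make_int(env, 3);
 *       return enif_make_tuple2(env,
 *                               enif_make_atom(env, "ok"),
 *                               enif_make_list_from_array(env, elems, 3));
 *   }
 */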
{ if (cnt == 0) { return NIL; } else { #ifdef ERTS_NIF_ASSERT_IN_ENV int nr = 0; #endif Eterm* hp = alloc_heap(env,cnt*2); Eterm ret = make_list(hp); Eterm* last = &ret; va_list ap; va_start(ap,cnt); while (cnt--) { Eterm term = va_arg(ap,Eterm); *last = make_list(hp); ASSERT_IN_ENV(env, term, ++nr, "list"); *hp = term; last = ++hp; ++hp; } va_end(ap); *last = NIL; return ret; } } ERL_NIF_TERM enif_make_list_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[], unsigned cnt) { #ifdef ERTS_NIF_ASSERT_IN_ENV int nr = 0; #endif Eterm* hp = alloc_heap(env,cnt*2); Eterm ret = make_list(hp); Eterm* last = &ret; const Eterm* src = arr; while (cnt--) { Eterm term = *src++; *last = make_list(hp); ASSERT_IN_ENV(env, term, ++nr, "list"); *hp = term; last = ++hp; ++hp; } *last = NIL; return ret; } ERL_NIF_TERM enif_make_string(ErlNifEnv* env, const char* string, ErlNifCharEncoding encoding) { return enif_make_string_len(env, string, sys_strlen(string), encoding); } ERL_NIF_TERM enif_make_string_len(ErlNifEnv* env, const char* string, size_t len, ErlNifCharEncoding encoding) { Eterm* hp = alloc_heap(env,len*2); ASSERT(encoding == ERL_NIF_LATIN1); return erts_bld_string_n(&hp,NULL,string,len); } ERL_NIF_TERM enif_make_ref(ErlNifEnv* env) { Eterm* hp = alloc_heap(env, ERTS_REF_THING_SIZE); return erts_make_ref_in_buffer(hp); } void enif_system_info(ErlNifSysInfo *sip, size_t si_size) { driver_system_info(sip, si_size); } int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list) { Eterm *listptr, ret, *hp; ret = NIL; while (is_not_nil(term)) { if (is_not_list(term)) { return 0; } hp = alloc_heap(env, 2); listptr = list_val(term); ret = CONS(hp, CAR(listptr), ret); term = CDR(listptr); } *list = ret; return 1; } int enif_is_current_process_alive(ErlNifEnv* env) { Process *c_p; int scheduler; execution_state(env, &c_p, &scheduler); if (!c_p) erts_exit(ERTS_ABORT_EXIT, "enif_is_current_process_alive: " "Invalid environment"); if (!scheduler) erts_exit(ERTS_ABORT_EXIT, "enif_is_current_process_alive: " "called from non-scheduler thread"); return !ERTS_PROC_IS_EXITING(c_p); } int enif_is_process_alive(ErlNifEnv* env, ErlNifPid *proc) { int scheduler; execution_state(env, NULL, &scheduler); if (scheduler > 0) return !!erts_proc_lookup(proc->pid); else { Process* rp = erts_pid2proc_opt(NULL, 0, proc->pid, 0, ERTS_P2P_FLG_INC_REFC); if (rp) erts_proc_dec_refc(rp); return !!rp; } } int enif_is_port_alive(ErlNifEnv *env, ErlNifPort *port) { int scheduler; Uint32 iflags = (erts_port_synchronous_ops ? 
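/*
 * Illustrative sketch (not part of the implementation): a pid received as an
 * argument can be probed with enif_is_process_alive(); note that the answer
 * is inherently racy, so real code should still handle send failures.
 * "alive_nif" is a hypothetical example.
 *
 *   static ERL_NIF_TERM alive_nif(ErlNifEnv* env, int argc,
 *                                 const ERL_NIF_TERM argv[])
 *   {
 *       ErlNifPid pid;
 *       if (argc != 1 || !enif_get_local_pid(env, argv[0], &pid))
 *           return enif_make_badarg(env);
 *       return enif_make_atom(env,
 *                             enif_is_process_alive(env, &pid)
 *                             ? "true" : "false");
 *   }
 */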
ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP : ERTS_PORT_SFLGS_INVALID_LOOKUP); execution_state(env, NULL, &scheduler); if (scheduler > 0) return !!erts_port_lookup(port->port_id, iflags); else { Port *prt = erts_thr_port_lookup(port->port_id, iflags); if (prt) erts_port_dec_refc(prt); return !!prt; } } ERL_NIF_TERM enif_now_time(ErlNifEnv *env) { Uint mega, sec, micro; Eterm *hp; get_now(&mega, &sec, &micro); hp = alloc_heap(env, 4); return TUPLE3(hp, make_small(mega), make_small(sec), make_small(micro)); } ERL_NIF_TERM enif_cpu_time(ErlNifEnv *env) { #ifdef HAVE_ERTS_NOW_CPU Uint mega, sec, micro; Eterm *hp; erts_get_now_cpu(&mega, &sec, &micro); hp = alloc_heap(env, 4); return TUPLE3(hp, make_small(mega), make_small(sec), make_small(micro)); #else return enif_make_badarg(env); #endif } ERL_NIF_TERM enif_make_unique_integer(ErlNifEnv *env, ErlNifUniqueInteger properties) { int monotonic = properties & ERL_NIF_UNIQUE_MONOTONIC; int positive = properties & ERL_NIF_UNIQUE_POSITIVE; Eterm *hp; Uint hsz; if (monotonic) { Sint64 raw_unique = erts_raw_get_unique_monotonic_integer(); hsz = erts_raw_unique_monotonic_integer_heap_size(raw_unique, positive); hp = alloc_heap(env, hsz); return erts_raw_make_unique_monotonic_integer_value(&hp, raw_unique, positive); } else { Uint64 raw_unique[ERTS_UNIQUE_INT_RAW_VALUES]; erts_raw_get_unique_integer(raw_unique); hsz = erts_raw_unique_integer_heap_size(raw_unique, positive); hp = alloc_heap(env, hsz); return erts_raw_make_unique_integer(&hp, raw_unique, positive); } } ErlNifMutex* enif_mutex_create(char *name) { return erl_drv_mutex_create(name); } void enif_mutex_destroy(ErlNifMutex *mtx) { erl_drv_mutex_destroy(mtx); } int enif_mutex_trylock(ErlNifMutex *mtx) { return erl_drv_mutex_trylock(mtx); } void enif_mutex_lock(ErlNifMutex *mtx) { erl_drv_mutex_lock(mtx); } void enif_mutex_unlock(ErlNifMutex *mtx) { erl_drv_mutex_unlock(mtx); } ErlNifCond* enif_cond_create(char *name) { return erl_drv_cond_create(name); } void enif_cond_destroy(ErlNifCond *cnd) { erl_drv_cond_destroy(cnd); } void enif_cond_signal(ErlNifCond *cnd) { erl_drv_cond_signal(cnd); } void enif_cond_broadcast(ErlNifCond *cnd) { erl_drv_cond_broadcast(cnd); } void enif_cond_wait(ErlNifCond *cnd, ErlNifMutex *mtx) { erl_drv_cond_wait(cnd,mtx); } ErlNifRWLock* enif_rwlock_create(char *name) { return erl_drv_rwlock_create(name); } void enif_rwlock_destroy(ErlNifRWLock *rwlck) { erl_drv_rwlock_destroy(rwlck); } int enif_rwlock_tryrlock(ErlNifRWLock *rwlck) { return erl_drv_rwlock_tryrlock(rwlck); } void enif_rwlock_rlock(ErlNifRWLock *rwlck) { erl_drv_rwlock_rlock(rwlck); } void enif_rwlock_runlock(ErlNifRWLock *rwlck) { erl_drv_rwlock_runlock(rwlck); } int enif_rwlock_tryrwlock(ErlNifRWLock *rwlck) { return erl_drv_rwlock_tryrwlock(rwlck); } void enif_rwlock_rwlock(ErlNifRWLock *rwlck) { erl_drv_rwlock_rwlock(rwlck); } void enif_rwlock_rwunlock(ErlNifRWLock *rwlck) { erl_drv_rwlock_rwunlock(rwlck); } int enif_tsd_key_create(char *name, ErlNifTSDKey *key) { return erl_drv_tsd_key_create(name,key); } void enif_tsd_key_destroy(ErlNifTSDKey key) { erl_drv_tsd_key_destroy(key); } void enif_tsd_set(ErlNifTSDKey key, void *data) { erl_drv_tsd_set(key,data); } void* enif_tsd_get(ErlNifTSDKey key) { return erl_drv_tsd_get(key); } ErlNifThreadOpts* enif_thread_opts_create(char *name) { return (ErlNifThreadOpts*) erl_drv_thread_opts_create(name); } void enif_thread_opts_destroy(ErlNifThreadOpts *opts) { erl_drv_thread_opts_destroy((ErlDrvThreadOpts*)opts); } int enif_thread_create(char *name, ErlNifTid *tid, 
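/*
 * Illustrative sketch (not part of the implementation): the enif_thread_*,
 * enif_mutex_* and enif_cond_* functions are thin wrappers around the
 * corresponding erl_drv_* primitives, so the usual create/lock/join pattern
 * applies. "worker", "struct state" and "st" are hypothetical names.
 *
 *   static void* worker(void* arg)
 *   {
 *       struct state* st = arg;
 *       enif_mutex_lock(st->lock);
 *       st->done = 1;
 *       enif_mutex_unlock(st->lock);
 *       return NULL;
 *   }
 *
 *   ... in some setup code ...
 *   st->lock = enif_mutex_create("my_nif.lock");
 *   enif_thread_create("my_nif.worker", &st->tid, worker, st, NULL);
 *   ... later ...
 *   enif_thread_join(st->tid, NULL);
 *   enif_mutex_destroy(st->lock);
 */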
void* (*func)(void *), void *args, ErlNifThreadOpts *opts) { return erl_drv_thread_create(name,tid,func,args,(ErlDrvThreadOpts*)opts); } ErlNifTid enif_thread_self(void) { return erl_drv_thread_self(); } int enif_equal_tids(ErlNifTid tid1, ErlNifTid tid2) { return erl_drv_equal_tids(tid1,tid2); } void enif_thread_exit(void *resp) { erl_drv_thread_exit(resp); } int enif_thread_join(ErlNifTid tid, void **respp) { return erl_drv_thread_join(tid,respp); } char* enif_mutex_name(ErlNifMutex *mtx) {return erl_drv_mutex_name(mtx); } char* enif_cond_name(ErlNifCond *cnd) { return erl_drv_cond_name(cnd); } char* enif_rwlock_name(ErlNifRWLock* rwlck) { return erl_drv_rwlock_name(rwlck); } char* enif_thread_name(ErlNifTid tid) { return erl_drv_thread_name(tid); } int enif_getenv(const char *key, char *value, size_t *value_size) { return erl_drv_getenv(key, value, value_size); } ErlNifTime enif_monotonic_time(ErlNifTimeUnit time_unit) { return (ErlNifTime) erts_napi_monotonic_time((int) time_unit); } ErlNifTime enif_time_offset(ErlNifTimeUnit time_unit) { return (ErlNifTime) erts_napi_time_offset((int) time_unit); } ErlNifTime enif_convert_time_unit(ErlNifTime val, ErlNifTimeUnit from, ErlNifTimeUnit to) { return (ErlNifTime) erts_napi_convert_time_unit((ErtsMonotonicTime) val, (int) from, (int) to); } int enif_fprintf(FILE* filep, const char* format, ...) { int ret; va_list arglist; va_start(arglist, format); ret = erts_vfprintf(filep, format, arglist); va_end(arglist); return ret; } int enif_vfprintf(FILE* filep, const char *format, va_list ap) { return erts_vfprintf(filep, format, ap); } int enif_snprintf(char *buffer, size_t size, const char* format, ...) { int ret; va_list arglist; va_start(arglist, format); ret = erts_vsnprintf(buffer, size, format, arglist); va_end(arglist); return ret; } int enif_vsnprintf(char* buffer, size_t size, const char *format, va_list ap) { return erts_vsnprintf(buffer, size, format, ap); } /*********************************************************** ** Memory managed (GC'ed) "resource" objects ** ***********************************************************/ /* dummy node in circular list */ struct enif_resource_type_t resource_type_list; static ErlNifResourceType* find_resource_type(Eterm module, Eterm name) { ErlNifResourceType* type; for (type = resource_type_list.next; type != &resource_type_list; type = type->next) { if (type->module == module && type->name == name) { return type; } } return NULL; } #define in_area(ptr,start,nbytes) \ ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes)) static void close_lib(struct erl_module_nif* lib) { ASSERT(lib != NULL); ASSERT(lib->handle != NULL); ASSERT(erts_refc_read(&lib->rt_dtor_cnt,0) == 0); if (lib->entry.unload != NULL) { struct enif_msg_environment_t msg_env; pre_nif_noproc(&msg_env, lib, NULL); lib->entry.unload(&msg_env.env, lib->priv_data); post_nif_noproc(&msg_env); } if (!erts_is_static_nif(lib->handle)) erts_sys_ddll_close(lib->handle); lib->handle = NULL; } static void steal_resource_type(ErlNifResourceType* type) { struct erl_module_nif* lib = type->owner; if (type->dtor != NULL && erts_refc_dectest(&lib->rt_dtor_cnt, 0) == 0 && lib->mod == NULL) { /* last type with destructor gone, close orphan lib */ close_lib(lib); } if (erts_refc_dectest(&lib->rt_cnt, 0) == 0 && lib->mod == NULL) { erts_free(ERTS_ALC_T_NIF, lib); } } /* The opened_rt_list is used by enif_open_resource_type() * in order to rollback "creates" and "take-overs" in case the load fails. 
*/ struct opened_resource_type { struct opened_resource_type* next; ErlNifResourceFlags op; ErlNifResourceType* type; ErlNifResourceTypeInit new_callbacks; }; static struct opened_resource_type* opened_rt_list = NULL; static ErlNifResourceType* open_resource_type(ErlNifEnv* env, const char* name_str, const ErlNifResourceTypeInit* init, ErlNifResourceFlags flags, ErlNifResourceFlags* tried, size_t sizeof_init) { ErlNifResourceType* type = NULL; ErlNifResourceFlags op = flags; Eterm module_am, name_am; ASSERT(erts_thr_progress_is_blocking()); module_am = make_atom(env->mod_nif->mod->module); name_am = enif_make_atom(env, name_str); type = find_resource_type(module_am, name_am); if (type == NULL) { if (flags & ERL_NIF_RT_CREATE) { type = erts_alloc(ERTS_ALC_T_NIF, sizeof(struct enif_resource_type_t)); type->module = module_am; type->name = name_am; erts_refc_init(&type->refc, 1); op = ERL_NIF_RT_CREATE; #ifdef DEBUG type->dtor = (void*)1; type->owner = (void*)2; type->prev = (void*)3; type->next = (void*)4; #endif } } else { if (flags & ERL_NIF_RT_TAKEOVER) { op = ERL_NIF_RT_TAKEOVER; } else { type = NULL; } } if (type != NULL) { struct opened_resource_type* ort = erts_alloc(ERTS_ALC_T_TMP, sizeof(struct opened_resource_type)); ort->op = op; ort->type = type; sys_memzero(&ort->new_callbacks, sizeof(ErlNifResourceTypeInit)); ASSERT(sizeof_init > 0 && sizeof_init <= sizeof(ErlNifResourceTypeInit)); sys_memcpy(&ort->new_callbacks, init, sizeof_init); ort->next = opened_rt_list; opened_rt_list = ort; } if (tried != NULL) { *tried = op; } return type; } ErlNifResourceType* enif_open_resource_type(ErlNifEnv* env, const char* module_str, const char* name_str, ErlNifResourceDtor* dtor, ErlNifResourceFlags flags, ErlNifResourceFlags* tried) { ErlNifResourceTypeInit init = {dtor, NULL}; ASSERT(module_str == NULL); /* for now... 
*/ return open_resource_type(env, name_str, &init, flags, tried, sizeof(init)); } ErlNifResourceType* enif_open_resource_type_x(ErlNifEnv* env, const char* name_str, const ErlNifResourceTypeInit* init, ErlNifResourceFlags flags, ErlNifResourceFlags* tried) { return open_resource_type(env, name_str, init, flags, tried, env->mod_nif->entry.sizeof_ErlNifResourceTypeInit); } static void commit_opened_resource_types(struct erl_module_nif* lib) { while (opened_rt_list) { struct opened_resource_type* ort = opened_rt_list; ErlNifResourceType* type = ort->type; if (ort->op == ERL_NIF_RT_CREATE) { type->prev = &resource_type_list; type->next = resource_type_list.next; type->next->prev = type; type->prev->next = type; } else { /* ERL_NIF_RT_TAKEOVER */ steal_resource_type(type); } type->owner = lib; type->dtor = ort->new_callbacks.dtor; type->stop = ort->new_callbacks.stop; type->down = ort->new_callbacks.down; if (type->dtor != NULL) { erts_refc_inc(&lib->rt_dtor_cnt, 1); } erts_refc_inc(&lib->rt_cnt, 1); opened_rt_list = ort->next; erts_free(ERTS_ALC_T_TMP, ort); } } static void rollback_opened_resource_types(void) { while (opened_rt_list) { struct opened_resource_type* ort = opened_rt_list; if (ort->op == ERL_NIF_RT_CREATE) { erts_free(ERTS_ALC_T_NIF, ort->type); } opened_rt_list = ort->next; erts_free(ERTS_ALC_T_TMP, ort); } } #ifdef ARCH_64 # define ERTS_RESOURCE_DYING_FLAG (((Uint) 1) << 63) #else # define ERTS_RESOURCE_DYING_FLAG (((Uint) 1) << 31) #endif #define ERTS_RESOURCE_REFC_MASK (~ERTS_RESOURCE_DYING_FLAG) static ERTS_INLINE void rmon_set_dying(ErtsResourceMonitors *rms) { rms->refc |= ERTS_RESOURCE_DYING_FLAG; } static ERTS_INLINE int rmon_is_dying(ErtsResourceMonitors *rms) { return !!(rms->refc & ERTS_RESOURCE_DYING_FLAG); } static ERTS_INLINE void rmon_refc_inc(ErtsResourceMonitors *rms) { rms->refc++; } static ERTS_INLINE Uint rmon_refc_dec_read(ErtsResourceMonitors *rms) { Uint res; ASSERT((rms->refc & ERTS_RESOURCE_REFC_MASK) != 0); res = --rms->refc; return res & ERTS_RESOURCE_REFC_MASK; } static ERTS_INLINE void rmon_refc_dec(ErtsResourceMonitors *rms) { ASSERT((rms->refc & ERTS_RESOURCE_REFC_MASK) != 0); --rms->refc; } static ERTS_INLINE Uint rmon_refc_read(ErtsResourceMonitors *rms) { return rms->refc & ERTS_RESOURCE_REFC_MASK; } static void dtor_demonitor(ErtsMonitor* mon, void* context) { ASSERT(erts_monitor_is_origin(mon)); ASSERT(is_internal_pid(mon->other.item)); erts_proc_sig_send_demonitor(mon); } # define NIF_RESOURCE_DTOR &nif_resource_dtor static int nif_resource_dtor(Binary* bin) { ErtsResource* resource = (ErtsResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(bin); ErlNifResourceType* type = resource->type; ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR); if (resource->monitors) { ErtsResourceMonitors* rm = resource->monitors; int kill; ErtsMonitor *root; Uint refc; ASSERT(type->down); erts_mtx_lock(&rm->lock); ASSERT(erts_refc_read(&bin->intern.refc, 0) == 0); kill = !rmon_is_dying(rm); if (kill) { rmon_set_dying(rm); root = rm->root; rm->root = NULL; } refc = rmon_refc_read(rm); erts_mtx_unlock(&rm->lock); if (kill) erts_monitor_tree_foreach_delete(&root, dtor_demonitor, NULL); /* * If resource->monitors->refc != 0 there are * outstanding references to the resource from * monitors that has not been removed yet. * nif_resource_dtor() will be called again this * reference count reach zero. */ if (refc != 0) return 0; /* we'll be back... 
*/ erts_mtx_destroy(&rm->lock); } if (type->dtor != NULL) { struct enif_msg_environment_t msg_env; pre_nif_noproc(&msg_env, type->owner, NULL); type->dtor(&msg_env.env, resource->data); post_nif_noproc(&msg_env); } if (erts_refc_dectest(&type->refc, 0) == 0) { ASSERT(type->next == NULL); ASSERT(type->owner != NULL); ASSERT(type->owner->mod == NULL); steal_resource_type(type); erts_free(ERTS_ALC_T_NIF, type); } return 1; } void erts_resource_stop(ErtsResource* resource, ErlNifEvent e, int is_direct_call) { struct enif_msg_environment_t msg_env; ASSERT(resource->type->stop); pre_nif_noproc(&msg_env, resource->type->owner, NULL); resource->type->stop(&msg_env.env, resource->data, e, is_direct_call); post_nif_noproc(&msg_env); } void erts_nif_demonitored(ErtsResource* resource) { ErtsResourceMonitors* rmp = resource->monitors; ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource); int free_me; ASSERT(rmp); ASSERT(resource->type->down); erts_mtx_lock(&rmp->lock); free_me = ((rmon_refc_dec_read(rmp) == 0) & !!rmon_is_dying(rmp)); erts_mtx_unlock(&rmp->lock); if (free_me) erts_bin_free(&bin->binary); } void erts_fire_nif_monitor(ErtsMonitor *tmon) { ErtsResource* resource; ErtsMonitorData *mdp; ErtsMonitor *omon; ErtsBinary* bin; struct enif_msg_environment_t msg_env; ErlNifPid nif_pid; ErlNifMonitor nif_monitor; ErtsResourceMonitors* rmp; Uint mrefc, brefc; int active, is_dying; ASSERT(tmon->type == ERTS_MON_TYPE_RESOURCE); ASSERT(erts_monitor_is_target(tmon)); resource = tmon->other.ptr; bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource); rmp = resource->monitors; mdp = erts_monitor_to_data(tmon); omon = &mdp->origin; ASSERT(rmp); ASSERT(resource->type->down); erts_mtx_lock(&rmp->lock); mrefc = rmon_refc_dec_read(rmp); is_dying = rmon_is_dying(rmp); active = !is_dying && erts_monitor_is_in_table(omon); if (active) { erts_monitor_tree_delete(&rmp->root, omon); brefc = (Uint) erts_refc_inc_unless(&bin->binary.intern.refc, 0, 0); } erts_mtx_unlock(&rmp->lock); if (!active) { ASSERT(!is_dying || erts_refc_read(&bin->binary.intern.refc, 0) == 0); if (is_dying && mrefc == 0) erts_bin_free(&bin->binary); erts_monitor_release(tmon); } else { if (brefc > 0) { ASSERT(is_internal_pid(omon->other.item)); erts_ref_to_driver_monitor(mdp->ref, &nif_monitor); nif_pid.pid = omon->other.item; pre_nif_noproc(&msg_env, resource->type->owner, NULL); resource->type->down(&msg_env.env, resource->data, &nif_pid, &nif_monitor); post_nif_noproc(&msg_env); erts_bin_release(&bin->binary); } erts_monitor_release_both(mdp); } } void* enif_alloc_resource(ErlNifResourceType* type, size_t data_sz) { size_t magic_sz = offsetof(ErtsResource,data); Binary* bin; ErtsResource* resource; size_t monitors_offs; if (type->down) { /* Put ErtsResourceMonitors after user data and properly aligned */ monitors_offs = ((data_sz + ERTS_ALLOC_ALIGN_BYTES - 1) & ~((size_t)ERTS_ALLOC_ALIGN_BYTES - 1)); magic_sz += monitors_offs + sizeof(ErtsResourceMonitors); } else { ERTS_UNDEF(monitors_offs, 0); magic_sz += data_sz; } bin = erts_create_magic_binary_x(magic_sz, NIF_RESOURCE_DTOR, ERTS_ALC_T_BINARY, 1); /* unaligned */ resource = ERTS_MAGIC_BIN_UNALIGNED_DATA(bin); ASSERT(type->owner && type->next && type->prev); /* not allowed in load/upgrade */ resource->type = type; erts_refc_inc(&bin->intern.refc, 1); #ifdef DEBUG erts_refc_init(&resource->nif_refc, 1); #endif erts_refc_inc(&resource->type->refc, 2); if (type->down) { resource->monitors = (ErtsResourceMonitors*) (resource->data + monitors_offs); 
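/*
 * Illustrative sketch (not part of the implementation): from the NIF author's
 * side, the lifecycle around enif_alloc_resource() usually looks like the
 * following. The type is registered in load(), and releasing the local
 * reference after enif_make_resource() leaves the term as the only owner.
 * "my_res_type", "my_res" and "my_res_dtor" are hypothetical names.
 *
 *   ... in load(): ...
 *   my_res_type = enif_open_resource_type(env, NULL, "my_res",
 *                                         my_res_dtor,
 *                                         ERL_NIF_RT_CREATE, NULL);
 *
 *   ... in a NIF: ...
 *   struct my_res* p = enif_alloc_resource(my_res_type, sizeof(*p));
 *   ERL_NIF_TERM term = enif_make_resource(env, p);
 *   enif_release_resource(p);
 *   return term;
 *
 *   ... in another NIF, to get the pointer back: ...
 *   void* obj;
 *   if (!enif_get_resource(env, argv[0], my_res_type, &obj))
 *       return enif_make_badarg(env);
 */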
erts_mtx_init(&resource->monitors->lock, "resource_monitors", NIL, ERTS_LOCK_FLAGS_CATEGORY_GENERIC); resource->monitors->root = NULL; resource->monitors->refc = 0; resource->monitors->user_data_sz = data_sz; } else { resource->monitors = NULL; } return resource->data; } void enif_release_resource(void* obj) { ErtsResource* resource = DATA_TO_RESOURCE(obj); ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource); ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR); ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0); #ifdef DEBUG erts_refc_dec(&resource->nif_refc, 0); #endif erts_bin_release(&bin->binary); } void enif_keep_resource(void* obj) { ErtsResource* resource = DATA_TO_RESOURCE(obj); ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource); ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR); ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0); #ifdef DEBUG erts_refc_inc(&resource->nif_refc, 1); #endif erts_refc_inc(&bin->binary.intern.refc, 2); } Eterm erts_bld_resource_ref(Eterm** hpp, ErlOffHeap* oh, ErtsResource* resource) { ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource); ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0); return erts_mk_magic_ref(hpp, oh, &bin->binary); } ERL_NIF_TERM enif_make_resource(ErlNifEnv* env, void* obj) { ErtsResource* resource = DATA_TO_RESOURCE(obj); ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource); Eterm* hp = alloc_heap(env, ERTS_MAGIC_REF_THING_SIZE); ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0); return erts_mk_magic_ref(&hp, &MSO(env->proc), &bin->binary); } ERL_NIF_TERM enif_make_resource_binary(ErlNifEnv* env, void* obj, const void* data, size_t size) { ErtsResource* resource = DATA_TO_RESOURCE(obj); ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource); ErlOffHeap *ohp = &MSO(env->proc); Eterm* hp = alloc_heap(env,PROC_BIN_SIZE); ProcBin* pb = (ProcBin *) hp; pb->thing_word = HEADER_PROC_BIN; pb->size = size; pb->next = ohp->first; ohp->first = (struct erl_off_heap_header*) pb; pb->val = &bin->binary; pb->bytes = (byte*) data; pb->flags = 0; OH_OVERHEAD(ohp, size / sizeof(Eterm)); erts_refc_inc(&bin->binary.intern.refc, 1); return make_binary(hp); } int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* type, void** objp) { Binary* mbin; ErtsResource* resource; if (is_internal_magic_ref(term)) mbin = erts_magic_ref2bin(term); else { Eterm *hp; if (!is_binary(term)) return 0; hp = binary_val(term); if (thing_subtag(*hp) != REFC_BINARY_SUBTAG) return 0; /* if (((ProcBin *) hp)->size != 0) { return 0; / * Or should we allow "resource binaries" as handles? 
* / } */ mbin = ((ProcBin *) hp)->val; if (!(mbin->intern.flags & BIN_FLAG_MAGIC)) return 0; } resource = (ErtsResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(mbin); if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != NIF_RESOURCE_DTOR || resource->type != type) { return 0; } *objp = resource->data; return 1; } size_t enif_sizeof_resource(void* obj) { ErtsResource* resource = DATA_TO_RESOURCE(obj); if (resource->monitors) { return resource->monitors->user_data_sz; } else { Binary* bin = &ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource)->binary; return ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(bin) - offsetof(ErtsResource,data); } } void* enif_dlopen(const char* lib, void (*err_handler)(void*,const char*), void* err_arg) { ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT; void* handle; void* init_func; if (erts_sys_ddll_open(lib, &handle, &errdesc) == ERL_DE_NO_ERROR) { if (erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) == ERL_DE_NO_ERROR) { erts_sys_ddll_call_nif_init(init_func); } } else { if (err_handler != NULL) { (*err_handler)(err_arg, errdesc.str); } handle = NULL; } erts_sys_ddll_free_error(&errdesc); return handle; } void* enif_dlsym(void* handle, const char* symbol, void (*err_handler)(void*,const char*), void* err_arg) { ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT; void* ret; if (erts_sys_ddll_sym2(handle, symbol, &ret, &errdesc) != ERL_DE_NO_ERROR) { if (err_handler != NULL) { (*err_handler)(err_arg, errdesc.str); } erts_sys_ddll_free_error(&errdesc); return NULL; } return ret; } int enif_consume_timeslice(ErlNifEnv* env, int percent) { Process *proc; Sint reds; execution_state(env, &proc, NULL); ASSERT(is_proc_bound(env) && percent >= 1 && percent <= 100); if (percent < 1) percent = 1; else if (percent > 100) percent = 100; reds = ((CONTEXT_REDS+99) / 100) * percent; ASSERT(reds > 0 && reds <= CONTEXT_REDS); BUMP_REDS(proc, reds); return ERTS_BIF_REDS_LEFT(proc) == 0; } static ERTS_INLINE void nif_export_cleanup_nif_mod(NifExport *ep) { if (erts_refc_dectest(&ep->m->rt_dtor_cnt, 0) == 0 && ep->m->mod == NULL) close_lib(ep->m); ep->m = NULL; } void erts_nif_export_cleanup_nif_mod(NifExport *ep) { nif_export_cleanup_nif_mod(ep); } static ERTS_INLINE void nif_export_restore(Process *c_p, NifExport *ep, Eterm res) { erts_nif_export_restore(c_p, ep, res); ASSERT(ep->m); nif_export_cleanup_nif_mod(ep); } /* * Finalize a dirty NIF call. This function is scheduled to cause the VM to * switch the process off a dirty scheduler thread and back onto a regular * scheduler thread, and then return the result from the dirty NIF. It also * restores the original NIF MFA when necessary based on the value of * ep->func set by execute_dirty_nif via init_nif_sched_data -- non-NULL * means restore, NULL means do not restore. */ static ERL_NIF_TERM dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { Process* proc; NifExport* ep; execution_state(env, &proc, NULL); ASSERT(argc == 1); ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc))); ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); ASSERT(ep); nif_export_restore(proc, ep, argv[0]); return argv[0]; } /* Finalize a dirty NIF call that raised an exception. Otherwise same as * the dirty_nif_finalizer() function. 
*/ static ERL_NIF_TERM dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { ERL_NIF_TERM ret; Process* proc; NifExport* ep; Eterm exception; execution_state(env, &proc, NULL); ASSERT(argc == 1); ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc))); ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); ASSERT(ep); exception = argv[0]; /* argv overwritten by restore below... */ nif_export_cleanup_nif_mod(ep); ret = enif_raise_exception(env, exception); /* Restore orig info for error and clear nif export in handle_error() */ proc->freason |= EXF_RESTORE_NIF; return ret; } /* * Dirty NIF scheduling wrapper function. Schedule a dirty NIF to execute. * The dirty scheduler thread type (CPU or I/O) is indicated in flags * parameter. */ static ERTS_INLINE ERL_NIF_TERM schedule_dirty_nif(ErlNifEnv* env, int flags, NativeFunPtr fp, Eterm func_name, int argc, const ERL_NIF_TERM argv[]) { Process* proc; ASSERT(is_atom(func_name)); ASSERT(fp); ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND); execution_state(env, &proc, NULL); (void) erts_atomic32_read_bset_nob(&proc->state, (ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC), (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND ? ERTS_PSFLG_DIRTY_CPU_PROC : ERTS_PSFLG_DIRTY_IO_PROC)); return schedule(env, fp, NULL, proc->current->module, func_name, argc, argv); } static ERTS_INLINE ERL_NIF_TERM static_schedule_dirty_nif(ErlNifEnv* env, erts_aint32_t dirty_psflg, int argc, const ERL_NIF_TERM argv[]) { Process *proc; NifExport *ep; Eterm mod, func; NativeFunPtr fp; execution_state(env, &proc, NULL); /* * Called in order to schedule statically determined * dirty NIF calls... * * Note that 'current' does not point into a NifExport * structure; only a structure with similar * parts (located in code). */ ep = ErtsContainerStruct(proc->current, NifExport, exp.info.mfa); mod = proc->current->module; func = proc->current->function; fp = (NativeFunPtr) ep->func; ASSERT(is_atom(mod) && is_atom(func)); ASSERT(fp); (void) erts_atomic32_read_bset_nob(&proc->state, (ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC), dirty_psflg); return schedule(env, fp, NULL, mod, func, argc, argv); } static ERL_NIF_TERM static_schedule_dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_IO_PROC, argc, argv); } static ERL_NIF_TERM static_schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_CPU_PROC, argc, argv); } /* * NIF execution wrapper used by enif_schedule_nif() for regular NIFs. It * calls the actual NIF, restores original NIF MFA if necessary, and * then returns the NIF result. */ static ERL_NIF_TERM execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { Process* proc; NativeFunPtr fp; NifExport* ep; ERL_NIF_TERM result; execution_state(env, &proc, NULL); ep = ErtsContainerStruct(proc->current, NifExport, exp.info.mfa); fp = ep->func; ASSERT(ep); ASSERT(!env->exception_thrown); fp = (NativeFunPtr) ep->func; #ifdef DEBUG ep->func = ERTS_DBG_NIF_NOT_SCHED_MARKER; #endif result = (*fp)(env, argc, argv); ASSERT(ep == ERTS_PROC_GET_NIF_TRAP_EXPORT(proc)); if (is_value(result) || proc->freason != TRAP) { /* Done (not rescheduled)... 
*/ ASSERT(ep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER); if (!env->exception_thrown) nif_export_restore(proc, ep, result); else { nif_export_cleanup_nif_mod(ep); /* * Restore orig info for error and clear nif * export in handle_error() */ proc->freason |= EXF_RESTORE_NIF; } } #ifdef DEBUG if (ep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER) ep->func = NULL; #endif return result; } ERL_NIF_TERM enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags, ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]), int argc, const ERL_NIF_TERM argv[]) { Process* proc; ERL_NIF_TERM fun_name_atom, result; int scheduler; if (argc > MAX_ARG) return enif_make_badarg(env); fun_name_atom = enif_make_atom(env, fun_name); if (enif_is_exception(env, fun_name_atom)) return fun_name_atom; execution_state(env, &proc, &scheduler); if (scheduler <= 0) { if (scheduler == 0) enif_make_badarg(env); erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN); } if (flags == 0) result = schedule(env, execute_nif, fp, proc->current->module, fun_name_atom, argc, argv); else if (!(flags & ~(ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND))) { result = schedule_dirty_nif(env, flags, fp, fun_name_atom, argc, argv); } else result = enif_make_badarg(env); if (scheduler < 0) erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); return result; } int enif_thread_type(void) { ErtsSchedulerData *esdp = erts_get_scheduler_data(); if (!esdp) return ERL_NIF_THR_UNDEFINED; switch (esdp->type) { case ERTS_SCHED_NORMAL: return ERL_NIF_THR_NORMAL_SCHEDULER; case ERTS_SCHED_DIRTY_CPU: return ERL_NIF_THR_DIRTY_CPU_SCHEDULER; case ERTS_SCHED_DIRTY_IO: return ERL_NIF_THR_DIRTY_IO_SCHEDULER; default: ERTS_INTERNAL_ERROR("Invalid scheduler type"); return -1; } } /* Maps */ int enif_is_map(ErlNifEnv* env, ERL_NIF_TERM term) { return is_map(term); } int enif_get_map_size(ErlNifEnv* env, ERL_NIF_TERM term, size_t *size) { if (is_flatmap(term)) { flatmap_t *mp; mp = (flatmap_t*)flatmap_val(term); *size = flatmap_get_size(mp); return 1; } else if (is_hashmap(term)) { *size = hashmap_size(term); return 1; } return 0; } ERL_NIF_TERM enif_make_new_map(ErlNifEnv* env) { Eterm* hp = alloc_heap(env,MAP_HEADER_FLATMAP_SZ+1); Eterm tup; flatmap_t *mp; tup = make_tuple(hp); *hp++ = make_arityval(0); mp = (flatmap_t*)hp; mp->thing_word = MAP_HEADER_FLATMAP; mp->size = 0; mp->keys = tup; return make_flatmap(mp); } int enif_make_map_from_arrays(ErlNifEnv *env, ERL_NIF_TERM keys[], ERL_NIF_TERM values[], size_t cnt, ERL_NIF_TERM *map_out) { ErtsHeapFactory factory; int succeeded; #ifdef ERTS_NIF_ASSERT_IN_ENV size_t index = 0; while (index < cnt) { ASSERT_IN_ENV(env, keys[index], index, "key"); ASSERT_IN_ENV(env, values[index], index, "value"); index++; } #endif flush_env(env); erts_factory_proc_prealloc_init(&factory, env->proc, cnt * 2 + MAP_HEADER_FLATMAP_SZ + 1); (*map_out) = erts_map_from_ks_and_vs(&factory, keys, values, cnt); succeeded = (*map_out) != THE_NON_VALUE; if (!succeeded) { erts_factory_undo(&factory); } erts_factory_close(&factory); cache_env(env); return succeeded; } int enif_make_map_put(ErlNifEnv* env, Eterm map_in, Eterm key, Eterm value, Eterm *map_out) { if (!is_map(map_in)) { return 0; } ASSERT_IN_ENV(env, map_in, 0, "old map"); ASSERT_IN_ENV(env, key, 0, "key"); ASSERT_IN_ENV(env, value, 0, "value"); flush_env(env); *map_out = erts_maps_put(env->proc, key, value, map_in); cache_env(env); return 1; } int enif_get_map_value(ErlNifEnv* env, Eterm map, Eterm key, Eterm *value) { const Eterm *ret; if (!is_map(map)) { return 0; } ret = erts_maps_get(key, 
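/*
 * Illustrative sketch (not part of the implementation): building a small map
 * and reading a value back with the map functions above ("count" is a
 * hypothetical key).
 *
 *   ERL_NIF_TERM map = enif_make_new_map(env);
 *   ERL_NIF_TERM key = enif_make_atom(env, "count");
 *   ERL_NIF_TERM val;
 *   if (!enif_make_map_put(env, map, key, enif_make_int(env, 1), &map))
 *       return enif_make_badarg(env);
 *   if (enif_get_map_value(env, map, key, &val)) {
 *       ... val is now the term stored under 'count' ...
 *   }
 */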
map); if (ret) { *value = *ret; return 1; } return 0; } int enif_make_map_update(ErlNifEnv* env, Eterm map_in, Eterm key, Eterm value, Eterm *map_out) { int res; if (!is_map(map_in)) { return 0; } ASSERT_IN_ENV(env, map_in, 0, "old map"); ASSERT_IN_ENV(env, key, 0, "key"); ASSERT_IN_ENV(env, value, 0, "value"); flush_env(env); res = erts_maps_update(env->proc, key, value, map_in, map_out); cache_env(env); return res; } int enif_make_map_remove(ErlNifEnv* env, Eterm map_in, Eterm key, Eterm *map_out) { if (!is_map(map_in)) { return 0; } flush_env(env); (void) erts_maps_take(env->proc, key, map_in, map_out, NULL); cache_env(env); return 1; } int enif_map_iterator_create(ErlNifEnv *env, Eterm map, ErlNifMapIterator *iter, ErlNifMapIteratorEntry entry) { if (is_flatmap(map)) { flatmap_t *mp = (flatmap_t*)flatmap_val(map); size_t offset; switch (entry) { case ERL_NIF_MAP_ITERATOR_FIRST: offset = 0; break; case ERL_NIF_MAP_ITERATOR_LAST: offset = flatmap_get_size(mp) - 1; break; default: goto error; } /* empty maps are ok but will leave the iterator * in bad shape. */ iter->map = map; iter->u.flat.ks = ((Eterm *)flatmap_get_keys(mp)) + offset; iter->u.flat.vs = ((Eterm *)flatmap_get_values(mp)) + offset; iter->size = flatmap_get_size(mp); iter->idx = offset + 1; return 1; } else if (is_hashmap(map)) { iter->map = map; iter->size = hashmap_size(map); iter->u.hash.wstack = erts_alloc(ERTS_ALC_T_NIF, sizeof(ErtsDynamicWStack)); WSTACK_INIT(iter->u.hash.wstack, ERTS_ALC_T_NIF); switch (entry) { case ERL_NIF_MAP_ITERATOR_FIRST: iter->idx = 1; hashmap_iterator_init(&iter->u.hash.wstack->ws, map, 0); iter->u.hash.kv = hashmap_iterator_next(&iter->u.hash.wstack->ws); break; case ERL_NIF_MAP_ITERATOR_LAST: iter->idx = hashmap_size(map); hashmap_iterator_init(&iter->u.hash.wstack->ws, map, 1); iter->u.hash.kv = hashmap_iterator_prev(&iter->u.hash.wstack->ws); break; default: goto error; } ASSERT(!!iter->u.hash.kv == (iter->idx >= 1 && iter->idx <= iter->size)); return 1; } error: #ifdef DEBUG iter->map = THE_NON_VALUE; #endif return 0; } void enif_map_iterator_destroy(ErlNifEnv *env, ErlNifMapIterator *iter) { if (is_hashmap(iter->map)) { WSTACK_DESTROY(iter->u.hash.wstack->ws); erts_free(ERTS_ALC_T_NIF, iter->u.hash.wstack); } else ASSERT(is_flatmap(iter->map)); #ifdef DEBUG iter->map = THE_NON_VALUE; #endif } int enif_map_iterator_is_tail(ErlNifEnv *env, ErlNifMapIterator *iter) { ASSERT(iter); if (is_flatmap(iter->map)) { ASSERT(iter->idx >= 0); ASSERT(iter->idx <= flatmap_get_size(flatmap_val(iter->map)) + 1); return (iter->size == 0 || iter->idx > iter->size); } else { ASSERT(is_hashmap(iter->map)); return iter->idx > iter->size; } } int enif_map_iterator_is_head(ErlNifEnv *env, ErlNifMapIterator *iter) { ASSERT(iter); if (is_flatmap(iter->map)) { ASSERT(iter->idx >= 0); ASSERT(iter->idx <= flatmap_get_size(flatmap_val(iter->map)) + 1); return (iter->size == 0 || iter->idx == 0); } else { ASSERT(is_hashmap(iter->map)); return iter->idx == 0; } } int enif_map_iterator_next(ErlNifEnv *env, ErlNifMapIterator *iter) { ASSERT(iter); if (is_flatmap(iter->map)) { if (iter->idx <= iter->size) { iter->idx++; iter->u.flat.ks++; iter->u.flat.vs++; } return (iter->idx <= iter->size); } else { ASSERT(is_hashmap(iter->map)); if (iter->idx <= hashmap_size(iter->map)) { if (iter->idx < 1) { hashmap_iterator_init(&iter->u.hash.wstack->ws, iter->map, 0); } iter->u.hash.kv = hashmap_iterator_next(&iter->u.hash.wstack->ws); iter->idx++; ASSERT(!!iter->u.hash.kv == (iter->idx <= iter->size)); } return iter->idx <= 
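/*
 * Illustrative sketch (not part of the implementation): the intended iteration
 * pattern over an arbitrary map term, using the iterator functions above.
 *
 *   ErlNifMapIterator iter;
 *   ERL_NIF_TERM k, v;
 *   if (!enif_map_iterator_create(env, map, &iter, ERL_NIF_MAP_ITERATOR_FIRST))
 *       return enif_make_badarg(env);
 *   while (enif_map_iterator_get_pair(env, &iter, &k, &v)) {
 *       ... use k and v ...
 *       enif_map_iterator_next(env, &iter);
 *   }
 *   enif_map_iterator_destroy(env, &iter);
 */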
iter->size; } } int enif_map_iterator_prev(ErlNifEnv *env, ErlNifMapIterator *iter) { ASSERT(iter); if (is_flatmap(iter->map)) { if (iter->idx > 0) { iter->idx--; iter->u.flat.ks--; iter->u.flat.vs--; } return iter->idx > 0; } else { ASSERT(is_hashmap(iter->map)); if (iter->idx > 0) { if (iter->idx > iter->size) { hashmap_iterator_init(&iter->u.hash.wstack->ws, iter->map, 1); } iter->u.hash.kv = hashmap_iterator_prev(&iter->u.hash.wstack->ws); iter->idx--; ASSERT(!!iter->u.hash.kv == (iter->idx > 0)); } return iter->idx > 0; } } int enif_map_iterator_get_pair(ErlNifEnv *env, ErlNifMapIterator *iter, Eterm *key, Eterm *value) { ASSERT(iter); if (is_flatmap(iter->map)) { if (iter->idx > 0 && iter->idx <= iter->size) { ASSERT(iter->u.flat.ks >= flatmap_get_keys(flatmap_val(iter->map)) && iter->u.flat.ks < (flatmap_get_keys(flatmap_val(iter->map)) + flatmap_get_size(flatmap_val(iter->map)))); ASSERT(iter->u.flat.vs >= flatmap_get_values(flatmap_val(iter->map)) && iter->u.flat.vs < (flatmap_get_values(flatmap_val(iter->map)) + flatmap_get_size(flatmap_val(iter->map)))); *key = *(iter->u.flat.ks); *value = *(iter->u.flat.vs); return 1; } } else { ASSERT(is_hashmap(iter->map)); if (iter->idx > 0 && iter->idx <= iter->size) { *key = CAR(iter->u.hash.kv); *value = CDR(iter->u.hash.kv); return 1; } } return 0; } int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid, ErlNifMonitor* monitor) { ErtsResource* rsrc = DATA_TO_RESOURCE(obj); Eterm tmp[ERTS_REF_THING_SIZE]; Eterm ref; ErtsResourceMonitors *rm; ErtsMonitorData *mdp; ASSERT(ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc)->magic_binary.destructor == NIF_RESOURCE_DTOR); ASSERT(erts_refc_read(&ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc)->binary.intern.refc, 0) != 0); ASSERT(!rsrc->monitors == !rsrc->type->down); rm = rsrc->monitors; if (!rm) { ASSERT(!rsrc->type->down); return -1; } ASSERT(rsrc->type->down); ref = erts_make_ref_in_buffer(tmp); mdp = erts_monitor_create(ERTS_MON_TYPE_RESOURCE, ref, (Eterm) rsrc, target_pid->pid, NIL); erts_mtx_lock(&rm->lock); ASSERT(!rmon_is_dying(rm)); erts_monitor_tree_insert(&rm->root, &mdp->origin); rmon_refc_inc(rm); erts_mtx_unlock(&rm->lock); if (!erts_proc_sig_send_monitor(&mdp->target, target_pid->pid)) { /* Failed to send monitor signal; cleanup... 
*/ #ifdef DEBUG ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc); #endif erts_mtx_lock(&rm->lock); ASSERT(!rmon_is_dying(rm)); erts_monitor_tree_delete(&rm->root, &mdp->origin); rmon_refc_dec(rm); ASSERT(erts_refc_read(&bin->binary.intern.refc, 1) != 0); erts_mtx_unlock(&rm->lock); erts_monitor_release_both(mdp); return 1; } if (monitor) erts_ref_to_driver_monitor(ref,monitor); return 0; } int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monitor) { ErtsResource* rsrc = DATA_TO_RESOURCE(obj); #ifdef DEBUG ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc); #endif ErtsResourceMonitors *rm; ErtsMonitor *mon; Eterm ref_heap[ERTS_REF_THING_SIZE]; Eterm ref; ASSERT(bin->magic_binary.destructor == NIF_RESOURCE_DTOR); ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0); ref = erts_driver_monitor_to_ref(ref_heap, monitor); rm = rsrc->monitors; erts_mtx_lock(&rm->lock); ASSERT(!rmon_is_dying(rm)); mon = erts_monitor_tree_lookup(rm->root, ref); if (mon) erts_monitor_tree_delete(&rm->root, mon); erts_mtx_unlock(&rm->lock); if (!mon) return 1; ASSERT(erts_monitor_is_origin(mon)); ASSERT(is_internal_pid(mon->other.item)); erts_proc_sig_send_demonitor(mon); return 0; } int enif_compare_monitors(const ErlNifMonitor *monitor1, const ErlNifMonitor *monitor2) { return sys_memcmp((void *) monitor1, (void *) monitor2, ERTS_REF_THING_SIZE*sizeof(Eterm)); } ErlNifIOQueue *enif_ioq_create(ErlNifIOQueueOpts opts) { ErlNifIOQueue *q; if (opts != ERL_NIF_IOQ_NORMAL) return NULL; q = enif_alloc(sizeof(ErlNifIOQueue)); if (!q) return NULL; erts_ioq_init(q, ERTS_ALC_T_NIF, 0); return q; } void enif_ioq_destroy(ErlNifIOQueue *q) { erts_ioq_clear(q); enif_free(q); } /* If the iovec was preallocated (Stack or otherwise) it needs to be marked as * such to perform a proper free. */ #define ERL_NIF_IOVEC_FLAGS_PREALLOC (1 << 0) void enif_free_iovec(ErlNifIOVec *iov) { int i; /* Decrement the refc of all the binaries */ for (i = 0; i < iov->iovcnt; i++) { Binary *bptr = ((Binary**)iov->ref_bins)[i]; /* bptr can be null if enq_binary was used */ if (bptr && erts_refc_dectest(&bptr->intern.refc, 0) == 0) { erts_bin_free(bptr); } } if (!(iov->flags & ERL_NIF_IOVEC_FLAGS_PREALLOC)) { enif_free(iov); } } typedef struct { UWord sublist_length; Eterm sublist_start; Eterm sublist_end; UWord referenced_size; UWord copied_size; UWord iovec_len; } iovec_slice_t; static int examine_iovec_term(Eterm list, UWord max_length, iovec_slice_t *result) { Eterm lookahead; result->sublist_start = list; result->sublist_length = 0; result->referenced_size = 0; result->copied_size = 0; result->iovec_len = 0; lookahead = result->sublist_start; while (is_list(lookahead)) { UWord byte_size; Eterm binary; Eterm *cell; cell = list_val(lookahead); binary = CAR(cell); if (!is_binary(binary)) { return 0; } byte_size = binary_size(binary); if (byte_size > 0) { int bit_offset, bit_size; Eterm parent_binary; UWord byte_offset; int requires_copying; ERTS_GET_REAL_BIN(binary, parent_binary, byte_offset, bit_offset, bit_size); (void)byte_offset; if (bit_size != 0) { return 0; } /* If we're unaligned or an on-heap binary we'll need to copy * ourselves over to a temporary buffer. 
*/ requires_copying = (bit_offset != 0) || thing_subtag(*binary_val(parent_binary)) == HEAP_BINARY_SUBTAG; if (requires_copying) { result->copied_size += byte_size; } else { result->referenced_size += byte_size; } result->iovec_len += 1 + byte_size / MAX_SYSIOVEC_IOVLEN; } result->sublist_length += 1; lookahead = CDR(cell); if (result->sublist_length >= max_length) { break; } } if (!is_nil(lookahead) && !is_list(lookahead)) { return 0; } result->sublist_end = lookahead; return 1; } static void marshal_iovec_binary(Eterm binary, ErlNifBinary *copy_buffer, UWord *copy_offset, ErlNifBinary *result) { Eterm *parent_header; Eterm parent_binary; int bit_offset, bit_size; Uint byte_offset; ASSERT(is_binary(binary)); ERTS_GET_REAL_BIN(binary, parent_binary, byte_offset, bit_offset, bit_size); ASSERT(bit_size == 0); parent_header = binary_val(parent_binary); result->size = binary_size(binary); if (thing_subtag(*parent_header) == REFC_BINARY_SUBTAG) { ProcBin *pb = (ProcBin*)parent_header; if (pb->flags & (PB_IS_WRITABLE | PB_ACTIVE_WRITER)) { erts_emasculate_writable_binary(pb); } ASSERT(pb->val != NULL); ASSERT(byte_offset < pb->size); ASSERT(&pb->bytes[byte_offset] >= (byte*)(pb->val)->orig_bytes); result->data = (unsigned char*)&pb->bytes[byte_offset]; result->ref_bin = (void*)pb->val; } else { ErlHeapBin *hb = (ErlHeapBin*)parent_header; ASSERT(thing_subtag(*parent_header) == HEAP_BINARY_SUBTAG); result->data = &((unsigned char*)&hb->data)[byte_offset]; result->ref_bin = NULL; } /* If this isn't an *aligned* refc binary, copy its contents to the buffer * and reference that instead. */ if (result->ref_bin == NULL || bit_offset != 0) { ASSERT(copy_buffer->ref_bin != NULL && copy_buffer->data != NULL); ASSERT(result->size <= (copy_buffer->size - *copy_offset)); if (bit_offset == 0) { sys_memcpy(&copy_buffer->data[*copy_offset], result->data, result->size); } else { erts_copy_bits(result->data, bit_offset, 1, (byte*)&copy_buffer->data[*copy_offset], 0, 1, result->size * 8); } result->data = &copy_buffer->data[*copy_offset]; result->ref_bin = copy_buffer->ref_bin; *copy_offset += result->size; } } static int fill_iovec_with_slice(ErlNifEnv *env, iovec_slice_t *slice, ErlNifIOVec *iovec) { ErlNifBinary copy_buffer = {0}; UWord copy_offset, iovec_idx; Eterm sublist_iterator; /* Set up a common refc binary for all on-heap and unaligned binaries. */ if (slice->copied_size > 0) { if (!enif_alloc_binary(slice->copied_size, &copy_buffer)) { return 0; } ASSERT(copy_buffer.ref_bin != NULL); } sublist_iterator = slice->sublist_start; copy_offset = 0; iovec_idx = 0; while (sublist_iterator != slice->sublist_end) { ErlNifBinary raw_data; Eterm *cell; cell = list_val(sublist_iterator); marshal_iovec_binary(CAR(cell), &copy_buffer, &copy_offset, &raw_data); while (raw_data.size > 0) { UWord chunk_len = MIN(raw_data.size, MAX_SYSIOVEC_IOVLEN); ASSERT(iovec_idx < iovec->iovcnt); ASSERT(raw_data.ref_bin != NULL); iovec->iov[iovec_idx].iov_base = raw_data.data; iovec->iov[iovec_idx].iov_len = chunk_len; iovec->ref_bins[iovec_idx] = raw_data.ref_bin; raw_data.data += chunk_len; raw_data.size -= chunk_len; iovec_idx += 1; } sublist_iterator = CDR(cell); } ASSERT(iovec_idx == iovec->iovcnt); if (env == NULL) { int i; for (i = 0; i < iovec->iovcnt; i++) { Binary *refc_binary = (Binary*)(iovec->ref_bins[i]); erts_refc_inc(&refc_binary->intern.refc, 1); } if (slice->copied_size > 0) { /* Transfer ownership to the iovec; we've taken references to it in * the above loop. 
*/ enif_release_binary(&copy_buffer); } } else { if (slice->copied_size > 0) { /* Attach the binary to our environment and let the next minor GC * get rid of it. This is slightly faster than using the tmp object * list since it avoids off-heap allocations. */ erts_build_proc_bin(&MSO(env->proc), alloc_heap(env, PROC_BIN_SIZE), copy_buffer.ref_bin); } } return 1; } static int create_iovec_from_slice(ErlNifEnv *env, iovec_slice_t *slice, ErlNifIOVec **result) { ErlNifIOVec *iovec = *result; if (iovec && slice->iovec_len < ERL_NIF_IOVEC_SIZE) { iovec->iov = iovec->small_iov; iovec->ref_bins = iovec->small_ref_bin; iovec->flags = ERL_NIF_IOVEC_FLAGS_PREALLOC; } else { UWord iov_offset, binv_offset, alloc_size; char *alloc_base; iov_offset = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErlNifIOVec)); binv_offset = iov_offset; binv_offset += ERTS_ALC_DATA_ALIGN_SIZE(slice->iovec_len * sizeof(SysIOVec)); alloc_size = binv_offset; alloc_size += slice->iovec_len * sizeof(Binary*); /* When the user passes an environment, we attach the iovec to it so * the user won't have to bother managing it (similar to * enif_inspect_binary). It'll disappear once the environment is * cleaned up. */ if (env != NULL) { alloc_base = alloc_tmp_obj(env, alloc_size, &tmp_alloc_dtor); } else { alloc_base = erts_alloc(ERTS_ALC_T_NIF, alloc_size); } iovec = (ErlNifIOVec*)alloc_base; iovec->iov = (SysIOVec*)(alloc_base + iov_offset); iovec->ref_bins = (void**)(alloc_base + binv_offset); iovec->flags = 0; } iovec->size = slice->referenced_size + slice->copied_size; iovec->iovcnt = slice->iovec_len; if(!fill_iovec_with_slice(env, slice, iovec)) { if (env == NULL && !(iovec->flags & ERL_NIF_IOVEC_FLAGS_PREALLOC)) { erts_free(ERTS_ALC_T_NIF, iovec); } return 0; } *result = iovec; return 1; } int enif_inspect_iovec(ErlNifEnv *env, size_t max_elements, ERL_NIF_TERM list, ERL_NIF_TERM *tail, ErlNifIOVec **iov) { iovec_slice_t slice; if(!examine_iovec_term(list, max_elements, &slice)) { return 0; } else if(!create_iovec_from_slice(env, &slice, iov)) { return 0; } (*tail) = slice.sublist_end; return 1; } /* */ int enif_ioq_enqv(ErlNifIOQueue *q, ErlNifIOVec *iov, size_t skip) { if(skip <= iov->size) { return !erts_ioq_enqv(q, (ErtsIOVec*)iov, skip); } return 0; } int enif_ioq_enq_binary(ErlNifIOQueue *q, ErlNifBinary *bin, size_t skip) { ErlNifIOVec vec = {1, bin->size, NULL, NULL, ERL_NIF_IOVEC_FLAGS_PREALLOC }; Binary *ref_bin = (Binary*)bin->ref_bin; int res; vec.iov = vec.small_iov; vec.ref_bins = vec.small_ref_bin; vec.iov[0].iov_base = bin->data; vec.iov[0].iov_len = bin->size; ((Binary**)(vec.ref_bins))[0] = ref_bin; res = enif_ioq_enqv(q, &vec, skip); enif_release_binary(bin); return res; } size_t enif_ioq_size(ErlNifIOQueue *q) { return erts_ioq_size(q); } int enif_ioq_deq(ErlNifIOQueue *q, size_t elems, size_t *size) { if (erts_ioq_deq(q, elems) == -1) return 0; if (size) *size = erts_ioq_size(q); return 1; } int enif_ioq_peek_head(ErlNifEnv *env, ErlNifIOQueue *q, size_t *size, ERL_NIF_TERM *bin_term) { SysIOVec *iov_entry; Binary *ref_bin; if (q->size == 0) { return 0; } ASSERT(q->b_head != q->b_tail && q->v_head != q->v_tail); ref_bin = &q->b_head[0]->nif; iov_entry = &q->v_head[0]; if (size != NULL) { *size = iov_entry->iov_len; } if (iov_entry->iov_len > ERL_ONHEAP_BIN_LIMIT) { ProcBin *pb = (ProcBin*)alloc_heap(env, PROC_BIN_SIZE); pb->thing_word = HEADER_PROC_BIN; pb->next = MSO(env->proc).first; pb->val = ref_bin; pb->flags = 0; ASSERT((byte*)iov_entry->iov_base >= (byte*)ref_bin->orig_bytes); ASSERT(iov_entry->iov_len <= 
ref_bin->orig_size); pb->bytes = (byte*)iov_entry->iov_base; pb->size = iov_entry->iov_len; MSO(env->proc).first = (struct erl_off_heap_header*) pb; OH_OVERHEAD(&(MSO(env->proc)), pb->size / sizeof(Eterm)); erts_refc_inc(&ref_bin->intern.refc, 2); *bin_term = make_binary(pb); } else { ErlHeapBin* hb = (ErlHeapBin*)alloc_heap(env, heap_bin_size(iov_entry->iov_len)); hb->thing_word = header_heap_bin(iov_entry->iov_len); hb->size = iov_entry->iov_len; sys_memcpy(hb->data, iov_entry->iov_base, iov_entry->iov_len); *bin_term = make_binary(hb); } return 1; } SysIOVec *enif_ioq_peek(ErlNifIOQueue *q, int *iovlen) { return erts_ioq_peekq(q, iovlen); } /*************************************************************************** ** load_nif/2 ** ***************************************************************************/ static ErtsCodeInfo** get_func_pp(BeamCodeHeader* mod_code, Eterm f_atom, unsigned arity) { int n = (int) mod_code->num_functions; int j; for (j = 0; j < n; ++j) { ErtsCodeInfo* ci = mod_code->functions[j]; ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI)); if (f_atom == ci->mfa.function && arity == ci->mfa.arity) { return mod_code->functions+j; } } return NULL; } static Eterm mkatom(const char *str) { return am_atom_put(str, sys_strlen(str)); } struct tainted_module_t { struct tainted_module_t* next; Eterm module_atom; }; erts_atomic_t first_taint; /* struct tainted_module_t* */ void erts_add_taint(Eterm mod_atom) { #ifdef ERTS_ENABLE_LOCK_CHECK extern erts_rwmtx_t erts_driver_list_lock; /* Mutex for driver list */ #endif struct tainted_module_t *first, *t; ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) || erts_thr_progress_is_blocking()); first = (struct tainted_module_t*) erts_atomic_read_nob(&first_taint); for (t=first ; t; t=t->next) { if (t->module_atom == mod_atom) { return; } } t = erts_alloc_fnf(ERTS_ALC_T_TAINT, sizeof(*t)); if (t != NULL) { t->module_atom = mod_atom; t->next = first; erts_atomic_set_nob(&first_taint, (erts_aint_t)t); } } Eterm erts_nif_taints(Process* p) { struct tainted_module_t *first, *t; unsigned cnt = 0; Eterm list = NIL; Eterm* hp; first = (struct tainted_module_t*) erts_atomic_read_nob(&first_taint); for (t=first ; t!=NULL; t=t->next) { cnt++; } hp = HAlloc(p,cnt*2); for (t=first ; t!=NULL; t=t->next) { list = CONS(hp, t->module_atom, list); hp += 2; } return list; } void erts_print_nif_taints(fmtfn_t to, void* to_arg) { struct tainted_module_t *t; const char* delim = ""; t = (struct tainted_module_t*) erts_atomic_read_nob(&first_taint); for ( ; t; t = t->next) { const Atom* atom = atom_tab(atom_val(t->module_atom)); erts_cbprintf(to,to_arg,"%s%.*s", delim, atom->len, atom->name); delim = ","; } erts_cbprintf(to,to_arg,"\n"); } static Eterm load_nif_error(Process* p, const char* atom, const char* format, ...) { erts_dsprintf_buf_t* dsbufp = erts_create_tmp_dsbuf(0); Eterm ret; Eterm* hp; Eterm** hpp = NULL; Uint sz = 0; Uint* szp = &sz; va_list arglist; va_start(arglist, format); erts_vdsprintf(dsbufp, format, arglist); va_end(arglist); for (;;) { Eterm txt = erts_bld_string_n(hpp, &sz, dsbufp->str, dsbufp->str_len); ret = erts_bld_tuple(hpp, szp, 2, am_error, erts_bld_tuple(hpp, szp, 2, mkatom(atom), txt)); if (hpp != NULL) { break; } hp = HAlloc(p,sz); hpp = &hp; szp = NULL; } erts_destroy_tmp_dsbuf(dsbufp); return ret; } #define AT_LEAST_VERSION(E,MAJ,MIN) \ (((E)->major * 0x100 + (E)->minor) >= ((MAJ) * 0x100 + (MIN))) /* * Allocate erl_module_nif and make a _modern_ copy of the lib entry. 
*/ static struct erl_module_nif* create_lib(const ErlNifEntry* src) { struct erl_module_nif* lib; ErlNifEntry* dst; Uint bytes = offsetof(struct erl_module_nif, _funcs_copy_); if (!AT_LEAST_VERSION(src, 2, 7)) bytes += src->num_of_funcs * sizeof(ErlNifFunc); lib = erts_alloc(ERTS_ALC_T_NIF, bytes); dst = &lib->entry; sys_memcpy(dst, src, offsetof(ErlNifEntry, vm_variant)); if (AT_LEAST_VERSION(src, 2, 1)) { dst->vm_variant = src->vm_variant; } else { dst->vm_variant = "beam.vanilla"; } if (AT_LEAST_VERSION(src, 2, 7)) { dst->options = src->options; } else { /* * Make a modern copy of the ErlNifFunc array */ struct ErlNifFunc_V1 { const char* name; unsigned arity; ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]); }*src_funcs = (struct ErlNifFunc_V1*) src->funcs; int i; for (i = 0; i < src->num_of_funcs; ++i) { sys_memcpy(&lib->_funcs_copy_[i], &src_funcs[i], sizeof(*src_funcs)); lib->_funcs_copy_[i].flags = 0; } dst->funcs = lib->_funcs_copy_; dst->options = 0; } if (AT_LEAST_VERSION(src, 2, 12)) { dst->sizeof_ErlNifResourceTypeInit = src->sizeof_ErlNifResourceTypeInit; } else { dst->sizeof_ErlNifResourceTypeInit = 0; } if (AT_LEAST_VERSION(src, 2, 14)) { dst->min_erts = src->min_erts; } else { dst->min_erts = "erts-?"; } return lib; }; BIF_RETTYPE load_nif_2(BIF_ALIST_2) { static const char bad_lib[] = "bad_lib"; static const char upgrade[] = "upgrade"; char* lib_name = NULL; void* handle = NULL; void* init_func = NULL; ErlNifEntry* entry = NULL; ErlNifEnv env; int i, err, encoding; Module* module_p; Eterm mod_atom; const Atom* mod_atomp; Eterm f_atom; ErtsCodeMFA* caller; ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT; Eterm ret = am_ok; int veto; int taint = 1; struct erl_module_nif* lib = NULL; struct erl_module_instance* this_mi; struct erl_module_instance* prev_mi; if (BIF_P->flags & F_HIPE_MODE) { ret = load_nif_error(BIF_P, "notsup", "Calling load_nif from HiPE compiled " "modules not supported"); BIF_RET(ret); } encoding = erts_get_native_filename_encoding(); if (encoding == ERL_FILENAME_WIN_WCHAR) { /* Do not convert the lib name to utf-16le yet, do that in win32 specific code */ /* since lib_name is used in error messages */ encoding = ERL_FILENAME_UTF8; } lib_name = erts_convert_filename_to_encoding(BIF_ARG_1, NULL, 0, ERTS_ALC_T_TMP, 1, 0, encoding, NULL, 0); if (!lib_name) { BIF_ERROR(BIF_P, BADARG); } if (!erts_try_seize_code_write_permission(BIF_P)) { erts_free(ERTS_ALC_T_TMP, lib_name); ERTS_BIF_YIELD2(bif_export[BIF_load_nif_2], BIF_P, BIF_ARG_1, BIF_ARG_2); } /* Block system (is this the right place to do it?) 
*/ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_thr_progress_block(); /* Find calling module */ ASSERT(BIF_P->current != NULL); ASSERT(BIF_P->current->module == am_erlang && BIF_P->current->function == am_load_nif && BIF_P->current->arity == 2); caller = find_function_from_pc(BIF_P->cp); ASSERT(caller != NULL); mod_atom = caller->module; ASSERT(is_atom(mod_atom)); module_p = erts_get_module(mod_atom, erts_active_code_ix()); ASSERT(module_p != NULL); mod_atomp = atom_tab(atom_val(mod_atom)); { ErtsStaticNifEntry* sne; sne = erts_static_nif_get_nif_init((char*)mod_atomp->name, mod_atomp->len); if (sne != NULL) { init_func = sne->nif_init; handle = init_func; taint = sne->taint; } } this_mi = &module_p->curr; prev_mi = &module_p->old; if (in_area(caller, module_p->old.code_hdr, module_p->old.code_length)) { ret = load_nif_error(BIF_P, "old_code", "Calling load_nif from old " "module '%T' not allowed", mod_atom); goto error; } else if (module_p->on_load) { ASSERT(module_p->on_load->code_hdr->on_load_function_ptr); if (module_p->curr.code_hdr) { prev_mi = &module_p->curr; } else { prev_mi = &module_p->old; } this_mi = module_p->on_load; } if (this_mi->nif != NULL) { ret = load_nif_error(BIF_P,"reload","NIF library already loaded" " (reload disallowed since OTP 20)."); } else if (init_func == NULL && (err=erts_sys_ddll_open(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) { const char slogan[] = "Failed to load NIF library"; if (strstr(errdesc.str, lib_name) != NULL) { ret = load_nif_error(BIF_P, "load_failed", "%s: '%s'", slogan, errdesc.str); } else { ret = load_nif_error(BIF_P, "load_failed", "%s %s: '%s'", slogan, lib_name, errdesc.str); } } else if (init_func == NULL && erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) != ERL_DE_NO_ERROR) { ret = load_nif_error(BIF_P, bad_lib, "Failed to find library init" " function: '%s'", errdesc.str); } else if ((taint ? erts_add_taint(mod_atom) : 0, (entry = erts_sys_ddll_call_nif_init(init_func)) == NULL)) { ret = load_nif_error(BIF_P, bad_lib, "Library init-call unsuccessful"); } else if (entry->major > ERL_NIF_MAJOR_VERSION || (entry->major == ERL_NIF_MAJOR_VERSION && entry->minor > ERL_NIF_MINOR_VERSION)) { char* fmt = "That '%T' NIF library needs %s or newer. Either try to" " recompile the NIF lib or use a newer erts runtime."; ret = load_nif_error(BIF_P, bad_lib, fmt, mod_atom, entry->min_erts); } else if (entry->major < ERL_NIF_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD || (entry->major==2 && entry->minor == 5)) { /* experimental maps */ char* fmt = "That old NIF library (%d.%d) is not compatible with this " "erts runtime (%d.%d). 
Try recompile the NIF lib."; ret = load_nif_error(BIF_P, bad_lib, fmt, entry->major, entry->minor, ERL_NIF_MAJOR_VERSION, ERL_NIF_MINOR_VERSION); } else if (AT_LEAST_VERSION(entry, 2, 1) && sys_strcmp(entry->vm_variant, ERL_NIF_VM_VARIANT) != 0) { ret = load_nif_error(BIF_P, bad_lib, "Library (%s) not compiled for " "this vm variant (%s).", entry->vm_variant, ERL_NIF_VM_VARIANT); } else if (!erts_is_atom_str((char*)entry->name, mod_atom, 1)) { ret = load_nif_error(BIF_P, bad_lib, "Library module name '%s' does not" " match calling module '%T'", entry->name, mod_atom); } else { lib = create_lib(entry); entry = &lib->entry; /* Use a guaranteed modern lib entry from now on */ lib->handle = handle; erts_refc_init(&lib->rt_cnt, 0); erts_refc_init(&lib->rt_dtor_cnt, 0); ASSERT(opened_rt_list == NULL); lib->mod = module_p; for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) { ErtsCodeInfo** ci_pp; ErlNifFunc* f = &entry->funcs[i]; if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1) || (ci_pp = get_func_pp(this_mi->code_hdr, f_atom, f->arity))==NULL) { ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u", mod_atom, f->name, f->arity); } else if (f->flags) { /* * If the flags field is non-zero and this emulator was * built with dirty scheduler support, check that the flags * value is legal. But if this emulator was built without * dirty scheduler support, treat a non-zero flags field as * a load error. */ if (f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND && f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND) ret = load_nif_error(BIF_P, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u", f->flags, mod_atom, f->name, f->arity); } else if (erts_codeinfo_to_code(ci_pp[1]) - erts_codeinfo_to_code(ci_pp[0]) < BEAM_NIF_MIN_FUNC_SZ) { ret = load_nif_error(BIF_P,bad_lib,"No explicit call to load_nif" " in module (%T:%s/%u too small)", mod_atom, f->name, f->arity); } /*erts_fprintf(stderr, "Found NIF %T:%s/%u\r\n", mod_atom, f->name, f->arity);*/ } } if (ret != am_ok) { goto error; } /* Call load or upgrade: */ env.mod_nif = lib; lib->priv_data = NULL; if (prev_mi->nif != NULL) { /**************** Upgrade ***************/ void* prev_old_data = prev_mi->nif->priv_data; if (entry->upgrade == NULL) { ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library."); goto error; } erts_pre_nif(&env, BIF_P, lib, NULL); veto = entry->upgrade(&env, &lib->priv_data, &prev_mi->nif->priv_data, BIF_ARG_2); erts_post_nif(&env); if (veto) { prev_mi->nif->priv_data = prev_old_data; ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful (%d).", veto); } } else if (entry->load != NULL) { /********* Initial load ***********/ erts_pre_nif(&env, BIF_P, lib, NULL); veto = entry->load(&env, &lib->priv_data, BIF_ARG_2); erts_post_nif(&env); if (veto) { ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful (%d).", veto); } } if (ret == am_ok) { commit_opened_resource_types(lib); /* ** Everything ok, patch the beam code with op_call_nif */ this_mi->nif = lib; for (i=0; i < entry->num_of_funcs; i++) { ErlNifFunc* f = &entry->funcs[i]; ErtsCodeInfo* ci; BeamInstr *code_ptr; erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1); ci = *get_func_pp(this_mi->code_hdr, f_atom, f->arity); code_ptr = erts_codeinfo_to_code(ci); if (ci->u.gen_bp == NULL) { code_ptr[0] = BeamOpCodeAddr(op_call_nif); } else { /* Function traced, patch the original instruction word */ GenericBp* g = ci->u.gen_bp; ASSERT(BeamIsOpCode(code_ptr[0], 
op_i_generic_breakpoint)); g->orig_instr = BeamOpCodeAddr(op_call_nif); } if (f->flags) { code_ptr[3] = (BeamInstr) f->fptr; code_ptr[1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ? (BeamInstr) static_schedule_dirty_io_nif : (BeamInstr) static_schedule_dirty_cpu_nif; } else code_ptr[1] = (BeamInstr) f->fptr; code_ptr[2] = (BeamInstr) lib; } } else { error: rollback_opened_resource_types(); ASSERT(ret != am_ok); if (lib != NULL) { erts_free(ERTS_ALC_T_NIF, lib); } if (handle != NULL && !erts_is_static_nif(handle)) { erts_sys_ddll_close(handle); } erts_sys_ddll_free_error(&errdesc); } erts_thr_progress_unblock(); erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); erts_free(ERTS_ALC_T_TMP, lib_name); BIF_RET(ret); } void erts_unload_nif(struct erl_module_nif* lib) { ErlNifResourceType* rt; ErlNifResourceType* next; ASSERT(erts_thr_progress_is_blocking()); ASSERT(lib != NULL); ASSERT(lib->mod != NULL); erts_tracer_nif_clear(); for (rt = resource_type_list.next; rt != &resource_type_list; rt = next) { next = rt->next; if (rt->owner == lib) { rt->next->prev = rt->prev; rt->prev->next = rt->next; rt->next = NULL; rt->prev = NULL; if (erts_refc_dectest(&rt->refc, 0) == 0) { if (rt->dtor != NULL) { erts_refc_dec(&lib->rt_dtor_cnt, 0); } erts_refc_dec(&lib->rt_cnt, 0); erts_free(ERTS_ALC_T_NIF, rt); } } } if (erts_refc_read(&lib->rt_dtor_cnt, 0) == 0) { close_lib(lib); if (erts_refc_read(&lib->rt_cnt, 0) == 0) { erts_free(ERTS_ALC_T_NIF, lib); return; } } else { ASSERT(erts_refc_read(&lib->rt_cnt, 1) > 0); } lib->mod = NULL; /* orphan lib */ } void erl_nif_init() { ERTS_CT_ASSERT((offsetof(ErtsResource,data) % 8) == ERTS_MAGIC_BIN_BYTES_TO_ALIGN); resource_type_list.next = &resource_type_list; resource_type_list.prev = &resource_type_list; resource_type_list.dtor = NULL; resource_type_list.owner = NULL; resource_type_list.module = THE_NON_VALUE; resource_type_list.name = THE_NON_VALUE; } int erts_nif_get_funcs(struct erl_module_nif* mod, ErlNifFunc **funcs) { *funcs = mod->entry.funcs; return mod->entry.num_of_funcs; } Module *erts_nif_get_module(struct erl_module_nif *nif_mod) { return nif_mod->mod; } Eterm erts_nif_call_function(Process *p, Process *tracee, struct erl_module_nif* mod, ErlNifFunc *fun, int argc, Eterm *argv) { Eterm nif_result; #ifdef DEBUG /* Verify that function is part of this module */ int i; for (i = 0; i < mod->entry.num_of_funcs; i++) if (fun == &(mod->entry.funcs[i])) break; ASSERT(i < mod->entry.num_of_funcs); if (p) ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN || erts_thr_progress_is_blocking()); #endif if (p) { /* This is almost a normal nif call like in beam_emu, except that any heap consumed by the nif will be released without checking if anything in it is live. This is because we cannot do a GC here as we don't know the number of live registers that have to be preserved. This means that any heap part of the returned term may not be used outside this function. 
*/ struct enif_environment_t env; ErlHeapFragment *orig_hf = MBUF(p); ErlOffHeap orig_oh = MSO(p); Eterm *orig_htop = HEAP_TOP(p); ASSERT(is_internal_pid(p->common.id)); MBUF(p) = NULL; clear_offheap(&MSO(p)); erts_pre_nif(&env, p, mod, tracee); #ifdef ERTS_NIF_ASSERT_IN_ENV env.dbg_disable_assert_in_env = 1; #endif nif_result = (*fun->fptr)(&env, argc, argv); if (env.exception_thrown) nif_result = THE_NON_VALUE; erts_post_nif(&env); /* Free any offheap and heap fragments created in nif */ if (MSO(p).first) { erts_cleanup_offheap(&MSO(p)); clear_offheap(&MSO(p)); } if (MBUF(p)) free_message_buffer(MBUF(p)); /* restore original heap fragment list */ MBUF(p) = orig_hf; MSO(p) = orig_oh; HEAP_TOP(p) = orig_htop; } else { /* Nif call was done without a process context, so we create a phony one. */ struct enif_msg_environment_t msg_env; pre_nif_noproc(&msg_env, mod, tracee); #ifdef ERTS_NIF_ASSERT_IN_ENV msg_env.env.dbg_disable_assert_in_env = 1; #endif nif_result = (*fun->fptr)(&msg_env.env, argc, argv); if (msg_env.env.exception_thrown) nif_result = THE_NON_VALUE; post_nif_noproc(&msg_env); } return nif_result; } #ifdef USE_VM_PROBES void dtrace_nifenv_str(ErlNifEnv *env, char *process_buf) { dtrace_pid_str(env->proc->common.id, process_buf); } #endif #ifdef READONLY_CHECK /* Use checksums to assert that NIFs do not write into inspected binaries */ static void readonly_check_dtor(struct enif_tmp_obj_t*); static unsigned calc_checksum(unsigned char* ptr, unsigned size); struct readonly_check_t { unsigned char* ptr; unsigned size; unsigned checksum; }; static void add_readonly_check(ErlNifEnv* env, unsigned char* ptr, unsigned sz) { struct readonly_check_t* obj; obj = alloc_tmp_obj(env, sizeof(struct readonly_check_t), &readonly_check_dtor); obj->ptr = ptr; obj->size = sz; obj->checksum = calc_checksum(ptr, sz); } static void readonly_check_dtor(struct enif_tmp_obj_t* tmp_obj) { struct readonly_check_t* ro_check = (struct readonly_check_t*)&tmp_obj[1]; unsigned chksum = calc_checksum(ro_check->ptr, ro_check->size); if (chksum != ro_check->checksum) { fprintf(stderr, "\r\nReadonly data written by NIF, checksums differ" " %x != %x\r\nABORTING\r\n", chksum, ro_check->checksum); abort(); } erts_free(tmp_obj->allocator, tmp_obj); } static unsigned calc_checksum(unsigned char* ptr, unsigned size) { unsigned i, sum = 0; for (i=0; i<size; i++) { sum ^= ptr[i] << ((i % 4)*8); } return sum; } #endif /* READONLY_CHECK */ #ifdef ERTS_NIF_ASSERT_IN_ENV static void dbg_assert_in_env(ErlNifEnv* env, Eterm term, int nr, const char* type, const char* func) { Uint saved_used_size; Eterm* real_htop; if (is_immed(term) || (is_non_value(term) && env->exception_thrown) || erts_is_literal(term, ptr_val(term))) return; if (env->dbg_disable_assert_in_env) { /* * Trace nifs may cheat as built terms are discarded after return. * ToDo: Check if 'term' is part of argv[]. 
*/ return; } if (env->heap_frag) { ASSERT(env->heap_frag == MBUF(env->proc)); ASSERT(env->hp >= env->heap_frag->mem); ASSERT(env->hp <= env->heap_frag->mem + env->heap_frag->alloc_size); saved_used_size = env->heap_frag->used_size; env->heap_frag->used_size = env->hp - env->heap_frag->mem; real_htop = NULL; } else { real_htop = env->hp; } if (!erts_dbg_within_proc(ptr_val(term), env->proc, real_htop)) { fprintf(stderr, "\r\nFAILED ASSERTION in %s:\r\n", func); if (nr) { fprintf(stderr, "Term #%d of the %s is not from same ErlNifEnv.", nr, type); } else { fprintf(stderr, "The %s is not from the same ErlNifEnv.", type); } fprintf(stderr, "\r\nABORTING\r\n"); abort(); } if (env->heap_frag) { env->heap_frag->used_size = saved_used_size; } } #endif #ifdef HAVE_USE_DTRACE #define MESSAGE_BUFSIZ 1024 static void get_string_maybe(ErlNifEnv *env, const ERL_NIF_TERM term, char **ptr, char *buf, int bufsiz) { ErlNifBinary str_bin; if (!enif_inspect_iolist_as_binary(env, term, &str_bin) || str_bin.size > bufsiz) { *ptr = NULL; } else { sys_memcpy(buf, (char *) str_bin.data, str_bin.size); buf[str_bin.size] = '\0'; *ptr = buf; } } ERL_NIF_TERM erl_nif_user_trace_s1(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { ErlNifBinary message_bin; DTRACE_CHARBUF(messagebuf, MESSAGE_BUFSIZ + 1); if (DTRACE_ENABLED(user_trace_s1)) { if (!enif_inspect_iolist_as_binary(env, argv[0], &message_bin) || message_bin.size > MESSAGE_BUFSIZ) { return am_badarg; } sys_memcpy(messagebuf, (char *) message_bin.data, message_bin.size); messagebuf[message_bin.size] = '\0'; DTRACE1(user_trace_s1, messagebuf); return am_true; } else { return am_false; } } ERL_NIF_TERM erl_nif_user_trace_i4s4(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { DTRACE_CHARBUF(procbuf, 32 + 1); DTRACE_CHARBUF(user_tagbuf, MESSAGE_BUFSIZ + 1); char *utbuf = NULL; ErlNifSInt64 i1, i2, i3, i4; DTRACE_CHARBUF(messagebuf1, MESSAGE_BUFSIZ + 1); DTRACE_CHARBUF(messagebuf2, MESSAGE_BUFSIZ + 1); DTRACE_CHARBUF(messagebuf3, MESSAGE_BUFSIZ + 1); DTRACE_CHARBUF(messagebuf4, MESSAGE_BUFSIZ + 1); char *mbuf1 = NULL, *mbuf2 = NULL, *mbuf3 = NULL, *mbuf4 = NULL; if (DTRACE_ENABLED(user_trace_i4s4)) { dtrace_nifenv_str(env, procbuf); get_string_maybe(env, argv[0], &utbuf, user_tagbuf, MESSAGE_BUFSIZ); if (! enif_get_int64(env, argv[1], &i1)) i1 = 0; if (! enif_get_int64(env, argv[2], &i2)) i2 = 0; if (! enif_get_int64(env, argv[3], &i3)) i3 = 0; if (! enif_get_int64(env, argv[4], &i4)) i4 = 0; get_string_maybe(env, argv[5], &mbuf1, messagebuf1, MESSAGE_BUFSIZ); get_string_maybe(env, argv[6], &mbuf2, messagebuf2, MESSAGE_BUFSIZ); get_string_maybe(env, argv[7], &mbuf3, messagebuf3, MESSAGE_BUFSIZ); get_string_maybe(env, argv[8], &mbuf4, messagebuf4, MESSAGE_BUFSIZ); DTRACE10(user_trace_i4s4, procbuf, utbuf, i1, i2, i3, i4, mbuf1, mbuf2, mbuf3, mbuf4); return am_true; } else { return am_false; } } #define DTRACE10_LABEL(name, label, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9) \ erlang_##name##label((a0), (a1), (a2), (a3), (a4), (a5), (a6), (a7), (a8), (a9)) #define N_STATEMENT(the_label) \ case the_label: \ if (DTRACE_ENABLED(user_trace_n##the_label)) { \ dtrace_nifenv_str(env, procbuf); \ get_string_maybe(env, argv[1], &utbuf, user_tagbuf, MESSAGE_BUFSIZ); \ if (! enif_get_int64(env, argv[2], &i1)) \ i1 = 0; \ if (! enif_get_int64(env, argv[3], &i2)) \ i2 = 0; \ if (! enif_get_int64(env, argv[4], &i3)) \ i3 = 0; \ if (! 
enif_get_int64(env, argv[5], &i4)) \ i4 = 0; \ get_string_maybe(env, argv[6], &mbuf1, messagebuf1, MESSAGE_BUFSIZ); \ get_string_maybe(env, argv[7], &mbuf2, messagebuf2, MESSAGE_BUFSIZ); \ get_string_maybe(env, argv[8], &mbuf3, messagebuf3, MESSAGE_BUFSIZ); \ get_string_maybe(env, argv[9], &mbuf4, messagebuf4, MESSAGE_BUFSIZ); \ DTRACE10_LABEL(user_trace_n, the_label, procbuf, utbuf, \ i1, i2, i3, i4, mbuf1, mbuf2, mbuf3, mbuf4); \ return am_true; \ } else { \ return am_false; \ } \ break ERL_NIF_TERM erl_nif_user_trace_n(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { DTRACE_CHARBUF(procbuf, 32 + 1); DTRACE_CHARBUF(user_tagbuf, MESSAGE_BUFSIZ + 1); char *utbuf = NULL; ErlNifSInt64 i1, i2, i3, i4; DTRACE_CHARBUF(messagebuf1, MESSAGE_BUFSIZ + 1); DTRACE_CHARBUF(messagebuf2, MESSAGE_BUFSIZ + 1); DTRACE_CHARBUF(messagebuf3, MESSAGE_BUFSIZ + 1); DTRACE_CHARBUF(messagebuf4, MESSAGE_BUFSIZ + 1); char *mbuf1 = NULL, *mbuf2 = NULL, *mbuf3 = NULL, *mbuf4 = NULL; ErlNifSInt64 label = 0; if (! enif_get_int64(env, argv[0], &label) || label < 0 || label > 1023) { return am_badarg; } switch (label) { N_STATEMENT(0); N_STATEMENT(1); N_STATEMENT(2); N_STATEMENT(3); N_STATEMENT(4); N_STATEMENT(5); N_STATEMENT(6); N_STATEMENT(7); N_STATEMENT(8); N_STATEMENT(9); N_STATEMENT(10); N_STATEMENT(11); N_STATEMENT(12); N_STATEMENT(13); N_STATEMENT(14); N_STATEMENT(15); N_STATEMENT(16); N_STATEMENT(17); N_STATEMENT(18); N_STATEMENT(19); N_STATEMENT(20); N_STATEMENT(21); N_STATEMENT(22); N_STATEMENT(23); N_STATEMENT(24); N_STATEMENT(25); N_STATEMENT(26); N_STATEMENT(27); N_STATEMENT(28); N_STATEMENT(29); N_STATEMENT(30); N_STATEMENT(31); N_STATEMENT(32); N_STATEMENT(33); N_STATEMENT(34); N_STATEMENT(35); N_STATEMENT(36); N_STATEMENT(37); N_STATEMENT(38); N_STATEMENT(39); N_STATEMENT(40); N_STATEMENT(41); N_STATEMENT(42); N_STATEMENT(43); N_STATEMENT(44); N_STATEMENT(45); N_STATEMENT(46); N_STATEMENT(47); N_STATEMENT(48); N_STATEMENT(49); N_STATEMENT(50); N_STATEMENT(51); N_STATEMENT(52); N_STATEMENT(53); N_STATEMENT(54); N_STATEMENT(55); N_STATEMENT(56); N_STATEMENT(57); N_STATEMENT(58); N_STATEMENT(59); N_STATEMENT(60); N_STATEMENT(61); N_STATEMENT(62); N_STATEMENT(63); N_STATEMENT(64); N_STATEMENT(65); N_STATEMENT(66); N_STATEMENT(67); N_STATEMENT(68); N_STATEMENT(69); N_STATEMENT(70); N_STATEMENT(71); N_STATEMENT(72); N_STATEMENT(73); N_STATEMENT(74); N_STATEMENT(75); N_STATEMENT(76); N_STATEMENT(77); N_STATEMENT(78); N_STATEMENT(79); N_STATEMENT(80); N_STATEMENT(81); N_STATEMENT(82); N_STATEMENT(83); N_STATEMENT(84); N_STATEMENT(85); N_STATEMENT(86); N_STATEMENT(87); N_STATEMENT(88); N_STATEMENT(89); N_STATEMENT(90); N_STATEMENT(91); N_STATEMENT(92); N_STATEMENT(93); N_STATEMENT(94); N_STATEMENT(95); N_STATEMENT(96); N_STATEMENT(97); N_STATEMENT(98); N_STATEMENT(99); N_STATEMENT(100); N_STATEMENT(101); N_STATEMENT(102); N_STATEMENT(103); N_STATEMENT(104); N_STATEMENT(105); N_STATEMENT(106); N_STATEMENT(107); N_STATEMENT(108); N_STATEMENT(109); N_STATEMENT(110); N_STATEMENT(111); N_STATEMENT(112); N_STATEMENT(113); N_STATEMENT(114); N_STATEMENT(115); N_STATEMENT(116); N_STATEMENT(117); N_STATEMENT(118); N_STATEMENT(119); N_STATEMENT(120); N_STATEMENT(121); N_STATEMENT(122); N_STATEMENT(123); N_STATEMENT(124); N_STATEMENT(125); N_STATEMENT(126); N_STATEMENT(127); N_STATEMENT(128); N_STATEMENT(129); N_STATEMENT(130); N_STATEMENT(131); N_STATEMENT(132); N_STATEMENT(133); N_STATEMENT(134); N_STATEMENT(135); N_STATEMENT(136); N_STATEMENT(137); N_STATEMENT(138); N_STATEMENT(139); N_STATEMENT(140); 
N_STATEMENT(141); N_STATEMENT(142); N_STATEMENT(143); N_STATEMENT(144); N_STATEMENT(145); N_STATEMENT(146); N_STATEMENT(147); N_STATEMENT(148); N_STATEMENT(149); N_STATEMENT(150); N_STATEMENT(151); N_STATEMENT(152); N_STATEMENT(153); N_STATEMENT(154); N_STATEMENT(155); N_STATEMENT(156); N_STATEMENT(157); N_STATEMENT(158); N_STATEMENT(159); N_STATEMENT(160); N_STATEMENT(161); N_STATEMENT(162); N_STATEMENT(163); N_STATEMENT(164); N_STATEMENT(165); N_STATEMENT(166); N_STATEMENT(167); N_STATEMENT(168); N_STATEMENT(169); N_STATEMENT(170); N_STATEMENT(171); N_STATEMENT(172); N_STATEMENT(173); N_STATEMENT(174); N_STATEMENT(175); N_STATEMENT(176); N_STATEMENT(177); N_STATEMENT(178); N_STATEMENT(179); N_STATEMENT(180); N_STATEMENT(181); N_STATEMENT(182); N_STATEMENT(183); N_STATEMENT(184); N_STATEMENT(185); N_STATEMENT(186); N_STATEMENT(187); N_STATEMENT(188); N_STATEMENT(189); N_STATEMENT(190); N_STATEMENT(191); N_STATEMENT(192); N_STATEMENT(193); N_STATEMENT(194); N_STATEMENT(195); N_STATEMENT(196); N_STATEMENT(197); N_STATEMENT(198); N_STATEMENT(199); N_STATEMENT(200); N_STATEMENT(201); N_STATEMENT(202); N_STATEMENT(203); N_STATEMENT(204); N_STATEMENT(205); N_STATEMENT(206); N_STATEMENT(207); N_STATEMENT(208); N_STATEMENT(209); N_STATEMENT(210); N_STATEMENT(211); N_STATEMENT(212); N_STATEMENT(213); N_STATEMENT(214); N_STATEMENT(215); N_STATEMENT(216); N_STATEMENT(217); N_STATEMENT(218); N_STATEMENT(219); N_STATEMENT(220); N_STATEMENT(221); N_STATEMENT(222); N_STATEMENT(223); N_STATEMENT(224); N_STATEMENT(225); N_STATEMENT(226); N_STATEMENT(227); N_STATEMENT(228); N_STATEMENT(229); N_STATEMENT(230); N_STATEMENT(231); N_STATEMENT(232); N_STATEMENT(233); N_STATEMENT(234); N_STATEMENT(235); N_STATEMENT(236); N_STATEMENT(237); N_STATEMENT(238); N_STATEMENT(239); N_STATEMENT(240); N_STATEMENT(241); N_STATEMENT(242); N_STATEMENT(243); N_STATEMENT(244); N_STATEMENT(245); N_STATEMENT(246); N_STATEMENT(247); N_STATEMENT(248); N_STATEMENT(249); N_STATEMENT(250); N_STATEMENT(251); N_STATEMENT(252); N_STATEMENT(253); N_STATEMENT(254); N_STATEMENT(255); N_STATEMENT(256); N_STATEMENT(257); N_STATEMENT(258); N_STATEMENT(259); N_STATEMENT(260); N_STATEMENT(261); N_STATEMENT(262); N_STATEMENT(263); N_STATEMENT(264); N_STATEMENT(265); N_STATEMENT(266); N_STATEMENT(267); N_STATEMENT(268); N_STATEMENT(269); N_STATEMENT(270); N_STATEMENT(271); N_STATEMENT(272); N_STATEMENT(273); N_STATEMENT(274); N_STATEMENT(275); N_STATEMENT(276); N_STATEMENT(277); N_STATEMENT(278); N_STATEMENT(279); N_STATEMENT(280); N_STATEMENT(281); N_STATEMENT(282); N_STATEMENT(283); N_STATEMENT(284); N_STATEMENT(285); N_STATEMENT(286); N_STATEMENT(287); N_STATEMENT(288); N_STATEMENT(289); N_STATEMENT(290); N_STATEMENT(291); N_STATEMENT(292); N_STATEMENT(293); N_STATEMENT(294); N_STATEMENT(295); N_STATEMENT(296); N_STATEMENT(297); N_STATEMENT(298); N_STATEMENT(299); N_STATEMENT(300); N_STATEMENT(301); N_STATEMENT(302); N_STATEMENT(303); N_STATEMENT(304); N_STATEMENT(305); N_STATEMENT(306); N_STATEMENT(307); N_STATEMENT(308); N_STATEMENT(309); N_STATEMENT(310); N_STATEMENT(311); N_STATEMENT(312); N_STATEMENT(313); N_STATEMENT(314); N_STATEMENT(315); N_STATEMENT(316); N_STATEMENT(317); N_STATEMENT(318); N_STATEMENT(319); N_STATEMENT(320); N_STATEMENT(321); N_STATEMENT(322); N_STATEMENT(323); N_STATEMENT(324); N_STATEMENT(325); N_STATEMENT(326); N_STATEMENT(327); N_STATEMENT(328); N_STATEMENT(329); N_STATEMENT(330); N_STATEMENT(331); N_STATEMENT(332); N_STATEMENT(333); N_STATEMENT(334); N_STATEMENT(335); N_STATEMENT(336); N_STATEMENT(337); 
N_STATEMENT(338); N_STATEMENT(339); N_STATEMENT(340); N_STATEMENT(341); N_STATEMENT(342); N_STATEMENT(343); N_STATEMENT(344); N_STATEMENT(345); N_STATEMENT(346); N_STATEMENT(347); N_STATEMENT(348); N_STATEMENT(349); N_STATEMENT(350); N_STATEMENT(351); N_STATEMENT(352); N_STATEMENT(353); N_STATEMENT(354); N_STATEMENT(355); N_STATEMENT(356); N_STATEMENT(357); N_STATEMENT(358); N_STATEMENT(359); N_STATEMENT(360); N_STATEMENT(361); N_STATEMENT(362); N_STATEMENT(363); N_STATEMENT(364); N_STATEMENT(365); N_STATEMENT(366); N_STATEMENT(367); N_STATEMENT(368); N_STATEMENT(369); N_STATEMENT(370); N_STATEMENT(371); N_STATEMENT(372); N_STATEMENT(373); N_STATEMENT(374); N_STATEMENT(375); N_STATEMENT(376); N_STATEMENT(377); N_STATEMENT(378); N_STATEMENT(379); N_STATEMENT(380); N_STATEMENT(381); N_STATEMENT(382); N_STATEMENT(383); N_STATEMENT(384); N_STATEMENT(385); N_STATEMENT(386); N_STATEMENT(387); N_STATEMENT(388); N_STATEMENT(389); N_STATEMENT(390); N_STATEMENT(391); N_STATEMENT(392); N_STATEMENT(393); N_STATEMENT(394); N_STATEMENT(395); N_STATEMENT(396); N_STATEMENT(397); N_STATEMENT(398); N_STATEMENT(399); N_STATEMENT(400); N_STATEMENT(401); N_STATEMENT(402); N_STATEMENT(403); N_STATEMENT(404); N_STATEMENT(405); N_STATEMENT(406); N_STATEMENT(407); N_STATEMENT(408); N_STATEMENT(409); N_STATEMENT(410); N_STATEMENT(411); N_STATEMENT(412); N_STATEMENT(413); N_STATEMENT(414); N_STATEMENT(415); N_STATEMENT(416); N_STATEMENT(417); N_STATEMENT(418); N_STATEMENT(419); N_STATEMENT(420); N_STATEMENT(421); N_STATEMENT(422); N_STATEMENT(423); N_STATEMENT(424); N_STATEMENT(425); N_STATEMENT(426); N_STATEMENT(427); N_STATEMENT(428); N_STATEMENT(429); N_STATEMENT(430); N_STATEMENT(431); N_STATEMENT(432); N_STATEMENT(433); N_STATEMENT(434); N_STATEMENT(435); N_STATEMENT(436); N_STATEMENT(437); N_STATEMENT(438); N_STATEMENT(439); N_STATEMENT(440); N_STATEMENT(441); N_STATEMENT(442); N_STATEMENT(443); N_STATEMENT(444); N_STATEMENT(445); N_STATEMENT(446); N_STATEMENT(447); N_STATEMENT(448); N_STATEMENT(449); N_STATEMENT(450); N_STATEMENT(451); N_STATEMENT(452); N_STATEMENT(453); N_STATEMENT(454); N_STATEMENT(455); N_STATEMENT(456); N_STATEMENT(457); N_STATEMENT(458); N_STATEMENT(459); N_STATEMENT(460); N_STATEMENT(461); N_STATEMENT(462); N_STATEMENT(463); N_STATEMENT(464); N_STATEMENT(465); N_STATEMENT(466); N_STATEMENT(467); N_STATEMENT(468); N_STATEMENT(469); N_STATEMENT(470); N_STATEMENT(471); N_STATEMENT(472); N_STATEMENT(473); N_STATEMENT(474); N_STATEMENT(475); N_STATEMENT(476); N_STATEMENT(477); N_STATEMENT(478); N_STATEMENT(479); N_STATEMENT(480); N_STATEMENT(481); N_STATEMENT(482); N_STATEMENT(483); N_STATEMENT(484); N_STATEMENT(485); N_STATEMENT(486); N_STATEMENT(487); N_STATEMENT(488); N_STATEMENT(489); N_STATEMENT(490); N_STATEMENT(491); N_STATEMENT(492); N_STATEMENT(493); N_STATEMENT(494); N_STATEMENT(495); N_STATEMENT(496); N_STATEMENT(497); N_STATEMENT(498); N_STATEMENT(499); N_STATEMENT(500); N_STATEMENT(501); N_STATEMENT(502); N_STATEMENT(503); N_STATEMENT(504); N_STATEMENT(505); N_STATEMENT(506); N_STATEMENT(507); N_STATEMENT(508); N_STATEMENT(509); N_STATEMENT(510); N_STATEMENT(511); N_STATEMENT(512); N_STATEMENT(513); N_STATEMENT(514); N_STATEMENT(515); N_STATEMENT(516); N_STATEMENT(517); N_STATEMENT(518); N_STATEMENT(519); N_STATEMENT(520); N_STATEMENT(521); N_STATEMENT(522); N_STATEMENT(523); N_STATEMENT(524); N_STATEMENT(525); N_STATEMENT(526); N_STATEMENT(527); N_STATEMENT(528); N_STATEMENT(529); N_STATEMENT(530); N_STATEMENT(531); N_STATEMENT(532); N_STATEMENT(533); N_STATEMENT(534); 
N_STATEMENT(535); N_STATEMENT(536); N_STATEMENT(537); N_STATEMENT(538); N_STATEMENT(539); N_STATEMENT(540); N_STATEMENT(541); N_STATEMENT(542); N_STATEMENT(543); N_STATEMENT(544); N_STATEMENT(545); N_STATEMENT(546); N_STATEMENT(547); N_STATEMENT(548); N_STATEMENT(549); N_STATEMENT(550); N_STATEMENT(551); N_STATEMENT(552); N_STATEMENT(553); N_STATEMENT(554); N_STATEMENT(555); N_STATEMENT(556); N_STATEMENT(557); N_STATEMENT(558); N_STATEMENT(559); N_STATEMENT(560); N_STATEMENT(561); N_STATEMENT(562); N_STATEMENT(563); N_STATEMENT(564); N_STATEMENT(565); N_STATEMENT(566); N_STATEMENT(567); N_STATEMENT(568); N_STATEMENT(569); N_STATEMENT(570); N_STATEMENT(571); N_STATEMENT(572); N_STATEMENT(573); N_STATEMENT(574); N_STATEMENT(575); N_STATEMENT(576); N_STATEMENT(577); N_STATEMENT(578); N_STATEMENT(579); N_STATEMENT(580); N_STATEMENT(581); N_STATEMENT(582); N_STATEMENT(583); N_STATEMENT(584); N_STATEMENT(585); N_STATEMENT(586); N_STATEMENT(587); N_STATEMENT(588); N_STATEMENT(589); N_STATEMENT(590); N_STATEMENT(591); N_STATEMENT(592); N_STATEMENT(593); N_STATEMENT(594); N_STATEMENT(595); N_STATEMENT(596); N_STATEMENT(597); N_STATEMENT(598); N_STATEMENT(599); N_STATEMENT(600); N_STATEMENT(601); N_STATEMENT(602); N_STATEMENT(603); N_STATEMENT(604); N_STATEMENT(605); N_STATEMENT(606); N_STATEMENT(607); N_STATEMENT(608); N_STATEMENT(609); N_STATEMENT(610); N_STATEMENT(611); N_STATEMENT(612); N_STATEMENT(613); N_STATEMENT(614); N_STATEMENT(615); N_STATEMENT(616); N_STATEMENT(617); N_STATEMENT(618); N_STATEMENT(619); N_STATEMENT(620); N_STATEMENT(621); N_STATEMENT(622); N_STATEMENT(623); N_STATEMENT(624); N_STATEMENT(625); N_STATEMENT(626); N_STATEMENT(627); N_STATEMENT(628); N_STATEMENT(629); N_STATEMENT(630); N_STATEMENT(631); N_STATEMENT(632); N_STATEMENT(633); N_STATEMENT(634); N_STATEMENT(635); N_STATEMENT(636); N_STATEMENT(637); N_STATEMENT(638); N_STATEMENT(639); N_STATEMENT(640); N_STATEMENT(641); N_STATEMENT(642); N_STATEMENT(643); N_STATEMENT(644); N_STATEMENT(645); N_STATEMENT(646); N_STATEMENT(647); N_STATEMENT(648); N_STATEMENT(649); N_STATEMENT(650); N_STATEMENT(651); N_STATEMENT(652); N_STATEMENT(653); N_STATEMENT(654); N_STATEMENT(655); N_STATEMENT(656); N_STATEMENT(657); N_STATEMENT(658); N_STATEMENT(659); N_STATEMENT(660); N_STATEMENT(661); N_STATEMENT(662); N_STATEMENT(663); N_STATEMENT(664); N_STATEMENT(665); N_STATEMENT(666); N_STATEMENT(667); N_STATEMENT(668); N_STATEMENT(669); N_STATEMENT(670); N_STATEMENT(671); N_STATEMENT(672); N_STATEMENT(673); N_STATEMENT(674); N_STATEMENT(675); N_STATEMENT(676); N_STATEMENT(677); N_STATEMENT(678); N_STATEMENT(679); N_STATEMENT(680); N_STATEMENT(681); N_STATEMENT(682); N_STATEMENT(683); N_STATEMENT(684); N_STATEMENT(685); N_STATEMENT(686); N_STATEMENT(687); N_STATEMENT(688); N_STATEMENT(689); N_STATEMENT(690); N_STATEMENT(691); N_STATEMENT(692); N_STATEMENT(693); N_STATEMENT(694); N_STATEMENT(695); N_STATEMENT(696); N_STATEMENT(697); N_STATEMENT(698); N_STATEMENT(699); N_STATEMENT(700); N_STATEMENT(701); N_STATEMENT(702); N_STATEMENT(703); N_STATEMENT(704); N_STATEMENT(705); N_STATEMENT(706); N_STATEMENT(707); N_STATEMENT(708); N_STATEMENT(709); N_STATEMENT(710); N_STATEMENT(711); N_STATEMENT(712); N_STATEMENT(713); N_STATEMENT(714); N_STATEMENT(715); N_STATEMENT(716); N_STATEMENT(717); N_STATEMENT(718); N_STATEMENT(719); N_STATEMENT(720); N_STATEMENT(721); N_STATEMENT(722); N_STATEMENT(723); N_STATEMENT(724); N_STATEMENT(725); N_STATEMENT(726); N_STATEMENT(727); N_STATEMENT(728); N_STATEMENT(729); N_STATEMENT(730); N_STATEMENT(731); 
N_STATEMENT(732); N_STATEMENT(733); N_STATEMENT(734); N_STATEMENT(735); N_STATEMENT(736); N_STATEMENT(737); N_STATEMENT(738); N_STATEMENT(739); N_STATEMENT(740); N_STATEMENT(741); N_STATEMENT(742); N_STATEMENT(743); N_STATEMENT(744); N_STATEMENT(745); N_STATEMENT(746); N_STATEMENT(747); N_STATEMENT(748); N_STATEMENT(749); N_STATEMENT(750); N_STATEMENT(751); N_STATEMENT(752); N_STATEMENT(753); N_STATEMENT(754); N_STATEMENT(755); N_STATEMENT(756); N_STATEMENT(757); N_STATEMENT(758); N_STATEMENT(759); N_STATEMENT(760); N_STATEMENT(761); N_STATEMENT(762); N_STATEMENT(763); N_STATEMENT(764); N_STATEMENT(765); N_STATEMENT(766); N_STATEMENT(767); N_STATEMENT(768); N_STATEMENT(769); N_STATEMENT(770); N_STATEMENT(771); N_STATEMENT(772); N_STATEMENT(773); N_STATEMENT(774); N_STATEMENT(775); N_STATEMENT(776); N_STATEMENT(777); N_STATEMENT(778); N_STATEMENT(779); N_STATEMENT(780); N_STATEMENT(781); N_STATEMENT(782); N_STATEMENT(783); N_STATEMENT(784); N_STATEMENT(785); N_STATEMENT(786); N_STATEMENT(787); N_STATEMENT(788); N_STATEMENT(789); N_STATEMENT(790); N_STATEMENT(791); N_STATEMENT(792); N_STATEMENT(793); N_STATEMENT(794); N_STATEMENT(795); N_STATEMENT(796); N_STATEMENT(797); N_STATEMENT(798); N_STATEMENT(799); N_STATEMENT(800); N_STATEMENT(801); N_STATEMENT(802); N_STATEMENT(803); N_STATEMENT(804); N_STATEMENT(805); N_STATEMENT(806); N_STATEMENT(807); N_STATEMENT(808); N_STATEMENT(809); N_STATEMENT(810); N_STATEMENT(811); N_STATEMENT(812); N_STATEMENT(813); N_STATEMENT(814); N_STATEMENT(815); N_STATEMENT(816); N_STATEMENT(817); N_STATEMENT(818); N_STATEMENT(819); N_STATEMENT(820); N_STATEMENT(821); N_STATEMENT(822); N_STATEMENT(823); N_STATEMENT(824); N_STATEMENT(825); N_STATEMENT(826); N_STATEMENT(827); N_STATEMENT(828); N_STATEMENT(829); N_STATEMENT(830); N_STATEMENT(831); N_STATEMENT(832); N_STATEMENT(833); N_STATEMENT(834); N_STATEMENT(835); N_STATEMENT(836); N_STATEMENT(837); N_STATEMENT(838); N_STATEMENT(839); N_STATEMENT(840); N_STATEMENT(841); N_STATEMENT(842); N_STATEMENT(843); N_STATEMENT(844); N_STATEMENT(845); N_STATEMENT(846); N_STATEMENT(847); N_STATEMENT(848); N_STATEMENT(849); N_STATEMENT(850); N_STATEMENT(851); N_STATEMENT(852); N_STATEMENT(853); N_STATEMENT(854); N_STATEMENT(855); N_STATEMENT(856); N_STATEMENT(857); N_STATEMENT(858); N_STATEMENT(859); N_STATEMENT(860); N_STATEMENT(861); N_STATEMENT(862); N_STATEMENT(863); N_STATEMENT(864); N_STATEMENT(865); N_STATEMENT(866); N_STATEMENT(867); N_STATEMENT(868); N_STATEMENT(869); N_STATEMENT(870); N_STATEMENT(871); N_STATEMENT(872); N_STATEMENT(873); N_STATEMENT(874); N_STATEMENT(875); N_STATEMENT(876); N_STATEMENT(877); N_STATEMENT(878); N_STATEMENT(879); N_STATEMENT(880); N_STATEMENT(881); N_STATEMENT(882); N_STATEMENT(883); N_STATEMENT(884); N_STATEMENT(885); N_STATEMENT(886); N_STATEMENT(887); N_STATEMENT(888); N_STATEMENT(889); N_STATEMENT(890); N_STATEMENT(891); N_STATEMENT(892); N_STATEMENT(893); N_STATEMENT(894); N_STATEMENT(895); N_STATEMENT(896); N_STATEMENT(897); N_STATEMENT(898); N_STATEMENT(899); N_STATEMENT(900); N_STATEMENT(901); N_STATEMENT(902); N_STATEMENT(903); N_STATEMENT(904); N_STATEMENT(905); N_STATEMENT(906); N_STATEMENT(907); N_STATEMENT(908); N_STATEMENT(909); N_STATEMENT(910); N_STATEMENT(911); N_STATEMENT(912); N_STATEMENT(913); N_STATEMENT(914); N_STATEMENT(915); N_STATEMENT(916); N_STATEMENT(917); N_STATEMENT(918); N_STATEMENT(919); N_STATEMENT(920); N_STATEMENT(921); N_STATEMENT(922); N_STATEMENT(923); N_STATEMENT(924); N_STATEMENT(925); N_STATEMENT(926); N_STATEMENT(927); N_STATEMENT(928); 
N_STATEMENT(929); N_STATEMENT(930); N_STATEMENT(931); N_STATEMENT(932); N_STATEMENT(933); N_STATEMENT(934); N_STATEMENT(935); N_STATEMENT(936); N_STATEMENT(937); N_STATEMENT(938); N_STATEMENT(939); N_STATEMENT(940); N_STATEMENT(941); N_STATEMENT(942); N_STATEMENT(943); N_STATEMENT(944); N_STATEMENT(945); N_STATEMENT(946); N_STATEMENT(947); N_STATEMENT(948); N_STATEMENT(949); N_STATEMENT(950); } return am_error; /* NOTREACHED, shut up the compiler */ } #endif /* HAVE_USE_DTRACE */
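/* --------------------------------------------------------------------------
 * Illustrative sketch (not part of the ERTS source above): a minimal user
 * NIF that exercises the iovec/IO-queue API implemented in this file
 * (enif_inspect_iovec, enif_ioq_create, enif_ioq_enqv, enif_ioq_size).
 * The module name "ioq_example" and function name "total_size" are made up
 * for the example; error handling is reduced to the bare minimum.
 * ------------------------------------------------------------------------ */
#include <erl_nif.h>

static ERL_NIF_TERM total_size(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifIOVec vec, *iovec = &vec;   /* stack-preallocated small iovec */
    ERL_NIF_TERM tail;
    ErlNifIOQueue *q;
    size_t size;

    /* Turn a list of binaries into an iovec. Unaligned and on-heap binaries
     * are copied into a temporary buffer, as described in the comments of
     * examine_iovec_term()/fill_iovec_with_slice() above. */
    if (argc != 1 || !enif_inspect_iovec(env, 64, argv[0], &tail, &iovec))
        return enif_make_badarg(env);

    q = enif_ioq_create(ERL_NIF_IOQ_NORMAL);
    if (q == NULL)
        return enif_make_badarg(env);

    /* Enqueue the whole iovec (skip = 0) and read back the queued size. */
    if (!enif_ioq_enqv(q, iovec, 0)) {
        enif_ioq_destroy(q);
        return enif_make_badarg(env);
    }
    size = enif_ioq_size(q);
    enif_ioq_destroy(q);

    return enif_make_uint64(env, (ErlNifUInt64)size);
}

static ErlNifFunc example_funcs[] = {
    {"total_size", 1, total_size, 0}
};

ERL_NIF_INIT(ioq_example, example_funcs, NULL, NULL, NULL, NULL)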
814905.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include "constant.h" #include "update_ops.h" #include "utility.h" #ifdef _OPENMP #include <omp.h> #endif #ifdef _USE_SIMD #ifdef _MSC_VER #include <intrin.h> #else #include <x86intrin.h> #endif #endif //void multi_qubit_dense_matrix_gate_old_single(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim); //void multi_qubit_dense_matrix_gate_old_parallel(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim); void create_shift_mask_list_from_list_buf(const UINT* array, UINT count, UINT* dst_array, ITYPE* dst_mask); void multi_qubit_dense_matrix_gate(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) { if (target_qubit_index_count == 1) { single_qubit_dense_matrix_gate(target_qubit_index_list[0], matrix, state, dim); } else if (target_qubit_index_count == 2) { double_qubit_dense_matrix_gate_c(target_qubit_index_list[0], target_qubit_index_list[1], matrix, state, dim); } else { //multi_qubit_dense_matrix_gate_old_single(target_qubit_index_list, target_qubit_index_count, matrix, state, dim); //multi_qubit_dense_matrix_gate_old_parallel(target_qubit_index_list, target_qubit_index_count, matrix, state, dim); //multi_qubit_dense_matrix_gate_single(target_qubit_index_list, target_qubit_index_count, matrix, state, dim); //multi_qubit_dense_matrix_gate_parallel(target_qubit_index_list, target_qubit_index_count, matrix, state, dim); //return; #ifdef _OPENMP UINT threshold = 10; if (dim < (((ITYPE)1) << threshold)) { multi_qubit_dense_matrix_gate_single(target_qubit_index_list, target_qubit_index_count, matrix, state, dim); } else { multi_qubit_dense_matrix_gate_parallel(target_qubit_index_list, target_qubit_index_count, matrix, state, dim); } #else multi_qubit_dense_matrix_gate_single(target_qubit_index_list, target_qubit_index_count, matrix, state, dim); #endif } } void create_shift_mask_list_from_list_buf(const UINT* array, UINT count, UINT* dst_array, ITYPE* dst_mask) { memcpy(dst_array, array, sizeof(UINT)*count); sort_ui(dst_array, count); for (UINT i = 0; i < count; ++i) { dst_mask[i] = (1UL << dst_array[i]) - 1; } } void multi_qubit_dense_matrix_gate_single(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) { UINT sort_array[64]; ITYPE mask_array[64]; create_shift_mask_list_from_list_buf(target_qubit_index_list, target_qubit_index_count, sort_array, mask_array); // matrix dim, mask, buffer const ITYPE matrix_dim = 1ULL << target_qubit_index_count; const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count); // loop variables const ITYPE loop_dim = dim >> target_qubit_index_count; CTYPE* buffer = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim)); ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < target_qubit_index_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } // compute matrix-vector multiply for (ITYPE y = 0; y < matrix_dim; ++y) { buffer[y] = 0; for (ITYPE x = 0; x < matrix_dim; ++x) { buffer[y] += matrix[y*matrix_dim + x] * state[basis_0 ^ matrix_mask_list[x]]; } } // set result for (ITYPE y = 0; y < matrix_dim; ++y) { state[basis_0 ^ matrix_mask_list[y]] = 
buffer[y]; } } free(buffer); free((ITYPE*)matrix_mask_list); } #ifdef _OPENMP void multi_qubit_dense_matrix_gate_parallel(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) { UINT sort_array[64]; ITYPE mask_array[64]; create_shift_mask_list_from_list_buf(target_qubit_index_list, target_qubit_index_count, sort_array, mask_array); // matrix dim, mask, buffer const ITYPE matrix_dim = 1ULL << target_qubit_index_count; const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count); // loop variables const ITYPE loop_dim = dim >> target_qubit_index_count; const UINT thread_count = omp_get_max_threads(); CTYPE* buffer_list = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim*thread_count)); const ITYPE block_size = loop_dim / thread_count; const ITYPE residual = loop_dim % thread_count; #pragma omp parallel { UINT thread_id = omp_get_thread_num(); ITYPE start_index = block_size * thread_id + (residual > thread_id ? thread_id : residual); ITYPE end_index = block_size * (thread_id + 1) + (residual > (thread_id + 1) ? (thread_id + 1) : residual); CTYPE* buffer = buffer_list + thread_id * matrix_dim; ITYPE state_index; for (state_index = start_index; state_index < end_index; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < target_qubit_index_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } // compute matrix-vector multiply for (ITYPE y = 0; y < matrix_dim; ++y) { buffer[y] = 0; for (ITYPE x = 0; x < matrix_dim; ++x) { buffer[y] += matrix[y*matrix_dim + x] * state[basis_0 ^ matrix_mask_list[x]]; } } // set result for (ITYPE y = 0; y < matrix_dim; ++y) { state[basis_0 ^ matrix_mask_list[y]] = buffer[y]; } } } free(buffer_list); free((ITYPE*)matrix_mask_list); } #endif /* void multi_qubit_dense_matrix_gate_old_single(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) { // matrix dim, mask, buffer const ITYPE matrix_dim = 1ULL << target_qubit_index_count; const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count); // insert index const UINT* sorted_insert_index_list = create_sorted_ui_list(target_qubit_index_list, target_qubit_index_count); // loop variables const ITYPE loop_dim = dim >> target_qubit_index_count; CTYPE* buffer = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim)); ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) { UINT insert_index = sorted_insert_index_list[cursor]; basis_0 = insert_zero_to_basis_index(basis_0, 1ULL << insert_index, insert_index); } // compute matrix-vector multiply for (ITYPE y = 0; y < matrix_dim; ++y) { buffer[y] = 0; for (ITYPE x = 0; x < matrix_dim; ++x) { buffer[y] += matrix[y*matrix_dim + x] * state[basis_0 ^ matrix_mask_list[x]]; } } // set result for (ITYPE y = 0; y < matrix_dim; ++y) { state[basis_0 ^ matrix_mask_list[y]] = buffer[y]; } } free(buffer); free((UINT*)sorted_insert_index_list); free((ITYPE*)matrix_mask_list); } #ifdef _OPENMP void multi_qubit_dense_matrix_gate_old_parallel(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) { // matrix dim, mask, buffer const ITYPE matrix_dim = 1ULL << target_qubit_index_count; const ITYPE* 
matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count); // insert index const UINT* sorted_insert_index_list = create_sorted_ui_list(target_qubit_index_list, target_qubit_index_count); // loop variables const ITYPE loop_dim = dim >> target_qubit_index_count; const UINT thread_count = omp_get_max_threads(); CTYPE* buffer_list = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim*thread_count)); const ITYPE block_size = loop_dim / thread_count; const ITYPE residual = loop_dim % thread_count; #pragma omp parallel { UINT thread_id = omp_get_thread_num(); ITYPE start_index = block_size * thread_id + (residual > thread_id ? thread_id : residual); ITYPE end_index = block_size * (thread_id + 1) + (residual > (thread_id + 1) ? (thread_id + 1) : residual); CTYPE* buffer = buffer_list + thread_id * matrix_dim; ITYPE state_index; for (state_index = start_index; state_index < end_index; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) { UINT insert_index = sorted_insert_index_list[cursor]; basis_0 = insert_zero_to_basis_index(basis_0, 1ULL << insert_index, insert_index); } // compute matrix-vector multiply for (ITYPE y = 0; y < matrix_dim; ++y) { buffer[y] = 0; for (ITYPE x = 0; x < matrix_dim; ++x) { buffer[y] += matrix[y*matrix_dim + x] * state[basis_0 ^ matrix_mask_list[x]]; } } // set result for (ITYPE y = 0; y < matrix_dim; ++y) { state[basis_0 ^ matrix_mask_list[y]] = buffer[y]; } } } free(buffer_list); free((UINT*)sorted_insert_index_list); free((ITYPE*)matrix_mask_list); } #endif */
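/* --------------------------------------------------------------------------
 * Illustrative sketch (not part of the file above): a standalone demo of the
 * shift-mask trick used by multi_qubit_dense_matrix_gate_single/_parallel.
 * For each target qubit (in ascending order), the update
 *     basis = (basis & mask) + ((basis & ~mask) << 1)
 * inserts a zero bit at that qubit position, so state_index enumerates only
 * the basis states whose target qubits are all 0. Plain uint64_t/unsigned
 * types are used here instead of the library's ITYPE/UINT typedefs.
 * ------------------------------------------------------------------------ */
#include <stdio.h>
#include <stdint.h>

static uint64_t insert_zero_bits(uint64_t state_index,
                                 const unsigned *sorted_targets,
                                 unsigned target_count)
{
    uint64_t basis = state_index;
    for (unsigned cursor = 0; cursor < target_count; ++cursor) {
        uint64_t mask = (UINT64_C(1) << sorted_targets[cursor]) - 1;
        /* keep the bits below the target, shift the rest up by one bit */
        basis = (basis & mask) + ((basis & ~mask) << 1);
    }
    return basis;
}

int main(void)
{
    const unsigned targets[] = {1, 3};   /* must be sorted ascending */
    uint64_t idx;

    for (idx = 0; idx < 4; ++idx) {
        /* e.g. idx = 3 (0b11) maps to 0b0101: bits 1 and 3 are left at zero */
        uint64_t basis = insert_zero_bits(idx, targets, 2);
        printf("state_index=%llu -> basis_0=0x%llx\n",
               (unsigned long long)idx, (unsigned long long)basis);
    }
    return 0;
}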
221234.c
/*=============================================================================
 * Copyright (c) 2021, Franco Bucafusco <[email protected]>
 * Martin N. Menendez <[email protected]>
 * All rights reserved.
 * License: Free
 * Date: 2021/10/03
 * Version: v1.2
 *===========================================================================*/
/*==================[inclusions]=============================================*/
#include "FreeRTOS.h"
#include "task.h"
#include "semphr.h"
#include "sapi.h"
#include "FreeRTOSConfig.h"
#include "keys.h"
/*==================[definitions and macros]=================================*/
#define RATE 1000
#define LED_RATE pdMS_TO_TICKS(RATE)
#define WELCOME_MSG "Ejercicio D_2.\r\n"
#define USED_UART UART_USB
#define UART_RATE 115200
#define MALLOC_ERROR "Malloc Failed Hook!\n"
#define MSG_ERROR_SEM "Error al crear los semaforos.\r\n"
#define LED_ERROR LEDR
/*==================[internal data definitions]==============================*/
gpioMap_t leds_t[] = {LEDB,LED1,LED2,LED3};
gpioMap_t gpio_t[] = {GPIO7,GPIO5,GPIO3,GPIO1};
/*==================[external data definitions]==============================*/
DEBUG_PRINT_ENABLE;
extern t_key_config keys_config[];
#define LED_COUNT sizeof(leds_t)/sizeof(leds_t[0])
/*==================[internal function declarations]=========================*/
void gpio_init( void );
/*==================[external function declarations]=========================*/
TickType_t get_diff();
void clear_diff();

// Task function prototypes
void tarea_led( void* taskParmPtr );
void tarea_tecla( void* taskParmPtr );

/*==================[main function]==========================================*/
// MAIN FUNCTION, PROGRAM ENTRY POINT AFTER POWER-UP OR RESET.
int main( void )
{
    // ---------- SETUP ------------------------------
    boardConfig();                                  // Initialize and configure the platform
    gpio_init();
    debugPrintConfigUart( USED_UART, UART_RATE );   // UART for debug messages
    printf( WELCOME_MSG );

    BaseType_t res;
    uint32_t i;

    // Create the FreeRTOS tasks
    for ( i = 0 ; i < LED_COUNT ; i++ )
    {
        res = xTaskCreate(
                  tarea_led,                    // Task function to execute
                  ( const char * )"tarea_led",  // User-friendly task name string
                  configMINIMAL_STACK_SIZE*2,   // Task stack size
                  ( void* ) i,                  // Task parameter (LED/key index)
                  tskIDLE_PRIORITY+1,           // Task priority
                  0                             // Pointer to the task created in the system
              );

        // Error handling
        configASSERT( res == pdPASS );
    }

    // Initialize the keys driver
    keys_Init();

    // Start the scheduler
    vTaskStartScheduler();   // Enables the tick | Creates the idle task and puts it in ready | Evaluates the created tasks | The highest-priority one goes to running

    // ---------- REPEAT FOREVER --------------------------
    configASSERT( 0 );   // MUST NEVER GET HERE, because this program runs directly
                         // on a microcontroller and is not called by any operating
                         // system, as a PC program would be.
    return TRUE;
}

/*==================[internal function definitions]==========================*/
void gpio_init( void )
{
    uint32_t i;

    gpioInit ( GPIO0 , GPIO_OUTPUT );

    for( i = 0 ; i < LED_COUNT; i++ )
    {
        gpioInit ( gpio_t[i], GPIO_OUTPUT );
    }
}

/*==================[external function definitions]==========================*/
// Task function implementation
void tarea_led( void* taskParmPtr )
{
    uint32_t index = ( uint32_t ) taskParmPtr;

    // ---------- SETUP ------------------------------
    TickType_t xPeriodicity = LED_RATE;   // Periodic task, every 1000 ms
    TickType_t xLastWakeTime = xTaskGetTickCount();
    TickType_t dif;

    // ---------- REPEAT FOREVER --------------------------
    while( TRUE )
    {
        xSemaphoreTake( keys_config[index].sem_btn, portMAX_DELAY );   // Wait for the key (see the producer-side sketch after this file)

        dif = get_diff( index );

        if ( dif > LED_RATE )
            dif = LED_RATE;

        clear_diff( index );

        gpioWrite( leds_t[index], ON );
        gpioWrite( gpio_t[index], ON );
        vTaskDelay( dif );
        gpioWrite( leds_t[index], OFF );
        gpioWrite( gpio_t[index], OFF );

        //vTaskDelayUntil( &xLastWakeTime , xPeriodicity );
    }
}

/* Hook that runs when a dynamic object is needed and no memory is available */
void vApplicationMallocFailedHook()
{
    printf( MALLOC_ERROR );
    configASSERT( 0 );
}
/*==================[end of file]=============================================*/
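/* --------------------------------------------------------------------------
 * Illustrative sketch only (not part of the file above, and not the real
 * keys.c of this project): the typical producer side that pairs with
 * tarea_led in these exercises. A polling task debounces one key, measures
 * how long it was held, stores that time where get_diff()/clear_diff() can
 * read it, and gives the key's binary semaphore on release. All names below
 * (key_state_t, key_scan_task, sem_btn, pressed_time) are hypothetical.
 * ------------------------------------------------------------------------ */
typedef struct {
    gpioMap_t tec;               /* key pin, e.g. TEC1 */
    SemaphoreHandle_t sem_btn;   /* given on key release */
    TickType_t pressed_time;     /* read by get_diff(), reset by clear_diff() */
} key_state_t;

static void key_scan_task( void* taskParmPtr )
{
    key_state_t *key = (key_state_t *)taskParmPtr;
    TickType_t t_down = 0;

    while( TRUE )
    {
        if ( !gpioRead( key->tec ) )                /* active-low key pressed */
        {
            t_down = xTaskGetTickCount();

            while ( !gpioRead( key->tec ) )         /* wait for release */
            {
                vTaskDelay( pdMS_TO_TICKS( 1 ) );
            }

            key->pressed_time = xTaskGetTickCount() - t_down;
            xSemaphoreGive( key->sem_btn );         /* wake the matching tarea_led */
        }
        vTaskDelay( pdMS_TO_TICKS( 1 ) );           /* poll/debounce period */
    }
}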
995674.c
/* Note: */
#include "Driver_SPI.h"
#include "string.h"
#include "spi.h"
#include "stm32f1xx_hal.h"
#include "usbd_conf.h"
#include "flashchips.h"

extern const struct flashchip * flschip;
extern uint16_t page_sze;

bool in_4ba_mode = false;
int address_high_byte = 0;

static uint32_t address_to_bits(uint32_t addr);

void delay_mic(void)
{
    volatile uint32_t mic = 100;
    while(mic-->0){};
}

//id - UID of chip, if not use -> id=0U
uint32_t CalcCRC32(uint8_t *Buf, uint32_t Len, uint32_t id) //calc CRC hardware
{
    unsigned int i;
    unsigned int Temp;

    __HAL_RCC_CRC_CLK_ENABLE();   // Enable clocking of the CRC unit
    CRC->CR = 1;
    __asm("nop");                 // The hardware is ready after 4 clock cycles, waiting...
    __asm("nop");
    __asm("nop");

    // The hardware CRC engine works on 32-bit words, i.e. it takes 4 bytes of the input sequence at a time
    i = Len >> 2;
    while(i--)
    {
        Temp = *((uint32_t*)Buf);
        //Temp = revbit(Temp);    // Reverse the bit order of the input data
        if (id) Temp = Temp ^ id;
        Temp = __RBIT(Temp);
        CRC->DR = Temp;
        Buf += 4;
    }
    Temp = CRC->DR;
    //Temp = revbit(Temp);        // Reverse the bit order of the output data
    Temp = __RBIT(Temp);

    // Process the remaining bytes (with the classic, non-hardware method) if the length was not a multiple of 4
    i = Len & 3;
    while(i--)
    {
        Temp ^= (uint32_t)*Buf++;
        for(int j=0; j<8; j++)
            if (Temp & 1) Temp = (Temp >> 1) ^ 0xEDB88320;
            else Temp >>= 1;
    }
    Temp ^= 0xFFFFFFFFul;
    return Temp;
}

/* Send cmd to chip */
int32_t SendCmd (uint8_t * cmd, uint8_t bytes)
{
    int32_t status;

    /* Select slave */
    status = ptrSPI->Control(ARM_SPI_CONTROL_SS, ARM_SPI_SS_ACTIVE);
    if (status == ARM_DRIVER_OK)
    {
        status = ptrSPI->Send(cmd, bytes);
        if (status == ARM_DRIVER_OK)
        {
            while (ptrSPI->GetDataCount() != bytes){ __asm("nop"); };
        }
    }

    /* Deselect slave */
    ptrSPI->Control(ARM_SPI_CONTROL_SS, ARM_SPI_SS_INACTIVE);
    //delay_mic();
    return (status);
}

/* Send command with optional data and wait until busy */
int32_t SendCommand_at45 (uint8_t cmd, uint32_t addr, const uint8_t *data, uint32_t size)
{
    uint32_t page_addr;
    uint32_t page_offs;
    uint8_t  buf[4];
    uint8_t  sr;
    int32_t  result, addr_;

    addr_ = at45db_convert_addr(addr, page_sze); //FLASH_PAGE_SIZE_

    /* Prepare Command with address */
    buf[0] = cmd;
    buf[1] = (uint8_t)(addr_ >> 16);
    buf[2] = (uint8_t)(addr_ >> 8);
    buf[3] = (uint8_t)(addr_ >> 0);

    /* Select Slave */
    result = ptrSPI->Control(ARM_SPI_CONTROL_SS, ARM_SPI_SS_ACTIVE);
    if (result != ARM_DRIVER_OK) return result;

    /* Send Command with address */
    result = ptrSPI->Send(buf, 4);
    if (result != ARM_DRIVER_OK) goto transfer_error;
    while (ptrSPI->GetDataCount() != 4){ __asm("nop"); }

    /* Send Data */
    if ((data != NULL) && (size != 0))
    {
        result = ptrSPI->Send(data, size);
        if (result != ARM_DRIVER_OK) goto transfer_error;
        while (ptrSPI->GetDataCount() != size){ __asm("nop"); };
    }

    /* Deselect Slave */
    result = ptrSPI->Control(ARM_SPI_CONTROL_SS, ARM_SPI_SS_INACTIVE);
    if (result != ARM_DRIVER_OK) return result;

    /* Prepare Read Status Command */
    buf[0] = AT45DB_STATUS;
    buf[1] = 0xFF; /* Dummy byte */

    /* Select Slave */
    result = ptrSPI->Control(ARM_SPI_CONTROL_SS, ARM_SPI_SS_ACTIVE);
    if (result != ARM_DRIVER_OK) return result;

    /* Send Command */
    result = ptrSPI->Send(buf, 2);
    if (result != ARM_DRIVER_OK) goto transfer_error;
    while (ptrSPI->GetDataCount() != 2){ __asm("nop"); };

    /* Check Status Register */
    do {
        result = ptrSPI->Receive(&sr, 1);
        if (result != ARM_DRIVER_OK) goto transfer_error;
        while (ptrSPI->GetDataCount() != 1){ __asm("nop"); };
    } while ((sr & 0x80) == 0);

    /* Deselect Slave */
    result = ptrSPI->Control(ARM_SPI_CONTROL_SS, ARM_SPI_SS_INACTIVE);
  if (result != ARM_DRIVER_OK) return result;

  return ARM_DRIVER_OK;

transfer_error:
  ptrSPI->Control(ARM_SPI_CONTROL_SS, ARM_SPI_SS_INACTIVE);
  return ARM_DRIVER_ERROR;
}

/* Read status or flag status register */
int32_t ReadStatusReg (uint8_t cmd, uint8_t *stat)
{
  int32_t status; /* driver execution status */
  uint8_t buf[4];

  /* Select Slave */
  status = ptrSPI->Control(ARM_SPI_CONTROL_SS, ARM_SPI_SS_ACTIVE);
  if (status == ARM_DRIVER_OK) {
    /* Set command */
    buf[0] = cmd;
    /* Send command and receive register value */
    status = ptrSPI->Transfer (&buf[0], &buf[2], 2U);
    if (status == ARM_DRIVER_OK) {
      /* Wait till transfer done */
      while (ptrSPI->GetDataCount() != 2U) { __asm("nop"); };
      *stat = buf[3];
    }
  }
  /* Deselect Slave */
  ptrSPI->Control(ARM_SPI_CONTROL_SS, ARM_SPI_SS_INACTIVE);
  return (status);
}

/* Read flag config register */
int32_t ReadConfigReg (uint8_t cmd, uint8_t *stat)
{
  int32_t status; /* driver execution status */
  uint8_t buf[4];

  /* Select Slave */
  status = ptrSPI->Control(ARM_SPI_CONTROL_SS, ARM_SPI_SS_ACTIVE);
  if (status == ARM_DRIVER_OK) {
    /* Set command */
    buf[0] = cmd;
    /* Send command and receive register value */
    status = ptrSPI->Transfer (&buf[0], &buf[2], 2U);
    if (status == ARM_DRIVER_OK) {
      /* Wait till transfer done */
      while (ptrSPI->GetDataCount() != 2U) { __asm("nop"); };
      *stat = buf[3];
    }
  }
  /* Deselect Slave */
  ptrSPI->Control(ARM_SPI_CONTROL_SS, ARM_SPI_SS_INACTIVE);
  return (status);
}

/* Write status or flag status register */
static int32_t WriteStatusReg (uint8_t sr)
{
  int32_t status = ARM_DRIVER_OK;
  uint8_t buf[4] = {0};
  int32_t feature_bits = flschip->feature_bits;

  if (!(feature_bits & (FEATURE_WRSR_WREN | FEATURE_WRSR_EWSR))) {
    SPI_UsrLog("Missing status register write definition, assuming "
               "EWSR is needed\n");
    feature_bits |= FEATURE_WRSR_EWSR;
  }
  if (feature_bits & FEATURE_WRSR_WREN) {
    status = SetWriteEnable();
    if (status) return status;
  }
  if (feature_bits & FEATURE_WRSR_EWSR) {
    /* Enable-Write-Status-Register first */
    buf[0] = CMD_EWSR;
    status = SendCmd(buf, 1U);
    if (status) return status;
  }
  /* Write the status register itself */
  buf[0] = CMD_WRSR;
  buf[1] = sr;
  status = SendCmd(buf, 2U);
  return (status);
}

/* Set "Write enable latch" bit in status register */
int32_t SetWriteEnable (void)
{
  int32_t status;
  uint8_t val;

  status = ReadStatusReg(CMD_READ_STATUS, &val);
  if (status == ARM_DRIVER_OK) {
    /* Check if "Write enable latch" bit set */
    if (val & 0x02U) {
      return ARM_DRIVER_OK; //no need to set the Write enable latch again
    }
  }
  /* Set command */
  val = CMD_WRITE_ENABLE;
  status = SendCmd(&val, 1U);
  if (status == ARM_DRIVER_OK) {
    /* Read status */
    val = 0U;
    status = ReadStatusReg(CMD_READ_STATUS, &val);
    if (status == ARM_DRIVER_OK) {
      /* Check if "Write enable latch" bit set */
      if ((val & 0x02U) == 0x00U) {
        status = ARM_DRIVER_ERROR;
      }
    }
  }
  return (status);
}

/* Reset "Write enable latch" bit in status register */
int32_t SetWriteDisable (void)
{
  int32_t status;
  uint8_t val;

  val = CMD_WRITE_DISABLE;
  status = SendCmd(&val, 1U);
  if (status == ARM_DRIVER_OK) {
    /* Read status */
    val = 0U;
    status = ReadStatusReg(CMD_READ_STATUS, &val);
    if (status == ARM_DRIVER_OK) {
      /* Check if "Write enable latch" bit cleared */
      if ((val & 0x02U) != 0x00U) {
        status = ARM_DRIVER_ERROR;
      }
    }
  }
  return (status);
}

/* A generic block protection disable.
 * Tests if a protection is enabled with the block protection mask (bp_mask) and returns success otherwise.
 * Tests if the register bits are locked with the lock_mask (lock_mask).
 * Tests if a hardware protection is active (i.e. low pin/high bit value) with the write protection mask
 * (wp_mask) and bails out in that case.
 * If there are register lock bits set we try to disable them by unsetting those bits of the previous register
 * contents that are set in the lock_mask. We then check if removing the lock bits has worked and continue as if
 * they never had been engaged:
 * If the lock bits are out of the way try to disable engaged protections.
 * To support uncommon global unprotects (e.g. on most AT2[56]xx1(A)) unprotect_mask can be used to force
 * bits to 0 additionally to those set in bp_mask and lock_mask. Only bits set in unprotect_mask are potentially
 * preserved when doing the final unprotect.
 *
 * To sum up:
 * bp_mask: set those bits that correspond to the bits in the status register that indicate an active protection
 *          (which should be unset after this function returns).
 * lock_mask: set the bits that correspond to the bits that lock changing the bits above.
 * wp_mask: set the bits that correspond to bits indicating non-software removable protections.
 * unprotect_mask: set the bits that should be preserved if possible when unprotecting.
 */
static int32_t spi_disable_blockprotect_generic(uint8_t bp_mask, uint8_t lock_mask, uint8_t wp_mask, uint8_t unprotect_mask)
{
  int32_t status;
  uint8_t stat;

  status = ReadStatusReg(CMD_READ_STATUS, &stat);
  if (status != ARM_DRIVER_OK) return status;

  if ((stat & bp_mask) == 0) {
    SPI_UsrLog("\nBlock protection is disabled.");
    return ARM_DRIVER_OK;
  }

  SPI_UsrLog("Some block protection in effect, disabling... ");
  if ((stat & lock_mask) != 0) {
    SPI_UsrLog("\n\tNeed to disable the register lock first... ");
    if (wp_mask != 0 && (stat & wp_mask) == 0) {
      SPI_UsrLog("Hardware protection is active, disabling write protection is impossible.\n");
      return 1;
    }
    /* All bits except the register lock bit (often called SPRL, SRWD, WPEN) are readonly. */
    status = WriteStatusReg(stat & ~lock_mask);
    if (status) {
      SPI_UsrLog("spi_write_status_register failed.\n");
      return status;
    }
    status = ReadStatusReg(CMD_READ_STATUS, &stat);
    if (status != ARM_DRIVER_OK) return status;
    if ((stat & lock_mask) != 0) {
      SPI_UsrLog("Unsetting lock bit(s) failed.\n");
      return 1;
    }
    SPI_UsrLog("done.\n");
  }
  /* Global unprotect. Make sure to mask the register lock bit as well. */
  status = WriteStatusReg (stat & ~(bp_mask | lock_mask) & unprotect_mask);
  if (status) {
    SPI_UsrLog("write_status_register failed.\n");
    return status;
  }
  status = ReadStatusReg(CMD_READ_STATUS, &stat);
  if (status != ARM_DRIVER_OK) return status;
  if ((stat & bp_mask) != 0) {
    SPI_UsrLog("Block protection could not be disabled!\n");
    //flash->chip->printlock(flash);
    return ARM_DRIVER_ERROR_SPECIFIC;
  }
  SPI_UsrLog("disabled.\n");
  return ARM_DRIVER_OK;
}

/* A common block protection disable that tries to unset the status register bits masked by 0x3C. */
int32_t spi_disable_blockprotect(void)
{
  return spi_disable_blockprotect_generic( 0x3C, 0, 0, 0xFF);
}

/* Some Atmel DataFlash chips support per sector protection bits and the write protection bits in the status
 * register do indicate if none, some or all sectors are protected. It is possible to globally (un)lock all
 * sectors at once by writing 0 not only the protection bits (2 and 3) but also completely unrelated bits (4 and
 * 5) which normally are not touched.
 * Affected are all known Atmel chips matched by AT2[56]D[FLQ]..1A? but the AT26DF041.
 */
int32_t spi_disable_blockprotect_at2x_global_unprotect(void)
{
  return spi_disable_blockprotect_generic(0x0C, 1 << 7, 1 << 4, 0x00);
}

int32_t spi_disable_blockprotect_at25f512a(void)
{
  return spi_disable_blockprotect_generic(0x04, 1 << 7, 0, 0xFF);
}

/* A common block protection disable that tries to unset the status register bits masked by 0x3C (BP0-3) and
 * protected/locked by bit #7. */
int32_t spi_disable_blockprotect_bp3_srwd(void)
{
  return spi_disable_blockprotect_generic(0x3C, 1 << 7, 0, 0xFF);
}

/* A common block protection disable that tries to unset the status register bits masked by 0x7C (BP0-4) and
 * protected/locked by bit #7. */
int32_t spi_disable_blockprotect_bp4_srwd(void)
{
  return spi_disable_blockprotect_generic(0x7C, 1 << 7, 0, 0xFF);
}

int32_t spi_disable_blockprotect_at25f512b(void)
{
  return spi_disable_blockprotect_generic(0x04, 1 << 7, 1 << 4, 0xFF);
}

/* A common block protection disable that tries to unset the status register bits masked by 0x0C (BP0-1) and
 * protected/locked by bit #7. Useful when bits 4-5 may be non-0. */
int32_t spi_disable_blockprotect_bp1_srwd(void)
{
  return spi_disable_blockprotect_generic(0x0C, 1 << 7, 0, 0xFF);
}

/* A common block protection disable that tries to unset the status register bits masked by 0x1C (BP0-2) and
 * protected/locked by bit #7. Useful when bit #5 is neither a protection bit nor reserved (and hence possibly
 * non-0). */
int32_t spi_disable_blockprotect_bp2_srwd(void)
{
  return spi_disable_blockprotect_generic(0x1C, 1 << 7, 0, 0xFF);
}

/* === Intel/Numonyx/Micron - Spansion === */
int32_t spi_disable_blockprotect_n25q(void)
{
  return spi_disable_blockprotect_generic(0x5C, 1 << 7, 0, 0xFF);
}

int32_t spi_disable_blockprotect_at25f(void)
{
  return spi_disable_blockprotect_generic(0x0C, 1 << 7, 0, 0xFF);
}

int32_t spi_disable_blockprotect_at25fs010(void)
{
  return spi_disable_blockprotect_generic(0x6C, 1 << 7, 0, 0xFF);
}

int32_t spi_disable_blockprotect_at25fs040(void)
{
  return spi_disable_blockprotect_generic(0x7C, 1 << 7, 0, 0xFF);
}

int spi_disable_blockprotect_at45db(void)
{
  static const uint8_t cmd[4] = { AT45DB_DISABLE_PROTECT }; /* NB: 4 bytes magic number */
  int ret = SendCmd ((uint8_t *)&cmd[0], 4);
  if (ret != 0) {
    SPI_UsrLog("Sending disable lockdown failed!\n");
    return ret;
  }
  uint8_t status;
  ret = ReadStatusReg(AT45DB_STATUS, &status);
  if (ret != 0 || ((status & AT45DB_PROT) != 0)) {
    SPI_UsrLog("Disabling lockdown failed!\n");
    return 1;
  }
  return 0;
}

static int spi_write_extended_address_register(const uint8_t regdata)
{
  int32_t status;
  uint8_t cmd[2] = {JEDEC_WRITE_EXT_ADDR_REG, regdata};

  /* Enable data write */
  status = SetWriteEnable();
  if (status) return status;

  status = SendCmd(cmd, 2U);
  if (status)
    SPI_UsrLog("%s :failed during command execution\n", __func__);
  return status;
}

static int spi_set_extended_address(const uint8_t addr_high)
{
  if (address_high_byte != addr_high &&
      spi_write_extended_address_register(addr_high))
    return ARM_DRIVER_ERROR;
  address_high_byte = addr_high;
  return ARM_DRIVER_OK;
}

static int spi_enter_exit_4ba(const bool enter)
{
  uint8_t cmd = enter ? JEDEC_ENTER_4_BYTE_ADDR_MODE : JEDEC_EXIT_4_BYTE_ADDR_MODE;
  int32_t status = ARM_DRIVER_ERROR; /* returned if the chip supports none of the entry methods */

  if (flschip->feature_bits & FEATURE_4BA_ENTER) {
    status = SendCmd(&cmd, 1U);
  } else if (flschip->feature_bits & FEATURE_4BA_ENTER_WREN) {
    /* Enable data write */
    status = SetWriteEnable();
    if (status) return ARM_DRIVER_ERROR;
    status = SendCmd(&cmd, 1U);
  } else if (flschip->feature_bits & FEATURE_4BA_ENTER_EAR7) {
    status = spi_set_extended_address(enter ? 0x80 : 0x00);
  }
  return status;
}

int spi_enter_4ba(void)
{
  int32_t status;
  uint8_t buf = 0xff;

  status = spi_enter_exit_4ba(true);
  if (!status) status = ReadConfigReg(CMD_READ_CONF_REG, &buf);
  if (status != ARM_DRIVER_OK) return status;

  /* Check Flags Config register value */
  if (buf & (1<<5)) {
    SPI_UsrLog("4BA mode enter OK!\n");
    in_4ba_mode = true;
  }
  else return ARM_DRIVER_ERROR;
  return status;
}

int spi_exit_4ba(void)
{
  int32_t status;
  uint8_t buf = 0xff;

  status = spi_enter_exit_4ba(false);
  if (!status) status = ReadConfigReg(CMD_READ_CONF_REG, &buf);
  if (status != ARM_DRIVER_OK) return status;

  /* Check Flags Config register value */
  if (!(buf & (1<<5))) {
    SPI_UsrLog("4BA mode exit OK!\n");
    in_4ba_mode = false;
  }
  else return ARM_DRIVER_ERROR;
  return status;
}

/* Returns the minimum number of bits needed to represent the given address.
 * FIXME: use mind-blowing implementation. */
static uint32_t address_to_bits(uint32_t addr)
{
  unsigned int lzb = 0;
  while (((1 << (31 - lzb)) & ~addr) != 0)
    lzb++;
  return 32 - lzb;
}

unsigned int at45db_convert_addr(unsigned int addr, unsigned int page_size)
{
  unsigned int page_bits = address_to_bits(page_size - 1);
  unsigned int at45db_addr = ((addr / page_size) << page_bits) | (addr % page_size);
  return at45db_addr;
}
/************************************************END************************************************/
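/* Editor's note: the AT45DB address translation above packs the page number and the
 * in-page offset into a single value, which is easiest to see with a concrete page
 * size. The sketch below is illustrative only: it re-implements the two helpers
 * locally (under hypothetical names) so it can be compiled on a host, and shows that
 * for the common 528-byte DataFlash page the offset needs 10 bits, so byte address
 * 1000 lands in page 1 at offset 472 (DataFlash address 0x05D8). */
#include <stdio.h>
#include <stdint.h>

/* Minimum number of bits needed to represent v (local copy for illustration). */
static uint32_t bits_needed(uint32_t v)
{
  uint32_t n = 0;
  while (v) { v >>= 1; n++; }
  return n;
}

/* Pack (page number, offset) the same way at45db_convert_addr() does. */
static uint32_t dataflash_addr(uint32_t addr, uint32_t page_size)
{
  uint32_t page_bits = bits_needed(page_size - 1);
  return ((addr / page_size) << page_bits) | (addr % page_size);
}

int main(void)
{
  /* AT45DB "power of two plus" page size: 528 bytes -> 10 offset bits. */
  uint32_t addr = 1000;
  printf("byte %u -> dataflash 0x%04x (page %u, offset %u)\n",
         (unsigned)addr, (unsigned)dataflash_addr(addr, 528),
         (unsigned)(addr / 528), (unsigned)(addr % 528));
  return 0;
}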
629994.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2010 DENX Software Engineering * * Anatolij Gustschin, <[email protected]> * * PDM360NG board setup */ #include <linux/kernel.h> #include <linux/io.h> #include <linux/of_address.h> #include <linux/of_fdt.h> #include <linux/of_platform.h> #include <asm/machdep.h> #include <asm/ipic.h> #include "mpc512x.h" #if defined(CONFIG_TOUCHSCREEN_ADS7846) || \ defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) #include <linux/interrupt.h> #include <linux/spi/ads7846.h> #include <linux/spi/spi.h> #include <linux/notifier.h> static void *pdm360ng_gpio_base; static int pdm360ng_get_pendown_state(void) { u32 reg; reg = in_be32(pdm360ng_gpio_base + 0xc); if (reg & 0x40) setbits32(pdm360ng_gpio_base + 0xc, 0x40); reg = in_be32(pdm360ng_gpio_base + 0x8); /* return 1 if pen is down */ return (reg & 0x40) == 0; } static struct ads7846_platform_data pdm360ng_ads7846_pdata = { .model = 7845, .get_pendown_state = pdm360ng_get_pendown_state, .irq_flags = IRQF_TRIGGER_LOW, }; static int __init pdm360ng_penirq_init(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-gpio"); if (!np) { pr_err("%s: Can't find 'mpc5121-gpio' node\n", __func__); return -ENODEV; } pdm360ng_gpio_base = of_iomap(np, 0); of_node_put(np); if (!pdm360ng_gpio_base) { pr_err("%s: Can't map gpio regs.\n", __func__); return -ENODEV; } out_be32(pdm360ng_gpio_base + 0xc, 0xffffffff); setbits32(pdm360ng_gpio_base + 0x18, 0x2000); setbits32(pdm360ng_gpio_base + 0x10, 0x40); return 0; } static int pdm360ng_touchscreen_notifier_call(struct notifier_block *nb, unsigned long event, void *__dev) { struct device *dev = __dev; if ((event == BUS_NOTIFY_ADD_DEVICE) && of_device_is_compatible(dev->of_node, "ti,ads7846")) { dev->platform_data = &pdm360ng_ads7846_pdata; return NOTIFY_OK; } return NOTIFY_DONE; } static struct notifier_block pdm360ng_touchscreen_nb = { .notifier_call = pdm360ng_touchscreen_notifier_call, }; static void __init pdm360ng_touchscreen_init(void) { if (pdm360ng_penirq_init()) return; bus_register_notifier(&spi_bus_type, &pdm360ng_touchscreen_nb); } #else static inline void __init pdm360ng_touchscreen_init(void) { } #endif /* CONFIG_TOUCHSCREEN_ADS7846 */ void __init pdm360ng_init(void) { mpc512x_init(); pdm360ng_touchscreen_init(); } static int __init pdm360ng_probe(void) { if (!of_machine_is_compatible("ifm,pdm360ng")) return 0; mpc512x_init_early(); return 1; } define_machine(pdm360ng) { .name = "PDM360NG", .probe = pdm360ng_probe, .setup_arch = mpc512x_setup_arch, .init = pdm360ng_init, .init_IRQ = mpc512x_init_IRQ, .get_irq = ipic_get_irq, .calibrate_decr = generic_calibrate_decr, .restart = mpc512x_restart, };
328241.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE121_Stack_Based_Buffer_Overflow__dest_char_alloca_cat_31.c Label Definition File: CWE121_Stack_Based_Buffer_Overflow__dest.label.xml Template File: sources-sink-31.tmpl.c */ /* * @description * CWE: 121 Stack Based Buffer Overflow * BadSource: Set data pointer to the bad buffer * GoodSource: Set data pointer to the good buffer * Sinks: cat * BadSink : Copy string to data using strcat * Flow Variant: 31 Data flow using a copy of data within the same function * * */ #include "std_testcase.h" #include <wchar.h> #ifndef OMITBAD void CWE121_Stack_Based_Buffer_Overflow__dest_char_alloca_cat_31_bad() { char * data; char * dataBadBuffer = (char *)ALLOCA(50*sizeof(char)); char * dataGoodBuffer = (char *)ALLOCA(100*sizeof(char)); /* FLAW: Set a pointer to a "small" buffer. This buffer will be used in the sinks as a destination * buffer in various memory copying functions using a "large" source buffer. */ data = dataBadBuffer; data[0] = '\0'; /* null terminate */ { char * dataCopy = data; char * data = dataCopy; { char source[100]; memset(source, 'C', 100-1); /* fill with 'C's */ source[100-1] = '\0'; /* null terminate */ /* POTENTIAL FLAW: Possible buffer overflow if the sizeof(data)-strlen(data) is less than the length of source */ strcat(data, source); printLine(data); } } } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodG2B() uses the GoodSource with the BadSink */ static void goodG2B() { char * data; char * dataBadBuffer = (char *)ALLOCA(50*sizeof(char)); char * dataGoodBuffer = (char *)ALLOCA(100*sizeof(char)); /* FIX: Set a pointer to a "large" buffer, thus avoiding buffer overflows in the sinks. */ data = dataGoodBuffer; data[0] = '\0'; /* null terminate */ { char * dataCopy = data; char * data = dataCopy; { char source[100]; memset(source, 'C', 100-1); /* fill with 'C's */ source[100-1] = '\0'; /* null terminate */ /* POTENTIAL FLAW: Possible buffer overflow if the sizeof(data)-strlen(data) is less than the length of source */ strcat(data, source); printLine(data); } } } void CWE121_Stack_Based_Buffer_Overflow__dest_char_alloca_cat_31_good() { goodG2B(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on * its own for testing or for building a binary to use in testing binary * analysis tools. It is not used when compiling all the testcases as one * application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE121_Stack_Based_Buffer_Overflow__dest_char_alloca_cat_31_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE121_Stack_Based_Buffer_Overflow__dest_char_alloca_cat_31_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
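/* Editor's note: the Juliet test case above deliberately keeps the unbounded strcat()
 * into a 50-byte ALLOCA buffer so that analysis tools have a flaw to find. For
 * contrast, the sketch below shows a bounds-checked sink; it is not part of the test
 * suite, and append_checked() is a hypothetical helper used only for illustration. */
#include <stdio.h>
#include <string.h>

/* Append src to dst without ever writing past dst_size bytes; output is truncated
 * instead of overflowing and is always NUL-terminated. */
static void append_checked(char *dst, size_t dst_size, const char *src)
{
  size_t used = strlen(dst);
  if (used >= dst_size) return;                 /* dst is not sanely terminated */
  snprintf(dst + used, dst_size - used, "%s", src);
}

int main(void)
{
  char small[50] = "";
  char source[100];
  memset(source, 'C', sizeof(source) - 1);      /* fill with 'C's */
  source[sizeof(source) - 1] = '\0';
  append_checked(small, sizeof(small), source); /* truncates rather than overflows */
  printf("%zu bytes kept: %s\n", strlen(small), small);
  return 0;
}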
667718.c
/* * FreeRTOS Kernel V10.1.1 * Copyright (C) 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * http://www.FreeRTOS.org * http://aws.amazon.com/freertos * * 1 tab == 4 spaces! */ /* Standard includes. */ #include <stdint.h> #include <string.h> /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining all the API functions to use the MPU wrappers. That should only be done when task.h is included from an application file. */ #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE /* FreeRTOS includes. */ #include "Arduino_FreeRTOS.h" #include "task.h" #include "stream_buffer.h" #if( configUSE_TASK_NOTIFICATIONS != 1 ) #error configUSE_TASK_NOTIFICATIONS must be set to 1 to build stream_buffer.c #endif /* Lint e961, e9021 and e750 are suppressed as a MISRA exception justified because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the header files above, but not in this file, in order to generate the correct privileged Vs unprivileged linkage and placement. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */ /* If the user has not provided application specific Rx notification macros, or #defined the notification macros away, them provide default implementations that uses task notifications. */ /*lint -save -e9026 Function like macros allowed and needed here so they can be overidden. 
*/ #ifndef sbRECEIVE_COMPLETED #define sbRECEIVE_COMPLETED( pxStreamBuffer ) \ vTaskSuspendAll(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ { \ ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend, \ ( uint32_t ) 0, \ eNoAction ); \ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \ } \ ( void ) xTaskResumeAll(); #endif /* sbRECEIVE_COMPLETED */ #ifndef sbRECEIVE_COMPLETED_FROM_ISR #define sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \ pxHigherPriorityTaskWoken ) \ { \ UBaseType_t uxSavedInterruptStatus; \ \ uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ { \ ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToSend, \ ( uint32_t ) 0, \ eNoAction, \ pxHigherPriorityTaskWoken ); \ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \ } \ portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ } #endif /* sbRECEIVE_COMPLETED_FROM_ISR */ /* If the user has not provided an application specific Tx notification macro, or #defined the notification macro away, them provide a default implementation that uses task notifications. */ #ifndef sbSEND_COMPLETED #define sbSEND_COMPLETED( pxStreamBuffer ) \ vTaskSuspendAll(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ { \ ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive, \ ( uint32_t ) 0, \ eNoAction ); \ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \ } \ ( void ) xTaskResumeAll(); #endif /* sbSEND_COMPLETED */ #ifndef sbSEND_COMPLETE_FROM_ISR #define sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ { \ UBaseType_t uxSavedInterruptStatus; \ \ uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ { \ ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToReceive, \ ( uint32_t ) 0, \ eNoAction, \ pxHigherPriorityTaskWoken ); \ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \ } \ portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ } #endif /* sbSEND_COMPLETE_FROM_ISR */ /*lint -restore (9026) */ /* The number of bytes used to hold the length of a message in the buffer. */ #define sbBYTES_TO_STORE_MESSAGE_LENGTH ( sizeof( configMESSAGE_BUFFER_LENGTH_TYPE ) ) /* Bits stored in the ucFlags field of the stream buffer. */ #define sbFLAGS_IS_MESSAGE_BUFFER ( ( uint8_t ) 1 ) /* Set if the stream buffer was created as a message buffer, in which case it holds discrete messages rather than a stream. */ #define sbFLAGS_IS_STATICALLY_ALLOCATED ( ( uint8_t ) 2 ) /* Set if the stream buffer was created using statically allocated memory. */ /*-----------------------------------------------------------*/ /* Structure that hold state information on the buffer. */ typedef struct StreamBufferDef_t /*lint !e9058 Style convention uses tag. */ { volatile size_t xTail; /* Index to the next item to read within the buffer. */ volatile size_t xHead; /* Index to the next item to write within the buffer. */ size_t xLength; /* The length of the buffer pointed to by pucBuffer. */ size_t xTriggerLevelBytes; /* The number of bytes that must be in the stream buffer before a task that is waiting for data is unblocked. */ volatile TaskHandle_t xTaskWaitingToReceive; /* Holds the handle of a task waiting for data, or NULL if no tasks are waiting. */ volatile TaskHandle_t xTaskWaitingToSend; /* Holds the handle of a task waiting to send data to a message buffer that is full. 
*/ uint8_t *pucBuffer; /* Points to the buffer itself - that is - the RAM that stores the data passed through the buffer. */ uint8_t ucFlags; #if ( configUSE_TRACE_FACILITY == 1 ) UBaseType_t uxStreamBufferNumber; /* Used for tracing purposes. */ #endif } StreamBuffer_t; /* * The number of bytes available to be read from the buffer. */ static size_t prvBytesInBuffer( const StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION; /* * Add xCount bytes from pucData into the pxStreamBuffer message buffer. * Returns the number of bytes written, which will either equal xCount in the * success case, or 0 if there was not enough space in the buffer (in which case * no data is written into the buffer). */ static size_t prvWriteBytesToBuffer( StreamBuffer_t * const pxStreamBuffer, const uint8_t *pucData, size_t xCount ) PRIVILEGED_FUNCTION; /* * If the stream buffer is being used as a message buffer, then reads an entire * message out of the buffer. If the stream buffer is being used as a stream * buffer then read as many bytes as possible from the buffer. * prvReadBytesFromBuffer() is called to actually extract the bytes from the * buffer's data storage area. */ static size_t prvReadMessageFromBuffer( StreamBuffer_t *pxStreamBuffer, void *pvRxData, size_t xBufferLengthBytes, size_t xBytesAvailable, size_t xBytesToStoreMessageLength ) PRIVILEGED_FUNCTION; /* * If the stream buffer is being used as a message buffer, then writes an entire * message to the buffer. If the stream buffer is being used as a stream * buffer then write as many bytes as possible to the buffer. * prvWriteBytestoBuffer() is called to actually send the bytes to the buffer's * data storage area. */ static size_t prvWriteMessageToBuffer( StreamBuffer_t * const pxStreamBuffer, const void * pvTxData, size_t xDataLengthBytes, size_t xSpace, size_t xRequiredSpace ) PRIVILEGED_FUNCTION; /* * Read xMaxCount bytes from the pxStreamBuffer message buffer and write them * to pucData. */ static size_t prvReadBytesFromBuffer( StreamBuffer_t *pxStreamBuffer, uint8_t *pucData, size_t xMaxCount, size_t xBytesAvailable ) PRIVILEGED_FUNCTION; /* * Called by both pxStreamBufferCreate() and pxStreamBufferCreateStatic() to * initialise the members of the newly created stream buffer structure. */ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, uint8_t * const pucBuffer, size_t xBufferSizeBytes, size_t xTriggerLevelBytes, uint8_t ucFlags ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer ) { uint8_t *pucAllocatedMemory; uint8_t ucFlags; /* In case the stream buffer is going to be used as a message buffer (that is, it will hold discrete messages with a little meta data that says how big the next message is) check the buffer will be large enough to hold at least one message. */ if( xIsMessageBuffer == pdTRUE ) { /* Is a message buffer but not statically allocated. */ ucFlags = sbFLAGS_IS_MESSAGE_BUFFER; configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH ); } else { /* Not a message buffer and not statically allocated. */ ucFlags = 0; configASSERT( xBufferSizeBytes > 0 ); } configASSERT( xTriggerLevelBytes <= xBufferSizeBytes ); /* A trigger level of 0 would cause a waiting task to unblock even when the buffer was empty. 
*/ if( xTriggerLevelBytes == ( size_t ) 0 ) { xTriggerLevelBytes = ( size_t ) 1; } /* A stream buffer requires a StreamBuffer_t structure and a buffer. Both are allocated in a single call to pvPortMalloc(). The StreamBuffer_t structure is placed at the start of the allocated memory and the buffer follows immediately after. The requested size is incremented so the free space is returned as the user would expect - this is a quirk of the implementation that means otherwise the free space would be reported as one byte smaller than would be logically expected. */ xBufferSizeBytes++; pucAllocatedMemory = ( uint8_t * ) pvPortMalloc( xBufferSizeBytes + sizeof( StreamBuffer_t ) ); /*lint !e9079 malloc() only returns void*. */ if( pucAllocatedMemory != NULL ) { prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pucAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */ pucAllocatedMemory + sizeof( StreamBuffer_t ), /* Storage area follows. */ /*lint !e9016 Indexing past structure valid for uint8_t pointer, also storage area has no alignment requirement. */ xBufferSizeBytes, xTriggerLevelBytes, ucFlags ); traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pucAllocatedMemory ), xIsMessageBuffer ); } else { traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer ); } return ( StreamBufferHandle_t ) pucAllocatedMemory; /*lint !e9087 !e826 Safe cast as allocated memory is aligned. */ } #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ /*-----------------------------------------------------------*/ #if( configSUPPORT_STATIC_ALLOCATION == 1 ) StreamBufferHandle_t xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer, uint8_t * const pucStreamBufferStorageArea, StaticStreamBuffer_t * const pxStaticStreamBuffer ) { StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) pxStaticStreamBuffer; /*lint !e740 !e9087 Safe cast as StaticStreamBuffer_t is opaque Streambuffer_t. */ StreamBufferHandle_t xReturn; uint8_t ucFlags; configASSERT( pucStreamBufferStorageArea ); configASSERT( pxStaticStreamBuffer ); configASSERT( xTriggerLevelBytes <= xBufferSizeBytes ); /* A trigger level of 0 would cause a waiting task to unblock even when the buffer was empty. */ if( xTriggerLevelBytes == ( size_t ) 0 ) { xTriggerLevelBytes = ( size_t ) 1; } if( xIsMessageBuffer != pdFALSE ) { /* Statically allocated message buffer. */ ucFlags = sbFLAGS_IS_MESSAGE_BUFFER | sbFLAGS_IS_STATICALLY_ALLOCATED; } else { /* Statically allocated stream buffer. */ ucFlags = sbFLAGS_IS_STATICALLY_ALLOCATED; } /* In case the stream buffer is going to be used as a message buffer (that is, it will hold discrete messages with a little meta data that says how big the next message is) check the buffer will be large enough to hold at least one message. */ configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH ); #if( configASSERT_DEFINED == 1 ) { /* Sanity check that the size of the structure used to declare a variable of type StaticStreamBuffer_t equals the size of the real message buffer structure. */ volatile size_t xSize = sizeof( StaticStreamBuffer_t ); configASSERT( xSize == sizeof( StreamBuffer_t ) ); } /*lint !e529 xSize is referenced is configASSERT() is defined. 
*/ #endif /* configASSERT_DEFINED */ if( ( pucStreamBufferStorageArea != NULL ) && ( pxStaticStreamBuffer != NULL ) ) { prvInitialiseNewStreamBuffer( pxStreamBuffer, pucStreamBufferStorageArea, xBufferSizeBytes, xTriggerLevelBytes, ucFlags ); /* Remember this was statically allocated in case it is ever deleted again. */ pxStreamBuffer->ucFlags |= sbFLAGS_IS_STATICALLY_ALLOCATED; traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer ); xReturn = ( StreamBufferHandle_t ) pxStaticStreamBuffer; /*lint !e9087 Data hiding requires cast to opaque type. */ } else { xReturn = NULL; traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xReturn, xIsMessageBuffer ); } return xReturn; } #endif /* ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ /*-----------------------------------------------------------*/ void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) { StreamBuffer_t * pxStreamBuffer = xStreamBuffer; configASSERT( pxStreamBuffer ); traceSTREAM_BUFFER_DELETE( xStreamBuffer ); if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_STATICALLY_ALLOCATED ) == ( uint8_t ) pdFALSE ) { #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) { /* Both the structure and the buffer were allocated using a single call to pvPortMalloc(), hence only one call to vPortFree() is required. */ vPortFree( ( void * ) pxStreamBuffer ); /*lint !e9087 Standard free() semantics require void *, plus pxStreamBuffer was allocated by pvPortMalloc(). */ } #else { /* Should not be possible to get here, ucFlags must be corrupt. Force an assert. */ configASSERT( xStreamBuffer == ( StreamBufferHandle_t ) ~0 ); } #endif } else { /* The structure and buffer were not allocated dynamically and cannot be freed - just scrub the structure so future use will assert. */ ( void ) memset( pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); } } /*-----------------------------------------------------------*/ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; BaseType_t xReturn = pdFAIL; #if( configUSE_TRACE_FACILITY == 1 ) UBaseType_t uxStreamBufferNumber; #endif configASSERT( pxStreamBuffer ); #if( configUSE_TRACE_FACILITY == 1 ) { /* Store the stream buffer number so it can be restored after the reset. */ uxStreamBufferNumber = pxStreamBuffer->uxStreamBufferNumber; } #endif /* Can only reset a message buffer if there are no tasks blocked on it. */ taskENTER_CRITICAL(); { if( pxStreamBuffer->xTaskWaitingToReceive == NULL ) { if( pxStreamBuffer->xTaskWaitingToSend == NULL ) { prvInitialiseNewStreamBuffer( pxStreamBuffer, pxStreamBuffer->pucBuffer, pxStreamBuffer->xLength, pxStreamBuffer->xTriggerLevelBytes, pxStreamBuffer->ucFlags ); xReturn = pdPASS; #if( configUSE_TRACE_FACILITY == 1 ) { pxStreamBuffer->uxStreamBufferNumber = uxStreamBufferNumber; } #endif traceSTREAM_BUFFER_RESET( xStreamBuffer ); } } } taskEXIT_CRITICAL(); return xReturn; } /*-----------------------------------------------------------*/ BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; BaseType_t xReturn; configASSERT( pxStreamBuffer ); /* It is not valid for the trigger level to be 0. */ if( xTriggerLevel == ( size_t ) 0 ) { xTriggerLevel = ( size_t ) 1; } /* The trigger level is the number of bytes that must be in the stream buffer before a task that is waiting for data is unblocked. 
*/ if( xTriggerLevel <= pxStreamBuffer->xLength ) { pxStreamBuffer->xTriggerLevelBytes = xTriggerLevel; xReturn = pdPASS; } else { xReturn = pdFALSE; } return xReturn; } /*-----------------------------------------------------------*/ size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) { const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; size_t xSpace; configASSERT( pxStreamBuffer ); xSpace = pxStreamBuffer->xLength + pxStreamBuffer->xTail; xSpace -= pxStreamBuffer->xHead; xSpace -= ( size_t ) 1; if( xSpace >= pxStreamBuffer->xLength ) { xSpace -= pxStreamBuffer->xLength; } else { mtCOVERAGE_TEST_MARKER(); } return xSpace; } /*-----------------------------------------------------------*/ size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) { const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; size_t xReturn; configASSERT( pxStreamBuffer ); xReturn = prvBytesInBuffer( pxStreamBuffer ); return xReturn; } /*-----------------------------------------------------------*/ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, const void *pvTxData, size_t xDataLengthBytes, TickType_t xTicksToWait ) { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; size_t xReturn, xSpace = 0; size_t xRequiredSpace = xDataLengthBytes; TimeOut_t xTimeOut; configASSERT( pvTxData ); configASSERT( pxStreamBuffer ); /* This send function is used to write to both message buffers and stream buffers. If this is a message buffer then the space needed must be increased by the amount of bytes needed to store the length of the message. */ if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) { xRequiredSpace += sbBYTES_TO_STORE_MESSAGE_LENGTH; /* Overflow? */ configASSERT( xRequiredSpace > xDataLengthBytes ); } else { mtCOVERAGE_TEST_MARKER(); } if( xTicksToWait != ( TickType_t ) 0 ) { vTaskSetTimeOutState( &xTimeOut ); do { /* Wait until the required number of bytes are free in the message buffer. */ taskENTER_CRITICAL(); { xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); if( xSpace < xRequiredSpace ) { /* Clear notification state as going to wait for space. */ ( void ) xTaskNotifyStateClear( NULL ); /* Should only be one writer. */ configASSERT( pxStreamBuffer->xTaskWaitingToSend == NULL ); pxStreamBuffer->xTaskWaitingToSend = xTaskGetCurrentTaskHandle(); } else { taskEXIT_CRITICAL(); break; } } taskEXIT_CRITICAL(); traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer ); ( void ) xTaskNotifyWait( ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait ); pxStreamBuffer->xTaskWaitingToSend = NULL; } while( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ); } else { mtCOVERAGE_TEST_MARKER(); } if( xSpace == ( size_t ) 0 ) { xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); } else { mtCOVERAGE_TEST_MARKER(); } xReturn = prvWriteMessageToBuffer( pxStreamBuffer, pvTxData, xDataLengthBytes, xSpace, xRequiredSpace ); if( xReturn > ( size_t ) 0 ) { traceSTREAM_BUFFER_SEND( xStreamBuffer, xReturn ); /* Was a task waiting for the data? 
*/ if( prvBytesInBuffer( pxStreamBuffer ) >= pxStreamBuffer->xTriggerLevelBytes ) { sbSEND_COMPLETED( pxStreamBuffer ); } else { mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); traceSTREAM_BUFFER_SEND_FAILED( xStreamBuffer ); } return xReturn; } /*-----------------------------------------------------------*/ size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer, const void *pvTxData, size_t xDataLengthBytes, BaseType_t * const pxHigherPriorityTaskWoken ) { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; size_t xReturn, xSpace; size_t xRequiredSpace = xDataLengthBytes; configASSERT( pvTxData ); configASSERT( pxStreamBuffer ); /* This send function is used to write to both message buffers and stream buffers. If this is a message buffer then the space needed must be increased by the amount of bytes needed to store the length of the message. */ if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) { xRequiredSpace += sbBYTES_TO_STORE_MESSAGE_LENGTH; } else { mtCOVERAGE_TEST_MARKER(); } xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); xReturn = prvWriteMessageToBuffer( pxStreamBuffer, pvTxData, xDataLengthBytes, xSpace, xRequiredSpace ); if( xReturn > ( size_t ) 0 ) { /* Was a task waiting for the data? */ if( prvBytesInBuffer( pxStreamBuffer ) >= pxStreamBuffer->xTriggerLevelBytes ) { sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); } else { mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); } traceSTREAM_BUFFER_SEND_FROM_ISR( xStreamBuffer, xReturn ); return xReturn; } /*-----------------------------------------------------------*/ static size_t prvWriteMessageToBuffer( StreamBuffer_t * const pxStreamBuffer, const void * pvTxData, size_t xDataLengthBytes, size_t xSpace, size_t xRequiredSpace ) { BaseType_t xShouldWrite; size_t xReturn; if( xSpace == ( size_t ) 0 ) { /* Doesn't matter if this is a stream buffer or a message buffer, there is no space to write. */ xShouldWrite = pdFALSE; } else if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) == ( uint8_t ) 0 ) { /* This is a stream buffer, as opposed to a message buffer, so writing a stream of bytes rather than discrete messages. Write as many bytes as possible. */ xShouldWrite = pdTRUE; xDataLengthBytes = configMIN( xDataLengthBytes, xSpace ); } else if( xSpace >= xRequiredSpace ) { /* This is a message buffer, as opposed to a stream buffer, and there is enough space to write both the message length and the message itself into the buffer. Start by writing the length of the data, the data itself will be written later in this function. */ xShouldWrite = pdTRUE; ( void ) prvWriteBytesToBuffer( pxStreamBuffer, ( const uint8_t * ) &( xDataLengthBytes ), sbBYTES_TO_STORE_MESSAGE_LENGTH ); } else { /* There is space available, but not enough space. */ xShouldWrite = pdFALSE; } if( xShouldWrite != pdFALSE ) { /* Writes the data itself. */ xReturn = prvWriteBytesToBuffer( pxStreamBuffer, ( const uint8_t * ) pvTxData, xDataLengthBytes ); /*lint !e9079 Storage buffer is implemented as uint8_t for ease of sizing, alighment and access. 
*/ } else { xReturn = 0; } return xReturn; } /*-----------------------------------------------------------*/ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, void *pvRxData, size_t xBufferLengthBytes, TickType_t xTicksToWait ) { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; size_t xReceivedLength = 0, xBytesAvailable, xBytesToStoreMessageLength; configASSERT( pvRxData ); configASSERT( pxStreamBuffer ); /* This receive function is used by both message buffers, which store discrete messages, and stream buffers, which store a continuous stream of bytes. Discrete messages include an additional sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the message. */ if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) { xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH; } else { xBytesToStoreMessageLength = 0; } if( xTicksToWait != ( TickType_t ) 0 ) { /* Checking if there is data and clearing the notification state must be performed atomically. */ taskENTER_CRITICAL(); { xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); /* If this function was invoked by a message buffer read then xBytesToStoreMessageLength holds the number of bytes used to hold the length of the next discrete message. If this function was invoked by a stream buffer read then xBytesToStoreMessageLength will be 0. */ if( xBytesAvailable <= xBytesToStoreMessageLength ) { /* Clear notification state as going to wait for data. */ ( void ) xTaskNotifyStateClear( NULL ); /* Should only be one reader. */ configASSERT( pxStreamBuffer->xTaskWaitingToReceive == NULL ); pxStreamBuffer->xTaskWaitingToReceive = xTaskGetCurrentTaskHandle(); } else { mtCOVERAGE_TEST_MARKER(); } } taskEXIT_CRITICAL(); if( xBytesAvailable <= xBytesToStoreMessageLength ) { /* Wait for data to be available. */ traceBLOCKING_ON_STREAM_BUFFER_RECEIVE( xStreamBuffer ); ( void ) xTaskNotifyWait( ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait ); pxStreamBuffer->xTaskWaitingToReceive = NULL; /* Recheck the data available after blocking. */ xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); } else { mtCOVERAGE_TEST_MARKER(); } } else { xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); } /* Whether receiving a discrete message (where xBytesToStoreMessageLength holds the number of bytes used to store the message length) or a stream of bytes (where xBytesToStoreMessageLength is zero), the number of bytes available must be greater than xBytesToStoreMessageLength to be able to read bytes from the buffer. */ if( xBytesAvailable > xBytesToStoreMessageLength ) { xReceivedLength = prvReadMessageFromBuffer( pxStreamBuffer, pvRxData, xBufferLengthBytes, xBytesAvailable, xBytesToStoreMessageLength ); /* Was a task waiting for space in the buffer? */ if( xReceivedLength != ( size_t ) 0 ) { traceSTREAM_BUFFER_RECEIVE( xStreamBuffer, xReceivedLength ); sbRECEIVE_COMPLETED( pxStreamBuffer ); } else { mtCOVERAGE_TEST_MARKER(); } } else { traceSTREAM_BUFFER_RECEIVE_FAILED( xStreamBuffer ); mtCOVERAGE_TEST_MARKER(); } return xReceivedLength; } /*-----------------------------------------------------------*/ size_t xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; size_t xReturn, xBytesAvailable, xOriginalTail; configMESSAGE_BUFFER_LENGTH_TYPE xTempReturn; configASSERT( pxStreamBuffer ); /* Ensure the stream buffer is being used as a message buffer. 
*/ if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) { xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); if( xBytesAvailable > sbBYTES_TO_STORE_MESSAGE_LENGTH ) { /* The number of bytes available is greater than the number of bytes required to hold the length of the next message, so another message is available. Return its length without removing the length bytes from the buffer. A copy of the tail is stored so the buffer can be returned to its prior state as the message is not actually being removed from the buffer. */ xOriginalTail = pxStreamBuffer->xTail; ( void ) prvReadBytesFromBuffer( pxStreamBuffer, ( uint8_t * ) &xTempReturn, sbBYTES_TO_STORE_MESSAGE_LENGTH, xBytesAvailable ); xReturn = ( size_t ) xTempReturn; pxStreamBuffer->xTail = xOriginalTail; } else { /* The minimum amount of bytes in a message buffer is ( sbBYTES_TO_STORE_MESSAGE_LENGTH + 1 ), so if xBytesAvailable is less than sbBYTES_TO_STORE_MESSAGE_LENGTH the only other valid value is 0. */ configASSERT( xBytesAvailable == 0 ); xReturn = 0; } } else { xReturn = 0; } return xReturn; } /*-----------------------------------------------------------*/ size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer, void *pvRxData, size_t xBufferLengthBytes, BaseType_t * const pxHigherPriorityTaskWoken ) { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; size_t xReceivedLength = 0, xBytesAvailable, xBytesToStoreMessageLength; configASSERT( pvRxData ); configASSERT( pxStreamBuffer ); /* This receive function is used by both message buffers, which store discrete messages, and stream buffers, which store a continuous stream of bytes. Discrete messages include an additional sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the message. */ if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) { xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH; } else { xBytesToStoreMessageLength = 0; } xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); /* Whether receiving a discrete message (where xBytesToStoreMessageLength holds the number of bytes used to store the message length) or a stream of bytes (where xBytesToStoreMessageLength is zero), the number of bytes available must be greater than xBytesToStoreMessageLength to be able to read bytes from the buffer. */ if( xBytesAvailable > xBytesToStoreMessageLength ) { xReceivedLength = prvReadMessageFromBuffer( pxStreamBuffer, pvRxData, xBufferLengthBytes, xBytesAvailable, xBytesToStoreMessageLength ); /* Was a task waiting for space in the buffer? */ if( xReceivedLength != ( size_t ) 0 ) { sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); } else { mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); } traceSTREAM_BUFFER_RECEIVE_FROM_ISR( xStreamBuffer, xReceivedLength ); return xReceivedLength; } /*-----------------------------------------------------------*/ static size_t prvReadMessageFromBuffer( StreamBuffer_t *pxStreamBuffer, void *pvRxData, size_t xBufferLengthBytes, size_t xBytesAvailable, size_t xBytesToStoreMessageLength ) { size_t xOriginalTail, xReceivedLength, xNextMessageLength; configMESSAGE_BUFFER_LENGTH_TYPE xTempNextMessageLength; if( xBytesToStoreMessageLength != ( size_t ) 0 ) { /* A discrete message is being received. First receive the length of the message. A copy of the tail is stored so the buffer can be returned to its prior state if the length of the message is too large for the provided buffer. 
*/ xOriginalTail = pxStreamBuffer->xTail; ( void ) prvReadBytesFromBuffer( pxStreamBuffer, ( uint8_t * ) &xTempNextMessageLength, xBytesToStoreMessageLength, xBytesAvailable ); xNextMessageLength = ( size_t ) xTempNextMessageLength; /* Reduce the number of bytes available by the number of bytes just read out. */ xBytesAvailable -= xBytesToStoreMessageLength; /* Check there is enough space in the buffer provided by the user. */ if( xNextMessageLength > xBufferLengthBytes ) { /* The user has provided insufficient space to read the message so return the buffer to its previous state (so the length of the message is in the buffer again). */ pxStreamBuffer->xTail = xOriginalTail; xNextMessageLength = 0; } else { mtCOVERAGE_TEST_MARKER(); } } else { /* A stream of bytes is being received (as opposed to a discrete message), so read as many bytes as possible. */ xNextMessageLength = xBufferLengthBytes; } /* Read the actual data. */ xReceivedLength = prvReadBytesFromBuffer( pxStreamBuffer, ( uint8_t * ) pvRxData, xNextMessageLength, xBytesAvailable ); /*lint !e9079 Data storage area is implemented as uint8_t array for ease of sizing, indexing and alignment. */ return xReceivedLength; } /*-----------------------------------------------------------*/ BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) { const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; BaseType_t xReturn; size_t xTail; configASSERT( pxStreamBuffer ); /* True if no bytes are available. */ xTail = pxStreamBuffer->xTail; if( pxStreamBuffer->xHead == xTail ) { xReturn = pdTRUE; } else { xReturn = pdFALSE; } return xReturn; } /*-----------------------------------------------------------*/ BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) { BaseType_t xReturn; size_t xBytesToStoreMessageLength; const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; configASSERT( pxStreamBuffer ); /* This generic version of the receive function is used by both message buffers, which store discrete messages, and stream buffers, which store a continuous stream of bytes. Discrete messages include an additional sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the message. */ if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) { xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH; } else { xBytesToStoreMessageLength = 0; } /* True if the available space equals zero. 
*/ if( xStreamBufferSpacesAvailable( xStreamBuffer ) <= xBytesToStoreMessageLength ) { xReturn = pdTRUE; } else { xReturn = pdFALSE; } return xReturn; } /*-----------------------------------------------------------*/ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; BaseType_t xReturn; UBaseType_t uxSavedInterruptStatus; configASSERT( pxStreamBuffer ); uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); { if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) { ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToReceive, ( uint32_t ) 0, eNoAction, pxHigherPriorityTaskWoken ); ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; xReturn = pdTRUE; } else { xReturn = pdFALSE; } } portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } /*-----------------------------------------------------------*/ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; BaseType_t xReturn; UBaseType_t uxSavedInterruptStatus; configASSERT( pxStreamBuffer ); uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); { if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) { ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToSend, ( uint32_t ) 0, eNoAction, pxHigherPriorityTaskWoken ); ( pxStreamBuffer )->xTaskWaitingToSend = NULL; xReturn = pdTRUE; } else { xReturn = pdFALSE; } } portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } /*-----------------------------------------------------------*/ static size_t prvWriteBytesToBuffer( StreamBuffer_t * const pxStreamBuffer, const uint8_t *pucData, size_t xCount ) { size_t xNextHead, xFirstLength; configASSERT( xCount > ( size_t ) 0 ); xNextHead = pxStreamBuffer->xHead; /* Calculate the number of bytes that can be added in the first write - which may be less than the total number of bytes that need to be added if the buffer will wrap back to the beginning. */ xFirstLength = configMIN( pxStreamBuffer->xLength - xNextHead, xCount ); /* Write as many bytes as can be written in the first write. */ configASSERT( ( xNextHead + xFirstLength ) <= pxStreamBuffer->xLength ); ( void ) memcpy( ( void* ) ( &( pxStreamBuffer->pucBuffer[ xNextHead ] ) ), ( const void * ) pucData, xFirstLength ); /*lint !e9087 memcpy() requires void *. */ /* If the number of bytes written was less than the number that could be written in the first write... */ if( xCount > xFirstLength ) { /* ...then write the remaining bytes to the start of the buffer. */ configASSERT( ( xCount - xFirstLength ) <= pxStreamBuffer->xLength ); ( void ) memcpy( ( void * ) pxStreamBuffer->pucBuffer, ( const void * ) &( pucData[ xFirstLength ] ), xCount - xFirstLength ); /*lint !e9087 memcpy() requires void *. */ } else { mtCOVERAGE_TEST_MARKER(); } xNextHead += xCount; if( xNextHead >= pxStreamBuffer->xLength ) { xNextHead -= pxStreamBuffer->xLength; } else { mtCOVERAGE_TEST_MARKER(); } pxStreamBuffer->xHead = xNextHead; return xCount; } /*-----------------------------------------------------------*/ static size_t prvReadBytesFromBuffer( StreamBuffer_t *pxStreamBuffer, uint8_t *pucData, size_t xMaxCount, size_t xBytesAvailable ) { size_t xCount, xFirstLength, xNextTail; /* Use the minimum of the wanted bytes and the available bytes. 
*/ xCount = configMIN( xBytesAvailable, xMaxCount ); if( xCount > ( size_t ) 0 ) { xNextTail = pxStreamBuffer->xTail; /* Calculate the number of bytes that can be read - which may be less than the number wanted if the data wraps around to the start of the buffer. */ xFirstLength = configMIN( pxStreamBuffer->xLength - xNextTail, xCount ); /* Obtain the number of bytes it is possible to obtain in the first read. Asserts check bounds of read and write. */ configASSERT( xFirstLength <= xMaxCount ); configASSERT( ( xNextTail + xFirstLength ) <= pxStreamBuffer->xLength ); ( void ) memcpy( ( void * ) pucData, ( const void * ) &( pxStreamBuffer->pucBuffer[ xNextTail ] ), xFirstLength ); /*lint !e9087 memcpy() requires void *. */ /* If the total number of wanted bytes is greater than the number that could be read in the first read... */ if( xCount > xFirstLength ) { /*...then read the remaining bytes from the start of the buffer. */ configASSERT( xCount <= xMaxCount ); ( void ) memcpy( ( void * ) &( pucData[ xFirstLength ] ), ( void * ) ( pxStreamBuffer->pucBuffer ), xCount - xFirstLength ); /*lint !e9087 memcpy() requires void *. */ } else { mtCOVERAGE_TEST_MARKER(); } /* Move the tail pointer to effectively remove the data read from the buffer. */ xNextTail += xCount; if( xNextTail >= pxStreamBuffer->xLength ) { xNextTail -= pxStreamBuffer->xLength; } pxStreamBuffer->xTail = xNextTail; } else { mtCOVERAGE_TEST_MARKER(); } return xCount; } /*-----------------------------------------------------------*/ static size_t prvBytesInBuffer( const StreamBuffer_t * const pxStreamBuffer ) { /* Returns the distance between xTail and xHead. */ size_t xCount; xCount = pxStreamBuffer->xLength + pxStreamBuffer->xHead; xCount -= pxStreamBuffer->xTail; if ( xCount >= pxStreamBuffer->xLength ) { xCount -= pxStreamBuffer->xLength; } else { mtCOVERAGE_TEST_MARKER(); } return xCount; } /*-----------------------------------------------------------*/ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, uint8_t * const pucBuffer, size_t xBufferSizeBytes, size_t xTriggerLevelBytes, uint8_t ucFlags ) { /* Assert here is deliberately writing to the entire buffer to ensure it can be written to without generating exceptions, and is setting the buffer to a known value to assist in development/debugging. */ #if( configASSERT_DEFINED == 1 ) { /* The value written just has to be identifiable when looking at the memory. Don't use 0xA5 as that is the stack fill value and could result in confusion as to what is actually being observed. */ const BaseType_t xWriteValue = 0x55; configASSERT( memset( pucBuffer, ( int ) xWriteValue, xBufferSizeBytes ) == pucBuffer ); } /*lint !e529 !e438 xWriteValue is only used if configASSERT() is defined. */ #endif ( void ) memset( ( void * ) pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); /*lint !e9087 memset() requires void *. 
*/ pxStreamBuffer->pucBuffer = pucBuffer; pxStreamBuffer->xLength = xBufferSizeBytes; pxStreamBuffer->xTriggerLevelBytes = xTriggerLevelBytes; pxStreamBuffer->ucFlags = ucFlags; } #if ( configUSE_TRACE_FACILITY == 1 ) UBaseType_t uxStreamBufferGetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer ) { return xStreamBuffer->uxStreamBufferNumber; } #endif /* configUSE_TRACE_FACILITY */ /*-----------------------------------------------------------*/ #if ( configUSE_TRACE_FACILITY == 1 ) void vStreamBufferSetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer, UBaseType_t uxStreamBufferNumber ) { xStreamBuffer->uxStreamBufferNumber = uxStreamBufferNumber; } #endif /* configUSE_TRACE_FACILITY */ /*-----------------------------------------------------------*/ #if ( configUSE_TRACE_FACILITY == 1 ) uint8_t ucStreamBufferGetStreamBufferType( StreamBufferHandle_t xStreamBuffer ) { return ( xStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ); } #endif /* configUSE_TRACE_FACILITY */ /*-----------------------------------------------------------*/
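/* Editor's note: the trigger-level and blocking behaviour documented in the comments
 * above is easiest to see from the calling side. The sketch below is a minimal usage
 * example, assuming a configured FreeRTOS port with dynamic allocation enabled; the
 * task and buffer names are hypothetical and not part of the kernel source. */
#include "Arduino_FreeRTOS.h"
#include "task.h"
#include "stream_buffer.h"

static StreamBufferHandle_t xExampleBuffer;

static void vProducerTask( void *pvParameters )
{
  const char *pcMsg = "hello";
  ( void ) pvParameters;
  for( ;; )
  {
    /* Block up to 100 ms for space if the buffer is full. */
    ( void ) xStreamBufferSend( xExampleBuffer, pcMsg, 5, pdMS_TO_TICKS( 100 ) );
    vTaskDelay( pdMS_TO_TICKS( 500 ) );
  }
}

static void vConsumerTask( void *pvParameters )
{
  char ucRx[ 20 ];
  size_t xReceived;
  ( void ) pvParameters;
  for( ;; )
  {
    /* Unblocks once at least the trigger level (10 bytes) is present,
     * or whatever is available when the timeout expires. */
    xReceived = xStreamBufferReceive( xExampleBuffer, ucRx, sizeof( ucRx ), portMAX_DELAY );
    ( void ) xReceived;
  }
}

void vStartStreamBufferExample( void )
{
  /* 100 data bytes, reader unblocked only after 10 bytes have arrived. */
  xExampleBuffer = xStreamBufferCreate( 100, 10 );
  if( xExampleBuffer != NULL )
  {
    xTaskCreate( vProducerTask, "Prod", configMINIMAL_STACK_SIZE, NULL, 1, NULL );
    xTaskCreate( vConsumerTask, "Cons", configMINIMAL_STACK_SIZE, NULL, 2, NULL );
  }
}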
168013.c
//------------------------------------------------------------------------------ // GB_AxB__bor_bxor_uint32.c: matrix multiply for a single semiring //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated1/ or Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB_dev.h" #ifndef GBCOMPACT #include "GB.h" #include "GB_control.h" #include "GB_bracket.h" #include "GB_sort.h" #include "GB_atomics.h" #include "GB_AxB_saxpy.h" #if 1 #include "GB_AxB__include2.h" #else #include "GB_AxB__include1.h" #endif #include "GB_unused.h" #include "GB_bitmap_assign_methods.h" #include "GB_ek_slice_search.c" // This C=A*B semiring is defined by the following types and operators: // A'*B (dot2): GB (_Adot2B__bor_bxor_uint32) // A'*B (dot3): GB (_Adot3B__bor_bxor_uint32) // C+=A'*B (dot4): GB (_Adot4B__bor_bxor_uint32) // A*B (saxpy bitmap): GB (_AsaxbitB__bor_bxor_uint32) // A*B (saxpy3): GB (_Asaxpy3B__bor_bxor_uint32) // A*B (saxpy4): GB (_Asaxpy4B__bor_bxor_uint32) // no mask: GB (_Asaxpy3B_noM__bor_bxor_uint32) // mask M: GB (_Asaxpy3B_M__bor_bxor_uint32) // mask !M: GB (_Asaxpy3B_notM__bor_bxor_uint32) // C type: uint32_t // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 0 // Multiply: z = (aik ^ bkj) // Add: cij |= z // 'any' monoid? 0 // atomic? 1 // OpenMP atomic? 1 // MultAdd: uint32_t x_op_y = (aik ^ bkj) ; cij |= x_op_y // Identity: 0 // Terminal: if (cij == 0xFFFFFFFF) { break ; } #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t #define GB_ASIZE \ sizeof (uint32_t) #define GB_BSIZE \ sizeof (uint32_t) #define GB_CSIZE \ sizeof (uint32_t) // true for int64, uint64, float, double, float complex, and double complex #define GB_CTYPE_IGNORE_OVERFLOW \ 0 // aik = Ax [pA] #define GB_GETA(aik,Ax,pA,A_iso) \ uint32_t aik = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bkj = Bx [pB] #define GB_GETB(bkj,Bx,pB,B_iso) \ uint32_t bkj = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // Gx [pG] = Ax [pA] #define GB_LOADA(Gx,pG,Ax,pA,A_iso) \ Gx [pG] = GBX (Ax, pA, A_iso) // Gx [pG] = Bx [pB] #define GB_LOADB(Gx,pG,Bx,pB,B_iso) \ Gx [pG] = GBX (Bx, pB, B_iso) #define GB_CX(p) \ Cx [p] // multiply operator #define GB_MULT(z, x, y, i, k, j) \ z = (x ^ y) // cast from a real scalar (or 2, if C is complex) to the type of C #define GB_CTYPE_CAST(x,y) \ ((uint32_t) x) // cast from a real scalar (or 2, if A is complex) to the type of A #define GB_ATYPE_CAST(x,y) \ ((uint32_t) x) // multiply-add #define GB_MULTADD(z, x, y, i, k, j) \ uint32_t x_op_y = (x ^ y) ; z |= x_op_y // monoid identity value #define GB_IDENTITY \ 0 // 1 if the identity value can be assigned via memset, with all bytes the same #define GB_HAS_IDENTITY_BYTE \ 1 // identity byte, for memset #define GB_IDENTITY_BYTE \ 0 // break if cij reaches the terminal value (dot product only) #define GB_DOT_TERMINAL(cij) \ if (cij == 0xFFFFFFFF) { break ; } // simd pragma for dot-product loop vectorization #define GB_PRAGMA_SIMD_DOT(cij) \ ; // simd pragma for other loop vectorization #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // 1 for the PLUS_PAIR_(real) semirings, not for the complex case #define GB_IS_PLUS_PAIR_REAL_SEMIRING \ 0 
// declare the cij scalar (initialize cij to zero for PLUS_PAIR) #define GB_CIJ_DECLARE(cij) \ uint32_t cij // cij = Cx [pC] for dot4 method only #define GB_GET4C(cij,p) \ cij = (C_in_iso) ? cinput : Cx [p] // Cx [pC] = cij #define GB_PUTC(cij,p) \ Cx [p] = cij // Cx [p] = t #define GB_CIJ_WRITE(p,t) \ Cx [p] = t // C(i,j) += t #define GB_CIJ_UPDATE(p,t) \ Cx [p] |= t // x + y #define GB_ADD_FUNCTION(x,y) \ (x | y) // bit pattern for bool, 8-bit, 16-bit, and 32-bit integers #define GB_CTYPE_BITS \ 0xffffffffL // 1 if monoid update can skipped entirely (the ANY monoid) #define GB_IS_ANY_MONOID \ 0 // 1 if monoid update is EQ #define GB_IS_EQ_MONOID \ 0 // 1 if monoid update can be done atomically, 0 otherwise #define GB_HAS_ATOMIC \ 1 // 1 if monoid update can be done with an OpenMP atomic update, 0 otherwise #if GB_MICROSOFT #define GB_HAS_OMP_ATOMIC \ 0 #else #define GB_HAS_OMP_ATOMIC \ 1 #endif // 1 for the ANY_PAIR_ISO semiring #define GB_IS_ANY_PAIR_SEMIRING \ 0 // 1 if PAIR is the multiply operator #define GB_IS_PAIR_MULTIPLIER \ 0 // 1 if monoid is PLUS_FC32 #define GB_IS_PLUS_FC32_MONOID \ 0 // 1 if monoid is PLUS_FC64 #define GB_IS_PLUS_FC64_MONOID \ 0 // 1 if monoid is ANY_FC32 #define GB_IS_ANY_FC32_MONOID \ 0 // 1 if monoid is ANY_FC64 #define GB_IS_ANY_FC64_MONOID \ 0 // 1 if monoid is MIN for signed or unsigned integers #define GB_IS_IMIN_MONOID \ 0 // 1 if monoid is MAX for signed or unsigned integers #define GB_IS_IMAX_MONOID \ 0 // 1 if monoid is MIN for float or double #define GB_IS_FMIN_MONOID \ 0 // 1 if monoid is MAX for float or double #define GB_IS_FMAX_MONOID \ 0 // 1 for the FIRSTI or FIRSTI1 multiply operator #define GB_IS_FIRSTI_MULTIPLIER \ 0 // 1 for the FIRSTJ or FIRSTJ1 multiply operator #define GB_IS_FIRSTJ_MULTIPLIER \ 0 // 1 for the SECONDJ or SECONDJ1 multiply operator #define GB_IS_SECONDJ_MULTIPLIER \ 0 // atomic compare-exchange #define GB_ATOMIC_COMPARE_EXCHANGE(target, expected, desired) \ GB_ATOMIC_COMPARE_EXCHANGE_32 (target, expected, desired) // Hx [i] = t #define GB_HX_WRITE(i,t) \ Hx [i] = t // Cx [p] = Hx [i] #define GB_CIJ_GATHER(p,i) \ Cx [p] = Hx [i] // Cx [p] += Hx [i] #define GB_CIJ_GATHER_UPDATE(p,i) \ Cx [p] |= Hx [i] // Hx [i] += t #define GB_HX_UPDATE(i,t) \ Hx [i] |= t // memcpy (&(Cx [p]), &(Hx [i]), len) #define GB_CIJ_MEMCPY(p,i,len) \ memcpy (Cx +(p), Hx +(i), (len) * sizeof(uint32_t)); // disable this semiring and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BOR || GxB_NO_BXOR || GxB_NO_UINT32 || GxB_NO_BOR_UINT32 || GxB_NO_BXOR_UINT32 || GxB_NO_BOR_BXOR_UINT32) //------------------------------------------------------------------------------ // GB_Adot2B: C=A'*B, C<M>=A'*B, or C<!M>=A'*B: dot product method, C is bitmap //------------------------------------------------------------------------------ // if A_not_transposed is true, then C=A*B is computed where A is bitmap or full GrB_Info GB (_Adot2B__bor_bxor_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_comp, const bool Mask_struct, const bool A_not_transposed, const GrB_Matrix A, int64_t *restrict A_slice, const GrB_Matrix B, int64_t *restrict B_slice, int nthreads, int naslice, int nbslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_AxB_dot2_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // GB_Adot3B: C<M>=A'*B: masked dot product, C is sparse or hyper //------------------------------------------------------------------------------ GrB_Info GB 
(_Adot3B__bor_bxor_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const GB_task_struct *restrict TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_AxB_dot3_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // GB_Adot4B: C+=A'*B: dense dot product (not used for ANY_PAIR_ISO) //------------------------------------------------------------------------------ #if 1 GrB_Info GB (_Adot4B__bor_bxor_uint32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict A_slice, int naslice, const GrB_Matrix B, int64_t *restrict B_slice, int nbslice, const int nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_AxB_dot4_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // GB_AsaxbitB: C=A*B, C<M>=A*B, C<!M>=A*B: saxpy method, C is bitmap/full //------------------------------------------------------------------------------ #include "GB_AxB_saxpy3_template.h" GrB_Info GB (_AsaxbitB__bor_bxor_uint32) ( GrB_Matrix C, // bitmap or full const GrB_Matrix M, const bool Mask_comp, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_AxB_saxpy_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // GB_Asaxpy4B: C += A*B when C is full //------------------------------------------------------------------------------ #if 1 GrB_Info GB (_Asaxpy4B__bor_bxor_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int ntasks, const int nthreads, const int nfine_tasks_per_vector, const bool use_coarse_tasks, const bool use_atomics, const int64_t *A_slice, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_AxB_saxpy4_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // GB_Asaxpy3B: C=A*B, C<M>=A*B, C<!M>=A*B: saxpy method (Gustavson + Hash) //------------------------------------------------------------------------------ GrB_Info GB (_Asaxpy3B__bor_bxor_uint32) ( GrB_Matrix C, // C<any M>=A*B, C sparse or hypersparse const GrB_Matrix M, const bool Mask_comp, const bool Mask_struct, const bool M_in_place, const GrB_Matrix A, const GrB_Matrix B, GB_saxpy3task_struct *restrict SaxpyTasks, const int ntasks, const int nfine, const int nthreads, const int do_sort, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else ASSERT (GB_IS_SPARSE (C) || GB_IS_HYPERSPARSE (C)) ; if (M == NULL) { // C = A*B, no mask return (GB (_Asaxpy3B_noM__bor_bxor_uint32) (C, A, B, SaxpyTasks, ntasks, nfine, nthreads, do_sort, Context)) ; } else if (!Mask_comp) { // C<M> = A*B return (GB (_Asaxpy3B_M__bor_bxor_uint32) (C, M, Mask_struct, M_in_place, A, B, SaxpyTasks, ntasks, nfine, nthreads, do_sort, Context)) ; } else { // C<!M> = A*B return (GB (_Asaxpy3B_notM__bor_bxor_uint32) (C, M, Mask_struct, M_in_place, A, B, SaxpyTasks, ntasks, nfine, nthreads, do_sort, Context)) ; } #endif } //------------------------------------------------------------------------------ // GB_Asaxpy3B_M: C<M>=A*Bi: saxpy method (Gustavson + Hash) //------------------------------------------------------------------------------ #if ( !GB_DISABLE ) GrB_Info GB 
(_Asaxpy3B_M__bor_bxor_uint32) ( GrB_Matrix C, // C<M>=A*B, C sparse or hypersparse const GrB_Matrix M, const bool Mask_struct, const bool M_in_place, const GrB_Matrix A, const GrB_Matrix B, GB_saxpy3task_struct *restrict SaxpyTasks, const int ntasks, const int nfine, const int nthreads, const int do_sort, GB_Context Context ) { if (GB_IS_SPARSE (A) && GB_IS_SPARSE (B)) { // both A and B are sparse #define GB_META16 #define GB_NO_MASK 0 #define GB_MASK_COMP 0 #define GB_A_IS_SPARSE 1 #define GB_A_IS_HYPER 0 #define GB_A_IS_BITMAP 0 #define GB_A_IS_FULL 0 #define GB_B_IS_SPARSE 1 #define GB_B_IS_HYPER 0 #define GB_B_IS_BITMAP 0 #define GB_B_IS_FULL 0 #include "GB_meta16_definitions.h" #include "GB_AxB_saxpy3_template.c" } else { // general case #undef GB_META16 #define GB_NO_MASK 0 #define GB_MASK_COMP 0 #include "GB_meta16_definitions.h" #include "GB_AxB_saxpy3_template.c" } return (GrB_SUCCESS) ; } #endif //------------------------------------------------------------------------------ //GB_Asaxpy3B_noM: C=A*B: saxpy method (Gustavson + Hash) //------------------------------------------------------------------------------ #if ( !GB_DISABLE ) GrB_Info GB (_Asaxpy3B_noM__bor_bxor_uint32) ( GrB_Matrix C, // C=A*B, C sparse or hypersparse const GrB_Matrix A, const GrB_Matrix B, GB_saxpy3task_struct *restrict SaxpyTasks, const int ntasks, const int nfine, const int nthreads, const int do_sort, GB_Context Context ) { if (GB_IS_SPARSE (A) && GB_IS_SPARSE (B)) { // both A and B are sparse #define GB_META16 #define GB_NO_MASK 1 #define GB_MASK_COMP 0 #define GB_A_IS_SPARSE 1 #define GB_A_IS_HYPER 0 #define GB_A_IS_BITMAP 0 #define GB_A_IS_FULL 0 #define GB_B_IS_SPARSE 1 #define GB_B_IS_HYPER 0 #define GB_B_IS_BITMAP 0 #define GB_B_IS_FULL 0 #include "GB_meta16_definitions.h" #include "GB_AxB_saxpy3_template.c" } else { // general case #undef GB_META16 #define GB_NO_MASK 1 #define GB_MASK_COMP 0 #include "GB_meta16_definitions.h" #include "GB_AxB_saxpy3_template.c" } return (GrB_SUCCESS) ; } #endif //------------------------------------------------------------------------------ //GB_Asaxpy3B_notM: C<!M>=A*B: saxpy method (Gustavson + Hash) //------------------------------------------------------------------------------ #if ( !GB_DISABLE ) GrB_Info GB (_Asaxpy3B_notM__bor_bxor_uint32) ( GrB_Matrix C, // C<!M>=A*B, C sparse or hypersparse const GrB_Matrix M, const bool Mask_struct, const bool M_in_place, const GrB_Matrix A, const GrB_Matrix B, GB_saxpy3task_struct *restrict SaxpyTasks, const int ntasks, const int nfine, const int nthreads, const int do_sort, GB_Context Context ) { if (GB_IS_SPARSE (A) && GB_IS_SPARSE (B)) { // both A and B are sparse #define GB_META16 #define GB_NO_MASK 0 #define GB_MASK_COMP 1 #define GB_A_IS_SPARSE 1 #define GB_A_IS_HYPER 0 #define GB_A_IS_BITMAP 0 #define GB_A_IS_FULL 0 #define GB_B_IS_SPARSE 1 #define GB_B_IS_HYPER 0 #define GB_B_IS_BITMAP 0 #define GB_B_IS_FULL 0 #include "GB_meta16_definitions.h" #include "GB_AxB_saxpy3_template.c" } else { // general case #undef GB_META16 #define GB_NO_MASK 0 #define GB_MASK_COMP 1 #include "GB_meta16_definitions.h" #include "GB_AxB_saxpy3_template.c" } return (GrB_SUCCESS) ; } #endif #endif
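//------------------------------------------------------------------------------
// usage sketch: reaching these kernels through GrB_mxm
//------------------------------------------------------------------------------

// Illustrative sketch only: a user-level GrB_mxm call with the built-in
// bitwise semiring (assumed here to be named GxB_BOR_BXOR_UINT32, following
// the GxB_<add>_<mult>_<type> naming pattern) dispatches to one of the
// GB_Adot* / GB_Asaxpy* kernels above, depending on the sparsity formats of
// C, M, A, and B. Error handling is omitted, and the block is guarded out so
// it does not affect the build.

#if 0
#include "GraphBLAS.h"

static GrB_Info bor_bxor_mxm (GrB_Matrix *C, GrB_Matrix A, GrB_Matrix B)
{
    GrB_Index m, n ;
    GrB_Matrix_nrows (&m, A) ;              // C gets A's row count
    GrB_Matrix_ncols (&n, B) ;              // ... and B's column count
    GrB_Matrix_new (C, GrB_UINT32, m, n) ;
    // C = A*B with multiply = bitwise XOR and additive monoid = bitwise OR
    return (GrB_mxm (*C, NULL, NULL, GxB_BOR_BXOR_UINT32, A, B, NULL)) ;
}
#endif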
249876.c
/** * @file * lwIP network interface abstraction * * @defgroup netif Network interface (NETIF) * @ingroup callbackstyle_api * * @defgroup netif_ip4 IPv4 address handling * @ingroup netif * * @defgroup netif_ip6 IPv6 address handling * @ingroup netif * * @defgroup netif_cd Client data handling * Store data (void*) on a netif for application usage. * @see @ref LWIP_NUM_NETIF_CLIENT_DATA * @ingroup netif */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. 
* * Author: Adam Dunkels <[email protected]> */ #include "lwip/opt.h" #include <string.h> #include "lwip/def.h" #include "lwip/ip_addr.h" #include "lwip/ip6_addr.h" #include "lwip/netif.h" #include "lwip/priv/tcp_priv.h" #include "lwip/udp.h" #include "lwip/raw.h" #include "lwip/snmp.h" #include "lwip/igmp.h" #include "lwip/etharp.h" #include "lwip/stats.h" #include "lwip/sys.h" #include "lwip/ip.h" #if ENABLE_LOOPBACK #if LWIP_NETIF_LOOPBACK_MULTITHREADING #include "lwip/tcpip.h" #endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ #endif /* ENABLE_LOOPBACK */ #include "netif/ethernet.h" #if LWIP_AUTOIP #include "lwip/autoip.h" #endif /* LWIP_AUTOIP */ #if LWIP_DHCP #include "lwip/dhcp.h" #endif /* LWIP_DHCP */ #if LWIP_IPV6_DHCP6 #include "lwip/dhcp6.h" #endif /* LWIP_IPV6_DHCP6 */ #if LWIP_IPV6_MLD #include "lwip/mld6.h" #endif /* LWIP_IPV6_MLD */ #if LWIP_IPV6 #include "lwip/nd6.h" #endif #if LWIP_NETIF_STATUS_CALLBACK #define NETIF_STATUS_CALLBACK(n) do{ if (n->status_callback) { (n->status_callback)(n); }}while(0) #else #define NETIF_STATUS_CALLBACK(n) #endif /* LWIP_NETIF_STATUS_CALLBACK */ #if LWIP_NETIF_LINK_CALLBACK #define NETIF_LINK_CALLBACK(n) do{ if (n->link_callback) { (n->link_callback)(n); }}while(0) #else #define NETIF_LINK_CALLBACK(n) #endif /* LWIP_NETIF_LINK_CALLBACK */ struct netif *netif_list; struct netif *netif_default; static u8_t netif_num; #if LWIP_NUM_NETIF_CLIENT_DATA > 0 static u8_t netif_client_id; #endif #define NETIF_REPORT_TYPE_IPV4 0x01 #define NETIF_REPORT_TYPE_IPV6 0x02 static void netif_issue_reports(struct netif* netif, u8_t report_type); #if LWIP_IPV6 static err_t netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr); #endif /* LWIP_IPV6 */ #if LWIP_HAVE_LOOPIF #if LWIP_IPV4 static err_t netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t* addr); #endif #if LWIP_IPV6 static err_t netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t* addr); #endif static struct netif loop_netif; /** * Initialize a lwip network interface structure for a loopback interface * * @param netif the lwip network interface structure for this loopif * @return ERR_OK if the loopif is initialized * ERR_MEM if private data couldn't be allocated */ static err_t netif_loopif_init(struct netif *netif) { /* initialize the snmp variables and counters inside the struct netif * ifSpeed: no assumption can be made! 
*/ MIB2_INIT_NETIF(netif, snmp_ifType_softwareLoopback, 0); netif->name[0] = 'l'; netif->name[1] = 'o'; #if LWIP_IPV4 netif->output = netif_loop_output_ipv4; #endif #if LWIP_IPV6 netif->output_ip6 = netif_loop_output_ipv6; #endif #if LWIP_LOOPIF_MULTICAST netif->flags |= NETIF_FLAG_IGMP; #endif return ERR_OK; } #endif /* LWIP_HAVE_LOOPIF */ void netif_init(void) { #if LWIP_HAVE_LOOPIF #if LWIP_IPV4 #define LOOPIF_ADDRINIT &loop_ipaddr, &loop_netmask, &loop_gw, ip4_addr_t loop_ipaddr, loop_netmask, loop_gw; IP4_ADDR(&loop_gw, 127,0,0,1); IP4_ADDR(&loop_ipaddr, 127,0,0,1); IP4_ADDR(&loop_netmask, 255,0,0,0); #else /* LWIP_IPV4 */ #define LOOPIF_ADDRINIT #endif /* LWIP_IPV4 */ #if NO_SYS netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, ip_input); #else /* NO_SYS */ netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, tcpip_input); #endif /* NO_SYS */ #if LWIP_IPV6 IP_ADDR6_HOST(loop_netif.ip6_addr, 0, 0, 0, 0x00000001UL); loop_netif.ip6_addr_state[0] = IP6_ADDR_VALID; #endif /* LWIP_IPV6 */ netif_set_link_up(&loop_netif); netif_set_up(&loop_netif); #endif /* LWIP_HAVE_LOOPIF */ } /** * @ingroup lwip_nosys * Forwards a received packet for input processing with * ethernet_input() or ip_input() depending on netif flags. * Don't call directly, pass to netif_add() and call * netif->input(). * Only works if the netif driver correctly sets * NETIF_FLAG_ETHARP and/or NETIF_FLAG_ETHERNET flag! */ err_t netif_input(struct pbuf *p, struct netif *inp) { #if LWIP_ETHERNET if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) { return ethernet_input(p, inp); } else #endif /* LWIP_ETHERNET */ return ip_input(p, inp); } /** * @ingroup netif * Add a network interface to the list of lwIP netifs. * * @param netif a pre-allocated netif structure * @param ipaddr IP address for the new netif * @param netmask network mask for the new netif * @param gw default gateway IP address for the new netif * @param state opaque data passed to the new netif * @param init callback function that initializes the interface * @param input callback function that is called to pass * ingress packets up in the protocol layer stack.\n * It is recommended to use a function that passes the input directly * to the stack (netif_input(), NO_SYS=1 mode) or via sending a * message to TCPIP thread (tcpip_input(), NO_SYS=0 mode).\n * These functions use netif flags NETIF_FLAG_ETHARP and NETIF_FLAG_ETHERNET * to decide whether to forward to ethernet_input() or ip_input(). * In other words, the functions only work when the netif * driver is implemented correctly!\n * Most members of struct netif should be be initialized by the * netif init function = netif driver (init parameter of this function).\n * IPv6: Don't forget to call netif_create_ip6_linklocal_address() after * setting the MAC address in struct netif.hwaddr * (IPv6 requires a link-local address). * * @return netif, or NULL if failed. 
*/ struct netif * netif_add(struct netif *netif, #if LWIP_IPV4 const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, #endif /* LWIP_IPV4 */ void *state, netif_init_fn init, netif_input_fn input) { #if LWIP_IPV6 s8_t i; #endif LWIP_ASSERT("No init function given", init != NULL); /* reset new interface configuration state */ #if LWIP_IPV4 ip_addr_set_zero_ip4(&netif->ip_addr); ip_addr_set_zero_ip4(&netif->netmask); ip_addr_set_zero_ip4(&netif->gw); #endif /* LWIP_IPV4 */ #if LWIP_IPV6 for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { ip_addr_set_zero_ip6(&netif->ip6_addr[i]); netif->ip6_addr_state[i] = IP6_ADDR_INVALID; } netif->output_ip6 = netif_null_output_ip6; #endif /* LWIP_IPV6 */ NETIF_SET_CHECKSUM_CTRL(netif, NETIF_CHECKSUM_ENABLE_ALL); netif->flags = 0; #ifdef netif_get_client_data memset(netif->client_data, 0, sizeof(netif->client_data)); #endif /* LWIP_NUM_NETIF_CLIENT_DATA */ #if LWIP_IPV6_AUTOCONFIG /* IPv6 address autoconfiguration not enabled by default */ netif->ip6_autoconfig_enabled = 0; #endif /* LWIP_IPV6_AUTOCONFIG */ #if LWIP_IPV6_SEND_ROUTER_SOLICIT netif->rs_count = LWIP_ND6_MAX_MULTICAST_SOLICIT; #endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ #if LWIP_NETIF_STATUS_CALLBACK netif->status_callback = NULL; #endif /* LWIP_NETIF_STATUS_CALLBACK */ #if LWIP_NETIF_LINK_CALLBACK netif->link_callback = NULL; #endif /* LWIP_NETIF_LINK_CALLBACK */ #if LWIP_IGMP netif->igmp_mac_filter = NULL; #endif /* LWIP_IGMP */ #if LWIP_IPV6 && LWIP_IPV6_MLD netif->mld_mac_filter = NULL; #endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ #if ENABLE_LOOPBACK netif->loop_first = NULL; netif->loop_last = NULL; #endif /* ENABLE_LOOPBACK */ /* remember netif specific state information data */ netif->state = state; netif->num = netif_num++; netif->input = input; NETIF_SET_HWADDRHINT(netif, NULL); #if ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS netif->loop_cnt_current = 0; #endif /* ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS */ #if LWIP_IPV4 netif_set_addr(netif, ipaddr, netmask, gw); #endif /* LWIP_IPV4 */ /* call user specified initialization function for netif */ if (init(netif) != ERR_OK) { return NULL; } /* add this netif to the list */ netif->next = netif_list; netif_list = netif; mib2_netif_added(netif); #if LWIP_IGMP /* start IGMP processing */ if (netif->flags & NETIF_FLAG_IGMP) { igmp_start(netif); } #endif /* LWIP_IGMP */ LWIP_DEBUGF(NETIF_DEBUG, ("netif: added interface %c%c IP", netif->name[0], netif->name[1])); #if LWIP_IPV4 LWIP_DEBUGF(NETIF_DEBUG, (" addr ")); ip4_addr_debug_print(NETIF_DEBUG, ipaddr); LWIP_DEBUGF(NETIF_DEBUG, (" netmask ")); ip4_addr_debug_print(NETIF_DEBUG, netmask); LWIP_DEBUGF(NETIF_DEBUG, (" gw ")); ip4_addr_debug_print(NETIF_DEBUG, gw); #endif /* LWIP_IPV4 */ LWIP_DEBUGF(NETIF_DEBUG, ("\n")); return netif; } #if LWIP_IPV4 /** * @ingroup netif_ip4 * Change IP address configuration for a network interface (including netmask * and default gateway). 
* * @param netif the network interface to change * @param ipaddr the new IP address * @param netmask the new netmask * @param gw the new default gateway */ void netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw) { if (ip4_addr_isany(ipaddr)) { /* when removing an address, we have to remove it *before* changing netmask/gw to ensure that tcp RST segment can be sent correctly */ netif_set_ipaddr(netif, ipaddr); netif_set_netmask(netif, netmask); netif_set_gw(netif, gw); } else { netif_set_netmask(netif, netmask); netif_set_gw(netif, gw); /* set ipaddr last to ensure netmask/gw have been set when status callback is called */ netif_set_ipaddr(netif, ipaddr); } } #endif /* LWIP_IPV4*/ /** * @ingroup netif * Remove a network interface from the list of lwIP netifs. * * @param netif the network interface to remove */ void netif_remove(struct netif *netif) { #if LWIP_IPV6 int i; #endif if (netif == NULL) { return; } #if LWIP_IPV4 if (!ip4_addr_isany_val(*netif_ip4_addr(netif))) { #if LWIP_TCP tcp_netif_ip_addr_changed(netif_ip_addr4(netif), NULL); #endif /* LWIP_TCP */ #if LWIP_UDP udp_netif_ip_addr_changed(netif_ip_addr4(netif), NULL); #endif /* LWIP_UDP */ #if LWIP_RAW raw_netif_ip_addr_changed(netif_ip_addr4(netif), NULL); #endif /* LWIP_RAW */ } #if LWIP_IGMP /* stop IGMP processing */ if (netif->flags & NETIF_FLAG_IGMP) { igmp_stop(netif); } #endif /* LWIP_IGMP */ #endif /* LWIP_IPV4*/ #if LWIP_IPV6 for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { #if LWIP_TCP tcp_netif_ip_addr_changed(netif_ip_addr6(netif, i), NULL); #endif /* LWIP_TCP */ #if LWIP_UDP udp_netif_ip_addr_changed(netif_ip_addr6(netif, i), NULL); #endif /* LWIP_UDP */ #if LWIP_RAW raw_netif_ip_addr_changed(netif_ip_addr6(netif, i), NULL); #endif /* LWIP_RAW */ } } #if LWIP_IPV6_MLD /* stop MLD processing */ mld6_stop(netif); #endif /* LWIP_IPV6_MLD */ #endif /* LWIP_IPV6 */ if (netif_is_up(netif)) { /* set netif down before removing (call callback function) */ netif_set_down(netif); } mib2_remove_ip4(netif); /* this netif is default? */ if (netif_default == netif) { /* reset default netif */ netif_set_default(NULL); } /* is it the first netif? */ if (netif_list == netif) { netif_list = netif->next; } else { /* look for netif further down the list */ struct netif * tmp_netif; for (tmp_netif = netif_list; tmp_netif != NULL; tmp_netif = tmp_netif->next) { if (tmp_netif->next == netif) { tmp_netif->next = netif->next; break; } } if (tmp_netif == NULL) { return; /* netif is not on the list */ } } mib2_netif_removed(netif); #if LWIP_NETIF_REMOVE_CALLBACK if (netif->remove_callback) { netif->remove_callback(netif); } #endif /* LWIP_NETIF_REMOVE_CALLBACK */ LWIP_DEBUGF( NETIF_DEBUG, ("netif_remove: removed netif\n") ); } /** * @ingroup netif * Find a network interface by searching for its name * * @param name the name of the netif (like netif->name) plus concatenated number * in ascii representation (e.g. 
'en0') */ struct netif * netif_find(const char *name) { struct netif *netif; u8_t num; if (name == NULL) { return NULL; } num = (u8_t)(name[2] - '0'); for (netif = netif_list; netif != NULL; netif = netif->next) { if (num == netif->num && name[0] == netif->name[0] && name[1] == netif->name[1]) { LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: found %c%c\n", name[0], name[1])); return netif; } } LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: didn't find %c%c\n", name[0], name[1])); return NULL; } #if LWIP_IPV4 /** * @ingroup netif_ip4 * Change the IP address of a network interface * * @param netif the network interface to change * @param ipaddr the new IP address * * @note call netif_set_addr() if you also want to change netmask and * default gateway */ void netif_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr) { ip_addr_t new_addr; *ip_2_ip4(&new_addr) = (ipaddr ? *ipaddr : *IP4_ADDR_ANY4); IP_SET_TYPE_VAL(new_addr, IPADDR_TYPE_V4); /* address is actually being changed? */ if (ip4_addr_cmp(ip_2_ip4(&new_addr), netif_ip4_addr(netif)) == 0) { LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: netif address being changed\n")); #if LWIP_TCP tcp_netif_ip_addr_changed(netif_ip_addr4(netif), &new_addr); #endif /* LWIP_TCP */ #if LWIP_UDP udp_netif_ip_addr_changed(netif_ip_addr4(netif), &new_addr); #endif /* LWIP_UDP */ #if LWIP_RAW raw_netif_ip_addr_changed(netif_ip_addr4(netif), &new_addr); #endif /* LWIP_RAW */ mib2_remove_ip4(netif); mib2_remove_route_ip4(0, netif); /* set new IP address to netif */ ip4_addr_set(ip_2_ip4(&netif->ip_addr), ipaddr); IP_SET_TYPE_VAL(netif->ip_addr, IPADDR_TYPE_V4); mib2_add_ip4(netif); mib2_add_route_ip4(0, netif); netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4); NETIF_STATUS_CALLBACK(netif); } LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IP address of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", netif->name[0], netif->name[1], ip4_addr1_16(netif_ip4_addr(netif)), ip4_addr2_16(netif_ip4_addr(netif)), ip4_addr3_16(netif_ip4_addr(netif)), ip4_addr4_16(netif_ip4_addr(netif)))); } /** * @ingroup netif_ip4 * Change the default gateway for a network interface * * @param netif the network interface to change * @param gw the new default gateway * * @note call netif_set_addr() if you also want to change ip address and netmask */ void netif_set_gw(struct netif *netif, const ip4_addr_t *gw) { ip4_addr_set(ip_2_ip4(&netif->gw), gw); IP_SET_TYPE_VAL(netif->gw, IPADDR_TYPE_V4); LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: GW address of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", netif->name[0], netif->name[1], ip4_addr1_16(netif_ip4_gw(netif)), ip4_addr2_16(netif_ip4_gw(netif)), ip4_addr3_16(netif_ip4_gw(netif)), ip4_addr4_16(netif_ip4_gw(netif)))); } /** * @ingroup netif_ip4 * Change the netmask of a network interface * * @param netif the network interface to change * @param netmask the new netmask * * @note call netif_set_addr() if you also want to change ip address and * default gateway */ void netif_set_netmask(struct netif *netif, const ip4_addr_t *netmask) { mib2_remove_route_ip4(0, netif); /* set new netmask to netif */ ip4_addr_set(ip_2_ip4(&netif->netmask), netmask); IP_SET_TYPE_VAL(netif->netmask, IPADDR_TYPE_V4); mib2_add_route_ip4(0, netif); LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: netmask of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", netif->name[0], netif->name[1], ip4_addr1_16(netif_ip4_netmask(netif)), 
ip4_addr2_16(netif_ip4_netmask(netif)), ip4_addr3_16(netif_ip4_netmask(netif)), ip4_addr4_16(netif_ip4_netmask(netif)))); } #endif /* LWIP_IPV4 */ /** * @ingroup netif * Set a network interface as the default network interface * (used to output all packets for which no specific route is found) * * @param netif the default network interface */ void netif_set_default(struct netif *netif) { if (netif == NULL) { /* remove default route */ mib2_remove_route_ip4(1, netif); } else { /* install default route */ mib2_add_route_ip4(1, netif); } netif_default = netif; LWIP_DEBUGF(NETIF_DEBUG, ("netif: setting default interface %c%c\n", netif ? netif->name[0] : '\'', netif ? netif->name[1] : '\'')); } /** * @ingroup netif * Bring an interface up, available for processing * traffic. */ void netif_set_up(struct netif *netif) { if (!(netif->flags & NETIF_FLAG_UP)) { netif->flags |= NETIF_FLAG_UP; MIB2_COPY_SYSUPTIME_TO(&netif->ts); NETIF_STATUS_CALLBACK(netif); if (netif->flags & NETIF_FLAG_LINK_UP) { netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4|NETIF_REPORT_TYPE_IPV6); } } } /** Send ARP/IGMP/MLD/RS events, e.g. on link-up/netif-up or addr-change */ static void netif_issue_reports(struct netif* netif, u8_t report_type) { #if LWIP_IPV4 if ((report_type & NETIF_REPORT_TYPE_IPV4) && !ip4_addr_isany_val(*netif_ip4_addr(netif))) { #if LWIP_ARP /* For Ethernet network interfaces, we would like to send a "gratuitous ARP" */ if (netif->flags & (NETIF_FLAG_ETHARP)) { etharp_gratuitous(netif); } #endif /* LWIP_ARP */ #if LWIP_IGMP /* resend IGMP memberships */ if (netif->flags & NETIF_FLAG_IGMP) { igmp_report_groups(netif); } #endif /* LWIP_IGMP */ } #endif /* LWIP_IPV4 */ #if LWIP_IPV6 if (report_type & NETIF_REPORT_TYPE_IPV6) { #if LWIP_IPV6_MLD /* send mld memberships */ mld6_report_groups(netif); #endif /* LWIP_IPV6_MLD */ #if LWIP_IPV6_SEND_ROUTER_SOLICIT /* Send Router Solicitation messages. */ netif->rs_count = LWIP_ND6_MAX_MULTICAST_SOLICIT; #endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ } #endif /* LWIP_IPV6 */ } /** * @ingroup netif * Bring an interface down, disabling any traffic processing. 
*/ void netif_set_down(struct netif *netif) { if (netif->flags & NETIF_FLAG_UP) { netif->flags &= ~NETIF_FLAG_UP; MIB2_COPY_SYSUPTIME_TO(&netif->ts); #if LWIP_IPV4 && LWIP_ARP if (netif->flags & NETIF_FLAG_ETHARP) { etharp_cleanup_netif(netif); } #endif /* LWIP_IPV4 && LWIP_ARP */ #if LWIP_IPV6 nd6_cleanup_netif(netif); #endif /* LWIP_IPV6 */ NETIF_STATUS_CALLBACK(netif); } } #if LWIP_NETIF_STATUS_CALLBACK /** * @ingroup netif * Set callback to be called when interface is brought up/down or address is changed while up */ void netif_set_status_callback(struct netif *netif, netif_status_callback_fn status_callback) { if (netif) { netif->status_callback = status_callback; } } #endif /* LWIP_NETIF_STATUS_CALLBACK */ #if LWIP_NETIF_REMOVE_CALLBACK /** * @ingroup netif * Set callback to be called when the interface has been removed */ void netif_set_remove_callback(struct netif *netif, netif_status_callback_fn remove_callback) { if (netif) { netif->remove_callback = remove_callback; } } #endif /* LWIP_NETIF_REMOVE_CALLBACK */ /** * @ingroup netif * Called by a driver when its link goes up */ void netif_set_link_up(struct netif *netif) { if (!(netif->flags & NETIF_FLAG_LINK_UP)) { netif->flags |= NETIF_FLAG_LINK_UP; #if LWIP_DHCP dhcp_network_changed(netif); #endif /* LWIP_DHCP */ #if LWIP_AUTOIP autoip_network_changed(netif); #endif /* LWIP_AUTOIP */ if (netif->flags & NETIF_FLAG_UP) { netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4|NETIF_REPORT_TYPE_IPV6); } NETIF_LINK_CALLBACK(netif); } } /** * @ingroup netif * Called by a driver when its link goes down */ void netif_set_link_down(struct netif *netif ) { if (netif->flags & NETIF_FLAG_LINK_UP) { netif->flags &= ~NETIF_FLAG_LINK_UP; NETIF_LINK_CALLBACK(netif); } } #if LWIP_NETIF_LINK_CALLBACK /** * @ingroup netif * Set callback to be called when link is brought up/down */ void netif_set_link_callback(struct netif *netif, netif_status_callback_fn link_callback) { if (netif) { netif->link_callback = link_callback; } } #endif /* LWIP_NETIF_LINK_CALLBACK */ #if ENABLE_LOOPBACK /** * @ingroup netif * Send an IP packet to be received on the same netif (loopif-like). * The pbuf is simply copied and handed back to netif->input. * In multithreaded mode, this is done directly since netif->input must put * the packet on a queue. * In callback mode, the packet is put on an internal queue and is fed to * netif->input by netif_poll(). * * @param netif the lwip network interface structure * @param p the (IP) packet to 'send' * @return ERR_OK if the packet has been sent * ERR_MEM if the pbuf used to copy the packet couldn't be allocated */ err_t netif_loop_output(struct netif *netif, struct pbuf *p) { struct pbuf *r; err_t err; struct pbuf *last; #if LWIP_LOOPBACK_MAX_PBUFS u16_t clen = 0; #endif /* LWIP_LOOPBACK_MAX_PBUFS */ /* If we have a loopif, SNMP counters are adjusted for it, * if not they are adjusted for 'netif'. 
*/ #if MIB2_STATS #if LWIP_HAVE_LOOPIF struct netif *stats_if = &loop_netif; #else /* LWIP_HAVE_LOOPIF */ struct netif *stats_if = netif; #endif /* LWIP_HAVE_LOOPIF */ #endif /* MIB2_STATS */ SYS_ARCH_DECL_PROTECT(lev); /* Allocate a new pbuf */ r = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM); if (r == NULL) { LINK_STATS_INC(link.memerr); LINK_STATS_INC(link.drop); MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); return ERR_MEM; } #if LWIP_LOOPBACK_MAX_PBUFS clen = pbuf_clen(r); /* check for overflow or too many pbuf on queue */ if (((netif->loop_cnt_current + clen) < netif->loop_cnt_current) || ((netif->loop_cnt_current + clen) > LWIP_LOOPBACK_MAX_PBUFS)) { pbuf_free(r); LINK_STATS_INC(link.memerr); LINK_STATS_INC(link.drop); MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); return ERR_MEM; } netif->loop_cnt_current += clen; #endif /* LWIP_LOOPBACK_MAX_PBUFS */ /* Copy the whole pbuf queue p into the single pbuf r */ if ((err = pbuf_copy(r, p)) != ERR_OK) { pbuf_free(r); LINK_STATS_INC(link.memerr); LINK_STATS_INC(link.drop); MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); return err; } /* Put the packet on a linked list which gets emptied through calling netif_poll(). */ /* let last point to the last pbuf in chain r */ for (last = r; last->next != NULL; last = last->next); SYS_ARCH_PROTECT(lev); if (netif->loop_first != NULL) { LWIP_ASSERT("if first != NULL, last must also be != NULL", netif->loop_last != NULL); netif->loop_last->next = r; netif->loop_last = last; } else { netif->loop_first = r; netif->loop_last = last; } SYS_ARCH_UNPROTECT(lev); LINK_STATS_INC(link.xmit); MIB2_STATS_NETIF_ADD(stats_if, ifoutoctets, p->tot_len); MIB2_STATS_NETIF_INC(stats_if, ifoutucastpkts); #if LWIP_NETIF_LOOPBACK_MULTITHREADING /* For multithreading environment, schedule a call to netif_poll */ tcpip_callback_with_block((tcpip_callback_fn)netif_poll, netif, 0); #endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ return ERR_OK; } #if LWIP_HAVE_LOOPIF #if LWIP_IPV4 static err_t netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t* addr) { LWIP_UNUSED_ARG(addr); return netif_loop_output(netif, p); } #endif /* LWIP_IPV4 */ #if LWIP_IPV6 static err_t netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t* addr) { LWIP_UNUSED_ARG(addr); return netif_loop_output(netif, p); } #endif /* LWIP_IPV6 */ #endif /* LWIP_HAVE_LOOPIF */ /** * Call netif_poll() in the main loop of your application. This is to prevent * reentering non-reentrant functions like tcp_input(). Packets passed to * netif_loop_output() are put on a list that is passed to netif->input() by * netif_poll(). */ void netif_poll(struct netif *netif) { /* If we have a loopif, SNMP counters are adjusted for it, * if not they are adjusted for 'netif'. */ #if MIB2_STATS #if LWIP_HAVE_LOOPIF struct netif *stats_if = &loop_netif; #else /* LWIP_HAVE_LOOPIF */ struct netif *stats_if = netif; #endif /* LWIP_HAVE_LOOPIF */ #endif /* MIB2_STATS */ SYS_ARCH_DECL_PROTECT(lev); /* Get a packet from the list. 
With SYS_LIGHTWEIGHT_PROT=1, this is protected */ SYS_ARCH_PROTECT(lev); while (netif->loop_first != NULL) { struct pbuf *in, *in_end; #if LWIP_LOOPBACK_MAX_PBUFS u8_t clen = 1; #endif /* LWIP_LOOPBACK_MAX_PBUFS */ in = in_end = netif->loop_first; while (in_end->len != in_end->tot_len) { LWIP_ASSERT("bogus pbuf: len != tot_len but next == NULL!", in_end->next != NULL); in_end = in_end->next; #if LWIP_LOOPBACK_MAX_PBUFS clen++; #endif /* LWIP_LOOPBACK_MAX_PBUFS */ } #if LWIP_LOOPBACK_MAX_PBUFS /* adjust the number of pbufs on queue */ LWIP_ASSERT("netif->loop_cnt_current underflow", ((netif->loop_cnt_current - clen) < netif->loop_cnt_current)); netif->loop_cnt_current -= clen; #endif /* LWIP_LOOPBACK_MAX_PBUFS */ /* 'in_end' now points to the last pbuf from 'in' */ if (in_end == netif->loop_last) { /* this was the last pbuf in the list */ netif->loop_first = netif->loop_last = NULL; } else { /* pop the pbuf off the list */ netif->loop_first = in_end->next; LWIP_ASSERT("should not be null since first != last!", netif->loop_first != NULL); } /* De-queue the pbuf from its successors on the 'loop_' list. */ in_end->next = NULL; SYS_ARCH_UNPROTECT(lev); LINK_STATS_INC(link.recv); MIB2_STATS_NETIF_ADD(stats_if, ifinoctets, in->tot_len); MIB2_STATS_NETIF_INC(stats_if, ifinucastpkts); /* loopback packets are always IP packets! */ if (ip_input(in, netif) != ERR_OK) { pbuf_free(in); } SYS_ARCH_PROTECT(lev); } SYS_ARCH_UNPROTECT(lev); } #if !LWIP_NETIF_LOOPBACK_MULTITHREADING /** * Calls netif_poll() for every netif on the netif_list. */ void netif_poll_all(void) { struct netif *netif = netif_list; /* loop through netifs */ while (netif != NULL) { netif_poll(netif); /* proceed to next network interface */ netif = netif->next; } } #endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */ #endif /* ENABLE_LOOPBACK */ #if LWIP_NUM_NETIF_CLIENT_DATA > 0 /** * @ingroup netif_cd * Allocate an index to store data in client_data member of struct netif. * Returned value is an index in mentioned array. * @see LWIP_NUM_NETIF_CLIENT_DATA */ u8_t netif_alloc_client_data_id(void) { u8_t result = netif_client_id; netif_client_id++; LWIP_ASSERT("Increase LWIP_NUM_NETIF_CLIENT_DATA in lwipopts.h", result < LWIP_NUM_NETIF_CLIENT_DATA); return result + LWIP_NETIF_CLIENT_DATA_INDEX_MAX; } #endif #if LWIP_IPV6 /** * @ingroup netif_ip6 * Change an IPv6 address of a network interface * * @param netif the network interface to change * @param addr_idx index of the IPv6 address * @param addr6 the new IPv6 address * * @note call netif_ip6_addr_set_state() to set the address valid/temptative */ void netif_ip6_addr_set(struct netif *netif, s8_t addr_idx, const ip6_addr_t *addr6) { LWIP_ASSERT("addr6 != NULL", addr6 != NULL); netif_ip6_addr_set_parts(netif, addr_idx, addr6->addr[0], addr6->addr[1], addr6->addr[2], addr6->addr[3]); } /* * Change an IPv6 address of a network interface (internal version taking 4 * u32_t) * * @param netif the network interface to change * @param addr_idx index of the IPv6 address * @param i0 word0 of the new IPv6 address * @param i1 word1 of the new IPv6 address * @param i2 word2 of the new IPv6 address * @param i3 word3 of the new IPv6 address */ void netif_ip6_addr_set_parts(struct netif *netif, s8_t addr_idx, u32_t i0, u32_t i1, u32_t i2, u32_t i3) { const ip6_addr_t *old_addr; LWIP_ASSERT("netif != NULL", netif != NULL); LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES); old_addr = netif_ip6_addr(netif, addr_idx); /* address is actually being changed? 
*/ if ((old_addr->addr[0] != i0) || (old_addr->addr[1] != i1) || (old_addr->addr[2] != i2) || (old_addr->addr[3] != i3)) { LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set: netif address being changed\n")); if (netif_ip6_addr_state(netif, addr_idx) & IP6_ADDR_VALID) { #if LWIP_TCP || LWIP_UDP ip_addr_t new_ipaddr; IP_ADDR6(&new_ipaddr, i0, i1, i2, i3); #endif /* LWIP_TCP || LWIP_UDP */ #if LWIP_TCP tcp_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr); #endif /* LWIP_TCP */ #if LWIP_UDP udp_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr); #endif /* LWIP_UDP */ #if LWIP_RAW raw_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr); #endif /* LWIP_RAW */ } /* @todo: remove/readd mib2 ip6 entries? */ IP6_ADDR(ip_2_ip6(&(netif->ip6_addr[addr_idx])), i0, i1, i2, i3); IP_SET_TYPE_VAL(netif->ip6_addr[addr_idx], IPADDR_TYPE_V6); if (netif_ip6_addr_state(netif, addr_idx) & IP6_ADDR_VALID) { netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6); NETIF_STATUS_CALLBACK(netif); } } LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n", addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)), netif_ip6_addr_state(netif, addr_idx))); } /** * @ingroup netif_ip6 * Change the state of an IPv6 address of a network interface * (INVALID, TEMPTATIVE, PREFERRED, DEPRECATED, where TEMPTATIVE * includes the number of checks done, see ip6_addr.h) * * @param netif the network interface to change * @param addr_idx index of the IPv6 address * @param state the new IPv6 address state */ void netif_ip6_addr_set_state(struct netif* netif, s8_t addr_idx, u8_t state) { u8_t old_state; LWIP_ASSERT("netif != NULL", netif != NULL); LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES); old_state = netif_ip6_addr_state(netif, addr_idx); /* state is actually being changed? */ if (old_state != state) { u8_t old_valid = old_state & IP6_ADDR_VALID; u8_t new_valid = state & IP6_ADDR_VALID; LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set_state: netif address state being changed\n")); #if LWIP_IPV6_MLD /* Reevaluate solicited-node multicast group membership. */ if (netif->flags & NETIF_FLAG_MLD6) { nd6_adjust_mld_membership(netif, addr_idx, state); } #endif /* LWIP_IPV6_MLD */ if (old_valid && !new_valid) { /* address about to be removed by setting invalid */ #if LWIP_TCP tcp_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL); #endif /* LWIP_TCP */ #if LWIP_UDP udp_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL); #endif /* LWIP_UDP */ #if LWIP_RAW raw_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL); #endif /* LWIP_RAW */ /* @todo: remove mib2 ip6 entries? */ } netif->ip6_addr_state[addr_idx] = state; if (!old_valid && new_valid) { /* address added by setting valid */ /* @todo: add mib2 ip6 entries? 
*/ netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6); } if ((old_state & IP6_ADDR_PREFERRED) != (state & IP6_ADDR_PREFERRED)) { /* address state has changed (valid flag changed or switched between preferred and deprecated) -> call the callback function */ NETIF_STATUS_CALLBACK(netif); } } LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n", addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)), netif_ip6_addr_state(netif, addr_idx))); } /** * Checks if a specific address is assigned to the netif and returns its * index. * * @param netif the netif to check * @param ip6addr the IPv6 address to find * @return >= 0: address found, this is its index * -1: address not found on this netif */ s8_t netif_get_ip6_addr_match(struct netif *netif, const ip6_addr_t *ip6addr) { s8_t i; for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i)) && ip6_addr_cmp(netif_ip6_addr(netif, i), ip6addr)) { return i; } } return -1; } /** * @ingroup netif_ip6 * Create a link-local IPv6 address on a netif (stored in slot 0) * * @param netif the netif to create the address on * @param from_mac_48bit if != 0, assume hwadr is a 48-bit MAC address (std conversion) * if == 0, use hwaddr directly as interface ID */ void netif_create_ip6_linklocal_address(struct netif *netif, u8_t from_mac_48bit) { u8_t i, addr_index; /* Link-local prefix. */ ip_2_ip6(&netif->ip6_addr[0])->addr[0] = PP_HTONL(0xfe800000ul); ip_2_ip6(&netif->ip6_addr[0])->addr[1] = 0; /* Generate interface ID. */ if (from_mac_48bit) { /* Assume hwaddr is a 48-bit IEEE 802 MAC. Convert to EUI-64 address. Complement Group bit. */ ip_2_ip6(&netif->ip6_addr[0])->addr[2] = lwip_htonl((((u32_t)(netif->hwaddr[0] ^ 0x02)) << 24) | ((u32_t)(netif->hwaddr[1]) << 16) | ((u32_t)(netif->hwaddr[2]) << 8) | (0xff)); ip_2_ip6(&netif->ip6_addr[0])->addr[3] = lwip_htonl((0xfeul << 24) | ((u32_t)(netif->hwaddr[3]) << 16) | ((u32_t)(netif->hwaddr[4]) << 8) | (netif->hwaddr[5])); } else { /* Use hwaddr directly as interface ID. */ ip_2_ip6(&netif->ip6_addr[0])->addr[2] = 0; ip_2_ip6(&netif->ip6_addr[0])->addr[3] = 0; addr_index = 3; for (i = 0; (i < 8) && (i < netif->hwaddr_len); i++) { if (i == 4) { addr_index--; } ip_2_ip6(&netif->ip6_addr[0])->addr[addr_index] |= ((u32_t)(netif->hwaddr[netif->hwaddr_len - i - 1])) << (8 * (i & 0x03)); } } /* Set address state. */ #if LWIP_IPV6_DUP_DETECT_ATTEMPTS /* Will perform duplicate address detection (DAD). */ netif_ip6_addr_set_state(netif, 0, IP6_ADDR_TENTATIVE); #else /* Consider address valid. */ netif_ip6_addr_set_state(netif, 0, IP6_ADDR_PREFERRED); #endif /* LWIP_IPV6_AUTOCONFIG */ } /** * @ingroup netif_ip6 * This function allows for the easy addition of a new IPv6 address to an interface. * It takes care of finding an empty slot and then sets the address tentative * (to make sure that all the subsequent processing happens). 
* * @param netif netif to add the address on * @param ip6addr address to add * @param chosen_idx if != NULL, the chosen IPv6 address index will be stored here */ err_t netif_add_ip6_address(struct netif *netif, const ip6_addr_t *ip6addr, s8_t *chosen_idx) { s8_t i; i = netif_get_ip6_addr_match(netif, ip6addr); if (i >= 0) { /* Address already added */ if (chosen_idx != NULL) { *chosen_idx = i; } return ERR_OK; } /* Find a free slot -- mustn't be the first one (reserved for link local) */ for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (ip6_addr_isinvalid(netif_ip6_addr_state(netif, i))) { ip_addr_copy_from_ip6(netif->ip6_addr[i], *ip6addr); netif_ip6_addr_set_state(netif, i, IP6_ADDR_TENTATIVE); if (chosen_idx != NULL) { *chosen_idx = i; } return ERR_OK; } } if (chosen_idx != NULL) { *chosen_idx = -1; } return ERR_VAL; } /** Dummy IPv6 output function for netifs not supporting IPv6 */ static err_t netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr) { LWIP_UNUSED_ARG(netif); LWIP_UNUSED_ARG(p); LWIP_UNUSED_ARG(ipaddr); return ERR_IF; } #endif /* LWIP_IPV6 */
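/*
 * Illustrative usage sketch of the netif_add()/netif_set_up() sequence
 * documented above, assuming an IPv4 build with NO_SYS==0. "ethernetif_init"
 * is a placeholder for the port's driver init function and is not defined in
 * this file; the block is guarded out so it does not affect the build.
 */
#if 0
static struct netif eth0;

static void example_netif_bringup(void)
{
  ip4_addr_t ipaddr, netmask, gw;

  IP4_ADDR(&ipaddr,  192, 168,   1,  10);
  IP4_ADDR(&netmask, 255, 255, 255,   0);
  IP4_ADDR(&gw,      192, 168,   1,   1);

  /* tcpip_input() forwards received packets to the TCPIP thread (NO_SYS==0) */
  netif_add(&eth0, &ipaddr, &netmask, &gw, NULL, ethernetif_init, tcpip_input);
  netif_set_default(&eth0);
  netif_set_up(&eth0);
}
#endif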
340803.c
/** ****************************************************************************** * @file stm32f4xx_hal_dsi.c * @author MCD Application Team * @brief DSI HAL module driver. * This file provides firmware functions to manage the following * functionalities of the DSI peripheral: * + Initialization and de-initialization functions * + IO operation functions * + Peripheral Control functions * + Peripheral State and Errors functions ****************************************************************************** * @attention * * <h2><center>&copy; COPYRIGHT(c) 2017 STMicroelectronics</center></h2> * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of STMicroelectronics nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ #include "stm32f4xx_hal.h" /** @addtogroup STM32F4xx_HAL_Driver * @{ */ #ifdef HAL_DSI_MODULE_ENABLED #if defined(DSI) /** @addtogroup DSI * @{ */ /* Private types -------------------------------------------------------------*/ /* Private defines -----------------------------------------------------------*/ /** @addtogroup DSI_Private_Constants * @{ */ #define DSI_TIMEOUT_VALUE ((uint32_t)1000U) /* 1s */ #define DSI_ERROR_ACK_MASK (DSI_ISR0_AE0 | DSI_ISR0_AE1 | DSI_ISR0_AE2 | DSI_ISR0_AE3 | \ DSI_ISR0_AE4 | DSI_ISR0_AE5 | DSI_ISR0_AE6 | DSI_ISR0_AE7 | \ DSI_ISR0_AE8 | DSI_ISR0_AE9 | DSI_ISR0_AE10 | DSI_ISR0_AE11 | \ DSI_ISR0_AE12 | DSI_ISR0_AE13 | DSI_ISR0_AE14 | DSI_ISR0_AE15) #define DSI_ERROR_PHY_MASK (DSI_ISR0_PE0 | DSI_ISR0_PE1 | DSI_ISR0_PE2 | DSI_ISR0_PE3 | DSI_ISR0_PE4) #define DSI_ERROR_TX_MASK DSI_ISR1_TOHSTX #define DSI_ERROR_RX_MASK DSI_ISR1_TOLPRX #define DSI_ERROR_ECC_MASK (DSI_ISR1_ECCSE | DSI_ISR1_ECCME) #define DSI_ERROR_CRC_MASK DSI_ISR1_CRCE #define DSI_ERROR_PSE_MASK DSI_ISR1_PSE #define DSI_ERROR_EOT_MASK DSI_ISR1_EOTPE #define DSI_ERROR_OVF_MASK DSI_ISR1_LPWRE #define DSI_ERROR_GEN_MASK (DSI_ISR1_GCWRE | DSI_ISR1_GPWRE | DSI_ISR1_GPTXE | DSI_ISR1_GPRDE | DSI_ISR1_GPRXE) /** * @} */ /* Private variables ---------------------------------------------------------*/ /* Private constants ---------------------------------------------------------*/ /* Private macros ------------------------------------------------------------*/ /* Private function prototypes -----------------------------------------------*/ static void DSI_ConfigPacketHeader(DSI_TypeDef *DSIx, uint32_t ChannelID, uint32_t DataType, uint32_t Data0, uint32_t Data1); /* Private functions ---------------------------------------------------------*/ /** * @brief Generic DSI packet header configuration * @param DSIx Pointer to DSI register base * @param ChannelID Virtual channel ID of the header packet * @param DataType Packet data type of the header packet * This parameter can be any value of : * @ref DSI_SHORT_WRITE_PKT_Data_Type * or @ref DSI_LONG_WRITE_PKT_Data_Type * or @ref DSI_SHORT_READ_PKT_Data_Type * or DSI_MAX_RETURN_PKT_SIZE * @param Data0 Word count LSB * @param Data1 Word count MSB * @retval None */ static void DSI_ConfigPacketHeader(DSI_TypeDef *DSIx, uint32_t ChannelID, uint32_t DataType, uint32_t Data0, uint32_t Data1) { /* Update the DSI packet header with new information */ DSIx->GHCR = (DataType | (ChannelID << 6U) | (Data0 << 8U) | (Data1 << 16U)); } /* Exported functions --------------------------------------------------------*/ /** @addtogroup DSI_Exported_Functions * @{ */ /** @defgroup DSI_Group1 Initialization and Configuration functions * @brief Initialization and Configuration functions * @verbatim =============================================================================== ##### Initialization and Configuration functions ##### =============================================================================== [..] This section provides functions allowing to: (+) Initialize and configure the DSI (+) De-initialize the DSI @endverbatim * @{ */ /** * @brief Initializes the DSI according to the specified * parameters in the DSI_InitTypeDef and create the associated handle. * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. 
* @param PLLInit pointer to a DSI_PLLInitTypeDef structure that contains * the PLL Clock structure definition for the DSI. * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_Init(DSI_HandleTypeDef *hdsi, DSI_PLLInitTypeDef *PLLInit) { uint32_t tickstart; uint32_t unitIntervalx4; uint32_t tempIDF; /* Check the DSI handle allocation */ if (hdsi == NULL) { return HAL_ERROR; } /* Check function parameters */ assert_param(IS_DSI_PLL_NDIV(PLLInit->PLLNDIV)); assert_param(IS_DSI_PLL_IDF(PLLInit->PLLIDF)); assert_param(IS_DSI_PLL_ODF(PLLInit->PLLODF)); assert_param(IS_DSI_AUTO_CLKLANE_CONTROL(hdsi->Init.AutomaticClockLaneControl)); assert_param(IS_DSI_NUMBER_OF_LANES(hdsi->Init.NumberOfLanes)); if (hdsi->State == HAL_DSI_STATE_RESET) { /* Initialize the low level hardware */ HAL_DSI_MspInit(hdsi); } /* Change DSI peripheral state */ hdsi->State = HAL_DSI_STATE_BUSY; /**************** Turn on the regulator and enable the DSI PLL ****************/ /* Enable the regulator */ __HAL_DSI_REG_ENABLE(hdsi); /* Get tick */ tickstart = HAL_GetTick(); /* Wait until the regulator is ready */ while (__HAL_DSI_GET_FLAG(hdsi, DSI_FLAG_RRS) == RESET) { /* Check for the Timeout */ if ((HAL_GetTick() - tickstart ) > DSI_TIMEOUT_VALUE) { return HAL_TIMEOUT; } } /* Set the PLL division factors */ hdsi->Instance->WRPCR &= ~(DSI_WRPCR_PLL_NDIV | DSI_WRPCR_PLL_IDF | DSI_WRPCR_PLL_ODF); hdsi->Instance->WRPCR |= (((PLLInit->PLLNDIV) << 2U) | ((PLLInit->PLLIDF) << 11U) | ((PLLInit->PLLODF) << 16U)); /* Enable the DSI PLL */ __HAL_DSI_PLL_ENABLE(hdsi); /* Get tick */ tickstart = HAL_GetTick(); /* Wait for the lock of the PLL */ while (__HAL_DSI_GET_FLAG(hdsi, DSI_FLAG_PLLLS) == RESET) { /* Check for the Timeout */ if ((HAL_GetTick() - tickstart ) > DSI_TIMEOUT_VALUE) { return HAL_TIMEOUT; } } /*************************** Set the PHY parameters ***************************/ /* D-PHY clock and digital enable*/ hdsi->Instance->PCTLR |= (DSI_PCTLR_CKE | DSI_PCTLR_DEN); /* Clock lane configuration */ hdsi->Instance->CLCR &= ~(DSI_CLCR_DPCC | DSI_CLCR_ACR); hdsi->Instance->CLCR |= (DSI_CLCR_DPCC | hdsi->Init.AutomaticClockLaneControl); /* Configure the number of active data lanes */ hdsi->Instance->PCONFR &= ~DSI_PCONFR_NL; hdsi->Instance->PCONFR |= hdsi->Init.NumberOfLanes; /************************ Set the DSI clock parameters ************************/ /* Set the TX escape clock division factor */ hdsi->Instance->CCR &= ~DSI_CCR_TXECKDIV; hdsi->Instance->CCR |= hdsi->Init.TXEscapeCkdiv; /* Calculate the bit period in high-speed mode in unit of 0.25 ns (UIX4) */ /* The equation is : UIX4 = IntegerPart( (1000/F_PHY_Mhz) * 4 ) */ /* Where : F_PHY_Mhz = (NDIV * HSE_Mhz) / (IDF * ODF) */ tempIDF = (PLLInit->PLLIDF > 0U) ? PLLInit->PLLIDF : 1U; unitIntervalx4 = (4000000U * tempIDF * (1U << PLLInit->PLLODF)) / ((HSE_VALUE / 1000U) * PLLInit->PLLNDIV); /* Set the bit period in high-speed mode */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_UIX4; hdsi->Instance->WPCR[0U] |= unitIntervalx4; /****************************** Error management *****************************/ /* Disable all error interrupts and reset the Error Mask */ hdsi->Instance->IER[0U] = 0U; hdsi->Instance->IER[1U] = 0U; hdsi->ErrorMsk = 0U; /* Initialise the error code */ hdsi->ErrorCode = HAL_DSI_ERROR_NONE; /* Initialize the DSI state*/ hdsi->State = HAL_DSI_STATE_READY; return HAL_OK; } /** * @brief De-initializes the DSI peripheral registers to their default reset * values. 
* @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_DeInit(DSI_HandleTypeDef *hdsi) { /* Check the DSI handle allocation */ if (hdsi == NULL) { return HAL_ERROR; } /* Change DSI peripheral state */ hdsi->State = HAL_DSI_STATE_BUSY; /* Disable the DSI wrapper */ __HAL_DSI_WRAPPER_DISABLE(hdsi); /* Disable the DSI host */ __HAL_DSI_DISABLE(hdsi); /* D-PHY clock and digital disable */ hdsi->Instance->PCTLR &= ~(DSI_PCTLR_CKE | DSI_PCTLR_DEN); /* Turn off the DSI PLL */ __HAL_DSI_PLL_DISABLE(hdsi); /* Disable the regulator */ __HAL_DSI_REG_DISABLE(hdsi); /* DeInit the low level hardware */ HAL_DSI_MspDeInit(hdsi); /* Initialise the error code */ hdsi->ErrorCode = HAL_DSI_ERROR_NONE; /* Initialize the DSI state*/ hdsi->State = HAL_DSI_STATE_RESET; /* Release Lock */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Enable the error monitor flags * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param ActiveErrors indicates which error interrupts will be enabled. * This parameter can be any combination of @ref DSI_Error_Data_Type. * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_ConfigErrorMonitor(DSI_HandleTypeDef *hdsi, uint32_t ActiveErrors) { /* Process locked */ __HAL_LOCK(hdsi); hdsi->Instance->IER[0U] = 0U; hdsi->Instance->IER[1U] = 0U; /* Store active errors to the handle */ hdsi->ErrorMsk = ActiveErrors; if ((ActiveErrors & HAL_DSI_ERROR_ACK) != RESET) { /* Enable the interrupt generation on selected errors */ hdsi->Instance->IER[0U] |= DSI_ERROR_ACK_MASK; } if ((ActiveErrors & HAL_DSI_ERROR_PHY) != RESET) { /* Enable the interrupt generation on selected errors */ hdsi->Instance->IER[0U] |= DSI_ERROR_PHY_MASK; } if ((ActiveErrors & HAL_DSI_ERROR_TX) != RESET) { /* Enable the interrupt generation on selected errors */ hdsi->Instance->IER[1U] |= DSI_ERROR_TX_MASK; } if ((ActiveErrors & HAL_DSI_ERROR_RX) != RESET) { /* Enable the interrupt generation on selected errors */ hdsi->Instance->IER[1U] |= DSI_ERROR_RX_MASK; } if ((ActiveErrors & HAL_DSI_ERROR_ECC) != RESET) { /* Enable the interrupt generation on selected errors */ hdsi->Instance->IER[1U] |= DSI_ERROR_ECC_MASK; } if ((ActiveErrors & HAL_DSI_ERROR_CRC) != RESET) { /* Enable the interrupt generation on selected errors */ hdsi->Instance->IER[1U] |= DSI_ERROR_CRC_MASK; } if ((ActiveErrors & HAL_DSI_ERROR_PSE) != RESET) { /* Enable the interrupt generation on selected errors */ hdsi->Instance->IER[1U] |= DSI_ERROR_PSE_MASK; } if ((ActiveErrors & HAL_DSI_ERROR_EOT) != RESET) { /* Enable the interrupt generation on selected errors */ hdsi->Instance->IER[1U] |= DSI_ERROR_EOT_MASK; } if ((ActiveErrors & HAL_DSI_ERROR_OVF) != RESET) { /* Enable the interrupt generation on selected errors */ hdsi->Instance->IER[1U] |= DSI_ERROR_OVF_MASK; } if ((ActiveErrors & HAL_DSI_ERROR_GEN) != RESET) { /* Enable the interrupt generation on selected errors */ hdsi->Instance->IER[1U] |= DSI_ERROR_GEN_MASK; } /* Process Unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Initializes the DSI MSP. * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. 
* @retval None */ __weak void HAL_DSI_MspInit(DSI_HandleTypeDef* hdsi) { /* Prevent unused argument(s) compilation warning */ UNUSED(hdsi); /* NOTE : This function Should not be modified, when the callback is needed, the HAL_DSI_MspInit could be implemented in the user file */ } /** * @brief De-initializes the DSI MSP. * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @retval None */ __weak void HAL_DSI_MspDeInit(DSI_HandleTypeDef* hdsi) { /* Prevent unused argument(s) compilation warning */ UNUSED(hdsi); /* NOTE : This function Should not be modified, when the callback is needed, the HAL_DSI_MspDeInit could be implemented in the user file */ } /** * @} */ /** @defgroup DSI_Group2 IO operation functions * @brief IO operation functions * @verbatim =============================================================================== ##### IO operation functions ##### =============================================================================== [..] This section provides function allowing to: (+) Handle DSI interrupt request @endverbatim * @{ */ /** * @brief Handles DSI interrupt request. * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @retval HAL status */ void HAL_DSI_IRQHandler(DSI_HandleTypeDef *hdsi) { uint32_t ErrorStatus0, ErrorStatus1; /* Tearing Effect Interrupt management ***************************************/ if (__HAL_DSI_GET_FLAG(hdsi, DSI_FLAG_TE) != RESET) { if (__HAL_DSI_GET_IT_SOURCE(hdsi, DSI_IT_TE) != RESET) { /* Clear the Tearing Effect Interrupt Flag */ __HAL_DSI_CLEAR_FLAG(hdsi, DSI_FLAG_TE); /* Tearing Effect Callback */ HAL_DSI_TearingEffectCallback(hdsi); } } /* End of Refresh Interrupt management ***************************************/ if (__HAL_DSI_GET_FLAG(hdsi, DSI_FLAG_ER) != RESET) { if (__HAL_DSI_GET_IT_SOURCE(hdsi, DSI_IT_ER) != RESET) { /* Clear the End of Refresh Interrupt Flag */ __HAL_DSI_CLEAR_FLAG(hdsi, DSI_FLAG_ER); /* End of Refresh Callback */ HAL_DSI_EndOfRefreshCallback(hdsi); } } /* Error Interrupts management ***********************************************/ if (hdsi->ErrorMsk != 0U) { ErrorStatus0 = hdsi->Instance->ISR[0U]; ErrorStatus0 &= hdsi->Instance->IER[0U]; ErrorStatus1 = hdsi->Instance->ISR[1U]; ErrorStatus1 &= hdsi->Instance->IER[1U]; if ((ErrorStatus0 & DSI_ERROR_ACK_MASK) != RESET) { hdsi->ErrorCode |= HAL_DSI_ERROR_ACK; } if ((ErrorStatus0 & DSI_ERROR_PHY_MASK) != RESET) { hdsi->ErrorCode |= HAL_DSI_ERROR_PHY; } if ((ErrorStatus1 & DSI_ERROR_TX_MASK) != RESET) { hdsi->ErrorCode |= HAL_DSI_ERROR_TX; } if ((ErrorStatus1 & DSI_ERROR_RX_MASK) != RESET) { hdsi->ErrorCode |= HAL_DSI_ERROR_RX; } if ((ErrorStatus1 & DSI_ERROR_ECC_MASK) != RESET) { hdsi->ErrorCode |= HAL_DSI_ERROR_ECC; } if ((ErrorStatus1 & DSI_ERROR_CRC_MASK) != RESET) { hdsi->ErrorCode |= HAL_DSI_ERROR_CRC; } if ((ErrorStatus1 & DSI_ERROR_PSE_MASK) != RESET) { hdsi->ErrorCode |= HAL_DSI_ERROR_PSE; } if ((ErrorStatus1 & DSI_ERROR_EOT_MASK) != RESET) { hdsi->ErrorCode |= HAL_DSI_ERROR_EOT; } if ((ErrorStatus1 & DSI_ERROR_OVF_MASK) != RESET) { hdsi->ErrorCode |= HAL_DSI_ERROR_OVF; } if ((ErrorStatus1 & DSI_ERROR_GEN_MASK) != RESET) { hdsi->ErrorCode |= HAL_DSI_ERROR_GEN; } /* Check only selected errors */ if (hdsi->ErrorCode != HAL_DSI_ERROR_NONE) { /* DSI error interrupt user callback */ HAL_DSI_ErrorCallback(hdsi); } } } /** * @brief Tearing Effect DSI callback. 
* @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @retval None */ __weak void HAL_DSI_TearingEffectCallback(DSI_HandleTypeDef *hdsi) { /* Prevent unused argument(s) compilation warning */ UNUSED(hdsi); /* NOTE : This function Should not be modified, when the callback is needed, the HAL_DSI_TearingEffectCallback could be implemented in the user file */ } /** * @brief End of Refresh DSI callback. * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @retval None */ __weak void HAL_DSI_EndOfRefreshCallback(DSI_HandleTypeDef *hdsi) { /* Prevent unused argument(s) compilation warning */ UNUSED(hdsi); /* NOTE : This function Should not be modified, when the callback is needed, the HAL_DSI_EndOfRefreshCallback could be implemented in the user file */ } /** * @brief Operation Error DSI callback. * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @retval None */ __weak void HAL_DSI_ErrorCallback(DSI_HandleTypeDef *hdsi) { /* Prevent unused argument(s) compilation warning */ UNUSED(hdsi); /* NOTE : This function Should not be modified, when the callback is needed, the HAL_DSI_ErrorCallback could be implemented in the user file */ } /** * @} */ /** @defgroup DSI_Group3 Peripheral Control functions * @brief Peripheral Control functions * @verbatim =============================================================================== ##### Peripheral Control functions ##### =============================================================================== [..] This section provides functions allowing to: (+) Configure the Generic interface read-back Virtual Channel ID (+) Select video mode and configure the corresponding parameters (+) Configure command transmission mode: High-speed or Low-power (+) Configure the flow control (+) Configure the DSI PHY timer (+) Configure the DSI HOST timeout (+) Configure the DSI HOST timeout (+) Start/Stop the DSI module (+) Refresh the display in command mode (+) Controls the display color mode in Video mode (+) Control the display shutdown in Video mode (+) write short DCS or short Generic command (+) write long DCS or long Generic command (+) Read command (DCS or generic) (+) Enter/Exit the Ultra Low Power Mode on data only (D-PHY PLL running) (+) Enter/Exit the Ultra Low Power Mode on data only and clock (D-PHY PLL turned off) (+) Start/Stop test pattern generation (+) Slew-Rate And Delay Tuning (+) Low-Power Reception Filter Tuning (+) Activate an additional current path on all lanes to meet the SDDTx parameter (+) Custom lane pins configuration (+) Set custom timing for the PHY (+) Force the Clock/Data Lane in TX Stop Mode (+) Force LP Receiver in Low-Power Mode (+) Force Data Lanes in RX Mode after a BTA (+) Enable a pull-down on the lanes to prevent from floating states when unused (+) Switch off the contention detection on data lanes @endverbatim * @{ */ /** * @brief Configure the Generic interface read-back Virtual Channel ID. * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. 
* @param VirtualChannelID Virtual channel ID * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_SetGenericVCID(DSI_HandleTypeDef *hdsi, uint32_t VirtualChannelID) { /* Process locked */ __HAL_LOCK(hdsi); /* Update the GVCID register */ hdsi->Instance->GVCIDR &= ~DSI_GVCIDR_VCID; hdsi->Instance->GVCIDR |= VirtualChannelID; /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Select video mode and configure the corresponding parameters * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param VidCfg pointer to a DSI_VidCfgTypeDef structure that contains * the DSI video mode configuration parameters * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_ConfigVideoMode(DSI_HandleTypeDef *hdsi, DSI_VidCfgTypeDef *VidCfg) { /* Process locked */ __HAL_LOCK(hdsi); /* Check the parameters */ assert_param(IS_DSI_COLOR_CODING(VidCfg->ColorCoding)); assert_param(IS_DSI_VIDEO_MODE_TYPE(VidCfg->Mode)); assert_param(IS_DSI_LP_COMMAND(VidCfg->LPCommandEnable)); assert_param(IS_DSI_LP_HFP(VidCfg->LPHorizontalFrontPorchEnable)); assert_param(IS_DSI_LP_HBP(VidCfg->LPHorizontalBackPorchEnable)); assert_param(IS_DSI_LP_VACTIVE(VidCfg->LPVerticalActiveEnable)); assert_param(IS_DSI_LP_VFP(VidCfg->LPVerticalFrontPorchEnable)); assert_param(IS_DSI_LP_VBP(VidCfg->LPVerticalBackPorchEnable)); assert_param(IS_DSI_LP_VSYNC(VidCfg->LPVerticalSyncActiveEnable)); assert_param(IS_DSI_FBTAA(VidCfg->FrameBTAAcknowledgeEnable)); assert_param(IS_DSI_DE_POLARITY(VidCfg->DEPolarity)); assert_param(IS_DSI_VSYNC_POLARITY(VidCfg->VSPolarity)); assert_param(IS_DSI_HSYNC_POLARITY(VidCfg->HSPolarity)); /* Check the LooselyPacked variant only in 18-bit mode */ if (VidCfg->ColorCoding == DSI_RGB666) { assert_param(IS_DSI_LOOSELY_PACKED(VidCfg->LooselyPacked)); } /* Select video mode by resetting CMDM and DSIM bits */ hdsi->Instance->MCR &= ~DSI_MCR_CMDM; hdsi->Instance->WCFGR &= ~DSI_WCFGR_DSIM; /* Configure the video mode transmission type */ hdsi->Instance->VMCR &= ~DSI_VMCR_VMT; hdsi->Instance->VMCR |= VidCfg->Mode; /* Configure the video packet size */ hdsi->Instance->VPCR &= ~DSI_VPCR_VPSIZE; hdsi->Instance->VPCR |= VidCfg->PacketSize; /* Set the chunks number to be transmitted through the DSI link */ hdsi->Instance->VCCR &= ~DSI_VCCR_NUMC; hdsi->Instance->VCCR |= VidCfg->NumberOfChunks; /* Set the size of the null packet */ hdsi->Instance->VNPCR &= ~DSI_VNPCR_NPSIZE; hdsi->Instance->VNPCR |= VidCfg->NullPacketSize; /* Select the virtual channel for the LTDC interface traffic */ hdsi->Instance->LVCIDR &= ~DSI_LVCIDR_VCID; hdsi->Instance->LVCIDR |= VidCfg->VirtualChannelID; /* Configure the polarity of control signals */ hdsi->Instance->LPCR &= ~(DSI_LPCR_DEP | DSI_LPCR_VSP | DSI_LPCR_HSP); hdsi->Instance->LPCR |= (VidCfg->DEPolarity | VidCfg->VSPolarity | VidCfg->HSPolarity); /* Select the color coding for the host */ hdsi->Instance->LCOLCR &= ~DSI_LCOLCR_COLC; hdsi->Instance->LCOLCR |= VidCfg->ColorCoding; /* Select the color coding for the wrapper */ hdsi->Instance->WCFGR &= ~DSI_WCFGR_COLMUX; hdsi->Instance->WCFGR |= ((VidCfg->ColorCoding) << 1U); /* Enable/disable the loosely packed variant to 18-bit configuration */ if (VidCfg->ColorCoding == DSI_RGB666) { hdsi->Instance->LCOLCR &= ~DSI_LCOLCR_LPE; hdsi->Instance->LCOLCR |= VidCfg->LooselyPacked; } /* Set the Horizontal Synchronization Active (HSA) in lane byte clock cycles */ hdsi->Instance->VHSACR &= ~DSI_VHSACR_HSA; hdsi->Instance->VHSACR |= VidCfg->HorizontalSyncActive; /* Set the Horizontal 
Back Porch (HBP) in lane byte clock cycles */ hdsi->Instance->VHBPCR &= ~DSI_VHBPCR_HBP; hdsi->Instance->VHBPCR |= VidCfg->HorizontalBackPorch; /* Set the total line time (HLINE=HSA+HBP+HACT+HFP) in lane byte clock cycles */ hdsi->Instance->VLCR &= ~DSI_VLCR_HLINE; hdsi->Instance->VLCR |= VidCfg->HorizontalLine; /* Set the Vertical Synchronization Active (VSA) */ hdsi->Instance->VVSACR &= ~DSI_VVSACR_VSA; hdsi->Instance->VVSACR |= VidCfg->VerticalSyncActive; /* Set the Vertical Back Porch (VBP)*/ hdsi->Instance->VVBPCR &= ~DSI_VVBPCR_VBP; hdsi->Instance->VVBPCR |= VidCfg->VerticalBackPorch; /* Set the Vertical Front Porch (VFP)*/ hdsi->Instance->VVFPCR &= ~DSI_VVFPCR_VFP; hdsi->Instance->VVFPCR |= VidCfg->VerticalFrontPorch; /* Set the Vertical Active period*/ hdsi->Instance->VVACR &= ~DSI_VVACR_VA; hdsi->Instance->VVACR |= VidCfg->VerticalActive; /* Configure the command transmission mode */ hdsi->Instance->VMCR &= ~DSI_VMCR_LPCE; hdsi->Instance->VMCR |= VidCfg->LPCommandEnable; /* Low power largest packet size */ hdsi->Instance->LPMCR &= ~DSI_LPMCR_LPSIZE; hdsi->Instance->LPMCR |= ((VidCfg->LPLargestPacketSize) << 16U); /* Low power VACT largest packet size */ hdsi->Instance->LPMCR &= ~DSI_LPMCR_VLPSIZE; hdsi->Instance->LPMCR |= VidCfg->LPVACTLargestPacketSize; /* Enable LP transition in HFP period */ hdsi->Instance->VMCR &= ~DSI_VMCR_LPHFPE; hdsi->Instance->VMCR |= VidCfg->LPHorizontalFrontPorchEnable; /* Enable LP transition in HBP period */ hdsi->Instance->VMCR &= ~DSI_VMCR_LPHBPE; hdsi->Instance->VMCR |= VidCfg->LPHorizontalBackPorchEnable; /* Enable LP transition in VACT period */ hdsi->Instance->VMCR &= ~DSI_VMCR_LPVAE; hdsi->Instance->VMCR |= VidCfg->LPVerticalActiveEnable; /* Enable LP transition in VFP period */ hdsi->Instance->VMCR &= ~DSI_VMCR_LPVFPE; hdsi->Instance->VMCR |= VidCfg->LPVerticalFrontPorchEnable; /* Enable LP transition in VBP period */ hdsi->Instance->VMCR &= ~DSI_VMCR_LPVBPE; hdsi->Instance->VMCR |= VidCfg->LPVerticalBackPorchEnable; /* Enable LP transition in vertical sync period */ hdsi->Instance->VMCR &= ~DSI_VMCR_LPVSAE; hdsi->Instance->VMCR |= VidCfg->LPVerticalSyncActiveEnable; /* Enable the request for an acknowledge response at the end of a frame */ hdsi->Instance->VMCR &= ~DSI_VMCR_FBTAAE; hdsi->Instance->VMCR |= VidCfg->FrameBTAAcknowledgeEnable; /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Select adapted command mode and configure the corresponding parameters * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. 
* @param CmdCfg pointer to a DSI_CmdCfgTypeDef structure that contains * the DSI command mode configuration parameters * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_ConfigAdaptedCommandMode(DSI_HandleTypeDef *hdsi, DSI_CmdCfgTypeDef *CmdCfg) { /* Process locked */ __HAL_LOCK(hdsi); /* Check the parameters */ assert_param(IS_DSI_COLOR_CODING(CmdCfg->ColorCoding)); assert_param(IS_DSI_TE_SOURCE(CmdCfg->TearingEffectSource)); assert_param(IS_DSI_TE_POLARITY(CmdCfg->TearingEffectPolarity)); assert_param(IS_DSI_AUTOMATIC_REFRESH(CmdCfg->AutomaticRefresh)); assert_param(IS_DSI_VS_POLARITY(CmdCfg->VSyncPol)); assert_param(IS_DSI_TE_ACK_REQUEST(CmdCfg->TEAcknowledgeRequest)); assert_param(IS_DSI_DE_POLARITY(CmdCfg->DEPolarity)); assert_param(IS_DSI_VSYNC_POLARITY(CmdCfg->VSPolarity)); assert_param(IS_DSI_HSYNC_POLARITY(CmdCfg->HSPolarity)); /* Select command mode by setting CMDM and DSIM bits */ hdsi->Instance->MCR |= DSI_MCR_CMDM; hdsi->Instance->WCFGR &= ~DSI_WCFGR_DSIM; hdsi->Instance->WCFGR |= DSI_WCFGR_DSIM; /* Select the virtual channel for the LTDC interface traffic */ hdsi->Instance->LVCIDR &= ~DSI_LVCIDR_VCID; hdsi->Instance->LVCIDR |= CmdCfg->VirtualChannelID; /* Configure the polarity of control signals */ hdsi->Instance->LPCR &= ~(DSI_LPCR_DEP | DSI_LPCR_VSP | DSI_LPCR_HSP); hdsi->Instance->LPCR |= (CmdCfg->DEPolarity | CmdCfg->VSPolarity | CmdCfg->HSPolarity); /* Select the color coding for the host */ hdsi->Instance->LCOLCR &= ~DSI_LCOLCR_COLC; hdsi->Instance->LCOLCR |= CmdCfg->ColorCoding; /* Select the color coding for the wrapper */ hdsi->Instance->WCFGR &= ~DSI_WCFGR_COLMUX; hdsi->Instance->WCFGR |= ((CmdCfg->ColorCoding) << 1U); /* Configure the maximum allowed size for write memory command */ hdsi->Instance->LCCR &= ~DSI_LCCR_CMDSIZE; hdsi->Instance->LCCR |= CmdCfg->CommandSize; /* Configure the tearing effect source and polarity and select the refresh mode */ hdsi->Instance->WCFGR &= ~(DSI_WCFGR_TESRC | DSI_WCFGR_TEPOL | DSI_WCFGR_AR | DSI_WCFGR_VSPOL); hdsi->Instance->WCFGR |= (CmdCfg->TearingEffectSource | CmdCfg->TearingEffectPolarity | CmdCfg->AutomaticRefresh | CmdCfg->VSyncPol); /* Configure the tearing effect acknowledge request */ hdsi->Instance->CMCR &= ~DSI_CMCR_TEARE; hdsi->Instance->CMCR |= CmdCfg->TEAcknowledgeRequest; /* Enable the Tearing Effect interrupt */ __HAL_DSI_ENABLE_IT(hdsi, DSI_IT_TE); /* Enable the End of Refresh interrupt */ __HAL_DSI_ENABLE_IT(hdsi, DSI_IT_ER); /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Configure command transmission mode: High-speed or Low-power * and enable/disable acknowledge request after packet transmission * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. 
* @param LPCmd pointer to a DSI_LPCmdTypeDef structure that contains * the DSI command transmission mode configuration parameters * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_ConfigCommand(DSI_HandleTypeDef *hdsi, DSI_LPCmdTypeDef *LPCmd) { /* Process locked */ __HAL_LOCK(hdsi); assert_param(IS_DSI_LP_GSW0P(LPCmd->LPGenShortWriteNoP)); assert_param(IS_DSI_LP_GSW1P(LPCmd->LPGenShortWriteOneP)); assert_param(IS_DSI_LP_GSW2P(LPCmd->LPGenShortWriteTwoP)); assert_param(IS_DSI_LP_GSR0P(LPCmd->LPGenShortReadNoP)); assert_param(IS_DSI_LP_GSR1P(LPCmd->LPGenShortReadOneP)); assert_param(IS_DSI_LP_GSR2P(LPCmd->LPGenShortReadTwoP)); assert_param(IS_DSI_LP_GLW(LPCmd->LPGenLongWrite)); assert_param(IS_DSI_LP_DSW0P(LPCmd->LPDcsShortWriteNoP)); assert_param(IS_DSI_LP_DSW1P(LPCmd->LPDcsShortWriteOneP)); assert_param(IS_DSI_LP_DSR0P(LPCmd->LPDcsShortReadNoP)); assert_param(IS_DSI_LP_DLW(LPCmd->LPDcsLongWrite)); assert_param(IS_DSI_LP_MRDP(LPCmd->LPMaxReadPacket)); assert_param(IS_DSI_ACK_REQUEST(LPCmd->AcknowledgeRequest)); /* Select High-speed or Low-power for command transmission */ hdsi->Instance->CMCR &= ~(DSI_CMCR_GSW0TX | \ DSI_CMCR_GSW1TX | \ DSI_CMCR_GSW2TX | \ DSI_CMCR_GSR0TX | \ DSI_CMCR_GSR1TX | \ DSI_CMCR_GSR2TX | \ DSI_CMCR_GLWTX | \ DSI_CMCR_DSW0TX | \ DSI_CMCR_DSW1TX | \ DSI_CMCR_DSR0TX | \ DSI_CMCR_DLWTX | \ DSI_CMCR_MRDPS); hdsi->Instance->CMCR |= (LPCmd->LPGenShortWriteNoP | \ LPCmd->LPGenShortWriteOneP | \ LPCmd->LPGenShortWriteTwoP | \ LPCmd->LPGenShortReadNoP | \ LPCmd->LPGenShortReadOneP | \ LPCmd->LPGenShortReadTwoP | \ LPCmd->LPGenLongWrite | \ LPCmd->LPDcsShortWriteNoP | \ LPCmd->LPDcsShortWriteOneP | \ LPCmd->LPDcsShortReadNoP | \ LPCmd->LPDcsLongWrite | \ LPCmd->LPMaxReadPacket); /* Configure the acknowledge request after each packet transmission */ hdsi->Instance->CMCR &= ~DSI_CMCR_ARE; hdsi->Instance->CMCR |= LPCmd->AcknowledgeRequest; /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Configure the flow control parameters * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param FlowControl flow control feature(s) to be enabled. * This parameter can be any combination of @ref DSI_FlowControl. * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_ConfigFlowControl(DSI_HandleTypeDef *hdsi, uint32_t FlowControl) { /* Process locked */ __HAL_LOCK(hdsi); /* Check the parameters */ assert_param(IS_DSI_FLOW_CONTROL(FlowControl)); /* Set the DSI Host Protocol Configuration Register */ hdsi->Instance->PCR &= ~DSI_FLOW_CONTROL_ALL; hdsi->Instance->PCR |= FlowControl; /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Configure the DSI PHY timer parameters * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param PhyTimers DSI_PHY_TimerTypeDef structure that contains * the DSI PHY timing parameters * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_ConfigPhyTimer(DSI_HandleTypeDef *hdsi, DSI_PHY_TimerTypeDef *PhyTimers) { uint32_t maxTime; /* Process locked */ __HAL_LOCK(hdsi); maxTime = (PhyTimers->ClockLaneLP2HSTime > PhyTimers->ClockLaneHS2LPTime) ? PhyTimers->ClockLaneLP2HSTime : PhyTimers->ClockLaneHS2LPTime; /* Clock lane timer configuration */ /* In Automatic Clock Lane control mode, the DSI Host can turn off the clock lane between two High-Speed transmission. To do so, the DSI Host calculates the time required for the clock lane to change from HighSpeed to Low-Power and from Low-Power to High-Speed. 
This timings are configured by the HS2LP_TIME and LP2HS_TIME in the DSI Host Clock Lane Timer Configuration Register (DSI_CLTCR). But the DSI Host is not calculating LP2HS_TIME + HS2LP_TIME but 2 x HS2LP_TIME. Workaround : Configure HS2LP_TIME and LP2HS_TIME with the same value being the max of HS2LP_TIME or LP2HS_TIME. */ hdsi->Instance->CLTCR &= ~(DSI_CLTCR_LP2HS_TIME | DSI_CLTCR_HS2LP_TIME); hdsi->Instance->CLTCR |= (maxTime | ((maxTime) << 16U)); /* Data lane timer configuration */ hdsi->Instance->DLTCR &= ~(DSI_DLTCR_MRD_TIME | DSI_DLTCR_LP2HS_TIME | DSI_DLTCR_HS2LP_TIME); hdsi->Instance->DLTCR |= (PhyTimers->DataLaneMaxReadTime | ((PhyTimers->DataLaneLP2HSTime) << 16U) | ((PhyTimers->DataLaneHS2LPTime) << 24U)); /* Configure the wait period to request HS transmission after a stop state */ hdsi->Instance->PCONFR &= ~DSI_PCONFR_SW_TIME; hdsi->Instance->PCONFR |= ((PhyTimers->StopWaitTime) << 8U); /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Configure the DSI HOST timeout parameters * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param HostTimeouts DSI_HOST_TimeoutTypeDef structure that contains * the DSI host timeout parameters * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_ConfigHostTimeouts(DSI_HandleTypeDef *hdsi, DSI_HOST_TimeoutTypeDef *HostTimeouts) { /* Process locked */ __HAL_LOCK(hdsi); /* Set the timeout clock division factor */ hdsi->Instance->CCR &= ~DSI_CCR_TOCKDIV; hdsi->Instance->CCR |= ((HostTimeouts->TimeoutCkdiv) << 8U); /* High-speed transmission timeout */ hdsi->Instance->TCCR[0U] &= ~DSI_TCCR0_HSTX_TOCNT; hdsi->Instance->TCCR[0U] |= ((HostTimeouts->HighSpeedTransmissionTimeout) << 16U); /* Low-power reception timeout */ hdsi->Instance->TCCR[0U] &= ~DSI_TCCR0_LPRX_TOCNT; hdsi->Instance->TCCR[0U] |= HostTimeouts->LowPowerReceptionTimeout; /* High-speed read timeout */ hdsi->Instance->TCCR[1U] &= ~DSI_TCCR1_HSRD_TOCNT; hdsi->Instance->TCCR[1U] |= HostTimeouts->HighSpeedReadTimeout; /* Low-power read timeout */ hdsi->Instance->TCCR[2U] &= ~DSI_TCCR2_LPRD_TOCNT; hdsi->Instance->TCCR[2U] |= HostTimeouts->LowPowerReadTimeout; /* High-speed write timeout */ hdsi->Instance->TCCR[3U] &= ~DSI_TCCR3_HSWR_TOCNT; hdsi->Instance->TCCR[3U] |= HostTimeouts->HighSpeedWriteTimeout; /* High-speed write presp mode */ hdsi->Instance->TCCR[3U] &= ~DSI_TCCR3_PM; hdsi->Instance->TCCR[3U] |= HostTimeouts->HighSpeedWritePrespMode; /* Low-speed write timeout */ hdsi->Instance->TCCR[4U] &= ~DSI_TCCR4_LPWR_TOCNT; hdsi->Instance->TCCR[4U] |= HostTimeouts->LowPowerWriteTimeout; /* BTA timeout */ hdsi->Instance->TCCR[5U] &= ~DSI_TCCR5_BTA_TOCNT; hdsi->Instance->TCCR[5U] |= HostTimeouts->BTATimeout; /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Start the DSI module * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_Start(DSI_HandleTypeDef *hdsi) { /* Process locked */ __HAL_LOCK(hdsi); /* Enable the DSI host */ __HAL_DSI_ENABLE(hdsi); /* Enable the DSI wrapper */ __HAL_DSI_WRAPPER_ENABLE(hdsi); /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Stop the DSI module * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. 
* @retval HAL status */ HAL_StatusTypeDef HAL_DSI_Stop(DSI_HandleTypeDef *hdsi) { /* Process locked */ __HAL_LOCK(hdsi); /* Disable the DSI host */ __HAL_DSI_DISABLE(hdsi); /* Disable the DSI wrapper */ __HAL_DSI_WRAPPER_DISABLE(hdsi); /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Refresh the display in command mode * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_Refresh(DSI_HandleTypeDef *hdsi) { /* Process locked */ __HAL_LOCK(hdsi); /* Update the display */ hdsi->Instance->WCR |= DSI_WCR_LTDCEN; /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Controls the display color mode in Video mode * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param ColorMode Color mode (full or 8-colors). * This parameter can be any value of @ref DSI_Color_Mode * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_ColorMode(DSI_HandleTypeDef *hdsi, uint32_t ColorMode) { /* Process locked */ __HAL_LOCK(hdsi); /* Check the parameters */ assert_param(IS_DSI_COLOR_MODE(ColorMode)); /* Update the display color mode */ hdsi->Instance->WCR &= ~DSI_WCR_COLM; hdsi->Instance->WCR |= ColorMode; /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Control the display shutdown in Video mode * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param Shutdown Shut-down (Display-ON or Display-OFF). * This parameter can be any value of @ref DSI_ShutDown * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_Shutdown(DSI_HandleTypeDef *hdsi, uint32_t Shutdown) { /* Process locked */ __HAL_LOCK(hdsi); /* Check the parameters */ assert_param(IS_DSI_SHUT_DOWN(Shutdown)); /* Update the display Shutdown */ hdsi->Instance->WCR &= ~DSI_WCR_SHTDN; hdsi->Instance->WCR |= Shutdown; /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief write short DCS or short Generic command * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param ChannelID Virtual channel ID. * @param Mode DSI short packet data type. * This parameter can be any value of @ref DSI_SHORT_WRITE_PKT_Data_Type. * @param Param1 DSC command or first generic parameter. * This parameter can be any value of @ref DSI_DCS_Command or a * generic command code. * @param Param2 DSC parameter or second generic parameter. * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_ShortWrite(DSI_HandleTypeDef *hdsi, uint32_t ChannelID, uint32_t Mode, uint32_t Param1, uint32_t Param2) { uint32_t tickstart; /* Process locked */ __HAL_LOCK(hdsi); /* Check the parameters */ assert_param(IS_DSI_SHORT_WRITE_PACKET_TYPE(Mode)); /* Get tick */ tickstart = HAL_GetTick(); /* Wait for Command FIFO Empty */ while ((hdsi->Instance->GPSR & DSI_GPSR_CMDFE) == 0U) { /* Check for the Timeout */ if ((HAL_GetTick() - tickstart ) > DSI_TIMEOUT_VALUE) { /* Process Unlocked */ __HAL_UNLOCK(hdsi); return HAL_TIMEOUT; } } /* Configure the packet to send a short DCS command with 0 or 1 parameter */ DSI_ConfigPacketHeader(hdsi->Instance, ChannelID, Mode, Param1, Param2); /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief write long DCS or long Generic command * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param ChannelID Virtual channel ID. 
* @param Mode DSI long packet data type. * This parameter can be any value of @ref DSI_LONG_WRITE_PKT_Data_Type. * @param NbParams Number of parameters. * @param Param1 DSC command or first generic parameter. * This parameter can be any value of @ref DSI_DCS_Command or a * generic command code * @param ParametersTable Pointer to parameter values table. * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_LongWrite(DSI_HandleTypeDef *hdsi, uint32_t ChannelID, uint32_t Mode, uint32_t NbParams, uint32_t Param1, uint8_t* ParametersTable) { uint32_t uicounter, nbBytes, count; uint32_t tickstart; uint32_t fifoword; uint8_t* pparams = ParametersTable; /* Process locked */ __HAL_LOCK(hdsi); /* Check the parameters */ assert_param(IS_DSI_LONG_WRITE_PACKET_TYPE(Mode)); /* Get tick */ tickstart = HAL_GetTick(); /* Wait for Command FIFO Empty */ while ((hdsi->Instance->GPSR & DSI_GPSR_CMDFE) == RESET) { /* Check for the Timeout */ if ((HAL_GetTick() - tickstart ) > DSI_TIMEOUT_VALUE) { /* Process Unlocked */ __HAL_UNLOCK(hdsi); return HAL_TIMEOUT; } } /* Set the DCS code on payload byte 1, and the other parameters on the write FIFO command*/ fifoword = Param1; nbBytes = (NbParams < 3U) ? NbParams : 3U; for (count = 0U; count < nbBytes; count++) { fifoword |= (((uint32_t)(*(pparams + count))) << (8U + (8U * count))); } hdsi->Instance->GPDR = fifoword; uicounter = NbParams - nbBytes; pparams += nbBytes; /* Set the Next parameters on the write FIFO command*/ while (uicounter != 0U) { nbBytes = (uicounter < 4U) ? uicounter : 4U; fifoword = 0U; for (count = 0U; count < nbBytes; count++) { fifoword |= (((uint32_t)(*(pparams + count))) << (8U * count)); } hdsi->Instance->GPDR = fifoword; uicounter -= nbBytes; pparams += nbBytes; } /* Configure the packet to send a long DCS command */ DSI_ConfigPacketHeader(hdsi->Instance, ChannelID, Mode, ((NbParams + 1U) & 0x00FFU), (((NbParams + 1U) & 0xFF00U) >> 8U)); /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Read command (DCS or generic) * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param ChannelNbr Virtual channel ID * @param Array pointer to a buffer to store the payload of a read back operation. * @param Size Data size to be read (in byte). * @param Mode DSI read packet data type. * This parameter can be any value of @ref DSI_SHORT_READ_PKT_Data_Type. * @param DCSCmd DCS get/read command. * @param ParametersTable Pointer to parameter values table. 
* @retval HAL status */ HAL_StatusTypeDef HAL_DSI_Read(DSI_HandleTypeDef *hdsi, uint32_t ChannelNbr, uint8_t* Array, uint32_t Size, uint32_t Mode, uint32_t DCSCmd, uint8_t* ParametersTable) { uint32_t tickstart; uint8_t* pdata = Array; uint32_t datasize = Size; /* Process locked */ __HAL_LOCK(hdsi); /* Check the parameters */ assert_param(IS_DSI_READ_PACKET_TYPE(Mode)); if (datasize > 2U) { /* set max return packet size */ if (HAL_DSI_ShortWrite(hdsi, ChannelNbr, DSI_MAX_RETURN_PKT_SIZE, ((datasize) & 0xFFU), (((datasize) >> 8U) & 0xFFU)) != HAL_OK) { /* Process Unlocked */ __HAL_UNLOCK(hdsi); return HAL_ERROR; } } /* Configure the packet to read command */ if (Mode == DSI_DCS_SHORT_PKT_READ) { DSI_ConfigPacketHeader(hdsi->Instance, ChannelNbr, Mode, DCSCmd, 0U); } else if (Mode == DSI_GEN_SHORT_PKT_READ_P0) { DSI_ConfigPacketHeader(hdsi->Instance, ChannelNbr, Mode, 0U, 0U); } else if (Mode == DSI_GEN_SHORT_PKT_READ_P1) { DSI_ConfigPacketHeader(hdsi->Instance, ChannelNbr, Mode, ParametersTable[0U], 0U); } else if (Mode == DSI_GEN_SHORT_PKT_READ_P2) { DSI_ConfigPacketHeader(hdsi->Instance, ChannelNbr, Mode, ParametersTable[0U], ParametersTable[1U]); } else { /* Process Unlocked */ __HAL_UNLOCK(hdsi); return HAL_ERROR; } /* Get tick */ tickstart = HAL_GetTick(); /* Check that the payload read FIFO is not empty */ while ((hdsi->Instance->GPSR & DSI_GPSR_PRDFE) == DSI_GPSR_PRDFE) { /* Check for the Timeout */ if ((HAL_GetTick() - tickstart ) > DSI_TIMEOUT_VALUE) { /* Process Unlocked */ __HAL_UNLOCK(hdsi); return HAL_TIMEOUT; } } /* Get the first byte */ *((uint32_t *)pdata) = (hdsi->Instance->GPDR); if (datasize > 4U) { datasize -= 4U; pdata += 4U; } else { /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /* Get tick */ tickstart = HAL_GetTick(); /* Get the remaining bytes if any */ while (((int)(datasize)) > 0) { if ((hdsi->Instance->GPSR & DSI_GPSR_PRDFE) == 0U) { *((uint32_t *)pdata) = (hdsi->Instance->GPDR); datasize -= 4U; pdata += 4U; } /* Check for the Timeout */ if ((HAL_GetTick() - tickstart ) > DSI_TIMEOUT_VALUE) { /* Process Unlocked */ __HAL_UNLOCK(hdsi); return HAL_TIMEOUT; } } /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Enter the ULPM (Ultra Low Power Mode) with the D-PHY PLL running * (only data lanes are in ULPM) * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. 
* @retval HAL status */ HAL_StatusTypeDef HAL_DSI_EnterULPMData(DSI_HandleTypeDef *hdsi) { uint32_t tickstart; /* Process locked */ __HAL_LOCK(hdsi); /* ULPS Request on Data Lanes */ hdsi->Instance->PUCR |= DSI_PUCR_URDL; /* Get tick */ tickstart = HAL_GetTick(); /* Wait until the D-PHY active lanes enter into ULPM */ if ((hdsi->Instance->PCONFR & DSI_PCONFR_NL) == DSI_ONE_DATA_LANE) { while ((hdsi->Instance->PSR & DSI_PSR_UAN0) != RESET) { /* Check for the Timeout */ if ((HAL_GetTick() - tickstart ) > DSI_TIMEOUT_VALUE) { /* Process Unlocked */ __HAL_UNLOCK(hdsi); return HAL_TIMEOUT; } } } else if ((hdsi->Instance->PCONFR & DSI_PCONFR_NL) == DSI_TWO_DATA_LANES) { while ((hdsi->Instance->PSR & (DSI_PSR_UAN0 | DSI_PSR_UAN1)) != RESET) { /* Check for the Timeout */ if ((HAL_GetTick() - tickstart ) > DSI_TIMEOUT_VALUE) { /* Process Unlocked */ __HAL_UNLOCK(hdsi); return HAL_TIMEOUT; } } } else { /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_ERROR; } /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Exit the ULPM (Ultra Low Power Mode) with the D-PHY PLL running * (only data lanes are in ULPM) * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_ExitULPMData(DSI_HandleTypeDef *hdsi) { uint32_t tickstart; /* Process locked */ __HAL_LOCK(hdsi); /* Exit ULPS on Data Lanes */ hdsi->Instance->PUCR |= DSI_PUCR_UEDL; /* Get tick */ tickstart = HAL_GetTick(); /* Wait until all active lanes exit ULPM */ if ((hdsi->Instance->PCONFR & DSI_PCONFR_NL) == DSI_ONE_DATA_LANE) { while ((hdsi->Instance->PSR & DSI_PSR_UAN0) != DSI_PSR_UAN0) { /* Check for the Timeout */ if ((HAL_GetTick() - tickstart ) > DSI_TIMEOUT_VALUE) { /* Process Unlocked */ __HAL_UNLOCK(hdsi); return HAL_TIMEOUT; } } } else if ((hdsi->Instance->PCONFR & DSI_PCONFR_NL) == DSI_TWO_DATA_LANES) { while ((hdsi->Instance->PSR & (DSI_PSR_UAN0 | DSI_PSR_UAN1)) != (DSI_PSR_UAN0 | DSI_PSR_UAN1)) { /* Check for the Timeout */ if ((HAL_GetTick() - tickstart ) > DSI_TIMEOUT_VALUE) { /* Process Unlocked */ __HAL_UNLOCK(hdsi); return HAL_TIMEOUT; } } } else { /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_ERROR; } /* wait for 1 ms*/ HAL_Delay(1U); /* De-assert the ULPM requests and the ULPM exit bits */ hdsi->Instance->PUCR = 0U; /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Enter the ULPM (Ultra Low Power Mode) with the D-PHY PLL turned off * (both data and clock lanes are in ULPM) * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. 
* @retval HAL status */ HAL_StatusTypeDef HAL_DSI_EnterULPM(DSI_HandleTypeDef *hdsi) { uint32_t tickstart; /* Process locked */ __HAL_LOCK(hdsi); /* Clock lane configuration: no more HS request */ hdsi->Instance->CLCR &= ~DSI_CLCR_DPCC; /* Use system PLL as byte lane clock source before stopping DSIPHY clock source */ __HAL_RCC_DSI_CONFIG(RCC_DSICLKSOURCE_PLLR); /* ULPS Request on Clock and Data Lanes */ hdsi->Instance->PUCR |= (DSI_PUCR_URCL | DSI_PUCR_URDL); /* Get tick */ tickstart = HAL_GetTick(); /* Wait until all active lanes exit ULPM */ if ((hdsi->Instance->PCONFR & DSI_PCONFR_NL) == DSI_ONE_DATA_LANE) { while ((hdsi->Instance->PSR & (DSI_PSR_UAN0 | DSI_PSR_UANC)) != RESET) { /* Check for the Timeout */ if ((HAL_GetTick() - tickstart ) > DSI_TIMEOUT_VALUE) { /* Process Unlocked */ __HAL_UNLOCK(hdsi); return HAL_TIMEOUT; } } } else if ((hdsi->Instance->PCONFR & DSI_PCONFR_NL) == DSI_TWO_DATA_LANES) { while ((hdsi->Instance->PSR & (DSI_PSR_UAN0 | DSI_PSR_UAN1 | DSI_PSR_UANC)) != RESET) { /* Check for the Timeout */ if ((HAL_GetTick() - tickstart ) > DSI_TIMEOUT_VALUE) { /* Process Unlocked */ __HAL_UNLOCK(hdsi); return HAL_TIMEOUT; } } } else { /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_ERROR; } /* Turn off the DSI PLL */ __HAL_DSI_PLL_DISABLE(hdsi); /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Exit the ULPM (Ultra Low Power Mode) with the D-PHY PLL turned off * (both data and clock lanes are in ULPM) * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_ExitULPM(DSI_HandleTypeDef *hdsi) { uint32_t tickstart; /* Process locked */ __HAL_LOCK(hdsi); /* Turn on the DSI PLL */ __HAL_DSI_PLL_ENABLE(hdsi); /* Get tick */ tickstart = HAL_GetTick(); /* Wait for the lock of the PLL */ while (__HAL_DSI_GET_FLAG(hdsi, DSI_FLAG_PLLLS) == RESET) { /* Check for the Timeout */ if ((HAL_GetTick() - tickstart ) > DSI_TIMEOUT_VALUE) { /* Process Unlocked */ __HAL_UNLOCK(hdsi); return HAL_TIMEOUT; } } /* Exit ULPS on Clock and Data Lanes */ hdsi->Instance->PUCR |= (DSI_PUCR_UECL | DSI_PUCR_UEDL); /* Get tick */ tickstart = HAL_GetTick(); /* Wait until all active lanes exit ULPM */ if ((hdsi->Instance->PCONFR & DSI_PCONFR_NL) == DSI_ONE_DATA_LANE) { while ((hdsi->Instance->PSR & (DSI_PSR_UAN0 | DSI_PSR_UANC)) != (DSI_PSR_UAN0 | DSI_PSR_UANC)) { /* Check for the Timeout */ if ((HAL_GetTick() - tickstart ) > DSI_TIMEOUT_VALUE) { /* Process Unlocked */ __HAL_UNLOCK(hdsi); return HAL_TIMEOUT; } } } else if ((hdsi->Instance->PCONFR & DSI_PCONFR_NL) == DSI_TWO_DATA_LANES) { while ((hdsi->Instance->PSR & (DSI_PSR_UAN0 | DSI_PSR_UAN1 | DSI_PSR_UANC)) != (DSI_PSR_UAN0 | DSI_PSR_UAN1 | DSI_PSR_UANC)) { /* Check for the Timeout */ if ((HAL_GetTick() - tickstart ) > DSI_TIMEOUT_VALUE) { /* Process Unlocked */ __HAL_UNLOCK(hdsi); return HAL_TIMEOUT; } } } else { /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_ERROR; } /* wait for 1 ms */ HAL_Delay(1U); /* De-assert the ULPM requests and the ULPM exit bits */ hdsi->Instance->PUCR = 0U; /* Switch the lanbyteclock source in the RCC from system PLL to D-PHY */ __HAL_RCC_DSI_CONFIG(RCC_DSICLKSOURCE_DSIPHY); /* Restore clock lane configuration to HS */ hdsi->Instance->CLCR |= DSI_CLCR_DPCC; /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Start test pattern generation * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. 
* @param Mode Pattern generator mode * This parameter can be one of the following values: * 0 : Color bars (horizontal or vertical) * 1 : BER pattern (vertical only) * @param Orientation Pattern generator orientation * This parameter can be one of the following values: * 0 : Vertical color bars * 1 : Horizontal color bars * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_PatternGeneratorStart(DSI_HandleTypeDef *hdsi, uint32_t Mode, uint32_t Orientation) { /* Process locked */ __HAL_LOCK(hdsi); /* Configure pattern generator mode and orientation */ hdsi->Instance->VMCR &= ~(DSI_VMCR_PGM | DSI_VMCR_PGO); hdsi->Instance->VMCR |= ((Mode << 20U) | (Orientation << 24U)); /* Enable pattern generator by setting PGE bit */ hdsi->Instance->VMCR |= DSI_VMCR_PGE; /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Stop test pattern generation * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_PatternGeneratorStop(DSI_HandleTypeDef *hdsi) { /* Process locked */ __HAL_LOCK(hdsi); /* Disable pattern generator by clearing PGE bit */ hdsi->Instance->VMCR &= ~DSI_VMCR_PGE; /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Set Slew-Rate And Delay Tuning * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param CommDelay Communication delay to be adjusted. * This parameter can be any value of @ref DSI_Communication_Delay * @param Lane select between clock or data lanes. * This parameter can be any value of @ref DSI_Lane_Group * @param Value Custom value of the slew-rate or delay * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_SetSlewRateAndDelayTuning(DSI_HandleTypeDef *hdsi, uint32_t CommDelay, uint32_t Lane, uint32_t Value) { /* Process locked */ __HAL_LOCK(hdsi); /* Check function parameters */ assert_param(IS_DSI_COMMUNICATION_DELAY(CommDelay)); assert_param(IS_DSI_LANE_GROUP(Lane)); switch (CommDelay) { case DSI_SLEW_RATE_HSTX: if (Lane == DSI_CLOCK_LANE) { /* High-Speed Transmission Slew Rate Control on Clock Lane */ hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_HSTXSRCCL; hdsi->Instance->WPCR[1U] |= Value << 16U; } else if (Lane == DSI_DATA_LANES) { /* High-Speed Transmission Slew Rate Control on Data Lanes */ hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_HSTXSRCDL; hdsi->Instance->WPCR[1U] |= Value << 18U; } else { /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_ERROR; } break; case DSI_SLEW_RATE_LPTX: if (Lane == DSI_CLOCK_LANE) { /* Low-Power transmission Slew Rate Compensation on Clock Lane */ hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_LPSRCCL; hdsi->Instance->WPCR[1U] |= Value << 6U; } else if (Lane == DSI_DATA_LANES) { /* Low-Power transmission Slew Rate Compensation on Data Lanes */ hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_LPSRCDL; hdsi->Instance->WPCR[1U] |= Value << 8U; } else { /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_ERROR; } break; case DSI_HS_DELAY: if (Lane == DSI_CLOCK_LANE) { /* High-Speed Transmission Delay on Clock Lane */ hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_HSTXDCL; hdsi->Instance->WPCR[1U] |= Value; } else if (Lane == DSI_DATA_LANES) { /* High-Speed Transmission Delay on Data Lanes */ hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_HSTXDDL; hdsi->Instance->WPCR[1U] |= Value << 2U; } else { /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_ERROR; } break; default: break; } /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Low-Power Reception 
Filter Tuning * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param Frequency cutoff frequency of low-pass filter at the input of LPRX * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_SetLowPowerRXFilter(DSI_HandleTypeDef *hdsi, uint32_t Frequency) { /* Process locked */ __HAL_LOCK(hdsi); /* Low-Power RX low-pass Filtering Tuning */ hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_LPRXFT; hdsi->Instance->WPCR[1U] |= Frequency << 25U; /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Activate an additional current path on all lanes to meet the SDDTx parameter * defined in the MIPI D-PHY specification * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param State ENABLE or DISABLE * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_SetSDD(DSI_HandleTypeDef *hdsi, FunctionalState State) { /* Process locked */ __HAL_LOCK(hdsi); /* Check function parameters */ assert_param(IS_FUNCTIONAL_STATE(State)); /* Activate/Disactivate additional current path on all lanes */ hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_SDDC; hdsi->Instance->WPCR[1U] |= ((uint32_t)State << 12U); /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Custom lane pins configuration * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param CustomLane Function to be applyed on selected lane. * This parameter can be any value of @ref DSI_CustomLane * @param Lane select between clock or data lane 0 or data lane 1. * This parameter can be any value of @ref DSI_Lane_Select * @param State ENABLE or DISABLE * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_SetLanePinsConfiguration(DSI_HandleTypeDef *hdsi, uint32_t CustomLane, uint32_t Lane, FunctionalState State) { /* Process locked */ __HAL_LOCK(hdsi); /* Check function parameters */ assert_param(IS_DSI_CUSTOM_LANE(CustomLane)); assert_param(IS_DSI_LANE(Lane)); assert_param(IS_FUNCTIONAL_STATE(State)); switch (CustomLane) { case DSI_SWAP_LANE_PINS: if (Lane == DSI_CLK_LANE) { /* Swap pins on clock lane */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_SWCL; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 6U); } else if (Lane == DSI_DATA_LANE0) { /* Swap pins on data lane 0 */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_SWDL0; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 7U); } else if (Lane == DSI_DATA_LANE1) { /* Swap pins on data lane 1 */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_SWDL1; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 8U); } else { /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_ERROR; } break; case DSI_INVERT_HS_SIGNAL: if (Lane == DSI_CLK_LANE) { /* Invert HS signal on clock lane */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_HSICL; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 9U); } else if (Lane == DSI_DATA_LANE0) { /* Invert HS signal on data lane 0 */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_HSIDL0; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 10U); } else if (Lane == DSI_DATA_LANE1) { /* Invert HS signal on data lane 1 */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_HSIDL1; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 11U); } else { /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_ERROR; } break; default: break; } /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Set custom timing for the PHY * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. 
* @param Timing PHY timing to be adjusted. * This parameter can be any value of @ref DSI_PHY_Timing * @param State ENABLE or DISABLE * @param Value Custom value of the timing * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_SetPHYTimings(DSI_HandleTypeDef *hdsi, uint32_t Timing, FunctionalState State, uint32_t Value) { /* Process locked */ __HAL_LOCK(hdsi); /* Check function parameters */ assert_param(IS_DSI_PHY_TIMING(Timing)); assert_param(IS_FUNCTIONAL_STATE(State)); switch (Timing) { case DSI_TCLK_POST: /* Enable/Disable custom timing setting */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_TCLKPOSTEN; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 27U); if (State != DISABLE) { /* Set custom value */ hdsi->Instance->WPCR[4U] &= ~DSI_WPCR4_TCLKPOST; hdsi->Instance->WPCR[4U] |= Value & DSI_WPCR4_TCLKPOST; } break; case DSI_TLPX_CLK: /* Enable/Disable custom timing setting */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_TLPXCEN; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 26U); if (State != DISABLE) { /* Set custom value */ hdsi->Instance->WPCR[3U] &= ~DSI_WPCR3_TLPXC; hdsi->Instance->WPCR[3U] |= (Value << 24U) & DSI_WPCR3_TLPXC; } break; case DSI_THS_EXIT: /* Enable/Disable custom timing setting */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_THSEXITEN; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 25U); if (State != DISABLE) { /* Set custom value */ hdsi->Instance->WPCR[3U] &= ~DSI_WPCR3_THSEXIT; hdsi->Instance->WPCR[3U] |= (Value << 16U) & DSI_WPCR3_THSEXIT; } break; case DSI_TLPX_DATA: /* Enable/Disable custom timing setting */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_TLPXDEN; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 24U); if (State != DISABLE) { /* Set custom value */ hdsi->Instance->WPCR[3U] &= ~DSI_WPCR3_TLPXD; hdsi->Instance->WPCR[3U] |= (Value << 8U) & DSI_WPCR3_TLPXD; } break; case DSI_THS_ZERO: /* Enable/Disable custom timing setting */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_THSZEROEN; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 23U); if (State != DISABLE) { /* Set custom value */ hdsi->Instance->WPCR[3U] &= ~DSI_WPCR3_THSZERO; hdsi->Instance->WPCR[3U] |= Value & DSI_WPCR3_THSZERO; } break; case DSI_THS_TRAIL: /* Enable/Disable custom timing setting */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_THSTRAILEN; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 22U); if (State != DISABLE) { /* Set custom value */ hdsi->Instance->WPCR[2U] &= ~DSI_WPCR2_THSTRAIL; hdsi->Instance->WPCR[2U] |= (Value << 24U) & DSI_WPCR2_THSTRAIL; } break; case DSI_THS_PREPARE: /* Enable/Disable custom timing setting */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_THSPREPEN; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 21U); if (State != DISABLE) { /* Set custom value */ hdsi->Instance->WPCR[2U] &= ~DSI_WPCR2_THSPREP; hdsi->Instance->WPCR[2U] |= (Value << 16U) & DSI_WPCR2_THSPREP; } break; case DSI_TCLK_ZERO: /* Enable/Disable custom timing setting */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_TCLKZEROEN; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 20U); if (State != DISABLE) { /* Set custom value */ hdsi->Instance->WPCR[2U] &= ~DSI_WPCR2_TCLKZERO; hdsi->Instance->WPCR[2U] |= (Value << 8U) & DSI_WPCR2_TCLKZERO; } break; case DSI_TCLK_PREPARE: /* Enable/Disable custom timing setting */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_TCLKPREPEN; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 19U); if (State != DISABLE) { /* Set custom value */ hdsi->Instance->WPCR[2U] &= ~DSI_WPCR2_TCLKPREP; hdsi->Instance->WPCR[2U] |= Value & DSI_WPCR2_TCLKPREP; } break; default: break; } /* Process unlocked */ __HAL_UNLOCK(hdsi); return 
HAL_OK; } /** * @brief Force the Clock/Data Lane in TX Stop Mode * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param Lane select between clock or data lanes. * This parameter can be any value of @ref DSI_Lane_Group * @param State ENABLE or DISABLE * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_ForceTXStopMode(DSI_HandleTypeDef *hdsi, uint32_t Lane, FunctionalState State) { /* Process locked */ __HAL_LOCK(hdsi); /* Check function parameters */ assert_param(IS_DSI_LANE_GROUP(Lane)); assert_param(IS_FUNCTIONAL_STATE(State)); if (Lane == DSI_CLOCK_LANE) { /* Force/Unforce the Clock Lane in TX Stop Mode */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_FTXSMCL; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 12U); } else if (Lane == DSI_DATA_LANES) { /* Force/Unforce the Data Lanes in TX Stop Mode */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_FTXSMDL; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 13U); } else { /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_ERROR; } /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Force LP Receiver in Low-Power Mode * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param State ENABLE or DISABLE * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_ForceRXLowPower(DSI_HandleTypeDef *hdsi, FunctionalState State) { /* Process locked */ __HAL_LOCK(hdsi); /* Check function parameters */ assert_param(IS_FUNCTIONAL_STATE(State)); /* Force/Unforce LP Receiver in Low-Power Mode */ hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_FLPRXLPM; hdsi->Instance->WPCR[1U] |= ((uint32_t)State << 22U); /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Force Data Lanes in RX Mode after a BTA * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param State ENABLE or DISABLE * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_ForceDataLanesInRX(DSI_HandleTypeDef *hdsi, FunctionalState State) { /* Process locked */ __HAL_LOCK(hdsi); /* Check function parameters */ assert_param(IS_FUNCTIONAL_STATE(State)); /* Force Data Lanes in RX Mode */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_TDDL; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 16U); /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Enable a pull-down on the lanes to prevent from floating states when unused * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @param State ENABLE or DISABLE * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_SetPullDown(DSI_HandleTypeDef *hdsi, FunctionalState State) { /* Process locked */ __HAL_LOCK(hdsi); /* Check function parameters */ assert_param(IS_FUNCTIONAL_STATE(State)); /* Enable/Disable pull-down on lanes */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_PDEN; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 18U); /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @brief Switch off the contention detection on data lanes * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. 
* @param State ENABLE or DISABLE * @retval HAL status */ HAL_StatusTypeDef HAL_DSI_SetContentionDetectionOff(DSI_HandleTypeDef *hdsi, FunctionalState State) { /* Process locked */ __HAL_LOCK(hdsi); /* Check function parameters */ assert_param(IS_FUNCTIONAL_STATE(State)); /* Contention Detection on Data Lanes OFF */ hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_CDOFFDL; hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 14U); /* Process unlocked */ __HAL_UNLOCK(hdsi); return HAL_OK; } /** * @} */ /** @defgroup DSI_Group4 Peripheral State and Errors functions * @brief Peripheral State and Errors functions * @verbatim =============================================================================== ##### Peripheral State and Errors functions ##### =============================================================================== [..] This subsection provides functions allowing to (+) Check the DSI state. (+) Get error code. @endverbatim * @{ */ /** * @brief Return the DSI state * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @retval HAL state */ HAL_DSI_StateTypeDef HAL_DSI_GetState(DSI_HandleTypeDef *hdsi) { return hdsi->State; } /** * @brief Return the DSI error code * @param hdsi pointer to a DSI_HandleTypeDef structure that contains * the configuration information for the DSI. * @retval DSI Error Code */ uint32_t HAL_DSI_GetError(DSI_HandleTypeDef *hdsi) { /* Get the error code */ return hdsi->ErrorCode; } /** * @} */ /** * @} */ /** * @} */ #endif /* DSI */ #endif /* HAL_DSI_MODULE_ENABLED */ /** * @} */ /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
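/*
 * Usage sketch: the block below illustrates, under stated assumptions, how the
 * driver above is typically brought up, and how the UIX4 value computed in
 * HAL_DSI_Init() works out numerically.  All PLL factors, the HSE frequency,
 * the lane count and the TX escape clock divider are hypothetical example
 * values and must be taken from the actual board/panel design.
 *
 * Worked example of the bit-period formula used in HAL_DSI_Init():
 *   assume HSE_VALUE = 8 MHz, PLLNDIV = 125, PLLIDF = 2, PLLODF = 0
 *   (PLLODF = 0 means an output division factor of 1, since the code applies 1 << PLLODF)
 *   F_PHY = (NDIV * HSE_MHz) / (IDF * ODF) = (125 * 8) / (2 * 1) = 500 MHz
 *   UIX4  = IntegerPart((1000 / F_PHY_MHz) * 4) = (1000 / 500) * 4 = 8
 *   i.e. one high-speed unit interval is 2 ns, expressed as 8 quarter-nanoseconds.
 */
#if 0  /* example only - uses functions defined in this file, not compiled with the driver */
static DSI_HandleTypeDef hdsi_example;

static HAL_StatusTypeDef dsi_bringup_example(void)
{
  DSI_PLLInitTypeDef pll;

  hdsi_example.Instance = DSI;                          /* DSI peripheral instance from the device header */
  hdsi_example.Init.NumberOfLanes = DSI_TWO_DATA_LANES;
  hdsi_example.Init.TXEscapeCkdiv = 4U;                 /* 62.5 MHz lane byte clock / 4 keeps the escape clock below 20 MHz */
  hdsi_example.Init.AutomaticClockLaneControl = 0U;     /* assumed: automatic clock lane control disabled */

  pll.PLLNDIV = 125U;   /* example: 8 MHz HSE * 125 / 2 = 500 Mbit/s per lane */
  pll.PLLIDF  = 2U;
  pll.PLLODF  = 0U;     /* output division factor 1 (the code applies 1 << PLLODF) */

  if (HAL_DSI_Init(&hdsi_example, &pll) != HAL_OK)
  {
    return HAL_ERROR;
  }

  /* Video or adapted command mode would be configured here with
     HAL_DSI_ConfigVideoMode() / HAL_DSI_ConfigAdaptedCommandMode(),
     followed by HAL_DSI_ConfigPhyTimer(), using panel-specific timings. */

  return HAL_DSI_Start(&hdsi_example);
}
#endif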
975332.c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include "simplehash.h"

// Initialize the hashtable
table * init_hash_table()
{
    int i;
    table *t = (table *)malloc(sizeof(table));
    if (t == NULL) {
        printf("init hash table failed!\n");
        return NULL;
    }
    t->cap = MAX_ENTRY_NUMS;
    t->count = 0;

    /* Allocate the entry array once (the original allocated it MAX_ENTRY_NUMS
       times in a loop, leaking every allocation but the last one) */
    t->table = (entry *)malloc(MAX_ENTRY_NUMS * sizeof(entry));
    if (t->table == NULL) {
        printf("init hash table entries failed!\n");
        free(t);
        return NULL;
    }
    memset(t->table, 0, MAX_ENTRY_NUMS * sizeof(entry));

    for (i = 0; i < MAX_ENTRY_NUMS; i ++) {
        data_table[i] = malloc(MAX_STR_LEN);
        if (data_table[i] == NULL) {
            printf("init data table failed!\n");
            return NULL;
        }
        memset(data_table[i], 0, MAX_STR_LEN);
    }
    return t;
}

// Look up a key in the hashtable.
// NOTE: the original stub took no key argument; a key parameter is assumed here
// (and in the matching prototype in simplehash.h) so that the lookup is usable.
// Returns the slot index on success, -1 if the key is not present.
int find_hash_table(table *t, int key)
{
    int pos, probes;

    if (NULL == t) {
        return -1;
    }
    pos = hash_key(key);
    for (probes = 0; probes < MAX_ENTRY_NUMS; probes ++) {
        if (t->table[pos].flag == 1 && t->table[pos].key == key) {
            return pos;
        }
        if (t->table[pos].flag == 0) {
            /* Empty slot: the key cannot be further along this probe chain */
            return -1;
        }
        pos = (pos + 1) % MAX_ENTRY_NUMS;
    }
    return -1;
}

// Insert a key/value pair into the hashtable (open addressing, linear probing)
int insert_hash_table(table *t, int key, char *value, int len)
{
    int index, pos, copylen;

    if (NULL == t) {
        return 0;
    }
    if (t->count >= MAX_ENTRY_NUMS) {
        printf("hash table is full!\n");
        return 0;
    }

    index = hash_key(key);
    pos = index;
    /* Probe forward until a free slot is found; reject duplicate keys on the way */
    while (t->table[pos].flag == 1) {
        if (t->table[pos].key == key) {
            printf("index: %d, key already exists! orikey: %d, newkey: %d\n",
                   pos, t->table[pos].key, key);
            return 0;
        }
        pos = (pos + 1) % MAX_ENTRY_NUMS;   /* the original (++pos) % N was undefined behaviour */
    }

    /* Copy the value and keep it NUL-terminated */
    copylen = (len < MAX_STR_LEN - 1) ? len : (MAX_STR_LEN - 1);
    memcpy(data_table[pos], value, copylen);
    ((char *)data_table[pos])[copylen] = '\0';

    t->table[pos].key = key;
    t->table[pos].value = data_table[pos];
    t->table[pos].flag = 1;
    t->count ++;
    return 1;
}

// Compute the hash of a key
int hash_key(int key)
{
    return (key % MAX_ENTRY_NUMS);
}

void print_hash(table *t, int len)
{
    int i;
    printf("count: %d\n", t->count);
    for (i = 0; i <= len; i ++) {
        printf("key: %d, value: %s\n", t->table[i].key,
               t->table[i].flag ? t->table[i].value : "(empty)");
    }
    printf("table:\n");
    for (i = 0; i <= len; i ++) {
        printf("value: %s\n", data_table[i]);
    }
}

int main(void)
{
    int i, pos;
    table *t = init_hash_table();
    char *value[5] = {"ab", "bc", "cd", "de", "ef"};

    if (t == NULL) {
        return 1;
    }
    //print_hash(t, 5);
    for (i = 1; i <= 5; i ++) {
        insert_hash_table(t, i, value[i-1], 2);
    }
    print_hash(t, 5);

    pos = find_hash_table(t, 3);
    if (pos >= 0) {
        printf("found key 3 at slot %d: %s\n", pos, t->table[pos].value);
    }
    return 0;
}
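/*
 * Usage sketch: two keys that are congruent modulo MAX_ENTRY_NUMS hash to the
 * same home slot, so the second insert lands in the next free slot via linear
 * probing.  The key values are arbitrary examples; call this from main() to
 * observe the behaviour.
 */
#if 0  /* example only */
static void collision_demo(table *t)
{
    insert_hash_table(t, 2, "aa", 2);
    insert_hash_table(t, 2 + MAX_ENTRY_NUMS, "bb", 2);   /* same hash as key 2 */

    printf("key %d stored at slot %d\n", 2, find_hash_table(t, 2));
    printf("key %d stored at slot %d\n", 2 + MAX_ENTRY_NUMS,
           find_hash_table(t, 2 + MAX_ENTRY_NUMS));
}
#endif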
952474.c
// SPDX-License-Identifier: Zlib /* zutil.c -- target dependent utility functions for the compression library * Copyright (C) 1995-2017 Jean-loup Gailly * For conditions of distribution and use, see copyright notice in zlib.h */ /* @(#) $Id$ */ #include "zutil.h" #ifndef Z_SOLO # include "gzguts.h" #endif z_const char * const z_errmsg[10] = { (z_const char *)"need dictionary", /* Z_NEED_DICT 2 */ (z_const char *)"stream end", /* Z_STREAM_END 1 */ (z_const char *)"", /* Z_OK 0 */ (z_const char *)"file error", /* Z_ERRNO (-1) */ (z_const char *)"stream error", /* Z_STREAM_ERROR (-2) */ (z_const char *)"data error", /* Z_DATA_ERROR (-3) */ (z_const char *)"insufficient memory", /* Z_MEM_ERROR (-4) */ (z_const char *)"buffer error", /* Z_BUF_ERROR (-5) */ (z_const char *)"incompatible version",/* Z_VERSION_ERROR (-6) */ (z_const char *)"" }; const char * ZEXPORT zlibVersion() { return ZLIB_VERSION; } uLong ZEXPORT zlibCompileFlags() { uLong flags; flags = 0; switch ((int)(sizeof(uInt))) { case 2: break; case 4: flags += 1; break; case 8: flags += 2; break; default: flags += 3; } switch ((int)(sizeof(uLong))) { case 2: break; case 4: flags += 1 << 2; break; case 8: flags += 2 << 2; break; default: flags += 3 << 2; } switch ((int)(sizeof(voidpf))) { case 2: break; case 4: flags += 1 << 4; break; case 8: flags += 2 << 4; break; default: flags += 3 << 4; } switch ((int)(sizeof(z_off_t))) { case 2: break; case 4: flags += 1 << 6; break; case 8: flags += 2 << 6; break; default: flags += 3 << 6; } #ifdef ZLIB_DEBUG flags += 1 << 8; #endif #if defined(ASMV) || defined(ASMINF) flags += 1 << 9; #endif #ifdef ZLIB_WINAPI flags += 1 << 10; #endif #ifdef BUILDFIXED flags += 1 << 12; #endif #ifdef DYNAMIC_CRC_TABLE flags += 1 << 13; #endif #ifdef NO_GZCOMPRESS flags += 1L << 16; #endif #ifdef NO_GZIP flags += 1L << 17; #endif #ifdef PKZIP_BUG_WORKAROUND flags += 1L << 20; #endif #ifdef FASTEST flags += 1L << 21; #endif #if defined(STDC) || defined(Z_HAVE_STDARG_H) # ifdef NO_vsnprintf flags += 1L << 25; # ifdef HAS_vsprintf_void flags += 1L << 26; # endif # else # ifdef HAS_vsnprintf_void flags += 1L << 26; # endif # endif #else flags += 1L << 24; # ifdef NO_snprintf flags += 1L << 25; # ifdef HAS_sprintf_void flags += 1L << 26; # endif # else # ifdef HAS_snprintf_void flags += 1L << 26; # endif # endif #endif return flags; } #ifdef ZLIB_DEBUG #include <stdlib.h> # ifndef verbose # define verbose 0 # endif int ZLIB_INTERNAL z_verbose = verbose; void ZLIB_INTERNAL z_error (m) char *m; { fprintf(stderr, "%s\n", m); exit(1); } #endif /* exported to allow conversion of error code to string for compress() and * uncompress() */ const char * ZEXPORT zError(err) int err; { return ERR_MSG(err); } #if defined(_WIN32_WCE) /* The Microsoft C Run-Time Library for Windows CE doesn't have * errno. We define it as a global variable to simplify porting. * Its value is always 0 and should not be used. */ int errno = 0; #endif #ifndef HAVE_MEMCPY void ZLIB_INTERNAL zmemcpy(dest, source, len) Bytef* dest; const Bytef* source; uInt len; { if (len == 0) return; do { *dest++ = *source++; /* ??? to be unrolled */ } while (--len != 0); } int ZLIB_INTERNAL zmemcmp(s1, s2, len) const Bytef* s1; const Bytef* s2; uInt len; { uInt j; for (j = 0; j < len; j++) { if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1; } return 0; } void ZLIB_INTERNAL zmemzero(dest, len) Bytef* dest; uInt len; { if (len == 0) return; do { *dest++ = 0; /* ??? 
to be unrolled */ } while (--len != 0); } #endif #ifndef Z_SOLO #ifdef SYS16BIT #ifdef __TURBOC__ /* Turbo C in 16-bit mode */ # define MY_ZCALLOC /* Turbo C malloc() does not allow dynamic allocation of 64K bytes * and farmalloc(64K) returns a pointer with an offset of 8, so we * must fix the pointer. Warning: the pointer must be put back to its * original form in order to free it, use zcfree(). */ #define MAX_PTR 10 /* 10*64K = 640K */ local int next_ptr = 0; typedef struct ptr_table_s { voidpf org_ptr; voidpf new_ptr; } ptr_table; local ptr_table table[MAX_PTR]; /* This table is used to remember the original form of pointers * to large buffers (64K). Such pointers are normalized with a zero offset. * Since MSDOS is not a preemptive multitasking OS, this table is not * protected from concurrent access. This hack doesn't work anyway on * a protected system like OS/2. Use Microsoft C instead. */ voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, unsigned items, unsigned size) { voidpf buf; ulg bsize = (ulg)items*size; (void)opaque; /* If we allocate less than 65520 bytes, we assume that farmalloc * will return a usable pointer which doesn't have to be normalized. */ if (bsize < 65520L) { buf = farmalloc(bsize); if (*(ush*)&buf != 0) return buf; } else { buf = farmalloc(bsize + 16L); } if (buf == NULL || next_ptr >= MAX_PTR) return NULL; table[next_ptr].org_ptr = buf; /* Normalize the pointer to seg:0 */ *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4; *(ush*)&buf = 0; table[next_ptr++].new_ptr = buf; return buf; } void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr) { int n; (void)opaque; if (*(ush*)&ptr != 0) { /* object < 64K */ farfree(ptr); return; } /* Find the original pointer */ for (n = 0; n < next_ptr; n++) { if (ptr != table[n].new_ptr) continue; farfree(table[n].org_ptr); while (++n < next_ptr) { table[n-1] = table[n]; } next_ptr--; return; } Assert(0, "zcfree: ptr not found"); } #endif /* __TURBOC__ */ #ifdef M_I86 /* Microsoft C in 16-bit mode */ # define MY_ZCALLOC #if (!defined(_MSC_VER) || (_MSC_VER <= 600)) # define _halloc halloc # define _hfree hfree #endif voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, uInt items, uInt size) { (void)opaque; return _halloc((long)items, size); } void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr) { (void)opaque; _hfree(ptr); } #endif /* M_I86 */ #endif /* SYS16BIT */ #ifndef MY_ZCALLOC /* Any system without a special alloc function */ #ifndef STDC extern voidp malloc OF((uInt size)); extern voidp calloc OF((uInt items, uInt size)); extern void free OF((voidpf ptr)); #endif voidpf ZLIB_INTERNAL zcalloc (opaque, items, size) voidpf opaque; unsigned items; unsigned size; { (void)opaque; return sizeof(uInt) > 2 ? (voidpf)malloc(items * size) : (voidpf)calloc(items, size); } void ZLIB_INTERNAL zcfree (opaque, ptr) voidpf opaque; voidpf ptr; { (void)opaque; free(ptr); } #endif /* MY_ZCALLOC */ #endif /* !Z_SOLO */
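An added caller-side sketch (not part of zlib): zlibCompileFlags() above packs the compile-time type sizes into two-bit fields, bits 0-1 for uInt, 2-3 for uLong, 4-5 for voidpf and 6-7 for z_off_t, where a field value of 0/1/2 means 16/32/64 bits. The field_bits() helper below is illustrative only.

#include <stdio.h>
#include "zlib.h"

/* Decode one two-bit size field of zlibCompileFlags():
 * 0 -> 16 bits, 1 -> 32 bits, 2 -> 64 bits, 3 -> other (reported as 0). */
static int field_bits(uLong flags, int shift)
{
    static const int bits[4] = {16, 32, 64, 0};
    return bits[(flags >> shift) & 3];
}

int main(void)
{
    uLong flags = zlibCompileFlags();

    printf("zlib %s\n", zlibVersion());
    printf("uInt:    %d bits\n", field_bits(flags, 0));
    printf("uLong:   %d bits\n", field_bits(flags, 2));
    printf("voidpf:  %d bits\n", field_bits(flags, 4));
    printf("z_off_t: %d bits\n", field_bits(flags, 6));
    printf("debug build: %s\n", (flags & (1L << 8)) ? "yes" : "no");
    return 0;
}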
666490.c
#include <string.h> #include "ws2812b.h" //------------------------------------------------------------ // Internal //------------------------------------------------------------ #define WS2820_MIN(a, b) ({ typeof(a) a1 = a; typeof(b) b1 = b; a1 < b1 ? a1 : b1; }) #ifdef WS2812B_USE_GAMMA_CORRECTION #ifdef WS2812B_USE_PRECALCULATED_GAMMA_TABLE static const uint8_t LEDGammaTable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 23, 23, 24, 24, 25, 26, 26, 27, 28, 28, 29, 30, 30, 31, 32, 32, 33, 34, 35, 35, 36, 37, 38, 38, 39, 40, 41, 42, 42, 43, 44, 45, 46, 47, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 91, 92, 93, 94, 95, 97, 98, 99, 100, 102, 103, 104, 105, 107, 108, 109, 111, 112, 113, 115, 116, 117, 119, 120, 121, 123, 124, 126, 127, 128, 130, 131, 133, 134, 136, 137, 139, 140, 142, 143, 145, 146, 148, 149, 151, 152, 154, 155, 157, 158, 160, 162, 163, 165, 166, 168, 170, 171, 173, 175, 176, 178, 180, 181, 183, 185, 186, 188, 190, 192, 193, 195, 197, 199, 200, 202, 204, 206, 207, 209, 211, 213, 215, 217, 218, 220, 222, 224, 226, 228, 230, 232, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255 }; #endif #endif static inline uint8_t LEDGamma(uint8_t v) { #ifdef WS2812B_USE_GAMMA_CORRECTION #ifdef WS2812B_USE_PRECALCULATED_GAMMA_TABLE return LEDGammaTable[v]; #else return (v * v + v) >> 8; #endif #else return v; #endif } static void RGB2PWM(RGB_t *rgb, PWM_t *pwm) { uint8_t r = LEDGamma(rgb->r); uint8_t g = LEDGamma(rgb->g); uint8_t b = LEDGamma(rgb->b); uint8_t mask = 128; for (unsigned int i = 0; i < 8; i++) { pwm->r[i] = r & mask ? WS2812B_PULSE_HIGH : WS2812B_PULSE_LOW; pwm->g[i] = g & mask ? WS2812B_PULSE_HIGH : WS2812B_PULSE_LOW; pwm->b[i] = b & mask ? 
WS2812B_PULSE_HIGH : WS2812B_PULSE_LOW; mask >>= 1; } } static void SrcFilterNull(void **src, PWM_t **pwm, unsigned *count, unsigned size) { memset(*pwm, 0, size * sizeof(PWM_t)); *pwm += size; } static void SrcFilterRGB(void **src, PWM_t **pwm, unsigned *count, unsigned size) { RGB_t *rgb = *src; PWM_t *p = *pwm; *count -= size; while (size--) { RGB2PWM(rgb++, p++); } *src = rgb; *pwm = p; } static void SrcFilterHSV(void **src, PWM_t **pwm, unsigned *count, unsigned size) { HSV_t *hsv = *src; PWM_t *p = *pwm; *count -= size; while (size--) { RGB_t rgb; HSV2RGB(hsv++, &rgb); RGB2PWM(&rgb, p++); } *src = hsv; *pwm = p; } static void DMASend(ws2812b_t * obj, ws18b20_SrcFilter_t *filter, void *src, unsigned count) { if (obj->DMABusy) { return; } obj->DMABusy = 1; obj->DMAFilter = filter; obj->DMASrc = src; obj->DMACount = count; PWM_t *pwm = obj->DMABuffer; PWM_t *end = &obj->DMABuffer[WS2812B_BUFFER_SIZE]; // Start sequence SrcFilterNull(NULL, &pwm, NULL, WS2812B_START_SIZE); // RGB PWM data obj->DMAFilter(&obj->DMASrc, &pwm, &obj->DMACount, WS2820_MIN(obj->DMACount, end - pwm)); // Rest of buffer if (pwm < end) { SrcFilterNull(NULL, &pwm, NULL, end - pwm); } // Start transfer WS2812B_TIM_START_DMA(obj->tim, obj->ch, (uint32_t*)obj->DMABuffer, sizeof(obj->DMABuffer) / sizeof(WS2812B_TIMER_CCR_TYPE)) } static void DMASendNext(ws2812b_t * obj, PWM_t *pwm, PWM_t *end) { if (!obj->DMAFilter) { // Stop transfer WS2812B_TIM_STOP_DMA(obj->tim, obj->ch); WS2812B_SET_COMPARE(obj->tim, obj->ch, 0); obj->DMABusy = 0; } else if (!obj->DMACount) { // Rest of buffer SrcFilterNull(NULL, &pwm, NULL, end - pwm); obj->DMAFilter = NULL; } else { // RGB PWM data obj->DMAFilter(&obj->DMASrc, &pwm, &obj->DMACount, WS2820_MIN(obj->DMACount, end - pwm)); // Rest of buffer if (pwm < end) { SrcFilterNull(NULL, &pwm, NULL, end - pwm); } } } void ws2812b_dma_handler(ws2812b_t * obj, uint8_t isHalf) { if (isHalf) { DMASendNext(obj, obj->DMABuffer, &obj->DMABuffer[WS2812B_BUFFER_SIZE >> 1]); } else { DMASendNext(obj ,&obj->DMABuffer[WS2812B_BUFFER_SIZE >> 1], &obj->DMABuffer[WS2812B_BUFFER_SIZE]); } } //------------------------------------------------------------ // Interface //------------------------------------------------------------ void ws2812b_Init(ws2812b_t * obj, WS2812B_TIMER_TYPE * tim, WS2812B_TIMER_CH_TYPE ch) { if(!obj) { return; } WS2812B_TIM_STOP_DMA(tim, ch); WS2812B_SET_COMPARE(tim, ch, 0); obj->ch = ch; obj->tim = tim; obj->DMAFilter = NULL; obj->DMASrc = NULL; obj->DMACount = 0; obj->DMABusy = 0; } void ws2812b_SendRGB(ws2812b_t * obj, RGB_t *rgb, unsigned count) { if(!obj) { return; } DMASend(obj, &SrcFilterRGB, rgb, count); } void ws2812b_SendHSV(ws2812b_t * obj, HSV_t *hsv, unsigned count) { if(!obj) { return; } DMASend(obj, &SrcFilterHSV, hsv, count); }
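An added usage sketch for the driver above, assuming ws2812b.h provides the ws2812b_t, RGB_t, WS2812B_TIMER_TYPE and WS2812B_TIMER_CH_TYPE definitions it already references; NUM_LEDS, leds_init(), leds_show_red() and the two interrupt hook names are illustrative, not part of the driver.

#include "ws2812b.h"

#define NUM_LEDS 8

static ws2812b_t strip;
static RGB_t pixels[NUM_LEDS];

/* Bind the driver to the timer/channel that feeds the LED data line. */
void leds_init(WS2812B_TIMER_TYPE *tim, WS2812B_TIMER_CH_TYPE ch)
{
    ws2812b_Init(&strip, tim, ch);
}

/* Fill the strip with one color and kick off the DMA transfer; the call
 * returns immediately and the buffer is refilled from the DMA interrupts. */
void leds_show_red(void)
{
    for (unsigned int i = 0; i < NUM_LEDS; i++) {
        pixels[i].r = 32;
        pixels[i].g = 0;
        pixels[i].b = 0;
    }
    ws2812b_SendRGB(&strip, pixels, NUM_LEDS);
}

/* Call these from the timer DMA half-transfer and transfer-complete
 * interrupts so the double-buffered PWM data keeps streaming. */
void on_dma_half_transfer(void) { ws2812b_dma_handler(&strip, 1); }
void on_dma_transfer_done(void) { ws2812b_dma_handler(&strip, 0); }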
188555.c
//***************************************************************************** // //! @file rtos.c //! //! @brief Essential functions to make the RTOS run correctly. //! //! These functions are required by the RTOS for ticking, sleeping, and basic //! error checking. // //***************************************************************************** //***************************************************************************** // // Copyright (c) 2021, Ambiq Micro, Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // Third party software included in this distribution is subject to the // additional license terms as defined in the /docs/licenses directory. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // This is part of revision b0-release-20210111-375-gc3201eeb4 of the AmbiqSuite Development Package. // //***************************************************************************** #include <stdint.h> #include <stdbool.h> #include "am_mcu_apollo.h" #include "am_bsp.h" #include "FreeRTOS.h" #include "task.h" #include "queue.h" #include "portmacro.h" #include "portable.h" #include "nemagfx_watch_gui.h" //***************************************************************************** // // Task handle for the initial setup task. // //***************************************************************************** TaskHandle_t xSetupTask; #if 0 //***************************************************************************** // // Interrupt handler for the CTIMER module. // //***************************************************************************** void am_ctimer_isr(void) { uint32_t ui32Status; // // Check the timer interrupt status. // ui32Status = am_hal_ctimer_int_status_get(false); am_hal_ctimer_int_clear(ui32Status); // // Run handlers for the various possible timer events. // am_hal_ctimer_int_service(ui32Status); } #endif //***************************************************************************** // // Sleep function called from FreeRTOS IDLE task. 
// Do necessary application specific Power down operations here // Return 0 if this function also incorporates the WFI, else return value same // as idleTime // //***************************************************************************** uint32_t am_freertos_sleep(uint32_t idleTime) { am_hal_sysctrl_sleep(AM_HAL_SYSCTRL_SLEEP_DEEP); return 0; } //***************************************************************************** // // Recovery function called from FreeRTOS IDLE task, after waking up from Sleep // Do necessary 'wakeup' operations here, e.g. to power up/enable peripherals etc. // //***************************************************************************** void am_freertos_wakeup(uint32_t idleTime) { return; } //***************************************************************************** // // FreeRTOS debugging functions. // //***************************************************************************** void vApplicationMallocFailedHook(void) { // // Called if a call to pvPortMalloc() fails because there is insufficient // free memory available in the FreeRTOS heap. pvPortMalloc() is called // internally by FreeRTOS API functions that create tasks, queues, software // timers, and semaphores. The size of the FreeRTOS heap is set by the // configTOTAL_HEAP_SIZE configuration constant in FreeRTOSConfig.h. // while (1); } void vApplicationStackOverflowHook(TaskHandle_t pxTask, char *pcTaskName) { (void) pcTaskName; (void) pxTask; // // Run time stack overflow checking is performed if // configconfigCHECK_FOR_STACK_OVERFLOW is defined to 1 or 2. This hook // function is called if a stack overflow is detected. // while (1) { __asm("BKPT #0\n") ; // Break into the debugger } } #ifndef BAREMETAL //***************************************************************************** // // High priority task to run immediately after the scheduler starts. // // This task is used for any global initialization that must occur after the // scheduler starts, but before any functional tasks are running. This can be // useful for enabling events, semaphores, and other global, RTOS-specific // features. // //***************************************************************************** void setup_task(void *pvParameters) { // // Print a debug message. // am_util_debug_printf("Running setup tasks...\r\n"); // // Run setup functions. // GuiTaskSetup(); // // Create the functional tasks // xTaskCreate(GuiTask, "GuiTask", 512, 0, 3, &gui_task_handle); // // The setup operations are complete, so suspend the setup task now. // vTaskSuspend(NULL); while (1); } //***************************************************************************** // // Initializes all tasks // //***************************************************************************** void run_tasks(void) { // // Set some interrupt priorities before we create tasks or start the scheduler. // // Note: Timer priority is handled by the FreeRTOS kernel, so we won't // touch it here. // // // Create essential tasks. // xTaskCreate(setup_task, "Setup", 512, 0, 3, &xSetupTask); // // Start the scheduler. // vTaskStartScheduler(); } #endif
401184.c
#include <assert.h>

int main() {
  // variable declarations
  int n;  // left uninitialized: stands for an arbitrary input value
  int x;
  // pre-conditions
  (x = n);
  // loop body
  while ((x > 0)) {
    {
      (x = (x - 1));
    }
  }
  // post-condition
  if ( (n >= 0) )
    assert( (x == 0) );
}
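Why the post-condition holds (an added note, not part of the original benchmark): the loop keeps the invariant (n >= 0 ==> x >= 0) && (x <= n). It is true on entry because x = n, and the body runs only when x > 0 and decrements x by one, so both conjuncts are preserved. When the loop exits, x <= 0; together with the invariant, n >= 0 forces x >= 0 and therefore x == 0, which is exactly what the assert checks.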
771621.c
/**
  *******************************************************************************************************
  * @file    fm33lc0xx_fl_opa.c
  * @author  FMSH Application Team
  * @brief   Src file of OPA FL Module
  *******************************************************************************************************
  * @attention
  *
  * Copyright (c) [2019] [Fudan Microelectronics]
  * THIS SOFTWARE is licensed under the Mulan PSL v1.
  * You can use this software according to the terms and conditions of the Mulan PSL v1.
  * You may obtain a copy of Mulan PSL v1 at:
  * http://license.coscl.org.cn/MulanPSL
  * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
  * PURPOSE.
  * See the Mulan PSL v1 for more details.
  *
  *******************************************************************************************************
  */
#include "fm33lc0xx_fl_rcc.h"
#include "fm33lc0xx_fl_rmu.h"
#include "fm33lc0xx_fl_opa.h"
#include "fm33_assert.h"

/** @addtogroup FM33LC0XX_FL_Driver
  * @{
  */

/** @addtogroup OPA
  * @{
  */

/* Private macros ------------------------------------------------------------*/
/** @addtogroup OPA_FL_Private_Macros
  * @{
  */
#define IS_OPA_ALL_INSTANCE(INTENCE)            (((INTENCE) == OPA1)||\
                                                 ((INTENCE) == OPA2))

#define IS_FL_OPA_INP_CHANNAL(__VALUE__)        (((__VALUE__) == FL_OPA_INP_SOURCE_INP1)||\
                                                 ((__VALUE__) == FL_OPA_INP_SOURCE_INP2))

#define IS_FL_OPA_INN_CHANNAL(__VALUE__)        (((__VALUE__) == FL_OPA_INN_SOURCE_INN1)||\
                                                 ((__VALUE__) == FL_OPA_INN_SOURCE_INN2)||\
                                                 ((__VALUE__) == FL_OPA_INN_SOURCE_VREF)||\
                                                 ((__VALUE__) == FL_OPA_INN_SOURCE_THREE_QUARTERS_VREF)||\
                                                 ((__VALUE__) == FL_OPA_INN_SOURCE_HALF_VREF)||\
                                                 ((__VALUE__) == FL_OPA_INN_SOURCE_ONE_QUARTER_VREF)||\
                                                 ((__VALUE__) == FL_OPA_INN_SOURCE_ONE_EIGHTH_VREF))

#define IS_FL_OPA_MODE(__VALUE__)               (((__VALUE__) == FL_OPA_MODE_STANDALONE)||\
                                                 ((__VALUE__) == FL_OPA_MODE_COMPARATOR)||\
                                                 ((__VALUE__) == FL_OPA_MODE_PGA)||\
                                                 ((__VALUE__) == FL_OPA_MODE_BUFFER))

#define IS_FL_OPA_DIGITALFILTER(__VALUE__)      (((__VALUE__) == FL_DISABLE)||\
                                                 ((__VALUE__) == FL_ENABLE))

#define IS_FL_OPA_NEGTIVE_TO_PIN(__VALUE__)     (((__VALUE__) == FL_DISABLE)||\
                                                 ((__VALUE__) == FL_ENABLE))

#define IS_FL_OPA_LOW_POWER_MODE(__VALUE__)     (((__VALUE__) == FL_DISABLE)||\
                                                 ((__VALUE__) == FL_ENABLE))

#define IS_FL_OPA_GAIN(__VALUE__)               (((__VALUE__) == FL_OPA_GAIN_X2)||\
                                                 ((__VALUE__) == FL_OPA_GAIN_X4)||\
                                                 ((__VALUE__) == FL_OPA_GAIN_X8)||\
                                                 ((__VALUE__) == FL_OPA_GAIN_X16))
/**
  * @}
  */

/** @addtogroup OPA_FL_EF_Init
  * @{
  */

/**
  * @brief  Reset the OPA peripheral.
  * @param  OPAx  Peripheral instance
  * @retval Error status, possible values:
  *         -FL_PASS  peripheral registers restored to their reset values
  *         -FL_FAIL  the operation did not complete successfully
  */
FL_ErrorStatus FL_OPA_DeInit(OPA_Type *OPAx)
{
    /* Assert the validity of the input parameter */
    assert_param(IS_OPA_ALL_INSTANCE(OPAx));
    /* Enable peripheral reset */
    FL_RCC_EnablePeripheralReset();
    /* Reset the peripheral registers */
    FL_RCC_EnableResetAPB2Peripheral(FL_RCC_RSTAPB_OPA);
    FL_RCC_DisableResetAPB2Peripheral(FL_RCC_RSTAPB_OPA);
    /* Disable the peripheral bus clock and operating clock */
    FL_RCC_DisableGroup1BusClock(FL_RCC_GROUP1_BUSCLK_ANAC);
    /* Lock peripheral reset */
    FL_RCC_DisablePeripheralReset();
    return FL_PASS;
}

/**
  * @brief  Configure the OPA.
  *
  * @param  OPAx        Peripheral instance
  * @param  initStruct  Pointer to an @ref FL_OPA_InitTypeDef structure
  *
  * @retval Error status, possible values:
  *         -FL_PASS  configuration succeeded
  *         -FL_FAIL  an error occurred during configuration
  */
FL_ErrorStatus FL_OPA_Init(OPA_Type *OPAx, FL_OPA_InitTypeDef *initStruct)
{
    FL_ErrorStatus status = FL_PASS;
    /* Check the input parameters */
    assert_param(IS_OPA_ALL_INSTANCE(OPAx));
    assert_param(IS_FL_OPA_INP_CHANNAL(initStruct->INP));
    assert_param(IS_FL_OPA_INN_CHANNAL(initStruct->INN));
    assert_param(IS_FL_OPA_MODE(initStruct->mode));
    assert_param(IS_FL_OPA_DIGITALFILTER(initStruct->digitalFilter));
    assert_param(IS_FL_OPA_NEGTIVE_TO_PIN(initStruct->negtiveToPin));
    assert_param(IS_FL_OPA_LOW_POWER_MODE(initStruct->lowPowermode));
    assert_param(IS_FL_OPA_GAIN(initStruct->gain));
    /* Enable the bus clock */
    FL_RCC_EnableGroup1BusClock(FL_RCC_GROUP1_BUSCLK_ANAC);
    /* Configure the operating mode */
    FL_OPA_SetMode(OPAx, initStruct->mode);
    /* Configure the inverting input */
    FL_OPA_SetINNSource(OPAx, initStruct->INN);
    /* Configure the non-inverting input */
    FL_OPA_SetINPSource(OPAx, initStruct->INP);
    /* Configure low-power mode */
    if(initStruct->lowPowermode == FL_ENABLE)
    {
        FL_OPA_EnableLowPowerMode(OPAx);
    }
    if(initStruct->mode == FL_OPA_MODE_PGA)
    {
        /* Configure the PGA gain */
        FL_OPA_PGA_SetGain(OPAx, initStruct->gain);
        if(initStruct->negtiveToPin == FL_ENABLE)
        {
            /* In PGA mode, connect the inverting input to the external pin */
            FL_OPA_PGA_EnableINNConnectToPin(OPAx);
        }
    }
    return status;
}

/**
  * @brief  Initialize an @ref FL_OPA_InitTypeDef structure to its default configuration.
  * @param  initStruct  Pointer to an @ref FL_OPA_InitTypeDef structure
  *
  * @retval None
  */
void FL_OPA_StructInit(FL_OPA_InitTypeDef *initStruct)
{
    initStruct->INP           = FL_OPA_INP_SOURCE_INP1;
    initStruct->INN           = FL_OPA_INN_SOURCE_INN1;
    initStruct->mode          = FL_OPA_MODE_STANDALONE;
    initStruct->digitalFilter = FL_DISABLE;
    initStruct->negtiveToPin  = FL_DISABLE;
    initStruct->gain          = FL_OPA_GAIN_X2;
    initStruct->lowPowermode  = FL_DISABLE;
}

/**
  * @}
  */

/**
  * @}
  */

/**
  * @}
  */

/******************************************* END OF FILE *******************************************/
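An added configuration sketch using only the functions defined above; picking OPA1 in PGA mode with x4 gain on INP1 is illustrative, and opa1_pga_setup() is not part of the library.

#include "fm33lc0xx_fl_opa.h"

/* Configure OPA1 as a PGA with x4 gain on the INP1 input. */
void opa1_pga_setup(void)
{
    FL_OPA_InitTypeDef init;

    FL_OPA_StructInit(&init);            /* start from the library defaults */
    init.mode = FL_OPA_MODE_PGA;
    init.INP  = FL_OPA_INP_SOURCE_INP1;
    init.gain = FL_OPA_GAIN_X4;

    if(FL_OPA_Init(OPA1, &init) != FL_PASS)
    {
        /* configuration failed; handle the error as the application requires */
    }
}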
862183.c
/* * Copyright (c) 2018-2021, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause * */ #include <stdbool.h> #include <stdio.h> #include "security_defs.h" #include "tfm_arch.h" #include "tfm_secure_api.h" #include "tfm_api.h" #include "tfm_svcalls.h" #include "utilities.h" /* * Use assembly to: * - Explicit stack usage to perform re-entrant detection. * - SVC here to take hardware context management advantages. */ __tfm_psa_secure_gateway_attributes__ uint32_t tfm_psa_framework_version_veneer(void) { __ASM volatile( #if !defined(__ARM_ARCH_8_1M_MAIN__) " ldr r2, [sp] \n" " ldr r3, ="M2S(STACK_SEAL_PATTERN)" \n" " cmp r2, r3 \n" " bne reent_panic1 \n" #endif " svc %0 \n" " bxns lr \n" #if !defined(__ARM_ARCH_8_1M_MAIN__) "reent_panic1: \n" " svc %1 \n" " b . \n" #endif : : "I" (TFM_SVC_PSA_FRAMEWORK_VERSION), "I" (TFM_SVC_PSA_PANIC)); } __tfm_psa_secure_gateway_attributes__ uint32_t tfm_psa_version_veneer(uint32_t sid) { __ASM volatile( #if !defined(__ARM_ARCH_8_1M_MAIN__) " ldr r2, [sp] \n" " ldr r3, ="M2S(STACK_SEAL_PATTERN)" \n" " cmp r2, r3 \n" " bne reent_panic2 \n" #endif " svc %0 \n" " bxns lr \n" #if !defined(__ARM_ARCH_8_1M_MAIN__) "reent_panic2: \n" " svc %1 \n" " b . \n" #endif : : "I" (TFM_SVC_PSA_VERSION), "I" (TFM_SVC_PSA_PANIC)); } __tfm_psa_secure_gateway_attributes__ psa_handle_t tfm_psa_connect_veneer(uint32_t sid, uint32_t version) { __ASM volatile( #if !defined(__ARM_ARCH_8_1M_MAIN__) " ldr r2, [sp] \n" " ldr r3, ="M2S(STACK_SEAL_PATTERN)" \n" " cmp r2, r3 \n" " bne reent_panic3 \n" #endif " svc %0 \n" " bxns lr \n" #if !defined(__ARM_ARCH_8_1M_MAIN__) "reent_panic3: \n" " svc %1 \n" " b . \n" #endif : : "I" (TFM_SVC_PSA_CONNECT), "I" (TFM_SVC_PSA_PANIC)); } __tfm_psa_secure_gateway_attributes__ psa_status_t tfm_psa_call_veneer(psa_handle_t handle, uint32_t ctrl_param, const psa_invec *in_vec, psa_outvec *out_vec) { __ASM volatile( #if !defined(__ARM_ARCH_8_1M_MAIN__) " push {r2, r3} \n" " ldr r2, [sp, #8] \n" " ldr r3, ="M2S(STACK_SEAL_PATTERN)" \n" " cmp r2, r3 \n" " bne reent_panic4 \n" " pop {r2, r3} \n" #endif " svc %0 \n" " bxns lr \n" #if !defined(__ARM_ARCH_8_1M_MAIN__) "reent_panic4: \n" " svc %1 \n" " b . \n" #endif : : "I" (TFM_SVC_PSA_CALL), "I" (TFM_SVC_PSA_PANIC)); } __tfm_psa_secure_gateway_attributes__ void tfm_psa_close_veneer(psa_handle_t handle) { __ASM volatile( #if !defined(__ARM_ARCH_8_1M_MAIN__) " ldr r2, [sp] \n" " ldr r3, ="M2S(STACK_SEAL_PATTERN)" \n" " cmp r2, r3 \n" " bne reent_panic5 \n" #endif " svc %0 \n" " bxns lr \n" #if !defined(__ARM_ARCH_8_1M_MAIN__) "reent_panic5: \n" " svc %1 \n" " b . \n" #endif : : "I" (TFM_SVC_PSA_CLOSE), "I" (TFM_SVC_PSA_PANIC)); }
756167.c
/* Hack to hide an <object>NULL from Cython. */ #include "Python.h" void unset_trace() { PyEval_SetTrace(NULL, NULL); }
516335.c
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <assert.h>
#include <ieeefp.h>
#include <float.h>

void sigfpe(int);

volatile sig_atomic_t signal_caught;

static volatile const double one  = 1.0;
static volatile const double zero = 0.0;
static volatile const double huge = DBL_MAX;
static volatile const double tiny = DBL_MIN;

int
main(void)
{
    volatile double x;

    /*
     * check to make sure that all exceptions are masked and
     * that the accumulated exception status is clear.
     */
    assert(fpgetmask() == 0);
    assert(fpgetsticky() == 0);

    /* set up signal handler */
    signal (SIGFPE, sigfpe);
    signal_caught = 0;

    /* trip divide by zero */
    x = one / zero;
    assert (fpgetsticky() & FP_X_DZ);
    assert (signal_caught == 0);
    fpsetsticky(0);

    /* trip invalid operation */
    x = zero / zero;
    assert (fpgetsticky() & FP_X_INV);
    assert (signal_caught == 0);
    fpsetsticky(0);

    /* trip overflow */
    x = huge * huge;
    assert (fpgetsticky() & FP_X_OFL);
    assert (signal_caught == 0);
    fpsetsticky(0);

    /* trip underflow */
    x = tiny * tiny;
    assert (fpgetsticky() & FP_X_UFL);
    assert (signal_caught == 0);
    fpsetsticky(0);

#if 1
    /* unmask and then trip divide by zero */
    fpsetmask(FP_X_DZ);
    x = one / zero;
    assert (signal_caught == 1);
    signal_caught = 0;

    /* unmask and then trip invalid operation */
    fpsetmask(FP_X_INV);
    x = zero / zero;
    assert (signal_caught == 1);
    signal_caught = 0;

    /* unmask and then trip overflow */
    fpsetmask(FP_X_OFL);
    x = huge * huge;
    assert (signal_caught == 1);
    signal_caught = 0;

    /* unmask and then trip underflow */
    fpsetmask(FP_X_UFL);
    x = tiny * tiny;
    assert (signal_caught == 1);
    signal_caught = 0;
#endif

    exit(0);
}

void
sigfpe(int sig)
{
    (void)sig;
    signal_caught = 1;
}
61345.c
/* --COPYRIGHT--,BSD * Copyright (c) 2015, Texas Instruments Incorporated * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of Texas Instruments Incorporated nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * --/COPYRIGHT--*/ //! \file drivers/drvic/drv8301/src/32b/f28x/f2802x/drv8301.c //! \brief Contains the various functions related to the DRV8301 object //! //! (C) Copyright 2015, Texas Instruments, Inc. 
// ************************************************************************** // the includes #include <math.h> // drivers #include "sw/drivers/drvic/drv8301/src/32b/f28x/f2802x/drv8301.h" // modules // platforms // ************************************************************************** // the defines // ************************************************************************** // the globals // ************************************************************************** // the function prototypes void DRV8301_enable(DRV8301_Handle handle) { DRV8301_Obj *obj = (DRV8301_Obj *)handle; static volatile uint16_t enableWaitTimeOut; uint16_t n = 0; // Enable the drv8301 GPIO_setHigh(obj->gpioHandle,obj->gpioNumber); enableWaitTimeOut = 0; // Make sure the Fault bit is not set during startup while(((DRV8301_readSpi(handle,DRV8301_RegName_Status_1) & DRV8301_STATUS1_FAULT_BITS) != 0) && (enableWaitTimeOut < 1000)) { if(++enableWaitTimeOut > 999) { obj->enableTimeOut = true; } } // Wait for the DRV8301 registers to update for(n=0;n<0xffff;n++) asm(" NOP"); return; } DRV8301_DcCalMode_e DRV8301_getDcCalMode(DRV8301_Handle handle,const DRV8301_ShuntAmpNumber_e ampNumber) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Control_2); // clear the bits if(ampNumber == DRV8301_ShuntAmpNumber_1) { data &= (~DRV8301_CTRL2_DC_CAL_1_BITS); } else if(ampNumber == DRV8301_ShuntAmpNumber_2) { data &= (~DRV8301_CTRL2_DC_CAL_2_BITS); } return((DRV8301_DcCalMode_e)data); } // end of DRV8301_getDcCalMode() function DRV8301_FaultType_e DRV8301_getFaultType(DRV8301_Handle handle) { DRV8301_Word_t readWord; DRV8301_FaultType_e faultType = DRV8301_FaultType_NoFault; // read the data readWord = DRV8301_readSpi(handle,DRV8301_RegName_Status_1); if(readWord & DRV8301_STATUS1_FAULT_BITS) { faultType = (DRV8301_FaultType_e)(readWord & DRV8301_FAULT_TYPE_MASK); if(faultType == DRV8301_FaultType_NoFault) { // read the data readWord = DRV8301_readSpi(handle,DRV8301_RegName_Status_2); if(readWord & DRV8301_STATUS2_GVDD_OV_BITS) { faultType = DRV8301_FaultType_GVDD_OV; } } } return(faultType); } // end of DRV8301_getFaultType() function uint16_t DRV8301_getId(DRV8301_Handle handle) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Status_2); // mask bits data &= DRV8301_STATUS2_ID_BITS; return(data); } // end of DRV8301_getId() function DRV8301_VdsLevel_e DRV8301_getOcLevel(DRV8301_Handle handle) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Control_1); // clear the bits data &= (~DRV8301_CTRL1_OC_ADJ_SET_BITS); return((DRV8301_VdsLevel_e)data); } // end of DRV8301_getOcLevel() function DRV8301_OcMode_e DRV8301_getOcMode(DRV8301_Handle handle) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Control_1); // clear the bits data &= (~DRV8301_CTRL1_OC_MODE_BITS); return((DRV8301_OcMode_e)data); } // end of DRV8301_getOcMode() function DRV8301_OcOffTimeMode_e DRV8301_getOcOffTimeMode(DRV8301_Handle handle) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Control_2); // clear the bits data &= (~DRV8301_CTRL2_OC_TOFF_BITS); return((DRV8301_OcOffTimeMode_e)data); } // end of DRV8301_getOcOffTimeMode() function DRV8301_OcTwMode_e DRV8301_getOcTwMode(DRV8301_Handle handle) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Control_2); // clear the bits data &= (~DRV8301_CTRL2_OCTW_SET_BITS); return((DRV8301_OcTwMode_e)data); } // end of DRV8301_getOcTwMode() function 
DRV8301_PeakCurrent_e DRV8301_getPeakCurrent(DRV8301_Handle handle) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Control_1); // clear the bits data &= (~DRV8301_CTRL1_GATE_CURRENT_BITS); return((DRV8301_PeakCurrent_e)data); } // end of DRV8301_getPeakCurrent() function DRV8301_PwmMode_e DRV8301_getPwmMode(DRV8301_Handle handle) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Control_1); // clear the bits data &= (~DRV8301_CTRL1_PWM_MODE_BITS); return((DRV8301_PwmMode_e)data); } // end of DRV8301_getPwmMode() function DRV8301_ShuntAmpGain_e DRV8301_getShuntAmpGain(DRV8301_Handle handle) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Control_2); // clear the bits data &= (~DRV8301_CTRL2_GAIN_BITS); return((DRV8301_ShuntAmpGain_e)data); } // end of DRV8301_getShuntAmpGain() function DRV8301_Handle DRV8301_init(void *pMemory,const size_t numBytes) { DRV8301_Handle handle; if(numBytes < sizeof(DRV8301_Obj)) return((DRV8301_Handle)NULL); // assign the handle handle = (DRV8301_Handle)pMemory; DRV8301_resetRxTimeout(handle); DRV8301_resetEnableTimeout(handle); return(handle); } // end of DRV8301_init() function void DRV8301_setGpioHandle(DRV8301_Handle handle,GPIO_Handle gpioHandle) { DRV8301_Obj *obj = (DRV8301_Obj *)handle; // initialize the gpio interface object obj->gpioHandle = gpioHandle; return; } // end of DRV8301_setGpioHandle() function void DRV8301_setGpioNumber(DRV8301_Handle handle,GPIO_Number_e gpioNumber) { DRV8301_Obj *obj = (DRV8301_Obj *)handle; // initialize the gpio interface object obj->gpioNumber = gpioNumber; return; } // end of DRV8301_setGpioNumber() function void DRV8301_setSpiHandle(DRV8301_Handle handle,SPI_Handle spiHandle) { DRV8301_Obj *obj = (DRV8301_Obj *)handle; // initialize the serial peripheral interface object obj->spiHandle = spiHandle; return; } // end of DRV8301_setSpiHandle() function bool DRV8301_isFault(DRV8301_Handle handle) { DRV8301_Word_t readWord; bool status=false; // read the data readWord = DRV8301_readSpi(handle,DRV8301_RegName_Status_1); if(readWord & DRV8301_STATUS1_FAULT_BITS) { status = true; } return(status); } // end of DRV8301_isFault() function bool DRV8301_isReset(DRV8301_Handle handle) { DRV8301_Word_t readWord; bool status=false; // read the data readWord = DRV8301_readSpi(handle,DRV8301_RegName_Control_1); if(readWord & DRV8301_CTRL1_GATE_RESET_BITS) { status = true; } return(status); } // end of DRV8301_isReset() function uint16_t DRV8301_readSpi(DRV8301_Handle handle,const DRV8301_RegName_e regName) { DRV8301_Obj *obj = (DRV8301_Obj *)handle; uint16_t ctrlWord; const uint16_t data = 0; volatile uint16_t readWord; static volatile uint16_t WaitTimeOut = 0; volatile SPI_FifoStatus_e RxFifoCnt = SPI_FifoStatus_Empty; // build the control word ctrlWord = (uint16_t)DRV8301_buildCtrlWord(DRV8301_CtrlMode_Read,regName,data); // reset the Rx fifo pointer to zero SPI_resetRxFifo(obj->spiHandle); SPI_enableRxFifo(obj->spiHandle); // write the command SPI_write(obj->spiHandle,ctrlWord); // dummy write to return the reply from the 8301 SPI_write(obj->spiHandle,0x0000); // wait for two words to populate the RX fifo, or a wait timeout will occur while((RxFifoCnt < SPI_FifoStatus_2_Words) && (WaitTimeOut < 0xffff)) { RxFifoCnt = SPI_getRxFifoStatus(obj->spiHandle); if(++WaitTimeOut > 0xfffe) { obj->RxTimeOut = true; } } // Read two words, the dummy word and the data readWord = SPI_readEmu(obj->spiHandle); readWord = SPI_readEmu(obj->spiHandle); return(readWord 
& DRV8301_DATA_MASK); } // end of DRV8301_readSpi() function void DRV8301_reset(DRV8301_Handle handle) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Control_1); // set the bits data |= DRV8301_CTRL1_GATE_RESET_BITS; // write the data DRV8301_writeSpi(handle,DRV8301_RegName_Control_1,data); return; } // end of DRV8301_reset() function void DRV8301_setDcCalMode(DRV8301_Handle handle,const DRV8301_ShuntAmpNumber_e ampNumber,const DRV8301_DcCalMode_e mode) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Control_2); // clear the bits if(ampNumber == DRV8301_ShuntAmpNumber_1) { data &= (~DRV8301_CTRL2_DC_CAL_1_BITS); } else if(ampNumber == DRV8301_ShuntAmpNumber_2) { data &= (~DRV8301_CTRL2_DC_CAL_2_BITS); } // set the bits data |= mode; // write the data DRV8301_writeSpi(handle,DRV8301_RegName_Control_2,data); return; } // end of DRV8301_setDcCalMode() function void DRV8301_setOcLevel(DRV8301_Handle handle,const DRV8301_VdsLevel_e VdsLevel) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Control_1); // clear the bits data &= (~DRV8301_CTRL1_OC_ADJ_SET_BITS); // set the bits data |= VdsLevel; // write the data DRV8301_writeSpi(handle,DRV8301_RegName_Control_1,data); return; } // end of DRV8301_setOcLevel() function void DRV8301_setOcMode(DRV8301_Handle handle,const DRV8301_OcMode_e mode) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Control_1); // clear the bits data &= (~DRV8301_CTRL1_OC_MODE_BITS); // set the bits data |= mode; // write the data DRV8301_writeSpi(handle,DRV8301_RegName_Control_1,data); return; } // end of DRV8301_setOcMode() function void DRV8301_setOcOffTimeMode(DRV8301_Handle handle,const DRV8301_OcOffTimeMode_e mode) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Control_2); // clear the bits data &= (~DRV8301_CTRL2_OC_TOFF_BITS); // set the bits data |= mode; // write the data DRV8301_writeSpi(handle,DRV8301_RegName_Control_2,data); return; } // end of DRV8301_setOcOffTimeMode() function void DRV8301_setOcTwMode(DRV8301_Handle handle,const DRV8301_OcTwMode_e mode) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Control_2); // clear the bits data &= (~DRV8301_CTRL2_OCTW_SET_BITS); // set the bits data |= mode; // write the data DRV8301_writeSpi(handle,DRV8301_RegName_Control_2,data); return; } // end of DRV8301_setOcTwMode() function void DRV8301_setPeakCurrent(DRV8301_Handle handle,const DRV8301_PeakCurrent_e peakCurrent) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Control_1); // clear the bits data &= (~DRV8301_CTRL1_GATE_CURRENT_BITS); // set the bits data |= peakCurrent; // write the data DRV8301_writeSpi(handle,DRV8301_RegName_Control_1,data); return; } // end of DRV8301_setPeakCurrent() function void DRV8301_setPwmMode(DRV8301_Handle handle,const DRV8301_PwmMode_e mode) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Control_1); // clear the bits data &= (~DRV8301_CTRL1_PWM_MODE_BITS); // set the bits data |= mode; // write the data DRV8301_writeSpi(handle,DRV8301_RegName_Control_1,data); return; } // end of DRV8301_setPwmMode() function void DRV8301_setShuntAmpGain(DRV8301_Handle handle,const DRV8301_ShuntAmpGain_e gain) { uint16_t data; // read data data = DRV8301_readSpi(handle,DRV8301_RegName_Control_2); // clear the bits data &= (~DRV8301_CTRL2_GAIN_BITS); // set the bits data |= gain; // write the data 
DRV8301_writeSpi(handle,DRV8301_RegName_Control_2,data); return; } // end of DRV8301_setShuntAmpGain() function void DRV8301_writeSpi(DRV8301_Handle handle, const DRV8301_RegName_e regName,const uint16_t data) { DRV8301_Obj *obj = (DRV8301_Obj *)handle; uint16_t ctrlWord; // build the command ctrlWord = (uint16_t)DRV8301_buildCtrlWord(DRV8301_CtrlMode_Write,regName,data); // reset the Rx fifo pointer to zero SPI_resetRxFifo(obj->spiHandle); SPI_enableRxFifo(obj->spiHandle); // write the command (time N) SPI_write(obj->spiHandle,ctrlWord); return; } // end of DRV8301_writeSpi() function void DRV8301_writeData(DRV8301_Handle handle, DRV_SPI_8301_Vars_t *Spi_8301_Vars) { DRV8301_RegName_e drvRegName; uint16_t drvDataNew; if(Spi_8301_Vars->SndCmd) { // Update Control Register 1 drvRegName = DRV8301_RegName_Control_1; drvDataNew = Spi_8301_Vars->Ctrl_Reg_1.DRV8301_CURRENT | \ Spi_8301_Vars->Ctrl_Reg_1.DRV8301_RESET | \ Spi_8301_Vars->Ctrl_Reg_1.PWM_MODE | \ Spi_8301_Vars->Ctrl_Reg_1.OC_MODE | \ Spi_8301_Vars->Ctrl_Reg_1.OC_ADJ_SET; DRV8301_writeSpi(handle,drvRegName,drvDataNew); // Update Control Register 2 drvRegName = DRV8301_RegName_Control_2; drvDataNew = Spi_8301_Vars->Ctrl_Reg_2.OCTW_SET | \ Spi_8301_Vars->Ctrl_Reg_2.GAIN | \ Spi_8301_Vars->Ctrl_Reg_2.DC_CAL_CH1p2 | \ Spi_8301_Vars->Ctrl_Reg_2.OC_TOFF; DRV8301_writeSpi(handle,drvRegName,drvDataNew); Spi_8301_Vars->SndCmd = false; } return; } // end of DRV8301_writeData() function void DRV8301_readData(DRV8301_Handle handle, DRV_SPI_8301_Vars_t *Spi_8301_Vars) { DRV8301_RegName_e drvRegName; uint16_t drvDataNew; if(Spi_8301_Vars->RcvCmd) { // Update Status Register 1 drvRegName = DRV8301_RegName_Status_1; drvDataNew = DRV8301_readSpi(handle,drvRegName); Spi_8301_Vars->Stat_Reg_1.FAULT = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_FAULT_BITS); Spi_8301_Vars->Stat_Reg_1.GVDD_UV = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_GVDD_UV_BITS); Spi_8301_Vars->Stat_Reg_1.PVDD_UV = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_PVDD_UV_BITS); Spi_8301_Vars->Stat_Reg_1.OTSD = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_OTSD_BITS); Spi_8301_Vars->Stat_Reg_1.OTW = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_OTW_BITS); Spi_8301_Vars->Stat_Reg_1.FETHA_OC = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_FETHA_OC_BITS); Spi_8301_Vars->Stat_Reg_1.FETLA_OC = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_FETLA_OC_BITS); Spi_8301_Vars->Stat_Reg_1.FETHB_OC = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_FETHB_OC_BITS); Spi_8301_Vars->Stat_Reg_1.FETLB_OC = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_FETLB_OC_BITS); Spi_8301_Vars->Stat_Reg_1.FETHC_OC = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_FETHC_OC_BITS); Spi_8301_Vars->Stat_Reg_1.FETLC_OC = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_FETLC_OC_BITS); // Update Status Register 2 drvRegName = DRV8301_RegName_Status_2; drvDataNew = DRV8301_readSpi(handle,drvRegName); Spi_8301_Vars->Stat_Reg_2.GVDD_OV = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS2_GVDD_OV_BITS); Spi_8301_Vars->Stat_Reg_2.DeviceID = (uint16_t)(drvDataNew & (uint16_t)DRV8301_STATUS2_ID_BITS); // Update Control Register 1 drvRegName = DRV8301_RegName_Control_1; drvDataNew = DRV8301_readSpi(handle,drvRegName); Spi_8301_Vars->Ctrl_Reg_1.DRV8301_CURRENT = (DRV8301_PeakCurrent_e)(drvDataNew & (uint16_t)DRV8301_CTRL1_GATE_CURRENT_BITS); Spi_8301_Vars->Ctrl_Reg_1.DRV8301_RESET = (DRV8301_Reset_e)(drvDataNew & (uint16_t)DRV8301_CTRL1_GATE_RESET_BITS); Spi_8301_Vars->Ctrl_Reg_1.PWM_MODE = (DRV8301_PwmMode_e)(drvDataNew & 
(uint16_t)DRV8301_CTRL1_PWM_MODE_BITS); Spi_8301_Vars->Ctrl_Reg_1.OC_MODE = (DRV8301_OcMode_e)(drvDataNew & (uint16_t)DRV8301_CTRL1_OC_MODE_BITS); Spi_8301_Vars->Ctrl_Reg_1.OC_ADJ_SET = (DRV8301_VdsLevel_e)(drvDataNew & (uint16_t)DRV8301_CTRL1_OC_ADJ_SET_BITS); // Update Control Register 2 drvRegName = DRV8301_RegName_Control_2; drvDataNew = DRV8301_readSpi(handle,drvRegName); Spi_8301_Vars->Ctrl_Reg_2.OCTW_SET = (DRV8301_OcTwMode_e)(drvDataNew & (uint16_t)DRV8301_CTRL2_OCTW_SET_BITS); Spi_8301_Vars->Ctrl_Reg_2.GAIN = (DRV8301_ShuntAmpGain_e)(drvDataNew & (uint16_t)DRV8301_CTRL2_GAIN_BITS); Spi_8301_Vars->Ctrl_Reg_2.DC_CAL_CH1p2 = (DRV8301_DcCalMode_e)(drvDataNew & (uint16_t)(DRV8301_CTRL2_DC_CAL_1_BITS | DRV8301_CTRL2_DC_CAL_2_BITS)); Spi_8301_Vars->Ctrl_Reg_2.OC_TOFF = (DRV8301_OcOffTimeMode_e)(drvDataNew & (uint16_t)DRV8301_CTRL2_OC_TOFF_BITS); Spi_8301_Vars->RcvCmd = false; } return; } // end of DRV8301_readData() function void DRV8301_setupSpi(DRV8301_Handle handle, DRV_SPI_8301_Vars_t *Spi_8301_Vars) { DRV8301_RegName_e drvRegName; uint16_t drvDataNew; uint16_t n; // Update Control Register 1 drvRegName = DRV8301_RegName_Control_1; drvDataNew = (DRV8301_PeakCurrent_0p25_A | \ DRV8301_Reset_Normal | \ DRV8301_PwmMode_Six_Inputs | \ DRV8301_OcMode_CurrentLimit | \ DRV8301_VdsLevel_0p730_V); DRV8301_writeSpi(handle,drvRegName,drvDataNew); // Update Control Register 2 drvRegName = DRV8301_RegName_Control_2; drvDataNew = (DRV8301_OcTwMode_Both | \ DRV8301_ShuntAmpGain_10VpV | \ DRV8301_DcCalMode_Ch1_Load | \ DRV8301_DcCalMode_Ch2_Load | \ DRV8301_OcOffTimeMode_Normal); DRV8301_writeSpi(handle,drvRegName,drvDataNew); Spi_8301_Vars->SndCmd = false; Spi_8301_Vars->RcvCmd = false; // Wait for the DRV8301 registers to update for(n=0;n<100;n++) asm(" NOP"); // Update Status Register 1 drvRegName = DRV8301_RegName_Status_1; drvDataNew = DRV8301_readSpi(handle,drvRegName); Spi_8301_Vars->Stat_Reg_1.FAULT = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_FAULT_BITS); Spi_8301_Vars->Stat_Reg_1.GVDD_UV = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_GVDD_UV_BITS); Spi_8301_Vars->Stat_Reg_1.PVDD_UV = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_PVDD_UV_BITS); Spi_8301_Vars->Stat_Reg_1.OTSD = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_OTSD_BITS); Spi_8301_Vars->Stat_Reg_1.OTW = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_OTW_BITS); Spi_8301_Vars->Stat_Reg_1.FETHA_OC = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_FETHA_OC_BITS); Spi_8301_Vars->Stat_Reg_1.FETLA_OC = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_FETLA_OC_BITS); Spi_8301_Vars->Stat_Reg_1.FETHB_OC = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_FETHB_OC_BITS); Spi_8301_Vars->Stat_Reg_1.FETLB_OC = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_FETLB_OC_BITS); Spi_8301_Vars->Stat_Reg_1.FETHC_OC = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_FETHC_OC_BITS); Spi_8301_Vars->Stat_Reg_1.FETLC_OC = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS1_FETLC_OC_BITS); // Update Status Register 2 drvRegName = DRV8301_RegName_Status_2; drvDataNew = DRV8301_readSpi(handle,drvRegName); Spi_8301_Vars->Stat_Reg_2.GVDD_OV = (bool)(drvDataNew & (uint16_t)DRV8301_STATUS2_GVDD_OV_BITS); Spi_8301_Vars->Stat_Reg_2.DeviceID = (uint16_t)(drvDataNew & (uint16_t)DRV8301_STATUS2_ID_BITS); // Update Control Register 1 drvRegName = DRV8301_RegName_Control_1; drvDataNew = DRV8301_readSpi(handle,drvRegName); Spi_8301_Vars->Ctrl_Reg_1.DRV8301_CURRENT = (DRV8301_PeakCurrent_e)(drvDataNew & (uint16_t)DRV8301_CTRL1_GATE_CURRENT_BITS); Spi_8301_Vars->Ctrl_Reg_1.DRV8301_RESET = 
(DRV8301_Reset_e)(drvDataNew & (uint16_t)DRV8301_CTRL1_GATE_RESET_BITS); Spi_8301_Vars->Ctrl_Reg_1.PWM_MODE = (DRV8301_PwmMode_e)(drvDataNew & (uint16_t)DRV8301_CTRL1_PWM_MODE_BITS); Spi_8301_Vars->Ctrl_Reg_1.OC_MODE = (DRV8301_OcMode_e)(drvDataNew & (uint16_t)DRV8301_CTRL1_OC_MODE_BITS); Spi_8301_Vars->Ctrl_Reg_1.OC_ADJ_SET = (DRV8301_VdsLevel_e)(drvDataNew & (uint16_t)DRV8301_CTRL1_OC_ADJ_SET_BITS); // Update Control Register 2 drvRegName = DRV8301_RegName_Control_2; drvDataNew = DRV8301_readSpi(handle,drvRegName); Spi_8301_Vars->Ctrl_Reg_2.OCTW_SET = (DRV8301_OcTwMode_e)(drvDataNew & (uint16_t)DRV8301_CTRL2_OCTW_SET_BITS); Spi_8301_Vars->Ctrl_Reg_2.GAIN = (DRV8301_ShuntAmpGain_e)(drvDataNew & (uint16_t)DRV8301_CTRL2_GAIN_BITS); Spi_8301_Vars->Ctrl_Reg_2.DC_CAL_CH1p2 = (DRV8301_DcCalMode_e)(drvDataNew & (uint16_t)(DRV8301_CTRL2_DC_CAL_1_BITS | DRV8301_CTRL2_DC_CAL_2_BITS)); Spi_8301_Vars->Ctrl_Reg_2.OC_TOFF = (DRV8301_OcOffTimeMode_e)(drvDataNew & (uint16_t)DRV8301_CTRL2_OC_TOFF_BITS); return; } // end of file
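An added bring-up sketch built from the functions in this file; the SPI/GPIO handles, the gDrvSpi8301Vars instance and the drv8301_bringup() wrapper are assumed to come from the surrounding platform code and are illustrative only.

#include "sw/drivers/drvic/drv8301/src/32b/f28x/f2802x/drv8301.h"

static DRV8301_Obj gDrv8301;
static DRV_SPI_8301_Vars_t gDrvSpi8301Vars;

void drv8301_bringup(SPI_Handle spiHandle, GPIO_Handle gpioHandle,
                     GPIO_Number_e gpioNumber)
{
    // allocate and reset the driver object
    DRV8301_Handle handle = DRV8301_init(&gDrv8301, sizeof(gDrv8301));

    // attach the SPI bus and the enable-pin GPIO
    DRV8301_setSpiHandle(handle, spiHandle);
    DRV8301_setGpioHandle(handle, gpioHandle);
    DRV8301_setGpioNumber(handle, gpioNumber);

    // drive the enable pin high and wait for the fault bit to clear
    DRV8301_enable(handle);

    // write the default control registers and read back every register
    DRV8301_setupSpi(handle, &gDrvSpi8301Vars);

    // example of changing a single setting afterwards
    DRV8301_setShuntAmpGain(handle, DRV8301_ShuntAmpGain_10VpV);
}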
46403.c
// SPDX-License-Identifier: GPL-2.0-only /* * Generic driver for NXP NCI NFC chips * * Copyright (C) 2014 NXP Semiconductors All rights reserved. * * Authors: Clément Perrochaud <[email protected]> * * Derived from PN544 device driver: * Copyright (C) 2012 Intel Corporation. All rights reserved. */ #include <linux/delay.h> #include <linux/module.h> #include <linux/nfc.h> #include <net/nfc/nci_core.h> #include "nxp-nci.h" #define NXP_NCI_HDR_LEN 4 #define NXP_NCI_NFC_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \ NFC_PROTO_MIFARE_MASK | \ NFC_PROTO_FELICA_MASK | \ NFC_PROTO_ISO14443_MASK | \ NFC_PROTO_ISO14443_B_MASK | \ NFC_PROTO_NFC_DEP_MASK) static int nxp_nci_open(struct nci_dev *ndev) { struct nxp_nci_info *info = nci_get_drvdata(ndev); int r = 0; mutex_lock(&info->info_lock); if (info->mode != NXP_NCI_MODE_COLD) { r = -EBUSY; goto open_exit; } if (info->phy_ops->set_mode) r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_NCI); info->mode = NXP_NCI_MODE_NCI; open_exit: mutex_unlock(&info->info_lock); return r; } static int nxp_nci_close(struct nci_dev *ndev) { struct nxp_nci_info *info = nci_get_drvdata(ndev); int r = 0; mutex_lock(&info->info_lock); if (info->phy_ops->set_mode) r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD); info->mode = NXP_NCI_MODE_COLD; mutex_unlock(&info->info_lock); return r; } static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb) { struct nxp_nci_info *info = nci_get_drvdata(ndev); int r; if (!info->phy_ops->write) { r = -ENOTSUPP; goto send_exit; } if (info->mode != NXP_NCI_MODE_NCI) { r = -EINVAL; goto send_exit; } r = info->phy_ops->write(info->phy_id, skb); if (r < 0) kfree_skb(skb); send_exit: return r; } static struct nci_ops nxp_nci_ops = { .open = nxp_nci_open, .close = nxp_nci_close, .send = nxp_nci_send, .fw_download = nxp_nci_fw_download, }; int nxp_nci_probe(void *phy_id, struct device *pdev, const struct nxp_nci_phy_ops *phy_ops, unsigned int max_payload, struct nci_dev **ndev) { struct nxp_nci_info *info; int r; info = devm_kzalloc(pdev, sizeof(struct nxp_nci_info), GFP_KERNEL); if (!info) { r = -ENOMEM; goto probe_exit; } info->phy_id = phy_id; info->pdev = pdev; info->phy_ops = phy_ops; info->max_payload = max_payload; INIT_WORK(&info->fw_info.work, nxp_nci_fw_work); init_completion(&info->fw_info.cmd_completion); mutex_init(&info->info_lock); if (info->phy_ops->set_mode) { r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD); if (r < 0) goto probe_exit; } info->mode = NXP_NCI_MODE_COLD; info->ndev = nci_allocate_device(&nxp_nci_ops, NXP_NCI_NFC_PROTOCOLS, NXP_NCI_HDR_LEN, 0); if (!info->ndev) { r = -ENOMEM; goto probe_exit; } nci_set_parent_dev(info->ndev, pdev); nci_set_drvdata(info->ndev, info); r = nci_register_device(info->ndev); if (r < 0) goto probe_exit_free_nci; *ndev = info->ndev; goto probe_exit; probe_exit_free_nci: nci_free_device(info->ndev); probe_exit: return r; } EXPORT_SYMBOL(nxp_nci_probe); void nxp_nci_remove(struct nci_dev *ndev) { struct nxp_nci_info *info = nci_get_drvdata(ndev); if (info->mode == NXP_NCI_MODE_FW) nxp_nci_fw_work_complete(info, -ESHUTDOWN); cancel_work_sync(&info->fw_info.work); mutex_lock(&info->info_lock); if (info->phy_ops->set_mode) info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD); nci_unregister_device(ndev); nci_free_device(ndev); mutex_unlock(&info->info_lock); } EXPORT_SYMBOL(nxp_nci_remove); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("NXP NCI NFC driver"); MODULE_AUTHOR("Clément Perrochaud <[email protected]>");
531473.c
/* * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The OpenAirInterface Software Alliance licenses this file to You under * the Apache License, Version 2.0 (the "License"); you may not use this file * except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *------------------------------------------------------------------------------- * For more information about the OpenAirInterface (OAI) Software Alliance: * [email protected] */ /*! \file mme_app_location.c \brief \author Sebastien ROUX, Lionel GAUTHIER \version 1.0 \company Eurecom \email: [email protected] */ #include <stdio.h> #include <string.h> #include <stdbool.h> #include <stdint.h> #include <inttypes.h> #include "bstrlib.h" #include "log.h" #include "msc.h" #include "assertions.h" #include "common_types.h" #include "conversions.h" #include "intertask_interface.h" #include "common_defs.h" #include "mme_config.h" #include "mme_app_ue_context.h" #include "mme_app_defs.h" #include "timer.h" #include "3gpp_23.003.h" #include "3gpp_36.401.h" #include "TrackingAreaIdentity.h" #include "emm_data.h" #include "intertask_interface_types.h" #include "itti_types.h" #include "mme_app_desc.h" #include "nas_messages_types.h" #include "s6a_messages_types.h" #include "service303.h" #include "sgs_messages_types.h" #include "esm_proc.h" //------------------------------------------------------------------------------ int mme_app_send_s6a_update_location_req( struct ue_mm_context_s *const ue_context_p) { OAILOG_FUNC_IN(LOG_MME_APP); MessageDef *message_p = NULL; s6a_update_location_req_t *s6a_ulr_p = NULL; int rc = RETURNok; OAILOG_INFO( TASK_MME_APP, "Sending S6A UPDATE LOCATION REQ to S6A, ue_id = %u\n", ue_context_p->mme_ue_s1ap_id); message_p = itti_alloc_new_message(TASK_MME_APP, S6A_UPDATE_LOCATION_REQ); if (message_p == NULL) { OAILOG_FUNC_RETURN(LOG_MME_APP, RETURNerror); } s6a_ulr_p = &message_p->ittiMsg.s6a_update_location_req; memset((void *) s6a_ulr_p, 0, sizeof(s6a_update_location_req_t)); IMSI64_TO_STRING( (ue_context_p->emm_context._imsi64), s6a_ulr_p->imsi, ue_context_p->emm_context._imsi.length); s6a_ulr_p->imsi_length = strlen(s6a_ulr_p->imsi); s6a_ulr_p->initial_attach = INITIAL_ATTACH; //memcpy(&s6a_ulr_p->visited_plmn, &ue_context_p->e_utran_cgi.plmn, sizeof //(plmn_t)); plmn_t visited_plmn = {0}; visited_plmn.mcc_digit1 = ue_context_p->emm_context.originating_tai.mcc_digit1; visited_plmn.mcc_digit2 = ue_context_p->emm_context.originating_tai.mcc_digit2; visited_plmn.mcc_digit3 = ue_context_p->emm_context.originating_tai.mcc_digit3; visited_plmn.mnc_digit1 = ue_context_p->emm_context.originating_tai.mnc_digit1; visited_plmn.mnc_digit2 = ue_context_p->emm_context.originating_tai.mnc_digit2; visited_plmn.mnc_digit3 = ue_context_p->emm_context.originating_tai.mnc_digit3; memcpy(&s6a_ulr_p->visited_plmn, &visited_plmn, sizeof(plmn_t)); s6a_ulr_p->rat_type = RAT_EUTRAN; OAILOG_DEBUG( TASK_MME_APP, "S6A ULR: RAT TYPE = (%d) for (ue_id = %u)\n", s6a_ulr_p->rat_type, 
ue_context_p->mme_ue_s1ap_id); /* * Check if we already have UE data * set the skip subscriber data flas as true in case we are sending ULR against recieved HSS Reset */ if (ue_context_p->location_info_confirmed_in_hss == true) { s6a_ulr_p->skip_subscriber_data = 1; OAILOG_DEBUG( TASK_MME_APP, "S6A Location information confirmed in HSS (%d) for (ue_id = %u)\n", ue_context_p->location_info_confirmed_in_hss, ue_context_p->mme_ue_s1ap_id); } else { s6a_ulr_p->skip_subscriber_data = 0; OAILOG_DEBUG( TASK_MME_APP, "S6A Location information not confirmed in HSS (%d) for (ue_id = %u)\n", ue_context_p->location_info_confirmed_in_hss, ue_context_p->mme_ue_s1ap_id); } //Check if we have voice domain preference IE and send to S6a task if(ue_context_p->emm_context.volte_params.presencemask & VOICE_DOMAIN_PREF_UE_USAGE_SETTING) { s6a_ulr_p->voice_dom_pref_ue_usg_setting = ue_context_p->emm_context.volte_params.voice_domain_preference_and_ue_usage_setting; s6a_ulr_p->presencemask |= S6A_PDN_CONFIG_VOICE_DOM_PREF; } MSC_LOG_TX_MESSAGE( MSC_MMEAPP_MME, MSC_S6A_MME, NULL, 0, "0 S6A_UPDATE_LOCATION_REQ imsi %s", s6a_ulr_p->imsi); OAILOG_DEBUG( LOG_MME_APP, "0 S6A_UPDATE_LOCATION_REQ imsi %s with length %d for (ue_id = %u)\n", s6a_ulr_p->imsi, s6a_ulr_p->imsi_length, ue_context_p->mme_ue_s1ap_id); rc = itti_send_msg_to_task(TASK_S6A, INSTANCE_DEFAULT, message_p); /* * Do not start this timer in case we are sending ULR after receiving HSS reset */ if (ue_context_p->location_info_confirmed_in_hss == false) { // Start ULR Response timer if ( timer_setup( ue_context_p->ulr_response_timer.sec, 0, TASK_MME_APP, INSTANCE_DEFAULT, TIMER_ONE_SHOT, (void *) &(ue_context_p->mme_ue_s1ap_id), sizeof(mme_ue_s1ap_id_t), &(ue_context_p->ulr_response_timer.id)) < 0) { OAILOG_ERROR( LOG_MME_APP, "Failed to start Update location update response timer for UE id %d " "\n", ue_context_p->mme_ue_s1ap_id); ue_context_p->ulr_response_timer.id = MME_APP_TIMER_INACTIVE_ID; } else { OAILOG_DEBUG( LOG_MME_APP, "Started location update response timer for UE id %d \n", ue_context_p->mme_ue_s1ap_id); } } OAILOG_FUNC_RETURN(LOG_MME_APP, rc); } int _handle_ula_failure(struct ue_mm_context_s *ue_context_p) { MessageDef *message_p = NULL; int rc = RETURNok; OAILOG_FUNC_IN(LOG_MME_APP); // Stop ULR Response timer if running if (ue_context_p->ulr_response_timer.id != MME_APP_TIMER_INACTIVE_ID) { if (timer_remove(ue_context_p->ulr_response_timer.id, NULL)) { OAILOG_ERROR( LOG_MME_APP, "Failed to stop Update location update response timer for UE id %d \n", ue_context_p->mme_ue_s1ap_id); } ue_context_p->ulr_response_timer.id = MME_APP_TIMER_INACTIVE_ID; } // Send PDN CONNECTIVITY FAIL message to NAS layer increment_counter("mme_s6a_update_location_ans", 1, 1, "result", "failure"); message_p = itti_alloc_new_message(TASK_MME_APP, NAS_PDN_CONNECTIVITY_FAIL); itti_nas_pdn_connectivity_fail_t *nas_pdn_connectivity_fail = &message_p->ittiMsg.nas_pdn_connectivity_fail; memset( (void *) nas_pdn_connectivity_fail, 0, sizeof(itti_nas_pdn_connectivity_fail_t)); if(ue_context_p->emm_context.esm_ctx.esm_proc_data) { nas_pdn_connectivity_fail->pti = ue_context_p->emm_context.esm_ctx. 
esm_proc_data->pti; } else { OAILOG_ERROR( LOG_MME_APP," esm_proc_data is NULL, so failed to fetch pti \n"); } nas_pdn_connectivity_fail->ue_id = ue_context_p->mme_ue_s1ap_id; nas_pdn_connectivity_fail->cause = CAUSE_SYSTEM_FAILURE; rc = itti_send_msg_to_task(TASK_NAS_MME, INSTANCE_DEFAULT, message_p); OAILOG_FUNC_RETURN(LOG_MME_APP, rc); } //------------------------------------------------------------------------------ int mme_app_handle_s6a_update_location_ans( const s6a_update_location_ans_t *const ula_pP) { OAILOG_FUNC_IN(LOG_MME_APP); uint64_t imsi64 = 0; struct ue_mm_context_s *ue_mm_context = NULL; int rc = RETURNok; DevAssert(ula_pP); IMSI_STRING_TO_IMSI64((char *) ula_pP->imsi, &imsi64); OAILOG_DEBUG( LOG_MME_APP, "Handling imsi " IMSI_64_FMT "\n", imsi64); if ( (ue_mm_context = mme_ue_context_exists_imsi( &mme_app_desc.mme_ue_contexts, imsi64)) == NULL) { OAILOG_ERROR( LOG_MME_APP, "That's embarrassing as we don't know this IMSI\n"); MSC_LOG_EVENT( MSC_MMEAPP_MME, "0 S6A_UPDATE_LOCATION unknown imsi " IMSI_64_FMT " ", imsi64); OAILOG_FUNC_RETURN(LOG_MME_APP, RETURNerror); } if (ula_pP->result.present == S6A_RESULT_BASE) { if (ula_pP->result.choice.base != DIAMETER_SUCCESS) { /* * The update location procedure has failed. Notify the NAS layer * and don't initiate the bearer creation on S-GW side. */ OAILOG_ERROR( LOG_MME_APP, "ULR/ULA procedure returned non success (ULA.result.choice.base=%d)\n", ula_pP->result.choice.base); if (_handle_ula_failure(ue_mm_context) == RETURNok) { OAILOG_DEBUG(LOG_MME_APP, "Sent PDN Connectivity failure to NAS for ue_id (%u)\n", ue_mm_context->mme_ue_s1ap_id); unlock_ue_contexts(ue_mm_context); OAILOG_FUNC_RETURN(LOG_MME_APP, rc); } else { OAILOG_ERROR( LOG_MME_APP, "Failed to send PDN Connectivity failure to NAS for ue_id (%u)\n", ue_mm_context->mme_ue_s1ap_id); unlock_ue_contexts(ue_mm_context); OAILOG_FUNC_RETURN(LOG_MME_APP, RETURNerror); } } } else { /* * The update location procedure has failed. Notify the NAS layer * and don't initiate the bearer creation on S-GW side. 
*/ OAILOG_ERROR( LOG_MME_APP, "ULR/ULA procedure returned non success (ULA.result.present=%d)\n", ula_pP->result.present); if (_handle_ula_failure(ue_mm_context) == RETURNok) { OAILOG_DEBUG(LOG_MME_APP, "Sent PDN Connectivity failure to NAS for ue_id (%u)\n", ue_mm_context->mme_ue_s1ap_id); unlock_ue_contexts(ue_mm_context); OAILOG_FUNC_RETURN(LOG_MME_APP, rc); } else { OAILOG_ERROR( LOG_MME_APP, "Failed to send PDN Connectivity failure to NAS for ue_id (%u)\n", ue_mm_context->mme_ue_s1ap_id); unlock_ue_contexts(ue_mm_context); OAILOG_FUNC_RETURN(LOG_MME_APP, RETURNerror); } } // Stop ULR Response timer if running if (ue_mm_context->ulr_response_timer.id != MME_APP_TIMER_INACTIVE_ID) { if (timer_remove(ue_mm_context->ulr_response_timer.id, NULL)) { OAILOG_ERROR( LOG_MME_APP, "Failed to stop Update location update response timer for UE id %d \n", ue_mm_context->mme_ue_s1ap_id); } ue_mm_context->ulr_response_timer.id = MME_APP_TIMER_INACTIVE_ID; } ue_mm_context->subscription_known = SUBSCRIPTION_KNOWN; ue_mm_context->sub_status = ula_pP->subscription_data.subscriber_status; ue_mm_context->access_restriction_data = ula_pP->subscription_data.access_restriction; /* * Copy the subscribed ambr to the sgw create session request message */ memcpy( &ue_mm_context->subscribed_ue_ambr, &ula_pP->subscription_data.subscribed_ambr, sizeof(ambr_t)); OAILOG_DEBUG( LOG_MME_APP, "Received UL rate %" PRIu64 " and DL rate %" PRIu64 "\n", ue_mm_context->subscribed_ue_ambr.br_ul, ue_mm_context->subscribed_ue_ambr.br_dl); if (ula_pP->subscription_data.msisdn_length != 0) { ue_mm_context->msisdn = blk2bstr( ula_pP->subscription_data.msisdn, ula_pP->subscription_data.msisdn_length); } else { OAILOG_ERROR( LOG_MME_APP, "No MSISDN received for %s " IMSI_64_FMT "\n", __FUNCTION__, imsi64); } ue_mm_context->rau_tau_timer = ula_pP->subscription_data.rau_tau_timer; ue_mm_context->network_access_mode = ula_pP->subscription_data.access_mode; memcpy( &ue_mm_context->apn_config_profile, &ula_pP->subscription_data.apn_config_profile, sizeof(apn_config_profile_t)); MessageDef *message_p = NULL; itti_nas_pdn_config_rsp_t *nas_pdn_config_rsp = NULL; message_p = itti_alloc_new_message(TASK_MME_APP, NAS_PDN_CONFIG_RSP); if (message_p == NULL) { OAILOG_ERROR( LOG_MME_APP, "Message pointer is NULL while allocating new message for PDN Config Rsp, (ue_id = %u)\n", ue_mm_context->mme_ue_s1ap_id); unlock_ue_contexts(ue_mm_context); OAILOG_FUNC_RETURN(LOG_MME_APP, RETURNerror); } /* * Set the value of Mobile Reachability timer based on value of T3412 (Periodic TAU timer) sent in Attach accept /TAU accept. * Set it to MME_APP_DELTA_T3412_REACHABILITY_TIMER minutes greater than T3412. * Set the value of Implicit timer. Set it to MME_APP_DELTA_REACHABILITY_IMPLICIT_DETACH_TIMER minutes greater than Mobile Reachability timer */ ue_mm_context->mobile_reachability_timer.id = MME_APP_TIMER_INACTIVE_ID; ue_mm_context->mobile_reachability_timer.sec = ((mme_config.nas_config.t3412_min) + MME_APP_DELTA_T3412_REACHABILITY_TIMER) * 60; ue_mm_context->implicit_detach_timer.id = MME_APP_TIMER_INACTIVE_ID; ue_mm_context->implicit_detach_timer.sec = (ue_mm_context->mobile_reachability_timer.sec) + MME_APP_DELTA_REACHABILITY_IMPLICIT_DETACH_TIMER * 60; /* * Set the flag: send_ue_purge_request to indicate that * Update Location procedure is completed. * During UE initiated detach/Implicit detach this MME would send PUR to hss, * if this flag is true. 
*/ ue_mm_context->send_ue_purge_request = true; /* * Set the flag: location_info_confirmed_in_hss to false to indicate that * Update Location procedure is completed. * During HSS Reset * if this flag is true. */ if (ue_mm_context->location_info_confirmed_in_hss == true) { ue_mm_context->location_info_confirmed_in_hss = false; } nas_pdn_config_rsp = &message_p->ittiMsg.nas_pdn_config_rsp; nas_pdn_config_rsp->ue_id = ue_mm_context->mme_ue_s1ap_id; MSC_LOG_TX_MESSAGE( MSC_MMEAPP_MME, MSC_NAS_MME, NULL, 0, "0 NAS_PDN_CONFIG_RESP imsi %s", ula_pP->imsi); OAILOG_INFO(LOG_MME_APP, "Sending PDN CONFIG RSP to NAS for (ue_id = %u)\n", nas_pdn_config_rsp->ue_id); rc = itti_send_msg_to_task(TASK_NAS_MME, INSTANCE_DEFAULT, message_p); unlock_ue_contexts(ue_mm_context); OAILOG_FUNC_RETURN(LOG_MME_APP, rc); } int mme_app_handle_s6a_cancel_location_req( const s6a_cancel_location_req_t *const clr_pP) { int rc = RETURNok; uint64_t imsi = 0; struct ue_mm_context_s *ue_context_p = NULL; MessageDef *message_p = NULL; int cla_result = DIAMETER_SUCCESS; itti_nas_sgs_detach_req_t sgs_detach_req = {0}; OAILOG_FUNC_IN(LOG_MME_APP); DevAssert(clr_pP); IMSI_STRING_TO_IMSI64((char *) clr_pP->imsi, &imsi); OAILOG_DEBUG( LOG_MME_APP, "S6a Cancel Location Request for imsi " IMSI_64_FMT "\n", imsi); if ( (mme_app_send_s6a_cancel_location_ans( cla_result, clr_pP->imsi, clr_pP->imsi_length, clr_pP->msg_cla_p)) != RETURNok) { OAILOG_ERROR( LOG_MME_APP, "S6a Cancel Location Request: Failed to send Cancel Location Answer from " "MME app for imsi " IMSI_64_FMT "\n", imsi); } if ( (ue_context_p = mme_ue_context_exists_imsi( &mme_app_desc.mme_ue_contexts, imsi)) == NULL) { OAILOG_ERROR(LOG_MME_APP, "IMSI is not present in the MME context for imsi " IMSI_64_FMT "\n", imsi); OAILOG_FUNC_RETURN(LOG_MME_APP, RETURNerror); } if (clr_pP->cancellation_type != SUBSCRIPTION_WITHDRAWL) { OAILOG_ERROR( LOG_MME_APP, "S6a Cancel Location Request: Cancellation_type not supported %d for imsi " IMSI_64_FMT "\n", clr_pP->cancellation_type, imsi); unlock_ue_contexts(ue_context_p); OAILOG_FUNC_RETURN(LOG_MME_APP, RETURNerror); } /* * set the flag: hss_initiated_detach to indicate that, * hss has initiated detach and MME shall not send PUR to hss */ ue_context_p->hss_initiated_detach = true; /* * Check UE's S1 connection status.If UE is in connected state, send Detach Request to UE. * If UE is in idle state, do implicit detach. 
- TODO once we add support for Paging- Page UE to bring it back to * connected mode and then send Detach Request */ if (ue_context_p->ecm_state == ECM_IDLE) { // Initiate Implicit Detach for the UE // Stop Mobile reachability timer,if running if ( ue_context_p->mobile_reachability_timer.id != MME_APP_TIMER_INACTIVE_ID) { if (timer_remove(ue_context_p->mobile_reachability_timer.id, NULL)) { OAILOG_ERROR( LOG_MME_APP, "Failed to stop Mobile Reachability timer for UE id %d \n", ue_context_p->mme_ue_s1ap_id); } ue_context_p->mobile_reachability_timer.id = MME_APP_TIMER_INACTIVE_ID; } // Stop Implicit detach timer,if running if (ue_context_p->implicit_detach_timer.id != MME_APP_TIMER_INACTIVE_ID) { if (timer_remove(ue_context_p->implicit_detach_timer.id, NULL)) { OAILOG_ERROR( LOG_MME_APP, "Failed to stop Implicit Detach timer for UE id %d \n", ue_context_p->mme_ue_s1ap_id); } ue_context_p->implicit_detach_timer.id = MME_APP_TIMER_INACTIVE_ID; } // Send Implicit Detach Ind to NAS message_p = itti_alloc_new_message(TASK_MME_APP, NAS_IMPLICIT_DETACH_UE_IND); DevAssert(message_p); message_p->ittiMsg.nas_implicit_detach_ue_ind.ue_id = ue_context_p->mme_ue_s1ap_id; itti_send_msg_to_task(TASK_NAS_MME, INSTANCE_DEFAULT, message_p); } else { // Send N/W Initiated Detach Request to NAS OAILOG_INFO(LOG_MME_APP, "Sending Detach to NAS for (ue_id = %u)\n", ue_context_p->mme_ue_s1ap_id); rc = mme_app_send_nas_detach_request( ue_context_p->mme_ue_s1ap_id, HSS_INITIATED_EPS_DETACH); // Send SGS explicit network initiated Detach Ind to SGS if (ue_context_p->sgs_context) { sgs_detach_req.ue_id = ue_context_p->mme_ue_s1ap_id; sgs_detach_req.detach_type = SGS_DETACH_TYPE_NW_INITIATED_EPS; mme_app_handle_sgs_detach_req(&sgs_detach_req); } } unlock_ue_contexts(ue_context_p); OAILOG_FUNC_RETURN(LOG_MME_APP, rc); } int mme_app_send_s6a_cancel_location_ans( int cla_result, const char *imsi, uint8_t imsi_length, void *msg_cla_p) { MessageDef *message_p = NULL; s6a_cancel_location_ans_t *s6a_cla_p = NULL; int rc = RETURNok; OAILOG_FUNC_IN(LOG_MME_APP); message_p = itti_alloc_new_message(TASK_MME_APP, S6A_CANCEL_LOCATION_ANS); if (message_p == NULL) { OAILOG_FUNC_RETURN(LOG_MME_APP, RETURNerror); } s6a_cla_p = &message_p->ittiMsg.s6a_cancel_location_ans; memset((void *) s6a_cla_p, 0, sizeof(s6a_cancel_location_ans_t)); /* Using the IMSI details deom CLR */ memcpy(s6a_cla_p->imsi, imsi, imsi_length); s6a_cla_p->imsi_length = imsi_length; s6a_cla_p->result = cla_result; s6a_cla_p->msg_cla_p = msg_cla_p; MSC_LOG_TX_MESSAGE( MSC_MMEAPP_MME, MSC_S6A_MME, NULL, 0, "0 S6A_CANCEL_LOCATION_ANS "); rc = itti_send_msg_to_task(TASK_S6A, INSTANCE_DEFAULT, message_p); OAILOG_FUNC_RETURN(LOG_MME_APP, rc); }
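/*
 * A minimal, self-contained sketch of folding an IMSI digit string into a
 * 64-bit integer, in the spirit of the IMSI_STRING_TO_IMSI64() macro used by
 * the handlers above.  The helper name and the plain decimal encoding are
 * assumptions for illustration only; the real OAI/Magma macro may encode the
 * digits differently and performs its own length handling.
 */
#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

/* Convert an IMSI digit string (up to 15 digits) to a decimal uint64_t.
 * Returns 0 on success, -1 on malformed input. */
static int imsi_string_to_u64(const char *imsi, uint64_t *out)
{
    uint64_t value = 0;
    int ndigits = 0;

    if (imsi == NULL || out == NULL) {
        return -1;
    }
    for (; *imsi != '\0'; imsi++, ndigits++) {
        if (!isdigit((unsigned char) *imsi) || ndigits >= 15) {
            return -1;          /* non-digit or too long for an IMSI */
        }
        value = value * 10 + (uint64_t) (*imsi - '0');
    }
    if (ndigits == 0) {
        return -1;              /* empty string */
    }
    *out = value;
    return 0;
}

int main(void)
{
    uint64_t imsi64 = 0;

    if (imsi_string_to_u64("001010123456789", &imsi64) == 0) {
        printf("imsi64 = %llu\n", (unsigned long long) imsi64);
    }
    return 0;
}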
196791.c
// // iirfilt_cccf_example.c // // Complex infinite impulse response filter example. Demonstrates the // functionality of iirfilt with complex coefficients by designing a // filter with specified parameters and then filters noise. // #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <complex.h> #include <getopt.h> #include "liquid.h" #define OUTPUT_FILENAME "iirfilt_cccf_example.m" // print usage/help message void usage() { printf("iirfilt_cccf_example -- infinite impulse response filter example\n"); printf("options (default values in []):\n"); printf(" h : print help\n"); printf(" t : filter type: [butter], cheby1, cheby2, ellip, bessel\n"); printf(" b : filter transformation: [LP], HP, BP, BS\n"); printf(" n : filter order, n > 0 [5]\n"); printf(" r : passband ripple in dB (cheby1, ellip), r > 0 [1.0]\n"); printf(" s : stopband attenuation in dB (cheby2, ellip), s > 0 [40.0]\n"); printf(" f : passband cut-off, 0 < f < 0.5 [0.2]\n"); printf(" c : center frequency (BP, BS cases), 0 < c < 0.5 [0.25]\n"); printf(" o : format [sos], tf\n"); printf(" sos : second-order sections form\n"); printf(" tf : regular transfer function form (potentially\n"); printf(" unstable for large orders\n"); } int main(int argc, char*argv[]) { // options unsigned int order=4; // filter order float fc=0.1f; // cutoff frequency float f0=0.0f; // center frequency float Ap=1.0f; // pass-band ripple float As=40.0f; // stop-band attenuation unsigned int n=128; // number of samples liquid_iirdes_filtertype ftype = LIQUID_IIRDES_ELLIP; liquid_iirdes_bandtype btype = LIQUID_IIRDES_LOWPASS; liquid_iirdes_format format = LIQUID_IIRDES_SOS; int dopt; while ((dopt = getopt(argc,argv,"ht:b:n:r:s:f:c:o:")) != EOF) { switch (dopt) { case 'h': usage(); return 0; case 't': if (strcmp(optarg,"butter")==0) ftype = LIQUID_IIRDES_BUTTER; else if (strcmp(optarg,"cheby1")==0) ftype = LIQUID_IIRDES_CHEBY1; else if (strcmp(optarg,"cheby2")==0) ftype = LIQUID_IIRDES_CHEBY2; else if (strcmp(optarg,"ellip") ==0) ftype = LIQUID_IIRDES_ELLIP; else if (strcmp(optarg,"bessel")==0) ftype = LIQUID_IIRDES_BESSEL; else { fprintf(stderr,"error: iirdes_example, unknown filter type '%s'\n", optarg); exit(1); } break; case 'b': if (strcmp(optarg,"LP")==0) btype = LIQUID_IIRDES_LOWPASS; else if (strcmp(optarg,"HP")==0) btype = LIQUID_IIRDES_HIGHPASS; else if (strcmp(optarg,"BP")==0) btype = LIQUID_IIRDES_BANDPASS; else if (strcmp(optarg,"BS")==0) btype = LIQUID_IIRDES_BANDSTOP; else { fprintf(stderr,"error: iirdes_example, unknown band type '%s'\n", optarg); exit(1); } break; case 'n': order = atoi(optarg); break; case 'r': Ap = atof(optarg); break; case 's': As = atof(optarg); break; case 'f': fc = atof(optarg); break; case 'c': f0 = atof(optarg); break; case 'o': if (strcmp(optarg,"sos")==0) format = LIQUID_IIRDES_SOS; else if (strcmp(optarg,"tf") ==0) format = LIQUID_IIRDES_TF; else { fprintf(stderr,"error: iirdes_example, unknown output format '%s'\n", optarg); exit(1); } break; default: exit(1); } } // design filter from prototype iirfilt_cccf q = iirfilt_cccf_create_prototype(ftype, btype, format, order, fc, f0, Ap, As); iirfilt_cccf_print(q); unsigned int i; // allocate memory for data arrays float complex x[n]; float complex y[n]; // generate input signal (noisy sine wave with decaying amplitude) unsigned int wlen = (3*n)/4; for (i=0; i<n; i++) { // input signal (windowed noise) x[i] = randnf() + _Complex_I*randnf(); x[i] *= i < wlen ? 
liquid_hamming(i,wlen) : 0.0f; // run filter iirfilt_cccf_execute(q, x[i], &y[i]); } // compute two-sided frequency response unsigned int nfft=512; float complex H[nfft]; for (i=0; i<nfft; i++) { float freq = (float)i / (float)nfft - 0.5f; iirfilt_cccf_freqresponse(q, freq, &H[i]); } // destroy filter object iirfilt_cccf_destroy(q); // // plot results to output file // FILE * fid = fopen(OUTPUT_FILENAME,"w"); fprintf(fid,"%% %s : auto-generated file\n", OUTPUT_FILENAME); fprintf(fid,"clear all;\n"); fprintf(fid,"close all;\n"); fprintf(fid,"\n"); fprintf(fid,"order=%u;\n", order); // save input, output arrays fprintf(fid,"n=%u;\n",n); fprintf(fid,"x=zeros(1,n);\n"); fprintf(fid,"y=zeros(1,n);\n"); for (i=0; i<n; i++) { fprintf(fid,"x(%4u) = %12.4e + j*%12.4e;\n", i+1, crealf(x[i]), cimagf(x[i])); fprintf(fid,"y(%4u) = %12.4e + j*%12.4e;\n", i+1, crealf(y[i]), cimagf(y[i])); } // save frequency response fprintf(fid,"nfft=%u;\n",nfft); fprintf(fid,"H=zeros(1,nfft);\n"); for (i=0; i<nfft; i++) fprintf(fid,"H(%4u) = %12.8f + j*%12.8f;\n", i+1, crealf(H[i]), cimagf(H[i])); // plot time-domain output fprintf(fid,"t=0:(n-1);\n"); fprintf(fid,"figure;\n"); fprintf(fid,"subplot(2,1,1);\n"); fprintf(fid," plot(t,real(x),'-','Color',[1 1 1]*0.5,'LineWidth',1,...\n"); fprintf(fid," t,real(y),'-','Color',[0 0.5 0.25],'LineWidth',2);\n"); fprintf(fid," xlabel('time');\n"); fprintf(fid," ylabel('real');\n"); fprintf(fid," legend('input','filtered output',1);\n"); fprintf(fid," grid on;\n"); fprintf(fid,"subplot(2,1,2);\n"); fprintf(fid," plot(t,imag(x),'-','Color',[1 1 1]*0.5,'LineWidth',1,...\n"); fprintf(fid," t,imag(y),'-','Color',[0 0.25 0.5],'LineWidth',2);\n"); fprintf(fid," xlabel('time');\n"); fprintf(fid," ylabel('imag');\n"); fprintf(fid," legend('input','filtered output',1);\n"); fprintf(fid," grid on;\n"); // plot spectral output fprintf(fid,"X = 20*log10(abs(fftshift(fft(x))));\n"); fprintf(fid,"Y = 20*log10(abs(fftshift(fft(y))));\n"); fprintf(fid,"figure;\n"); fprintf(fid,"plot([0:(n-1)]/n-0.5, X, 'Color', [1 1 1]*0.5,\n"); fprintf(fid," [0:(n-1)]/n-0.5, Y, 'Color', [0 0.25 0.50]);\n"); fprintf(fid,"xlabel('Normalized Frequency');\n"); fprintf(fid,"ylabel('PSD [dB]');\n"); fprintf(fid,"legend('input','filtered output',1);\n"); fprintf(fid,"grid on;\n"); // plot ideal frequency response fprintf(fid,"f=[0:(nfft-1)]/nfft - 0.5;\n"); fprintf(fid,"figure;\n"); fprintf(fid,"subplot(3,1,1);\n"); fprintf(fid," plot(f,20*log10(abs(H)));\n"); fprintf(fid," axis([-0.5 0.5 -3 0]);\n"); fprintf(fid," grid on;\n"); fprintf(fid," legend('Pass band (dB)',0);\n"); fprintf(fid,"subplot(3,1,2);\n"); fprintf(fid," plot(f,20*log10(abs(H)));\n"); fprintf(fid," axis([-0.5 0.5 -100 0]);\n"); fprintf(fid," grid on;\n"); fprintf(fid," legend('Stop band (dB)',0);\n"); fprintf(fid,"subplot(3,1,3);\n"); fprintf(fid," plot(f,180/pi*arg(H));\n"); fprintf(fid," axis([-0.5 0.5 -180 180]);\n"); fprintf(fid," grid on;\n"); fprintf(fid," legend('Phase (degrees)',0);\n"); fclose(fid); printf("results written to %s.\n", OUTPUT_FILENAME); printf("done.\n"); return 0; }
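/*
 * A minimal sketch of a single direct-form-II-transposed biquad section, the
 * building block behind the second-order-sections (SOS) format selected above
 * with LIQUID_IIRDES_SOS.  The coefficients below are an arbitrary example,
 * not the output of liquid's design routines, and the names are illustrative
 * only.
 */
#include <stdio.h>

/* One biquad: y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2],
 * evaluated with two state variables (transposed direct form II). */
typedef struct {
    float b0, b1, b2;   /* feed-forward coefficients */
    float a1, a2;       /* feedback coefficients (a0 normalized to 1) */
    float s1, s2;       /* internal state */
} biquad_t;

static float biquad_execute(biquad_t *q, float x)
{
    float y = q->b0 * x + q->s1;
    q->s1   = q->b1 * x - q->a1 * y + q->s2;
    q->s2   = q->b2 * x - q->a2 * y;
    return y;
}

int main(void)
{
    /* Example coefficients (roughly a gentle low-pass); not a designed filter */
    biquad_t q = { 0.2929f, 0.5858f, 0.2929f, 0.0f, 0.1716f, 0.0f, 0.0f };
    float impulse[8] = { 1.0f, 0, 0, 0, 0, 0, 0, 0 };

    for (int i = 0; i < 8; i++)
        printf("h[%d] = %f\n", i, biquad_execute(&q, impulse[i]));
    return 0;
}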
438406.c
/** * Copyright (c) 2018 hgoel * * This software is released under the MIT License. * https://opensource.org/licenses/MIT */ #include <types.h> #include <stdlib.h> #include <stddef.h> #include <string.h> #include <cardinal/local_spinlock.h> #include "SysVirtualMemory/vmem.h" #include "SysPhysicalMemory/phys_mem.h" #include "SysInterrupts/interrupts.h" #include "pci/pci.h" #include "ahci.h" static int device_init_lock = 0; static ahci_instance_t *instance = NULL; static uint32_t device_count = 0; static void tmp_handler(int int_num) { //Handle appropriately ahci_instance_t *iter = instance; while (iter != NULL) { if (iter->interrupt_vec == int_num) { //Check HBA_IS per port uint32_t g_is = ahci_read32(iter, HBA_IS); { char tmpbuf[10]; DEBUG_PRINT("[AHCI] G_IS: "); DEBUG_PRINT(itoa(g_is, tmpbuf, 16)); DEBUG_PRINT("\r\n"); } for (int p_idx = 0; p_idx < 32; p_idx++) if (g_is & (1 << p_idx)) { //Check Px_IS to determine specific interrupt uint32_t px_is = ahci_read32(iter, HBA_PxIS(p_idx)); if (px_is & HBA_PxIS_DHRS) { DEBUG_PRINT("[AHCI] D2H Register FIS\r\n"); //On receive ahci_write32(iter, HBA_PxIS(p_idx), HBA_PxIS_DHRS); //Clear interrupt uint32_t ci = ahci_read32(iter, HBA_PxCI(p_idx)); // ci's bits are always a subset of activeCmdBits // thus, xor is bits where activeCmdBits is set, but ci is clear local_spinlock_lock(&iter->lock); uint32_t finishedCmds = iter->activeCmdBits[p_idx] ^ ci; iter->finishedCmdBits[p_idx] |= finishedCmds; iter->activeCmdBits[p_idx] = ci; local_spinlock_unlock(&iter->lock); //TODO: queue a notification to CoreStorage about finished tasks } ahci_write32(iter, HBA_IS, 1 << p_idx); //clear interrupt status } } iter = iter->next; } } int module_init(void *ecam_addr) { int cli_state = cli(); local_spinlock_lock(&device_init_lock); pci_config_t *device = (pci_config_t *)vmem_phystovirt((intptr_t)ecam_addr, KiB(4), vmem_flags_uncached | vmem_flags_kernel | vmem_flags_rw); //enable pci bus master device->command.busmaster = 1; //interrupt setup int int_cnt = 0; int msi_val = pci_getmsiinfo(device, &int_cnt); if (msi_val < 0) DEBUG_PRINT("[AHCI] NO MSI\r\n"); int int_val = 0; interrupt_allocate(1, interrupt_flags_none, &int_val); { DEBUG_PRINT("[AHCI] Allocated Interrupt Vector: "); char tmpbuf[10]; DEBUG_PRINT(itoa(int_val, tmpbuf, 10)); DEBUG_PRINT("\r\n"); } uintptr_t msi_addr = (uintptr_t)msi_register_addr(0); uint32_t msi_msg = msi_register_data(int_val); pci_setmsiinfo(device, msi_val, &msi_addr, &msi_msg, 1); //figure out which bar to use uint64_t bar = 0; for (int i = 0; i < 6; i++) { if ((device->bar[i] & 0x7) == 0x4) //Is 64-bit memory mapped bar = (device->bar[i] & 0xFFFFFFF0) + ((uint64_t)device->bar[i + 1] << 32); else if ((device->bar[i] & 0x7) == 0x0) //Is 32-bit memory mapped bar = (device->bar[i] & 0xFFFFFFF0); if (bar) break; } //automatically handle ahci_instance_t *prev_inst = instance; instance = (ahci_instance_t *)malloc(sizeof(ahci_instance_t)); instance->lock = 0; local_spinlock_lock(&instance->lock); instance->cfg = (uintptr_t)vmem_phystovirt(bar, KiB(4), vmem_flags_uncached | vmem_flags_kernel | vmem_flags_rw); instance->interrupt_vec = int_val; instance->next = prev_inst; device_count++; local_spinlock_unlock(&instance->lock); ahci_obtainownership(instance); ahci_reportawareness(instance); ahci_resethba(instance); ahci_reportawareness(instance); //Get implemented ports uint32_t ports = ahci_readports(instance); uint32_t port_cnt = 32; //popcnt32(ports); instance->implPortCnt = port_cnt; //allocate dma memory //32 * (CMD_BUF_SIZE + FIS_SIZE + 
sizeof(ahci_cmdtable_t)) size_t dma_sz = port_cnt * (CMD_BUF_SIZE + FIS_SIZE + sizeof(ahci_cmdtable_t)); instance->port_dma.phys_addr = (uint64_t)pagealloc_alloc(0, 0, physmem_alloc_flags_zero | physmem_alloc_flags_data, dma_sz); instance->port_dma.virt_addr = (uint64_t)vmem_phystovirt(instance->port_dma.phys_addr, dma_sz, vmem_flags_uncached | vmem_flags_kernel | vmem_flags_rw); //initialize ports //register each active port to IO interface for (int i = 0; i < 32; i++) if (ports & (1 << i)) if (ahci_initializeport(instance, i) == 0) { //Configure interrupts for the port ahci_write32(instance, HBA_PxIS(i), 0); ahci_write32(instance, HBA_IS, 0); //Descriptor Processed Interrupt //DMA Setup FIS Interrupt - //Device to Host Register FIS Interrupt - Status/Error notification ahci_write32(instance, HBA_PxIE(i), HBA_PxIS_DHRS); //Register to IO interface } //Exit init state local_spinlock_unlock(&device_init_lock); sti(cli_state); //Enable interrupts interrupt_registerhandler(int_val, tmp_handler); ahci_write32(instance, HBA_GHC, ahci_read32(instance, HBA_GHC) | (1 << 1)); { //uint64_t paddr = (uint64_t)pagealloc_alloc(0, 0, physmem_alloc_flags_zero | physmem_alloc_flags_data, KiB(32)); //uint64_t vaddr = (uint64_t)vmem_phystovirt(paddr, KiB(32), vmem_flags_uncached | vmem_flags_kernel | vmem_flags_rw); //vmem_map(NULL, (intptr_t)vaddr, (intptr_t)paddr, KiB(32), vmem_flags_uncached | vmem_flags_kernel | vmem_flags_rw, 0); //ahci_readdev(instance, 0, 0, (void *)vaddr, KiB(32)); } return 0; }
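/*
 * A self-contained sketch of the command-completion bookkeeping used in the
 * interrupt handler above: the PxCI bits that were set in activeCmdBits but
 * are clear in the freshly read CI register are exactly the slots that just
 * finished, so XOR-ing the two recovers them.  The register values here are
 * made up for the demonstration; no real hardware access is involved.
 */
#include <stdint.h>
#include <stdio.h>

/* Update the bookkeeping for one port and return the bitmask of finished
 * command slots.  ci must be a subset of *active for the XOR to be valid. */
static uint32_t ahci_note_finished(uint32_t *active, uint32_t *finished, uint32_t ci)
{
    uint32_t done = *active ^ ci;   /* set in active, clear in ci */

    *finished |= done;
    *active    = ci;
    return done;
}

int main(void)
{
    uint32_t active   = 0x0000000Du;  /* slots 0, 2 and 3 were issued */
    uint32_t finished = 0;
    uint32_t ci       = 0x00000004u;  /* hardware reports only slot 2 still busy */

    uint32_t done = ahci_note_finished(&active, &finished, ci);
    printf("finished this interrupt: 0x%08x\n", done);    /* 0x00000009 */
    printf("still active:            0x%08x\n", active);  /* 0x00000004 */
    return 0;
}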
678654.c
/* * Fullscreen Hack * * Simulate monitor resolution change * * Copyright 2020 Andrew Eikum for CodeWeavers * Copyright 2020 Zhiyi Zhang for CodeWeavers * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA */ #include "config.h" #include <math.h> #include <stdlib.h> #define NONAMELESSSTRUCT #define NONAMELESSUNION #include "x11drv.h" #include "wine/debug.h" #include "wine/list.h" #include "wine/heap.h" #include "wine/unicode.h" WINE_DEFAULT_DEBUG_CHANNEL(fshack); static struct x11drv_display_device_handler real_device_handler; static struct x11drv_settings_handler real_settings_handler; static struct list fs_monitors = LIST_INIT(fs_monitors); /* Access to fs_monitors is protected by fs_section */ static CRITICAL_SECTION fs_section; static CRITICAL_SECTION_DEBUG critsect_debug = { 0, 0, &fs_section, {&critsect_debug.ProcessLocksList, &critsect_debug.ProcessLocksList}, 0, 0, {(DWORD_PTR)(__FILE__ ": fs_section")} }; static CRITICAL_SECTION fs_section = {&critsect_debug, -1, 0, 0, 0, 0}; struct fs_monitor_size { DWORD width; DWORD height; }; /* A table of resolutions some games expect but host system may not report */ static struct fs_monitor_size fs_monitor_sizes[] = { {640, 480}, /* 4:3 */ {800, 600}, /* 4:3 */ {1024, 768}, /* 4:3 */ {1600, 1200}, /* 4:3 */ {960, 540}, /* 16:9 */ {1280, 720}, /* 16:9 */ {1600, 900}, /* 16:9 */ {1920, 1080}, /* 16:9 */ {2560, 1440}, /* 16:9 */ {2880, 1620}, /* 16:9 */ {3200, 1800}, /* 16:9 */ {1440, 900}, /* 8:5 */ {1680, 1050}, /* 8:5 */ {1920, 1200}, /* 8:5 */ {2560, 1600}, /* 8:5 */ {1440, 960}, /* 3:2 */ {1920, 1280}, /* 3:2 */ {2560, 1080}, /* 21:9 ultra-wide */ {1920, 800}, /* 12:5 */ {3840, 1600}, /* 12:5 */ {1280, 1024}, /* 5:4 */ }; /* A fake monitor for the fullscreen hack */ struct fs_monitor { struct list entry; DEVMODEW user_mode; /* Mode changed to by users */ DEVMODEW real_mode; /* Mode actually used by the host system */ double user_to_real_scale; /* Scale factor from fake monitor to real monitor */ POINT top_left; /* Top left corner of the fake monitor rectangle in real virtual screen coordinates */ DEVMODEW *modes; /* Supported display modes */ UINT mode_count; /* Display mode count */ UINT unique_resolutions; /* Number of unique resolutions in terms of WxH */ }; static void add_fs_mode(struct fs_monitor *fs_monitor, DWORD depth, DWORD width, DWORD height, DWORD frequency, DWORD orientation) { int i; DEVMODEW *mode; const char *appid; BOOL is_new_resolution; /* Titan Souls renders incorrectly if we report modes smaller than 800x600 */ if ((appid = getenv("SteamAppId")) && !strcmp(appid, "297130")) { if (orientation == DMDO_DEFAULT || orientation == DMDO_180) { if (height <= 600 && !(height == 600 && width == 800)) return; } else { if (width <= 600 && !(width == 600 && height == 800)) return; } } is_new_resolution = TRUE; for (i = 0; i < fs_monitor->mode_count; ++i) { if 
(fs_monitor->modes[i].dmPelsWidth == width && fs_monitor->modes[i].dmPelsHeight == height) { is_new_resolution = FALSE; if (fs_monitor->modes[i].dmBitsPerPel == depth && fs_monitor->modes[i].dmDisplayFrequency == frequency && fs_monitor->modes[i].u1.s2.dmDisplayOrientation == orientation) return; /* The exact mode is already added, nothing to do */ } } if (is_new_resolution) { /* Some games crash if we report too many unique resolutions (in terms of HxW) */ if (limit_number_of_resolutions && fs_monitor->unique_resolutions >= limit_number_of_resolutions) return; fs_monitor->unique_resolutions++; } mode = &fs_monitor->modes[fs_monitor->mode_count++]; mode->dmSize = sizeof(*mode); mode->dmDriverExtra = 0; mode->dmFields = DM_DISPLAYORIENTATION | DM_BITSPERPEL | DM_PELSWIDTH | DM_PELSHEIGHT | DM_DISPLAYFLAGS | DM_DISPLAYFREQUENCY; mode->u1.s2.dmDisplayOrientation = orientation; mode->dmBitsPerPel = depth; mode->dmPelsWidth = width; mode->dmPelsHeight = height; mode->u2.dmDisplayFlags = 0; mode->dmDisplayFrequency = frequency; } static BOOL fs_monitor_add_modes(struct fs_monitor *fs_monitor) { DEVMODEW *real_modes, *real_mode, current_mode; UINT real_mode_count; DWORD width, height; ULONG_PTR real_id; ULONG offset; UINT i, j; if (!real_settings_handler.get_id(fs_monitor->user_mode.dmDeviceName, &real_id)) return FALSE; if (!real_settings_handler.get_current_mode(real_id, &current_mode)) return FALSE; /* Fullscreen hack doesn't support changing display orientations */ if (!real_settings_handler.get_modes(real_id, 0, &real_modes, &real_mode_count)) return FALSE; fs_monitor->mode_count = 0; fs_monitor->unique_resolutions = 0; fs_monitor->modes = heap_calloc(ARRAY_SIZE(fs_monitor_sizes) * DEPTH_COUNT + real_mode_count, sizeof(*fs_monitor->modes)); if (!fs_monitor->modes) { real_settings_handler.free_modes(real_modes); return FALSE; } /* Add the current mode early, in case we have to limit */ add_fs_mode(fs_monitor, current_mode.dmBitsPerPel, current_mode.dmPelsWidth, current_mode.dmPelsHeight, current_mode.dmDisplayFrequency, current_mode.u1.s2.dmDisplayOrientation); /* Linux reports far fewer resolutions than Windows. Add modes that some games may expect. 
*/ for (i = 0; i < ARRAY_SIZE(fs_monitor_sizes); ++i) { if (current_mode.u1.s2.dmDisplayOrientation == DMDO_DEFAULT || current_mode.u1.s2.dmDisplayOrientation == DMDO_180) { width = fs_monitor_sizes[i].width; height = fs_monitor_sizes[i].height; } else { width = fs_monitor_sizes[i].height; height = fs_monitor_sizes[i].width; } /* Don't report modes that are larger than the current mode */ if (width > current_mode.dmPelsWidth || height > current_mode.dmPelsHeight) continue; for (j = 0; j < DEPTH_COUNT; ++j) add_fs_mode(fs_monitor, depths[j], width, height, 60, current_mode.u1.s2.dmDisplayOrientation); } for (i = 0; i < real_mode_count; ++i) { offset = (sizeof(*real_modes) + real_modes[0].dmDriverExtra) * i; real_mode = (DEVMODEW *)((BYTE *)real_modes + offset); /* Don't report real modes that are larger than the current mode */ if (real_mode->dmPelsWidth > current_mode.dmPelsWidth || real_mode->dmPelsHeight > current_mode.dmPelsHeight) continue; add_fs_mode(fs_monitor, real_mode->dmBitsPerPel, real_mode->dmPelsWidth, real_mode->dmPelsHeight, real_mode->dmDisplayFrequency, real_mode->u1.s2.dmDisplayOrientation); } real_settings_handler.free_modes(real_modes); /* Sort display modes so that X11DRV_EnumDisplaySettingsEx gets an already sorted mode list */ qsort(fs_monitor->modes, fs_monitor->mode_count, sizeof(*fs_monitor->modes), mode_compare); return TRUE; } /* Add a fake monitor to fs_monitors list. * Call this function with fs_section entered */ static BOOL fs_add_monitor(const WCHAR *device_name) { struct fs_monitor *fs_monitor; DEVMODEW real_mode; ULONG_PTR real_id; if (!real_settings_handler.get_id(device_name, &real_id)) return FALSE; if (!real_settings_handler.get_current_mode(real_id, &real_mode)) return FALSE; if (!(fs_monitor = heap_alloc(sizeof(*fs_monitor)))) return FALSE; fs_monitor->user_mode = real_mode; fs_monitor->real_mode = real_mode; fs_monitor->user_to_real_scale = 1.0; fs_monitor->top_left.x = real_mode.u1.s2.dmPosition.x; fs_monitor->top_left.y = real_mode.u1.s2.dmPosition.y; lstrcpyW(fs_monitor->user_mode.dmDeviceName, device_name); if (!fs_monitor_add_modes(fs_monitor)) { ERR("Failed to initialize display modes.\n"); heap_free(fs_monitor); return FALSE; } list_add_tail(&fs_monitors, &fs_monitor->entry); return TRUE; } /* Fullscreen settings handler */ /* Convert fullscreen hack settings handler id to GDI device name */ static void fs_id_to_device_name(ULONG_PTR id, WCHAR *device_name) { static WCHAR display_fmtW[] = {'\\','\\','.','\\','D','I','S','P','L','A','Y','%','d',0}; sprintfW(device_name, display_fmtW, (INT)id); } static BOOL fs_get_id(const WCHAR *device_name, ULONG_PTR *id) { static const WCHAR displayW[] = {'\\','\\','.','\\','D','I','S','P','L','A','Y'}; long int display_index; WCHAR *end; if (strncmpiW( device_name, displayW, ARRAY_SIZE(displayW) )) return FALSE; display_index = strtolW( device_name + ARRAY_SIZE(displayW), &end, 10 ); if (*end) return FALSE; *id = (ULONG_PTR)display_index; return TRUE; } /* Find a fs_monitor from a display name. 
* Call this function with fs_section entered */ static struct fs_monitor *fs_get_monitor_by_name(const WCHAR *name) { struct fs_monitor *fs_monitor; TRACE("name %s\n", wine_dbgstr_w(name)); LIST_FOR_EACH_ENTRY(fs_monitor, &fs_monitors, struct fs_monitor, entry) { if (!lstrcmpiW(fs_monitor->user_mode.dmDeviceName, name)) return fs_monitor; } return NULL; } static BOOL fs_get_modes(ULONG_PTR id, DWORD flags, DEVMODEW **new_modes, UINT *mode_count) { WCHAR device_name[CCHDEVICENAME]; struct fs_monitor *fs_monitor; fs_id_to_device_name(id, device_name); EnterCriticalSection(&fs_section); if ((fs_monitor = fs_get_monitor_by_name(device_name))) { *new_modes = fs_monitor->modes; *mode_count = fs_monitor->mode_count; LeaveCriticalSection(&fs_section); return TRUE; } LeaveCriticalSection(&fs_section); return FALSE; } static void fs_free_modes(DEVMODEW *modes){} /* Find a fs_monitor from a HMONITOR handle. * Call this function with fs_section entered */ static struct fs_monitor *fs_find_monitor_by_handle(HMONITOR monitor) { MONITORINFOEXW monitor_info; TRACE("monitor %p\n", monitor); monitor_info.cbSize = sizeof(monitor_info); if (!GetMonitorInfoW(monitor, (MONITORINFO *)&monitor_info)) return NULL; return fs_get_monitor_by_name(monitor_info.szDevice); } static BOOL fs_get_current_mode(ULONG_PTR id, DEVMODEW *mode) { WCHAR device_name[CCHDEVICENAME]; struct fs_monitor *fs_monitor; fs_id_to_device_name(id, device_name); EnterCriticalSection(&fs_section); fs_monitor = fs_get_monitor_by_name(device_name); if (fs_monitor) { *mode = fs_monitor->user_mode; LeaveCriticalSection(&fs_section); return TRUE; } LeaveCriticalSection(&fs_section); return FALSE; } static LONG fs_set_current_mode(ULONG_PTR id, DEVMODEW *user_mode) { WCHAR device_name[CCHDEVICENAME]; struct fs_monitor *fs_monitor; DEVMODEW real_mode; ULONG_PTR real_id; double scale; fs_id_to_device_name(id, device_name); EnterCriticalSection(&fs_section); fs_monitor = fs_get_monitor_by_name(device_name); if (!fs_monitor) { LeaveCriticalSection(&fs_section); return DISP_CHANGE_FAILED; } if (is_detached_mode(&fs_monitor->real_mode) && !is_detached_mode(user_mode)) { FIXME("Attaching adapters is unsupported with fullscreen hack.\n"); return DISP_CHANGE_SUCCESSFUL; } /* Real modes may be changed since initialization */ if (!real_settings_handler.get_id(device_name, &real_id) || !real_settings_handler.get_current_mode(real_id, &real_mode)) { LeaveCriticalSection(&fs_section); return DISP_CHANGE_FAILED; } fs_monitor->user_mode = *user_mode; fs_monitor->real_mode = real_mode; lstrcpyW(fs_monitor->user_mode.dmDeviceName, device_name); if (is_detached_mode(user_mode)) { fs_monitor->user_to_real_scale = 0; fs_monitor->top_left.x = 0; fs_monitor->top_left.y = 0; } /* Integer scaling */ else if (fs_hack_is_integer()) { scale = min(real_mode.dmPelsWidth / user_mode->dmPelsWidth, real_mode.dmPelsHeight / user_mode->dmPelsHeight); fs_monitor->user_to_real_scale = scale; fs_monitor->top_left.x = real_mode.u1.s2.dmPosition.x + (real_mode.dmPelsWidth - user_mode->dmPelsWidth * scale) / 2; fs_monitor->top_left.y = real_mode.u1.s2.dmPosition.y + (real_mode.dmPelsHeight - user_mode->dmPelsHeight * scale) / 2; } /* If real mode is narrower than fake mode, scale to fit width */ else if ((double)real_mode.dmPelsWidth / (double)real_mode.dmPelsHeight < (double)user_mode->dmPelsWidth / (double)user_mode->dmPelsHeight) { scale = (double)real_mode.dmPelsWidth / (double)user_mode->dmPelsWidth; fs_monitor->user_to_real_scale = scale; fs_monitor->top_left.x = 
real_mode.u1.s2.dmPosition.x; fs_monitor->top_left.y = real_mode.u1.s2.dmPosition.y + (real_mode.dmPelsHeight - user_mode->dmPelsHeight * scale) / 2; } /* Else scale to fit height */ else { scale = (double)real_mode.dmPelsHeight / (double)user_mode->dmPelsHeight; fs_monitor->user_to_real_scale = scale; fs_monitor->top_left.x = real_mode.u1.s2.dmPosition.x + (real_mode.dmPelsWidth - user_mode->dmPelsWidth * scale) / 2; fs_monitor->top_left.y = real_mode.u1.s2.dmPosition.y; } TRACE("real_mode x %d y %d width %d height %d\n", real_mode.u1.s2.dmPosition.x, real_mode.u1.s2.dmPosition.y, real_mode.dmPelsWidth, real_mode.dmPelsHeight); TRACE("user_mode x %d y %d width %d height %d\n", user_mode->u1.s2.dmPosition.x, user_mode->u1.s2.dmPosition.y, user_mode->dmPelsWidth, user_mode->dmPelsHeight); TRACE("user_to_real_scale %lf\n", fs_monitor->user_to_real_scale); TRACE("top left corner:%s\n", wine_dbgstr_point(&fs_monitor->top_left)); LeaveCriticalSection(&fs_section); return DISP_CHANGE_SUCCESSFUL; } /* Display device handler functions */ static BOOL fs_get_monitors(ULONG_PTR adapter_id, struct x11drv_monitor **new_monitors, int *count) { struct x11drv_monitor *monitor; struct fs_monitor *fs_monitor; RECT rect; INT i; if (!real_device_handler.get_monitors(adapter_id, new_monitors, count)) return FALSE; EnterCriticalSection(&fs_section); for (i = 0; i < *count; ++i) { monitor = &(*new_monitors)[i]; LIST_FOR_EACH_ENTRY(fs_monitor, &fs_monitors, struct fs_monitor, entry) { rect.left = fs_monitor->real_mode.u1.s2.dmPosition.x; rect.top = fs_monitor->real_mode.u1.s2.dmPosition.y; rect.right = rect.left + fs_monitor->real_mode.dmPelsWidth; rect.bottom = rect.top + fs_monitor->real_mode.dmPelsHeight; if (EqualRect(&rect, &monitor->rc_monitor)) { monitor->rc_monitor.left = fs_monitor->user_mode.u1.s2.dmPosition.x; monitor->rc_monitor.top = fs_monitor->user_mode.u1.s2.dmPosition.y; monitor->rc_monitor.right = monitor->rc_monitor.left + fs_monitor->user_mode.dmPelsWidth; monitor->rc_monitor.bottom = monitor->rc_monitor.top + fs_monitor->user_mode.dmPelsHeight; monitor->rc_work = monitor->rc_monitor; monitor->state_flags = DISPLAY_DEVICE_ATTACHED; if (fs_monitor->user_mode.dmPelsWidth && fs_monitor->user_mode.dmPelsHeight) monitor->state_flags |= DISPLAY_DEVICE_ACTIVE; } } } LeaveCriticalSection(&fs_section); return TRUE; } /* Fullscreen hack helpers */ /* Return whether fullscreen hack is enabled on a specific monitor */ BOOL fs_hack_enabled(HMONITOR monitor) { struct fs_monitor *fs_monitor; BOOL enabled = FALSE; TRACE("monitor %p\n", monitor); EnterCriticalSection(&fs_section); fs_monitor = fs_find_monitor_by_handle(monitor); if (fs_monitor && (fs_monitor->user_mode.dmPelsWidth != fs_monitor->real_mode.dmPelsWidth || fs_monitor->user_mode.dmPelsHeight != fs_monitor->real_mode.dmPelsHeight)) enabled = TRUE; LeaveCriticalSection(&fs_section); TRACE("enabled: %s\n", enabled ? "TRUE" : "FALSE"); return enabled; } BOOL fs_hack_mapping_required(HMONITOR monitor) { BOOL required; TRACE("monitor %p\n", monitor); /* steamcompmgr does our mapping for us */ required = !wm_is_steamcompmgr(NULL) && fs_hack_enabled(monitor); TRACE("required: %s\n", required ? "TRUE" : "FALSE"); return required; } /* Return whether integer scaling is on */ BOOL fs_hack_is_integer(void) { static int is_int = -1; if (is_int < 0) { const char *e = getenv("WINE_FULLSCREEN_INTEGER_SCALING"); is_int = e && strcmp(e, "0"); } TRACE("is_interger_scaling: %s\n", is_int ? "TRUE" : "FALSE"); return is_int; } /* Get the monitor a window is on. 
MonitorFromWindow() doesn't work here because it finds the * monitor with the maximum overlapped rectangle when a window is spanned over two monitors, whereas * for the fullscreen hack, the monitor where the left top corner of the window is on is the correct * one. For example, a game with a window of 3840x2160 changes the primary monitor to 1280x720, if * there is a secondary monitor of 3840x2160 to the right, MonitorFromWindow() will return the * secondary monitor instead of the primary one. */ HMONITOR fs_hack_monitor_from_hwnd(HWND hwnd) { RECT rect = {0}; if (!GetWindowRect(hwnd, &rect)) ERR("Invalid hwnd %p.\n", hwnd); TRACE("hwnd %p rect %s\n", hwnd, wine_dbgstr_rect(&rect)); rect.right = rect.left + 1; rect.bottom = rect.top + 1; return MonitorFromRect(&rect, MONITOR_DEFAULTTOPRIMARY); } /* Return the rectangle of a monitor in current mode in user virtual screen coordinates */ RECT fs_hack_current_mode(HMONITOR monitor) { struct fs_monitor *fs_monitor; RECT rect = {0}; TRACE("monitor %p\n", monitor); EnterCriticalSection(&fs_section); fs_monitor = fs_find_monitor_by_handle(monitor); if (!fs_monitor) { LeaveCriticalSection(&fs_section); return rect; } rect.left = fs_monitor->user_mode.u1.s2.dmPosition.x; rect.top = fs_monitor->user_mode.u1.s2.dmPosition.y; rect.right = rect.left + fs_monitor->user_mode.dmPelsWidth; rect.bottom = rect.top + fs_monitor->user_mode.dmPelsHeight; LeaveCriticalSection(&fs_section); TRACE("current mode rect: %s\n", wine_dbgstr_rect(&rect)); return rect; } /* Return the rectangle of a monitor in real mode in real virtual screen coordinates */ RECT fs_hack_real_mode(HMONITOR monitor) { struct fs_monitor *fs_monitor; RECT rect = {0}; TRACE("monitor %p\n", monitor); EnterCriticalSection(&fs_section); fs_monitor = fs_find_monitor_by_handle(monitor); if (!fs_monitor) { LeaveCriticalSection(&fs_section); return rect; } rect.left = fs_monitor->real_mode.u1.s2.dmPosition.x; rect.top = fs_monitor->real_mode.u1.s2.dmPosition.y; rect.right = rect.left + fs_monitor->real_mode.dmPelsWidth; rect.bottom = rect.top + fs_monitor->real_mode.dmPelsHeight; LeaveCriticalSection(&fs_section); TRACE("real mode rect: %s\n", wine_dbgstr_rect(&rect)); return rect; } /* Return whether width and height are the same as the current mode used by a monitor */ BOOL fs_hack_matches_current_mode(HMONITOR monitor, INT width, INT height) { MONITORINFO monitor_info; BOOL matched; TRACE("monitor %p\n", monitor); monitor_info.cbSize = sizeof(monitor_info); if (!GetMonitorInfoW(monitor, &monitor_info)) return FALSE; matched = (width == monitor_info.rcMonitor.right - monitor_info.rcMonitor.left) && (height == monitor_info.rcMonitor.bottom - monitor_info.rcMonitor.top); TRACE("matched: %s\n", matched ? 
"TRUE" : "FALSE"); return matched; } /* Transform a point in user virtual screen coordinates to real virtual screen coordinates */ void fs_hack_point_user_to_real(POINT *pos) { struct fs_monitor *fs_monitor; RECT rect; TRACE("from %d,%d\n", pos->x, pos->y); if (wm_is_steamcompmgr(NULL)) return; EnterCriticalSection(&fs_section); LIST_FOR_EACH_ENTRY(fs_monitor, &fs_monitors, struct fs_monitor, entry) { rect.left = fs_monitor->user_mode.u1.s2.dmPosition.x; rect.top = fs_monitor->user_mode.u1.s2.dmPosition.y; rect.right = rect.left + fs_monitor->user_mode.dmPelsWidth; rect.bottom = rect.top + fs_monitor->user_mode.dmPelsHeight; if (PtInRect(&rect, *pos)) { pos->x -= fs_monitor->user_mode.u1.s2.dmPosition.x; pos->y -= fs_monitor->user_mode.u1.s2.dmPosition.y; pos->x = lround(pos->x * fs_monitor->user_to_real_scale); pos->y = lround(pos->y * fs_monitor->user_to_real_scale); pos->x += fs_monitor->top_left.x; pos->y += fs_monitor->top_left.y; LeaveCriticalSection(&fs_section); TRACE("to %d,%d\n", pos->x, pos->y); return; } } LeaveCriticalSection(&fs_section); WARN("%d,%d not transformed.\n", pos->x, pos->y); } /* Transform a point in real virtual screen coordinates to user virtual screen coordinates */ void fs_hack_point_real_to_user(POINT *pos) { struct fs_monitor *fs_monitor; RECT rect; TRACE("from %d,%d\n", pos->x, pos->y); if (wm_is_steamcompmgr(NULL)) return; EnterCriticalSection(&fs_section); LIST_FOR_EACH_ENTRY(fs_monitor, &fs_monitors, struct fs_monitor, entry) { rect.left = fs_monitor->real_mode.u1.s2.dmPosition.x; rect.top = fs_monitor->real_mode.u1.s2.dmPosition.y; rect.right = rect.left + fs_monitor->real_mode.dmPelsWidth; rect.bottom = rect.top + fs_monitor->real_mode.dmPelsHeight; if (PtInRect(&rect, *pos)) { pos->x -= fs_monitor->top_left.x; pos->y -= fs_monitor->top_left.y; pos->x = lround(pos->x / fs_monitor->user_to_real_scale); pos->y = lround(pos->y / fs_monitor->user_to_real_scale); pos->x += fs_monitor->user_mode.u1.s2.dmPosition.x; pos->y += fs_monitor->user_mode.u1.s2.dmPosition.y; pos->x = max(pos->x, fs_monitor->user_mode.u1.s2.dmPosition.x); pos->y = max(pos->y, fs_monitor->user_mode.u1.s2.dmPosition.y); pos->x = min(pos->x, fs_monitor->user_mode.u1.s2.dmPosition.x + (INT)fs_monitor->user_mode.dmPelsWidth - 1); pos->y = min(pos->y, fs_monitor->user_mode.u1.s2.dmPosition.y + (INT)fs_monitor->user_mode.dmPelsHeight - 1); LeaveCriticalSection(&fs_section); TRACE("to %d,%d\n", pos->x, pos->y); return; } } LeaveCriticalSection(&fs_section); WARN("%d,%d not transformed.\n", pos->x, pos->y); } /* Transform RGNDATA in user virtual screen coordinates to real virtual screen coordinates. * This is for clipping. Be sure to use Unsorted for Xlib calls after this transformation because * this may break the requirement of using YXBanded. For example, say there are two monitors aligned * horizontally with the primary monitor on the right. Each of monitor is of real resolution * 1920x1080 and the fake primary monitor resolution is 1024x768. Then (0, 10, 1024, 768) should be * transformed to (0, 14, 1920, 1080). 
While (1024, 10, 2944, 1080) should be transformed to * (1920, 10, 3840, 1080) and this is breaking YXBanded because it requires y in non-decreasing order */ void fs_hack_rgndata_user_to_real(RGNDATA *data) { unsigned int i; XRectangle *xrect; RECT rect; if (!data || wm_is_steamcompmgr(NULL)) return; xrect = (XRectangle *)data->Buffer; for (i = 0; i < data->rdh.nCount; i++) { rect.left = xrect[i].x; rect.top = xrect[i].y; rect.right = xrect[i].x + xrect[i].width; rect.bottom = xrect[i].y + xrect[i].height; TRACE("from rect %s\n", wine_dbgstr_rect(&rect)); fs_hack_rect_user_to_real(&rect); TRACE("to rect %s\n", wine_dbgstr_rect(&rect)); xrect[i].x = rect.left; xrect[i].y = rect.top; xrect[i].width = rect.right - rect.left; xrect[i].height = rect.bottom - rect.top; } } /* Transform a rectangle in user virtual screen coordinates to real virtual screen coordinates. A * difference compared to fs_hack_point_user_to_real() is that fs_hack_point_user_to_real() finds * the wrong monitor if the point is on the right edge of the monitor rectangle. For example, when * there are two monitors of real size 1920x1080, the primary monitor is of user mode 1024x768 and * the secondary monitor is to the right. Rectangle (0, 0, 1024, 768) should transform to * (0, 0, 1920, 1080). If (1024, 768) is passed to fs_hack_point_user_to_real(), * fs_hack_point_user_to_real() will think (1024, 768) is on the secondary monitor, ends up * returning a wrong result to callers. */ void fs_hack_rect_user_to_real(RECT *rect) { struct fs_monitor *fs_monitor; HMONITOR monitor; POINT point; TRACE("from %s\n", wine_dbgstr_rect(rect)); if (wm_is_steamcompmgr(NULL)) return; point.x = rect->left; point.y = rect->top; monitor = MonitorFromPoint(point, MONITOR_DEFAULTTONEAREST); EnterCriticalSection(&fs_section); fs_monitor = fs_find_monitor_by_handle(monitor); if (!fs_monitor) { LeaveCriticalSection(&fs_section); WARN("%s not transformed.\n", wine_dbgstr_rect(rect)); return; } OffsetRect(rect, -fs_monitor->user_mode.u1.s2.dmPosition.x, -fs_monitor->user_mode.u1.s2.dmPosition.y); rect->left = lround(rect->left * fs_monitor->user_to_real_scale); rect->right = lround(rect->right * fs_monitor->user_to_real_scale); rect->top = lround(rect->top * fs_monitor->user_to_real_scale); rect->bottom = lround(rect->bottom * fs_monitor->user_to_real_scale); OffsetRect(rect, fs_monitor->top_left.x, fs_monitor->top_left.y); LeaveCriticalSection(&fs_section); TRACE("to %s\n", wine_dbgstr_rect(rect)); } /* Get the user_to_real_scale value in a monitor */ double fs_hack_get_user_to_real_scale(HMONITOR monitor) { struct fs_monitor *fs_monitor; double scale = 1.0; TRACE("monitor %p\n", monitor); if (wm_is_steamcompmgr(NULL)) return scale; EnterCriticalSection(&fs_section); fs_monitor = fs_find_monitor_by_handle(monitor); if (!fs_monitor) { LeaveCriticalSection(&fs_section); return scale; } scale = fs_monitor->user_to_real_scale; LeaveCriticalSection(&fs_section); TRACE("scale %lf\n", scale); return scale; } /* Get the scaled scree size of a monitor */ SIZE fs_hack_get_scaled_screen_size(HMONITOR monitor) { struct fs_monitor *fs_monitor; SIZE size = {0}; TRACE("monitor %p\n", monitor); EnterCriticalSection(&fs_section); fs_monitor = fs_find_monitor_by_handle(monitor); if (!fs_monitor) { LeaveCriticalSection(&fs_section); return size; } if (wm_is_steamcompmgr(NULL)) { LeaveCriticalSection(&fs_section); size.cx = fs_monitor->user_mode.dmPelsWidth; size.cy = fs_monitor->user_mode.dmPelsHeight; TRACE("width %d height %d\n", size.cx, size.cy); return size; } 
size.cx = lround(fs_monitor->user_mode.dmPelsWidth * fs_monitor->user_to_real_scale); size.cy = lround(fs_monitor->user_mode.dmPelsHeight * fs_monitor->user_to_real_scale); LeaveCriticalSection(&fs_section); TRACE("width %d height %d\n", size.cx, size.cy); return size; } /* Get the real virtual screen size instead of virtual screen size using fake modes */ RECT fs_hack_get_real_virtual_screen(void) { struct fs_monitor *fs_monitor; RECT rect, virtual = {0}; EnterCriticalSection(&fs_section); LIST_FOR_EACH_ENTRY(fs_monitor, &fs_monitors, struct fs_monitor, entry) { rect.left = fs_monitor->real_mode.u1.s2.dmPosition.x; rect.top = fs_monitor->real_mode.u1.s2.dmPosition.y; rect.right = rect.left + fs_monitor->real_mode.dmPelsWidth; rect.bottom = rect.top + fs_monitor->real_mode.dmPelsHeight; UnionRect(&virtual, &virtual, &rect); } LeaveCriticalSection(&fs_section); TRACE("real virtual screen rect:%s\n", wine_dbgstr_rect(&virtual)); return virtual; } /* Initialize the fullscreen hack, which is a layer on top of real settings handlers and real * display device handlers */ void fs_hack_init(void) { static WCHAR display_fmt[] = {'\\','\\','.','\\','D','I','S','P','L','A','Y','%','d',0}; struct x11drv_display_device_handler device_handler; struct x11drv_settings_handler settings_handler; WCHAR device_name[CCHDEVICENAME]; INT i = 0; real_device_handler = X11DRV_DisplayDevices_GetHandler(); real_settings_handler = X11DRV_Settings_GetHandler(); EnterCriticalSection(&fs_section); while (1) { sprintfW(device_name, display_fmt, ++i); if (!fs_add_monitor(device_name)) break; } LeaveCriticalSection(&fs_section); settings_handler.name = "Fullscreen Hack"; settings_handler.priority = 500; settings_handler.get_id = fs_get_id; settings_handler.get_modes = fs_get_modes; settings_handler.free_modes = fs_free_modes; settings_handler.get_current_mode = fs_get_current_mode; settings_handler.set_current_mode = fs_set_current_mode; settings_handler.convert_coordinates = NULL; X11DRV_Settings_SetHandler(&settings_handler); device_handler.name = "Fullscreen Hack"; device_handler.priority = 500; device_handler.get_gpus = real_device_handler.get_gpus; device_handler.get_adapters = real_device_handler.get_adapters; device_handler.get_monitors = fs_get_monitors; device_handler.free_gpus = real_device_handler.free_gpus; device_handler.free_adapters = real_device_handler.free_adapters; device_handler.free_monitors = real_device_handler.free_monitors; device_handler.register_event_handlers = NULL; X11DRV_DisplayDevices_SetHandler(&device_handler); }
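/*
 * A self-contained sketch of the letterbox mapping computed in
 * fs_set_current_mode() above: pick the scale that fits the user (fake) mode
 * inside the real mode, centre it, and map a point from user to real
 * coordinates.  The struct and function names are illustrative, not Wine's,
 * and integer scaling plus multi-monitor offsets are left out for brevity.
 */
#include <math.h>
#include <stdio.h>

struct mapping {
    double scale;       /* user-to-real scale factor */
    long   off_x;       /* top-left corner of the scaled area */
    long   off_y;
};

static struct mapping compute_mapping(long real_w, long real_h, long user_w, long user_h)
{
    struct mapping m;
    double rw = (double) real_w, rh = (double) real_h;
    double uw = (double) user_w, uh = (double) user_h;

    /* If the real mode is narrower (in aspect) than the fake mode, fit the
     * width, otherwise fit the height -- the same rule as the code above. */
    if (rw / rh < uw / uh)
        m.scale = rw / uw;
    else
        m.scale = rh / uh;

    m.off_x = lround((rw - uw * m.scale) / 2.0);
    m.off_y = lround((rh - uh * m.scale) / 2.0);
    return m;
}

int main(void)
{
    /* Fake 1024x768 mode displayed on a real 1920x1080 monitor */
    struct mapping m = compute_mapping(1920, 1080, 1024, 768);
    long ux = 512, uy = 384;    /* centre of the fake screen */
    long rx = m.off_x + lround(ux * m.scale);
    long ry = m.off_y + lround(uy * m.scale);

    printf("scale %.4f, offset (%ld,%ld), (%ld,%ld) -> (%ld,%ld)\n",
           m.scale, m.off_x, m.off_y, ux, uy, rx, ry);
    return 0;
}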
150619.c
// SPDX-License-Identifier: GPL-2.0-only /* * wm8523.c -- WM8523 ALSA SoC Audio driver * * Copyright 2009 Wolfson Microelectronics plc * * Author: Mark Brown <[email protected]> */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/of_device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include <sound/tlv.h> #include "wm8523.h" #define WM8523_NUM_SUPPLIES 2 static const char *wm8523_supply_names[WM8523_NUM_SUPPLIES] = { "AVDD", "LINEVDD", }; #define WM8523_NUM_RATES 7 /* codec private data */ struct wm8523_priv { struct regmap *regmap; struct regulator_bulk_data supplies[WM8523_NUM_SUPPLIES]; unsigned int sysclk; unsigned int rate_constraint_list[WM8523_NUM_RATES]; struct snd_pcm_hw_constraint_list rate_constraint; }; static const struct reg_default wm8523_reg_defaults[] = { { 2, 0x0000 }, /* R2 - PSCTRL1 */ { 3, 0x1812 }, /* R3 - AIF_CTRL1 */ { 4, 0x0000 }, /* R4 - AIF_CTRL2 */ { 5, 0x0001 }, /* R5 - DAC_CTRL3 */ { 6, 0x0190 }, /* R6 - DAC_GAINL */ { 7, 0x0190 }, /* R7 - DAC_GAINR */ { 8, 0x0000 }, /* R8 - ZERO_DETECT */ }; static bool wm8523_volatile_register(struct device *dev, unsigned int reg) { switch (reg) { case WM8523_DEVICE_ID: case WM8523_REVISION: return true; default: return false; } } static const DECLARE_TLV_DB_SCALE(dac_tlv, -10000, 25, 0); static const char *wm8523_zd_count_text[] = { "1024", "2048", }; static SOC_ENUM_SINGLE_DECL(wm8523_zc_count, WM8523_ZERO_DETECT, 0, wm8523_zd_count_text); static const struct snd_kcontrol_new wm8523_controls[] = { SOC_DOUBLE_R_TLV("Playback Volume", WM8523_DAC_GAINL, WM8523_DAC_GAINR, 0, 448, 0, dac_tlv), SOC_SINGLE("ZC Switch", WM8523_DAC_CTRL3, 4, 1, 0), SOC_SINGLE("Playback Deemphasis Switch", WM8523_AIF_CTRL1, 8, 1, 0), SOC_DOUBLE("Playback Switch", WM8523_DAC_CTRL3, 2, 3, 1, 1), SOC_SINGLE("Volume Ramp Up Switch", WM8523_DAC_CTRL3, 1, 1, 0), SOC_SINGLE("Volume Ramp Down Switch", WM8523_DAC_CTRL3, 0, 1, 0), SOC_ENUM("Zero Detect Count", wm8523_zc_count), }; static const struct snd_soc_dapm_widget wm8523_dapm_widgets[] = { SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_OUTPUT("LINEVOUTL"), SND_SOC_DAPM_OUTPUT("LINEVOUTR"), }; static const struct snd_soc_dapm_route wm8523_dapm_routes[] = { { "LINEVOUTL", NULL, "DAC" }, { "LINEVOUTR", NULL, "DAC" }, }; static const struct { int value; int ratio; } lrclk_ratios[WM8523_NUM_RATES] = { { 1, 128 }, { 2, 192 }, { 3, 256 }, { 4, 384 }, { 5, 512 }, { 6, 768 }, { 7, 1152 }, }; static const struct { int value; int ratio; } bclk_ratios[] = { { 2, 32 }, { 3, 64 }, { 4, 128 }, }; static int wm8523_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_component *component = dai->component; struct wm8523_priv *wm8523 = snd_soc_component_get_drvdata(component); /* The set of sample rates that can be supported depends on the * MCLK supplied to the CODEC - enforce this. 
*/ if (!wm8523->sysclk) { dev_err(component->dev, "No MCLK configured, call set_sysclk() on init\n"); return -EINVAL; } snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &wm8523->rate_constraint); return 0; } static int wm8523_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_component *component = dai->component; struct wm8523_priv *wm8523 = snd_soc_component_get_drvdata(component); int i; u16 aifctrl1 = snd_soc_component_read(component, WM8523_AIF_CTRL1); u16 aifctrl2 = snd_soc_component_read(component, WM8523_AIF_CTRL2); /* Find a supported LRCLK ratio */ for (i = 0; i < ARRAY_SIZE(lrclk_ratios); i++) { if (wm8523->sysclk / params_rate(params) == lrclk_ratios[i].ratio) break; } /* Should never happen, should be handled by constraints */ if (i == ARRAY_SIZE(lrclk_ratios)) { dev_err(component->dev, "MCLK/fs ratio %d unsupported\n", wm8523->sysclk / params_rate(params)); return -EINVAL; } aifctrl2 &= ~WM8523_SR_MASK; aifctrl2 |= lrclk_ratios[i].value; if (aifctrl1 & WM8523_AIF_MSTR) { /* Find a fs->bclk ratio */ for (i = 0; i < ARRAY_SIZE(bclk_ratios); i++) if (params_width(params) * 2 <= bclk_ratios[i].ratio) break; if (i == ARRAY_SIZE(bclk_ratios)) { dev_err(component->dev, "No matching BCLK/fs ratio for word length %d\n", params_width(params)); return -EINVAL; } aifctrl2 &= ~WM8523_BCLKDIV_MASK; aifctrl2 |= bclk_ratios[i].value << WM8523_BCLKDIV_SHIFT; } aifctrl1 &= ~WM8523_WL_MASK; switch (params_width(params)) { case 16: break; case 20: aifctrl1 |= 0x8; break; case 24: aifctrl1 |= 0x10; break; case 32: aifctrl1 |= 0x18; break; } snd_soc_component_write(component, WM8523_AIF_CTRL1, aifctrl1); snd_soc_component_write(component, WM8523_AIF_CTRL2, aifctrl2); return 0; } static int wm8523_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_component *component = codec_dai->component; struct wm8523_priv *wm8523 = snd_soc_component_get_drvdata(component); unsigned int val; int i; wm8523->sysclk = freq; wm8523->rate_constraint.count = 0; for (i = 0; i < ARRAY_SIZE(lrclk_ratios); i++) { val = freq / lrclk_ratios[i].ratio; /* Check that it's a standard rate since core can't * cope with others and having the odd rates confuses * constraint matching. */ switch (val) { case 8000: case 11025: case 16000: case 22050: case 32000: case 44100: case 48000: case 64000: case 88200: case 96000: case 176400: case 192000: dev_dbg(component->dev, "Supported sample rate: %dHz\n", val); wm8523->rate_constraint_list[i] = val; wm8523->rate_constraint.count++; break; default: dev_dbg(component->dev, "Skipping sample rate: %dHz\n", val); } } /* Need at least one supported rate... 
*/ if (wm8523->rate_constraint.count == 0) return -EINVAL; return 0; } static int wm8523_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_component *component = codec_dai->component; u16 aifctrl1 = snd_soc_component_read(component, WM8523_AIF_CTRL1); aifctrl1 &= ~(WM8523_BCLK_INV_MASK | WM8523_LRCLK_INV_MASK | WM8523_FMT_MASK | WM8523_AIF_MSTR_MASK); switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: aifctrl1 |= WM8523_AIF_MSTR; break; case SND_SOC_DAIFMT_CBS_CFS: break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: aifctrl1 |= 0x0002; break; case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_LEFT_J: aifctrl1 |= 0x0001; break; case SND_SOC_DAIFMT_DSP_A: aifctrl1 |= 0x0003; break; case SND_SOC_DAIFMT_DSP_B: aifctrl1 |= 0x0023; break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_IF: aifctrl1 |= WM8523_BCLK_INV | WM8523_LRCLK_INV; break; case SND_SOC_DAIFMT_IB_NF: aifctrl1 |= WM8523_BCLK_INV; break; case SND_SOC_DAIFMT_NB_IF: aifctrl1 |= WM8523_LRCLK_INV; break; default: return -EINVAL; } snd_soc_component_write(component, WM8523_AIF_CTRL1, aifctrl1); return 0; } static int wm8523_set_bias_level(struct snd_soc_component *component, enum snd_soc_bias_level level) { struct wm8523_priv *wm8523 = snd_soc_component_get_drvdata(component); int ret; switch (level) { case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: /* Full power on */ snd_soc_component_update_bits(component, WM8523_PSCTRL1, WM8523_SYS_ENA_MASK, 3); break; case SND_SOC_BIAS_STANDBY: if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); if (ret != 0) { dev_err(component->dev, "Failed to enable supplies: %d\n", ret); return ret; } /* Sync back default/cached values */ regcache_sync(wm8523->regmap); /* Initial power up */ snd_soc_component_update_bits(component, WM8523_PSCTRL1, WM8523_SYS_ENA_MASK, 1); msleep(100); } /* Power up to mute */ snd_soc_component_update_bits(component, WM8523_PSCTRL1, WM8523_SYS_ENA_MASK, 2); break; case SND_SOC_BIAS_OFF: /* The chip runs through the power down sequence for us. 
*/ snd_soc_component_update_bits(component, WM8523_PSCTRL1, WM8523_SYS_ENA_MASK, 0); msleep(100); regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); break; } return 0; } #define WM8523_RATES SNDRV_PCM_RATE_8000_192000 #define WM8523_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) static const struct snd_soc_dai_ops wm8523_dai_ops = { .startup = wm8523_startup, .hw_params = wm8523_hw_params, .set_sysclk = wm8523_set_dai_sysclk, .set_fmt = wm8523_set_dai_fmt, }; static struct snd_soc_dai_driver wm8523_dai = { .name = "wm8523-hifi", .playback = { .stream_name = "Playback", .channels_min = 2, /* Mono modes not yet supported */ .channels_max = 2, .rates = WM8523_RATES, .formats = WM8523_FORMATS, }, .ops = &wm8523_dai_ops, }; static int wm8523_probe(struct snd_soc_component *component) { struct wm8523_priv *wm8523 = snd_soc_component_get_drvdata(component); wm8523->rate_constraint.list = &wm8523->rate_constraint_list[0]; wm8523->rate_constraint.count = ARRAY_SIZE(wm8523->rate_constraint_list); /* Change some default settings - latch VU and enable ZC */ snd_soc_component_update_bits(component, WM8523_DAC_GAINR, WM8523_DACR_VU, WM8523_DACR_VU); snd_soc_component_update_bits(component, WM8523_DAC_CTRL3, WM8523_ZC, WM8523_ZC); return 0; } static const struct snd_soc_component_driver soc_component_dev_wm8523 = { .probe = wm8523_probe, .set_bias_level = wm8523_set_bias_level, .controls = wm8523_controls, .num_controls = ARRAY_SIZE(wm8523_controls), .dapm_widgets = wm8523_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm8523_dapm_widgets), .dapm_routes = wm8523_dapm_routes, .num_dapm_routes = ARRAY_SIZE(wm8523_dapm_routes), .suspend_bias_off = 1, .idle_bias_on = 1, .use_pmdown_time = 1, .endianness = 1, .non_legacy_dai_naming = 1, }; static const struct of_device_id wm8523_of_match[] = { { .compatible = "wlf,wm8523" }, { }, }; MODULE_DEVICE_TABLE(of, wm8523_of_match); static const struct regmap_config wm8523_regmap = { .reg_bits = 8, .val_bits = 16, .max_register = WM8523_ZERO_DETECT, .reg_defaults = wm8523_reg_defaults, .num_reg_defaults = ARRAY_SIZE(wm8523_reg_defaults), .cache_type = REGCACHE_RBTREE, .volatile_reg = wm8523_volatile_register, }; static int wm8523_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct wm8523_priv *wm8523; unsigned int val; int ret, i; wm8523 = devm_kzalloc(&i2c->dev, sizeof(struct wm8523_priv), GFP_KERNEL); if (wm8523 == NULL) return -ENOMEM; wm8523->regmap = devm_regmap_init_i2c(i2c, &wm8523_regmap); if (IS_ERR(wm8523->regmap)) { ret = PTR_ERR(wm8523->regmap); dev_err(&i2c->dev, "Failed to create regmap: %d\n", ret); return ret; } for (i = 0; i < ARRAY_SIZE(wm8523->supplies); i++) wm8523->supplies[i].supply = wm8523_supply_names[i]; ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(wm8523->supplies), wm8523->supplies); if (ret != 0) { dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret); return ret; } ret = regulator_bulk_enable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); if (ret != 0) { dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret); return ret; } ret = regmap_read(wm8523->regmap, WM8523_DEVICE_ID, &val); if (ret < 0) { dev_err(&i2c->dev, "Failed to read ID register\n"); goto err_enable; } if (val != 0x8523) { dev_err(&i2c->dev, "Device is not a WM8523, ID is %x\n", ret); ret = -EINVAL; goto err_enable; } ret = regmap_read(wm8523->regmap, WM8523_REVISION, &val); if (ret < 0) { dev_err(&i2c->dev, "Failed to read revision register\n"); 
goto err_enable; } dev_info(&i2c->dev, "revision %c\n", (val & WM8523_CHIP_REV_MASK) + 'A'); ret = regmap_write(wm8523->regmap, WM8523_DEVICE_ID, 0x8523); if (ret != 0) { dev_err(&i2c->dev, "Failed to reset device: %d\n", ret); goto err_enable; } regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); i2c_set_clientdata(i2c, wm8523); ret = devm_snd_soc_register_component(&i2c->dev, &soc_component_dev_wm8523, &wm8523_dai, 1); return ret; err_enable: regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); return ret; } static const struct i2c_device_id wm8523_i2c_id[] = { { "wm8523", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8523_i2c_id); static struct i2c_driver wm8523_i2c_driver = { .driver = { .name = "wm8523", .of_match_table = wm8523_of_match, }, .probe = wm8523_i2c_probe, .id_table = wm8523_i2c_id, }; module_i2c_driver(wm8523_i2c_driver); MODULE_DESCRIPTION("ASoC WM8523 driver"); MODULE_AUTHOR("Mark Brown <[email protected]>"); MODULE_LICENSE("GPL");
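/*
 * A self-contained sketch of how wm8523_set_dai_sysclk() above derives the
 * set of usable sample rates from MCLK: divide MCLK by each supported
 * MCLK/LRCLK ratio and keep only the standard audio rates.  The ratio table
 * is copied from the driver; everything else (names, the main() harness) is
 * illustrative only.
 */
#include <stdio.h>

static const int lrclk_ratios[] = { 128, 192, 256, 384, 512, 768, 1152 };
static const int std_rates[]    = { 8000, 11025, 16000, 22050, 32000, 44100,
                                    48000, 64000, 88200, 96000, 176400, 192000 };

/* Fill rates[] with the standard rates reachable from mclk; return the count. */
static int rates_from_mclk(unsigned int mclk, unsigned int *rates, int max)
{
    int count = 0;

    for (unsigned int i = 0; i < sizeof(lrclk_ratios) / sizeof(lrclk_ratios[0]); i++) {
        unsigned int rate = mclk / lrclk_ratios[i];

        for (unsigned int j = 0; j < sizeof(std_rates) / sizeof(std_rates[0]); j++) {
            if (rate == (unsigned int) std_rates[j] && count < max) {
                rates[count++] = rate;
                break;
            }
        }
    }
    return count;
}

int main(void)
{
    unsigned int rates[7];
    int n = rates_from_mclk(12288000, rates, 7);   /* a common 12.288 MHz MCLK */

    for (int i = 0; i < n; i++)
        printf("supported rate: %u Hz\n", rates[i]);
    return 0;
}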
791367.c
/* Copyright 2018 SiFive, Inc */
/* SPDX-License-Identifier: Apache-2.0 */

#include <metal/machine.h>
#include <metal/uart.h>

extern inline void metal_uart_init(struct metal_uart *uart, int baud_rate);
extern inline int metal_uart_putc(struct metal_uart *uart, unsigned char c);
extern inline int metal_uart_getc(struct metal_uart *uart, unsigned char *c);
extern inline int metal_uart_get_baud_rate(struct metal_uart *uart);
extern inline int metal_uart_set_baud_rate(struct metal_uart *uart, int baud_rate);

struct metal_uart *metal_uart_get_device(int device_num)
{
    if (device_num >= __METAL_DT_MAX_UARTS) {
        return NULL;
    }

    return &__metal_uart_table[device_num]->uart;
}
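The accessor above only bounds-checks against the device-tree table, so callers must handle a NULL return. A minimal usage sketch of the inline API declared in this file, assuming the design exposes at least one UART and that 115200 baud is acceptable:

/* Minimal usage sketch (assumptions: UART 0 exists, 115200 baud is valid). */
#include <metal/uart.h>

int uart0_print(const char *s)
{
    struct metal_uart *uart = metal_uart_get_device(0);
    if (uart == NULL) {
        return -1; /* no UART in this design */
    }

    metal_uart_init(uart, 115200);
    while (*s != '\0') {
        metal_uart_putc(uart, (unsigned char)*s++);
    }
    return 0;
}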
81603.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE78_OS_Command_Injection__char_fromFile_w32spawnl_53d.c Label Definition File: CWE78_OS_Command_Injection.no_path.label.xml Template File: sources-sink-53d.tmpl.c */ /* * @description * CWE: 78 OS Command Injection * BadSource: fromFile Read input from a file * GoodSource: Benign input * Sink: w32spawnl * BadSink : execute command with spawnl * Flow Variant: 53 Data flow: data passed as an argument from one function through two others to a fourth; all four functions are in different source files * * */ #include "std_testcase.h" #include <wchar.h> #ifdef _WIN32 # define COMMAND_INT_PATH "%WINDIR%\\system32\\cmd.exe" # define COMMAND_INT "cmd.exe" # define COMMAND_ARG1 "/c" # define COMMAND_ARG2 "dir" # define COMMAND_ARG3 data #else /* NOT _WIN32 */ # define COMMAND_INT_PATH "/bin/sh" # define COMMAND_INT "sh" # define COMMAND_ARG1 "ls" # define COMMAND_ARG2 data # define COMMAND_ARG3 NULL #endif #ifdef _WIN32 # define FOPEN fopen #else /* fopen is used on unix-based OSs */ # define FOPEN fopen #endif #include <process.h> /* all the sinks are the same, we just want to know where the hit originated if a tool flags one */ #ifndef OMITBAD void CWE78_OS_Command_Injection__char_fromFile_w32spawnl_53d_bad_sink(char * data) { /* spawnl - specify the path where the command is located */ /* POSSIBLE FLAW: Execute command without validating input possibly leading to command injection */ _spawnl(_P_WAIT, COMMAND_INT_PATH, COMMAND_INT_PATH, COMMAND_ARG1, COMMAND_ARG2, COMMAND_ARG3, NULL); } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodG2B uses the GoodSource with the BadSink */ void CWE78_OS_Command_Injection__char_fromFile_w32spawnl_53d_goodG2B_sink(char * data) { /* spawnl - specify the path where the command is located */ /* POSSIBLE FLAW: Execute command without validating input possibly leading to command injection */ _spawnl(_P_WAIT, COMMAND_INT_PATH, COMMAND_INT_PATH, COMMAND_ARG1, COMMAND_ARG2, COMMAND_ARG3, NULL); } #endif /* OMITGOOD */
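For context on flow variant 53 described in the header comment, the upstream "53c" translation unit simply forwards the buffer to the sinks defined here. A hedged sketch of that caller follows; the prototypes are inferred from the Juliet naming convention and are not part of this file.

/* Hedged sketch of the upstream 53c forwarding functions (names inferred
 * from the flow-variant naming convention; not taken from this file). */
void CWE78_OS_Command_Injection__char_fromFile_w32spawnl_53d_bad_sink(char * data);
void CWE78_OS_Command_Injection__char_fromFile_w32spawnl_53d_goodG2B_sink(char * data);

void CWE78_OS_Command_Injection__char_fromFile_w32spawnl_53c_bad_sink(char * data)
{
    /* forward the tainted buffer one more hop toward the sink */
    CWE78_OS_Command_Injection__char_fromFile_w32spawnl_53d_bad_sink(data);
}

void CWE78_OS_Command_Injection__char_fromFile_w32spawnl_53c_goodG2B_sink(char * data)
{
    CWE78_OS_Command_Injection__char_fromFile_w32spawnl_53d_goodG2B_sink(data);
}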
151591.c
/******************************************************************************
 * zstringtest.c
 * Copyright (c) 2019, Fehmi Noyan ISI [email protected]
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Fehmi Noyan ISI ''AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Fehmi Noyan ISI BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 ******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <unity.h>
#include <zstring.h>

static char *s1;
static char *s2;

void setUp(void){
    char buf1[]=" Free software is a matter of liberty. ";
    char buf2[]="zString is cool!";
    size_t len1 = strlen(buf1);
    size_t len2 = strlen(buf2);

    /* allocate one extra byte for the terminating NUL stored below */
    s1 = malloc(len1 + 1);
    s2 = malloc(len2 + 1);

    strncpy(s1, buf1, len1);
    strncpy(s2, buf2, len2);

    s1[len1] = '\0';
    s2[len2] = '\0';
}

void tearDown(void){
    free(s1);
    free(s2);
}

void test_zstrtrm_l(void){
    TEST_ASSERT_EQUAL_STRING("Free software is a matter of liberty. 
", zstrtrm_l(s1)); } void test_zstrtrm_l_no_change(void){ TEST_ASSERT_EQUAL_STRING("zString is cool!", zstrtrm_l(s2)); } void test_zstrchr_0(void){ TEST_ASSERT_EQUAL_INT(-1, zstrchr(s1, 'x')); } void test_zstrchr_1(void){ TEST_ASSERT_EQUAL_INT(1, zstrchr(s2, 'S')); } void test_zrmvchr(void){ TEST_ASSERT_EQUAL_STRING("Stringiscl!", zrmvchr(s2,"o z")); } void test_zrmvchr_none(void){ TEST_ASSERT_EQUAL_STRING("zString is cool!", zrmvchr(s2,"xqw")); } void test_zrepchr(void){ TEST_ASSERT_EQUAL_STRING("zString_is_cool!", zrepchr(s2,' ','_')); } void test_zrepstr(void){ char str[]="this text has some text to be text "; TEST_ASSERT_EQUAL_STRING("this XXXXXXas some XXXXXXo be XXXXX", zrepstr(str,"text","XXXXXX")); } void test_zrepstr_none(void){ TEST_ASSERT_EQUAL_STRING("zString is cool!", zrepstr(s2,"text","XXXXXX")); } void test_zstrtrm_r(void){ TEST_ASSERT_EQUAL_STRING(" Free software is a matter of liberty.", zstrtrm_r(s1)); } void test_zstrtrm_r_none(void){ TEST_ASSERT_EQUAL_STRING("zString is cool!", zstrtrm_r(s2)); } void test_zstrtrm(void){ TEST_ASSERT_EQUAL_STRING("Free software is a matter of liberty.", zstrtrm(s1)); } void test_zstrtrm_none(void){ TEST_ASSERT_EQUAL_STRING("zString is cool!", zstrtrm(s2)); } void test_zstrtok(void){ char str[] = "A,B,,,C"; const char *exp[]={"A","B",",",",","C"}; TEST_ASSERT_EQUAL_STRING("A", zstrtok(str,",")); TEST_ASSERT_EQUAL_STRING("B", zstrtok(NULL,",")); TEST_ASSERT_EQUAL_STRING(",", zstrtok(NULL,",")); TEST_ASSERT_EQUAL_STRING(",", zstrtok(NULL,",")); TEST_ASSERT_EQUAL_STRING("C", zstrtok(NULL,",")); } void test_zsubstr(void){ TEST_ASSERT_EQUAL_STRING("is cool!", zsubstr(s2,8,8)); } /* main program */ int main(){ UNITY_BEGIN(); RUN_TEST(test_zstrtrm_l); RUN_TEST(test_zstrtrm_l_no_change); RUN_TEST(test_zstrchr_0); RUN_TEST(test_zstrchr_1); RUN_TEST(test_zrmvchr); RUN_TEST(test_zrmvchr_none); RUN_TEST(test_zrepchr); RUN_TEST(test_zrepstr); RUN_TEST(test_zrepstr_none); RUN_TEST(test_zstrtrm_r); RUN_TEST(test_zstrtrm_r_none); RUN_TEST(test_zstrtrm); RUN_TEST(test_zstrtrm_none); RUN_TEST(test_zstrtok); RUN_TEST(test_zsubstr); return UNITY_END(); }
477902.c
/* * Copyright (c) 2015, Freescale Semiconductor, Inc. * Copyright 2016-2019 NXP * All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ #include "fsl_common.h" #include "fsl_smc.h" #include "fsl_rcm.h" #include "fsl_port.h" #include "power_mode_switch.h" #include "board.h" #include "fsl_debug_console.h" #include "peripherals.h" #include "pin_mux.h" #include "fsl_pmc.h" #include "fsl_uart.h" /******************************************************************************* * Definitions ******************************************************************************/ #define APP_DEBUG_UART_BAUDRATE 9600 /* Debug console baud rate. */ #define APP_DEBUG_UART_CLKSRC_NAME kCLOCK_CoreSysClk /* System clock. */ #define APP_LLWU DEMO_LLWU_PERIPHERAL #define APP_LLWU_IRQHANDLER DEMO_LLWU_IRQHANDLER #define APP_LPTMR DEMO_LPTMR_PERIPHERAL #define APP_LPTMR_IRQHANDLER DEMO_LPTMR_IRQHANDLER #define LLWU_LPTMR_IDX 0U /* LLWU_M0IF */ #define LLWU_WAKEUP_PIN_IDX 10U /* LLWU_P10 */ #define LLWU_WAKEUP_PIN_TYPE kLLWU_ExternalPinFallingEdge #define APP_WAKEUP_BUTTON_GPIO BOARD_SW2_GPIO #define APP_WAKEUP_BUTTON_PORT BOARD_SW2_PORT #define APP_WAKEUP_BUTTON_GPIO_PIN BOARD_SW2_GPIO_PIN #define APP_WAKEUP_BUTTON_IRQ BOARD_SW2_IRQ #define APP_WAKEUP_BUTTON_IRQ_HANDLER BOARD_SW2_IRQ_HANDLER #define APP_WAKEUP_BUTTON_NAME BOARD_SW2_NAME #define APP_WAKEUP_BUTTON_IRQ_TYPE kPORT_InterruptFallingEdge /* Debug console RX pin: PORTB16 MUX: 3 */ #define DEBUG_CONSOLE_RX_PORT PORTB #define DEBUG_CONSOLE_RX_GPIO GPIOB #define DEBUG_CONSOLE_RX_PIN 16 #define DEBUG_CONSOLE_RX_PINMUX kPORT_MuxAlt3 /* Debug console TX pin: PORTB17 MUX: 3 */ #define DEBUG_CONSOLE_TX_PORT PORTB #define DEBUG_CONSOLE_TX_GPIO GPIOB #define DEBUG_CONSOLE_TX_PIN 17 #define DEBUG_CONSOLE_TX_PINMUX kPORT_MuxAlt3 #define CORE_CLK_FREQ CLOCK_GetFreq(kCLOCK_CoreSysClk) /******************************************************************************* * Prototypes ******************************************************************************/ void APP_PowerPreSwitchHook(smc_power_state_t originPowerState, app_power_mode_t targetMode); void APP_PowerPostSwitchHook(smc_power_state_t originPowerState, app_power_mode_t targetMode); /* * Set the clock configuration for RUN mode from VLPR mode. */ extern void APP_SetClockRunFromVlpr(void); /* * Set the clock configuration for VLPR mode. */ extern void APP_SetClockVlpr(void); /* * Hook function called before power mode switch. */ extern void APP_PowerPreSwitchHook(smc_power_state_t originPowerState, app_power_mode_t targetMode); /* * Hook function called after power mode switch. */ extern void APP_PowerPostSwitchHook(smc_power_state_t originPowerState, app_power_mode_t targetMode); /******************************************************************************* * Variables ******************************************************************************/ static uint8_t s_wakeupTimeout; /* Wakeup timeout. (Unit: Second) */ static app_wakeup_source_t s_wakeupSource; /* Wakeup source. */ /******************************************************************************* * Code ******************************************************************************/ void APP_SetClockVlpr(void) { const sim_clock_config_t simConfig = { .pllFllSel = 3U, /* PLLFLLSEL select IRC48MCLK. */ .er32kSrc = 2U, /* ERCLK32K selection, use RTC. */ .clkdiv1 = 0x00040000U, /* SIM_CLKDIV1. */ }; CLOCK_SetSimSafeDivs(); CLOCK_SetInternalRefClkConfig(kMCG_IrclkEnable, kMCG_IrcFast, 0U); /* MCG works in PEE mode now, will switch to BLPI mode. 
*/ CLOCK_ExternalModeToFbeModeQuick(); /* Enter FBE. */ CLOCK_SetFbiMode(kMCG_Dmx32Default, kMCG_DrsLow, NULL); /* Enter FBI. */ CLOCK_SetLowPowerEnable(true); /* Enter BLPI. */ CLOCK_SetSimConfig(&simConfig); } void APP_SetClockRunFromVlpr(void) { const sim_clock_config_t simConfig = { .pllFllSel = 1U, /* PLLFLLSEL select PLL. */ .er32kSrc = 2U, /* ERCLK32K selection, use RTC. */ .clkdiv1 = 0x01140000U, /* SIM_CLKDIV1. */ }; const mcg_pll_config_t pll0Config = { .enableMode = 0U, .prdiv = 0x13U, .vdiv = 0x18U, }; CLOCK_SetSimSafeDivs(); /* Currently in BLPI mode, will switch to PEE mode. */ /* Enter FBI. */ CLOCK_SetLowPowerEnable(false); /* Enter FBE. */ CLOCK_SetFbeMode(7U, kMCG_Dmx32Default, kMCG_DrsLow, NULL); /* Enter PBE. */ CLOCK_SetPbeMode(kMCG_PllClkSelPll0, &pll0Config); /* Enter PEE. */ CLOCK_SetPeeMode(); CLOCK_SetSimConfig(&simConfig); } static void APP_InitDebugConsole(void) { uint32_t uartClkSrcFreq; uartClkSrcFreq = CLOCK_GetFreq(APP_DEBUG_UART_CLKSRC_NAME); DbgConsole_Init(BOARD_DEBUG_UART_INSTANCE, APP_DEBUG_UART_BAUDRATE, BOARD_DEBUG_UART_TYPE, uartClkSrcFreq); } void APP_PowerPreSwitchHook(smc_power_state_t originPowerState, app_power_mode_t targetMode) { /* Wait for debug console output finished. */ while (!(kUART_TransmissionCompleteFlag & UART_GetStatusFlags((UART_Type *)BOARD_DEBUG_UART_BASEADDR))) { } DbgConsole_Deinit(); if ((kAPP_PowerModeRun != targetMode) && (kAPP_PowerModeVlpr != targetMode)) { /* * Set pin for current leakage. * Debug console RX pin: Set to pinmux to disable. * Debug console TX pin: Don't need to change. */ PORT_SetPinMux(DEBUG_CONSOLE_RX_PORT, DEBUG_CONSOLE_RX_PIN, kPORT_PinDisabledOrAnalog); PORT_SetPinMux(DEBUG_CONSOLE_TX_PORT, DEBUG_CONSOLE_TX_PIN, kPORT_PinDisabledOrAnalog); } /* Disable the PLL in WAIT mode to same more power. FEI mode. */ if (kAPP_PowerModeWait == targetMode) { const mcg_config_t mcgConfigStruct = { .mcgMode = kMCG_ModeFEI, /* FEI - FLL with internal RTC. */ .irclkEnableMode = kMCG_IrclkEnable, /* MCGIRCLK enabled, MCGIRCLK disabled in STOP mode */ .ircs = kMCG_IrcSlow, /* Slow internal reference clock selected */ .fcrdiv = 0x0U, /* Fast IRC divider: divided by 1 */ .frdiv = 0x0U, /* FLL reference clock divider: divided by 32 */ .drs = kMCG_DrsLow, /* Low frequency range */ .dmx32 = kMCG_Dmx32Default, /* DCO has a default range of 25% */ .oscsel = kMCG_OscselOsc, /* Selects System Oscillator (OSCCLK) */ .pll0Config = { .enableMode = 0, /* MCGPLLCLK disabled */ .prdiv = 0x13U, /* PLL Reference divider: divided by 20 */ .vdiv = 0x18U, /* VCO divider: multiplied by 48 */ }, }; CLOCK_SetMcgConfig(&mcgConfigStruct); } } void APP_PowerPostSwitchHook(smc_power_state_t originPowerState, app_power_mode_t targetMode) { /* Restore the UART pins. */ if ((kAPP_PowerModeRun != targetMode) && (kAPP_PowerModeVlpr != targetMode)) { /* * Debug console RX pin is set to disable for current leakage, nee to re-configure pinmux. * Debug console TX pin: Don't need to change. */ PORT_SetPinMux(DEBUG_CONSOLE_RX_PORT, DEBUG_CONSOLE_RX_PIN, DEBUG_CONSOLE_RX_PINMUX); PORT_SetPinMux(DEBUG_CONSOLE_TX_PORT, DEBUG_CONSOLE_TX_PIN, DEBUG_CONSOLE_TX_PINMUX); } /* * For some other platforms, if enter LLS mode from VLPR mode, when wakeup, the * power mode is VLPR. But for some platforms, if enter LLS mode from VLPR mode, * when wakeup, the power mode is RUN. In this case, the clock setting is still * VLPR mode setting, so change to RUN mode setting here. 
*/ if ((kSMC_PowerStateVlpr == originPowerState) && (kSMC_PowerStateRun == SMC_GetPowerModeState(SMC))) { APP_SetClockRunFromVlpr(); } /* * If enter stop modes when MCG in PEE mode, then after wakeup, the MCG is in PBE mode, * need to enter PEE mode manually. */ if ((kAPP_PowerModeRun != targetMode) && (kAPP_PowerModeWait != targetMode) && (kAPP_PowerModeVlpw != targetMode) && (kAPP_PowerModeVlpr != targetMode)) { if (kSMC_PowerStateRun == originPowerState) { /* Wait for PLL lock. */ while (!(kMCG_Pll0LockFlag & CLOCK_GetStatusFlags())) { } CLOCK_SetPeeMode(); } } /* Return the PEE mode when come back to RUN mode from WAIT mode. */ if ((kAPP_PowerModeWait == targetMode) && (kSMC_PowerStateRun == originPowerState)) { const mcg_config_t mcgConfigStruct = { .mcgMode = kMCG_ModePEE, /* PEE - PLL Engaged External */ .irclkEnableMode = kMCG_IrclkEnable, /* MCGIRCLK enabled, MCGIRCLK disabled in STOP mode */ .ircs = kMCG_IrcSlow, /* Slow internal reference clock selected */ .fcrdiv = 0x0U, /* Fast IRC divider: divided by 1 */ .frdiv = 0x0U, /* FLL reference clock divider: divided by 32 */ .drs = kMCG_DrsLow, /* Low frequency range */ .dmx32 = kMCG_Dmx32Default, /* DCO has a default range of 25% */ .oscsel = kMCG_OscselOsc, /* Selects System Oscillator (OSCCLK) */ .pll0Config = { .enableMode = 0, /* MCGPLLCLK disabled */ .prdiv = 0x13U, /* PLL Reference divider: divided by 20 */ .vdiv = 0x18U, /* VCO divider: multiplied by 48 */ }, }; CLOCK_SetMcgConfig(&mcgConfigStruct); } APP_InitDebugConsole(); } /*! * @brief LLWU interrupt handler. */ void APP_LLWU_IRQHANDLER(void) { /* If wakeup by LPTMR. */ if (LLWU_GetInternalWakeupModuleFlag(APP_LLWU, LLWU_LPTMR_IDX)) { /* Disable lptmr as a wakeup source, so that lptmr's IRQ Handler will be executed when reset from VLLSx mode. */ LLWU_EnableInternalModuleInterruptWakup(APP_LLWU, LLWU_LPTMR_IDX, false); } /* If wakeup by external pin. */ if (LLWU_GetExternalWakeupPinFlag(APP_LLWU, LLWU_WAKEUP_PIN_IDX)) { /* Disable WAKEUP pin as a wakeup source, so that WAKEUP pin's IRQ Handler will be executed when reset from * VLLSx mode. */ LLWU_ClearExternalWakeupPinFlag(APP_LLWU, LLWU_WAKEUP_PIN_IDX); } /* Add for ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping exception return operation might vector to incorrect interrupt */ __DSB(); } void APP_LPTMR_IRQHANDLER(void) { if (kLPTMR_TimerInterruptEnable & LPTMR_GetEnabledInterrupts(APP_LPTMR)) { LPTMR_DisableInterrupts(APP_LPTMR, kLPTMR_TimerInterruptEnable); LPTMR_ClearStatusFlags(APP_LPTMR, kLPTMR_TimerCompareFlag); LPTMR_StopTimer(APP_LPTMR); } /* Add for ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping exception return operation might vector to incorrect interrupt */ __DSB(); } void APP_WAKEUP_BUTTON_IRQ_HANDLER(void) { if ((1U << APP_WAKEUP_BUTTON_GPIO_PIN) & PORT_GetPinsInterruptFlags(APP_WAKEUP_BUTTON_PORT)) { /* Disable interrupt. */ PORT_SetPinInterruptConfig(APP_WAKEUP_BUTTON_PORT, APP_WAKEUP_BUTTON_GPIO_PIN, kPORT_InterruptOrDMADisabled); PORT_ClearPinsInterruptFlags(APP_WAKEUP_BUTTON_PORT, (1U << APP_WAKEUP_BUTTON_GPIO_PIN)); } /* Add for ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping exception return operation might vector to incorrect interrupt */ __DSB(); } /*! * @brief Get input from user about wakeup timeout */ static uint8_t APP_GetWakeupTimeout(void) { uint8_t timeout; while (1) { PRINTF("Select the wake up timeout in seconds.\r\n"); PRINTF("The allowed range is 1s ~ 9s.\r\n"); PRINTF("Eg. 
enter 5 to wake up in 5 seconds.\r\n"); PRINTF("\r\nWaiting for input timeout value...\r\n\r\n"); timeout = GETCHAR(); PRINTF("%c\r\n", timeout); if ((timeout > '0') && (timeout <= '9')) { return timeout - '0'; } PRINTF("Wrong value!\r\n"); } } /* Get wakeup source by user input. */ static app_wakeup_source_t APP_GetWakeupSource(void) { uint8_t ch; while (1) { PRINTF("Select the wake up source:\r\n"); PRINTF("Press T for LPTMR - Low Power Timer\r\n"); PRINTF("Press S for switch/button %s. \r\n", APP_WAKEUP_BUTTON_NAME); PRINTF("\r\nWaiting for key press..\r\n\r\n"); ch = GETCHAR(); if ((ch >= 'a') && (ch <= 'z')) { ch -= 'a' - 'A'; } if (ch == 'T') { return kAPP_WakeupSourceLptmr; } else if (ch == 'S') { return kAPP_WakeupSourcePin; } else { PRINTF("Wrong value!\r\n"); } } } /* Get wakeup timeout and wakeup source. */ void APP_GetWakeupConfig(app_power_mode_t targetMode) { /* Get wakeup source by user input. */ if (targetMode == kAPP_PowerModeVlls0) { /* In VLLS0 mode, the LPO is disabled, LPTMR could not work. */ PRINTF("Not support LPTMR wakeup because LPO is disabled in VLLS0 mode.\r\n"); s_wakeupSource = kAPP_WakeupSourcePin; } else { /* Get wakeup source by user input. */ s_wakeupSource = APP_GetWakeupSource(); } if (kAPP_WakeupSourceLptmr == s_wakeupSource) { /* Wakeup source is LPTMR, user should input wakeup timeout value. */ s_wakeupTimeout = APP_GetWakeupTimeout(); PRINTF("Will wakeup in %d seconds.\r\n", s_wakeupTimeout); } else { PRINTF("Press %s to wake up.\r\n", APP_WAKEUP_BUTTON_NAME); } } void APP_SetWakeupConfig(app_power_mode_t targetMode) { /* Set LPTMR timeout value. */ if (kAPP_WakeupSourceLptmr == s_wakeupSource) { LPTMR_SetTimerPeriod(APP_LPTMR, (LPO_CLK_FREQ * s_wakeupTimeout) - 1U); LPTMR_StartTimer(APP_LPTMR); } /* Set the wakeup module. */ if (kAPP_WakeupSourceLptmr == s_wakeupSource) { LPTMR_EnableInterrupts(APP_LPTMR, kLPTMR_TimerInterruptEnable); } else { PORT_SetPinInterruptConfig(APP_WAKEUP_BUTTON_PORT, APP_WAKEUP_BUTTON_GPIO_PIN, APP_WAKEUP_BUTTON_IRQ_TYPE); } /* If targetMode is VLLS/LLS, setup LLWU. */ if ((kAPP_PowerModeWait != targetMode) && (kAPP_PowerModeVlpw != targetMode) && (kAPP_PowerModeVlps != targetMode) && (kAPP_PowerModeStop != targetMode)) { if (kAPP_WakeupSourceLptmr == s_wakeupSource) { LLWU_EnableInternalModuleInterruptWakup(APP_LLWU, LLWU_LPTMR_IDX, true); } else { LLWU_SetExternalWakeupPinMode(APP_LLWU, LLWU_WAKEUP_PIN_IDX, LLWU_WAKEUP_PIN_TYPE); } NVIC_EnableIRQ(LLWU_IRQn); } } void APP_ShowPowerMode(smc_power_state_t powerMode) { switch (powerMode) { case kSMC_PowerStateRun: PRINTF(" Power mode: RUN\r\n"); break; case kSMC_PowerStateVlpr: PRINTF(" Power mode: VLPR\r\n"); break; default: PRINTF(" Power mode wrong\r\n"); break; } } /* * Check whether could switch to target power mode from current mode. * Return true if could switch, return false if could not switch. */ bool APP_CheckPowerMode(smc_power_state_t curPowerState, app_power_mode_t targetPowerMode) { bool modeValid = true; /* * Check wether the mode change is allowed. * * 1. If current mode is HSRUN mode, the target mode must be RUN mode. * 2. If current mode is RUN mode, the target mode must not be VLPW mode. * 3. If current mode is VLPR mode, the target mode must not be HSRUN/WAIT/STOP mode. * 4. If already in the target mode. 
*/ switch (curPowerState) { case kSMC_PowerStateRun: if (kAPP_PowerModeVlpw == targetPowerMode) { PRINTF("Could not enter VLPW mode from RUN mode.\r\n"); modeValid = false; } break; case kSMC_PowerStateVlpr: if ((kAPP_PowerModeWait == targetPowerMode) || (kAPP_PowerModeStop == targetPowerMode)) { PRINTF("Could not enter HSRUN/STOP/WAIT modes from VLPR mode.\r\n"); modeValid = false; } break; default: PRINTF("Wrong power state.\r\n"); modeValid = false; break; } if (!modeValid) { return false; } /* Don't need to change power mode if current mode is already the target mode. */ if (((kAPP_PowerModeRun == targetPowerMode) && (kSMC_PowerStateRun == curPowerState)) || ((kAPP_PowerModeVlpr == targetPowerMode) && (kSMC_PowerStateVlpr == curPowerState))) { PRINTF("Already in the target power mode.\r\n"); return false; } return true; } /* * Power mode switch. */ void APP_PowerModeSwitch(smc_power_state_t curPowerState, app_power_mode_t targetPowerMode) { smc_power_mode_vlls_config_t vlls_config; vlls_config.enablePorDetectInVlls0 = true; switch (targetPowerMode) { case kAPP_PowerModeVlpr: APP_SetClockVlpr(); SMC_SetPowerModeVlpr(SMC, false); while (kSMC_PowerStateVlpr != SMC_GetPowerModeState(SMC)) { } break; case kAPP_PowerModeRun: /* Power mode change. */ SMC_SetPowerModeRun(SMC); while (kSMC_PowerStateRun != SMC_GetPowerModeState(SMC)) { } /* If enter RUN from VLPR, change clock after the power mode change. */ if (kSMC_PowerStateVlpr == curPowerState) { APP_SetClockRunFromVlpr(); } break; case kAPP_PowerModeWait: SMC_PreEnterWaitModes(); SMC_SetPowerModeWait(SMC); SMC_PostExitWaitModes(); break; case kAPP_PowerModeStop: SMC_PreEnterStopModes(); SMC_SetPowerModeStop(SMC, kSMC_PartialStop); SMC_PostExitStopModes(); break; case kAPP_PowerModeVlpw: SMC_PreEnterWaitModes(); SMC_SetPowerModeVlpw(SMC); SMC_PostExitWaitModes(); break; case kAPP_PowerModeVlps: SMC_PreEnterStopModes(); SMC_SetPowerModeVlps(SMC); SMC_PostExitStopModes(); break; case kAPP_PowerModeLls: SMC_PreEnterStopModes(); SMC_SetPowerModeLls(SMC); SMC_PostExitStopModes(); break; case kAPP_PowerModeVlls0: vlls_config.subMode = kSMC_StopSub0; SMC_PreEnterStopModes(); SMC_SetPowerModeVlls(SMC, &vlls_config); SMC_PostExitStopModes(); break; case kAPP_PowerModeVlls1: vlls_config.subMode = kSMC_StopSub1; SMC_PreEnterStopModes(); SMC_SetPowerModeVlls(SMC, &vlls_config); SMC_PostExitStopModes(); break; case kAPP_PowerModeVlls2: vlls_config.subMode = kSMC_StopSub2; SMC_PreEnterStopModes(); SMC_SetPowerModeVlls(SMC, &vlls_config); SMC_PostExitStopModes(); break; case kAPP_PowerModeVlls3: vlls_config.subMode = kSMC_StopSub3; SMC_PreEnterStopModes(); SMC_SetPowerModeVlls(SMC, &vlls_config); SMC_PostExitStopModes(); break; default: PRINTF("Wrong value"); break; } } /*! * @brief main demo function. */ int main(void) { uint32_t freq = 0; uint8_t ch; smc_power_state_t curPowerState; app_power_mode_t targetPowerMode; bool needSetWakeup; /* Need to set wakeup. */ /* Must configure pins before PMC_ClearPeriphIOIsolationFlag */ BOARD_InitPins(); /* Power related. */ SMC_SetPowerModeProtection(SMC, kSMC_AllowPowerModeAll); if (kRCM_SourceWakeup & RCM_GetPreviousResetSources(RCM)) /* Wakeup from VLLS. */ { PMC_ClearPeriphIOIsolationFlag(PMC); NVIC_ClearPendingIRQ(LLWU_IRQn); } BOARD_InitBootClocks(); APP_InitDebugConsole(); BOARD_InitBootPeripherals(); NVIC_EnableIRQ(APP_WAKEUP_BUTTON_IRQ); if (kRCM_SourceWakeup & RCM_GetPreviousResetSources(RCM)) /* Wakeup from VLLS. 
*/ { PRINTF("\r\nMCU wakeup from VLLS modes...\r\n"); } while (1) { curPowerState = SMC_GetPowerModeState(SMC); freq = CLOCK_GetFreq(kCLOCK_CoreSysClk); PRINTF("\r\n#################### Power Mode Switch Demo ####################\n\r\n"); PRINTF(" Core Clock = %dHz \r\n", freq); APP_ShowPowerMode(curPowerState); PRINTF("\r\nSelect the desired operation \n\r\n"); PRINTF("Press %c for enter: RUN - Normal RUN mode\r\n", kAPP_PowerModeRun); PRINTF("Press %c for enter: WAIT - Wait mode\r\n", kAPP_PowerModeWait); PRINTF("Press %c for enter: STOP - Stop mode\r\n", kAPP_PowerModeStop); PRINTF("Press %c for enter: VLPR - Very Low Power Run mode\r\n", kAPP_PowerModeVlpr); PRINTF("Press %c for enter: VLPW - Very Low Power Wait mode\r\n", kAPP_PowerModeVlpw); PRINTF("Press %c for enter: VLPS - Very Low Power Stop mode\r\n", kAPP_PowerModeVlps); PRINTF("Press %c for enter: LLS/LLS3 - Low Leakage Stop mode\r\n", kAPP_PowerModeLls); PRINTF("Press %c for enter: VLLS0 - Very Low Leakage Stop 0 mode\r\n", kAPP_PowerModeVlls0); PRINTF("Press %c for enter: VLLS1 - Very Low Leakage Stop 1 mode\r\n", kAPP_PowerModeVlls1); PRINTF("Press %c for enter: VLLS2 - Very Low Leakage Stop 2 mode\r\n", kAPP_PowerModeVlls2); PRINTF("Press %c for enter: VLLS3 - Very Low Leakage Stop 3 mode\r\n", kAPP_PowerModeVlls3); PRINTF("\r\nWaiting for power mode select..\r\n\r\n"); /* Wait for user response */ ch = GETCHAR(); if ((ch >= 'a') && (ch <= 'z')) { ch -= 'a' - 'A'; } targetPowerMode = (app_power_mode_t)ch; if ((targetPowerMode > kAPP_PowerModeMin) && (targetPowerMode < kAPP_PowerModeMax)) { /* If could not set the target power mode, loop continue. */ if (!APP_CheckPowerMode(curPowerState, targetPowerMode)) { continue; } /* If target mode is RUN/VLPR/HSRUN, don't need to set wakeup source. */ if ((kAPP_PowerModeRun == targetPowerMode) || (kAPP_PowerModeVlpr == targetPowerMode)) { needSetWakeup = false; } else { needSetWakeup = true; } if (needSetWakeup) { APP_GetWakeupConfig(targetPowerMode); } APP_PowerPreSwitchHook(curPowerState, targetPowerMode); if (needSetWakeup) { APP_SetWakeupConfig(targetPowerMode); } APP_PowerModeSwitch(curPowerState, targetPowerMode); APP_PowerPostSwitchHook(curPowerState, targetPowerMode); PRINTF("\r\nNext loop\r\n"); } } }
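The main() loop above interleaves mode checking, wakeup configuration and the pre/post hooks. Condensed into one helper for a single wakeup-capable target, the required ordering looks like the sketch below; every call is a function defined in this file, and LLS is only an example target mode.

/* Condensed sketch of the call ordering for one LLS entry,
 * using only functions defined above in this file. */
static void APP_EnterLlsOnce(void)
{
    smc_power_state_t cur = SMC_GetPowerModeState(SMC);

    if (!APP_CheckPowerMode(cur, kAPP_PowerModeLls))
        return;

    APP_GetWakeupConfig(kAPP_PowerModeLls);          /* pick LPTMR or pin    */
    APP_PowerPreSwitchHook(cur, kAPP_PowerModeLls);  /* flush/deinit console */
    APP_SetWakeupConfig(kAPP_PowerModeLls);          /* arm source + LLWU    */
    APP_PowerModeSwitch(cur, kAPP_PowerModeLls);     /* enter LLS            */
    APP_PowerPostSwitchHook(cur, kAPP_PowerModeLls); /* restore clocks/UART  */
}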
698312.c
/* * MPC512x PSC in SPI mode driver. * * Copyright (C) 2007,2008 Freescale Semiconductor Inc. * Original port from 52xx driver: * Hongjun Chen <[email protected]> * * Fork of mpc52xx_psc_spi.c: * Copyright (C) 2006 TOPTICA Photonics AG., Dragos Carp * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/completion.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/spi/spi.h> #include <linux/fsl_devices.h> #include <linux/gpio.h> #include <asm/mpc52xx_psc.h> struct mpc512x_psc_spi { void (*cs_control)(struct spi_device *spi, bool on); /* driver internal data */ struct mpc52xx_psc __iomem *psc; struct mpc512x_psc_fifo __iomem *fifo; unsigned int irq; u8 bits_per_word; struct clk *clk_mclk; struct clk *clk_ipg; u32 mclk_rate; struct completion txisrdone; }; /* controller state */ struct mpc512x_psc_spi_cs { int bits_per_word; int speed_hz; }; /* set clock freq, clock ramp, bits per work * if t is NULL then reset the values to the default values */ static int mpc512x_psc_spi_transfer_setup(struct spi_device *spi, struct spi_transfer *t) { struct mpc512x_psc_spi_cs *cs = spi->controller_state; cs->speed_hz = (t && t->speed_hz) ? t->speed_hz : spi->max_speed_hz; cs->bits_per_word = (t && t->bits_per_word) ? t->bits_per_word : spi->bits_per_word; cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8; return 0; } static void mpc512x_psc_spi_activate_cs(struct spi_device *spi) { struct mpc512x_psc_spi_cs *cs = spi->controller_state; struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); struct mpc52xx_psc __iomem *psc = mps->psc; u32 sicr; u32 ccr; int speed; u16 bclkdiv; sicr = in_be32(&psc->sicr); /* Set clock phase and polarity */ if (spi->mode & SPI_CPHA) sicr |= 0x00001000; else sicr &= ~0x00001000; if (spi->mode & SPI_CPOL) sicr |= 0x00002000; else sicr &= ~0x00002000; if (spi->mode & SPI_LSB_FIRST) sicr |= 0x10000000; else sicr &= ~0x10000000; out_be32(&psc->sicr, sicr); ccr = in_be32(&psc->ccr); ccr &= 0xFF000000; speed = cs->speed_hz; if (!speed) speed = 1000000; /* default 1MHz */ bclkdiv = (mps->mclk_rate / speed) - 1; ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8)); out_be32(&psc->ccr, ccr); mps->bits_per_word = cs->bits_per_word; if (mps->cs_control && gpio_is_valid(spi->cs_gpio)) mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0); } static void mpc512x_psc_spi_deactivate_cs(struct spi_device *spi) { struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); if (mps->cs_control && gpio_is_valid(spi->cs_gpio)) mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 
0 : 1); } /* extract and scale size field in txsz or rxsz */ #define MPC512x_PSC_FIFO_SZ(sz) ((sz & 0x7ff) << 2); #define EOFBYTE 1 static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi, struct spi_transfer *t) { struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; size_t tx_len = t->len; size_t rx_len = t->len; u8 *tx_buf = (u8 *)t->tx_buf; u8 *rx_buf = (u8 *)t->rx_buf; if (!tx_buf && !rx_buf && t->len) return -EINVAL; while (rx_len || tx_len) { size_t txcount; u8 data; size_t fifosz; size_t rxcount; int rxtries; /* * send the TX bytes in as large a chunk as possible * but neither exceed the TX nor the RX FIFOs */ fifosz = MPC512x_PSC_FIFO_SZ(in_be32(&fifo->txsz)); txcount = min(fifosz, tx_len); fifosz = MPC512x_PSC_FIFO_SZ(in_be32(&fifo->rxsz)); fifosz -= in_be32(&fifo->rxcnt) + 1; txcount = min(fifosz, txcount); if (txcount) { /* fill the TX FIFO */ while (txcount-- > 0) { data = tx_buf ? *tx_buf++ : 0; if (tx_len == EOFBYTE && t->cs_change) setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF); out_8(&fifo->txdata_8, data); tx_len--; } /* have the ISR trigger when the TX FIFO is empty */ reinit_completion(&mps->txisrdone); out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY); out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY); wait_for_completion(&mps->txisrdone); } /* * consume as much RX data as the FIFO holds, while we * iterate over the transfer's TX data length * * only insist in draining all the remaining RX bytes * when the TX bytes were exhausted (that's at the very * end of this transfer, not when still iterating over * the transfer's chunks) */ rxtries = 50; do { /* * grab whatever was in the FIFO when we started * looking, don't bother fetching what was added to * the FIFO while we read from it -- we'll return * here eventually and prefer sending out remaining * TX data */ fifosz = in_be32(&fifo->rxcnt); rxcount = min(fifosz, rx_len); while (rxcount-- > 0) { data = in_8(&fifo->rxdata_8); if (rx_buf) *rx_buf++ = data; rx_len--; } /* * come back later if there still is TX data to send, * bail out of the RX drain loop if all of the TX data * was sent and all of the RX data was received (i.e. * when the transmission has completed) */ if (tx_len) break; if (!rx_len) break; /* * TX data transmission has completed while RX data * is still pending -- that's a transient situation * which depends on wire speed and specific * hardware implementation details (buffering) yet * should resolve very quickly * * just yield for a moment to not hog the CPU for * too long when running SPI at low speed * * the timeout range is rather arbitrary and tries * to balance throughput against system load; the * chosen values result in a minimal timeout of 50 * times 10us and thus work at speeds as low as * some 20kbps, while the maximum timeout at the * transfer's end could be 5ms _if_ nothing else * ticks in the system _and_ RX data still wasn't * received, which only occurs in situations that * are exceptional; removing the unpredictability * of the timeout either decreases throughput * (longer timeouts), or puts more load on the * system (fixed short timeouts) or requires the * use of a timeout API instead of a counter and an * unknown inner delay */ usleep_range(10, 100); } while (--rxtries > 0); if (!tx_len && rx_len && !rxtries) { /* * not enough RX bytes even after several retries * and the resulting rather long timeout? 
*/ rxcount = in_be32(&fifo->rxcnt); dev_warn(&spi->dev, "short xfer, missing %zd RX bytes, FIFO level %zd\n", rx_len, rxcount); } /* * drain and drop RX data which "should not be there" in * the first place, for undisturbed transmission this turns * into a NOP (except for the FIFO level fetch) */ if (!tx_len && !rx_len) { while (in_be32(&fifo->rxcnt)) in_8(&fifo->rxdata_8); } } return 0; } static int mpc512x_psc_spi_msg_xfer(struct spi_master *master, struct spi_message *m) { struct spi_device *spi; unsigned cs_change; int status; struct spi_transfer *t; spi = m->spi; cs_change = 1; status = 0; list_for_each_entry(t, &m->transfers, transfer_list) { if (t->bits_per_word || t->speed_hz) { status = mpc512x_psc_spi_transfer_setup(spi, t); if (status < 0) break; } if (cs_change) mpc512x_psc_spi_activate_cs(spi); cs_change = t->cs_change; status = mpc512x_psc_spi_transfer_rxtx(spi, t); if (status) break; m->actual_length += t->len; if (t->delay_usecs) udelay(t->delay_usecs); if (cs_change) mpc512x_psc_spi_deactivate_cs(spi); } m->status = status; m->complete(m->context); if (status || !cs_change) mpc512x_psc_spi_deactivate_cs(spi); mpc512x_psc_spi_transfer_setup(spi, NULL); spi_finalize_current_message(master); return status; } static int mpc512x_psc_spi_prep_xfer_hw(struct spi_master *master) { struct mpc512x_psc_spi *mps = spi_master_get_devdata(master); struct mpc52xx_psc __iomem *psc = mps->psc; dev_dbg(&master->dev, "%s()\n", __func__); /* Zero MR2 */ in_8(&psc->mode); out_8(&psc->mode, 0x0); /* enable transmitter/receiver */ out_8(&psc->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE); return 0; } static int mpc512x_psc_spi_unprep_xfer_hw(struct spi_master *master) { struct mpc512x_psc_spi *mps = spi_master_get_devdata(master); struct mpc52xx_psc __iomem *psc = mps->psc; struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; dev_dbg(&master->dev, "%s()\n", __func__); /* disable transmitter/receiver and fifo interrupt */ out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); out_be32(&fifo->tximr, 0); return 0; } static int mpc512x_psc_spi_setup(struct spi_device *spi) { struct mpc512x_psc_spi_cs *cs = spi->controller_state; int ret; if (spi->bits_per_word % 8) return -EINVAL; if (!cs) { cs = kzalloc(sizeof *cs, GFP_KERNEL); if (!cs) return -ENOMEM; if (gpio_is_valid(spi->cs_gpio)) { ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev)); if (ret) { dev_err(&spi->dev, "can't get CS gpio: %d\n", ret); kfree(cs); return ret; } gpio_direction_output(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 
0 : 1); } spi->controller_state = cs; } cs->bits_per_word = spi->bits_per_word; cs->speed_hz = spi->max_speed_hz; return 0; } static void mpc512x_psc_spi_cleanup(struct spi_device *spi) { if (gpio_is_valid(spi->cs_gpio)) gpio_free(spi->cs_gpio); kfree(spi->controller_state); } static int mpc512x_psc_spi_port_config(struct spi_master *master, struct mpc512x_psc_spi *mps) { struct mpc52xx_psc __iomem *psc = mps->psc; struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; u32 sicr; u32 ccr; int speed; u16 bclkdiv; /* Reset the PSC into a known state */ out_8(&psc->command, MPC52xx_PSC_RST_RX); out_8(&psc->command, MPC52xx_PSC_RST_TX); out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); /* Disable psc interrupts all useful interrupts are in fifo */ out_be16(&psc->isr_imr.imr, 0); /* Disable fifo interrupts, will be enabled later */ out_be32(&fifo->tximr, 0); out_be32(&fifo->rximr, 0); /* Setup fifo slice address and size */ /*out_be32(&fifo->txsz, 0x0fe00004);*/ /*out_be32(&fifo->rxsz, 0x0ff00004);*/ sicr = 0x01000000 | /* SIM = 0001 -- 8 bit */ 0x00800000 | /* GenClk = 1 -- internal clk */ 0x00008000 | /* SPI = 1 */ 0x00004000 | /* MSTR = 1 -- SPI master */ 0x00000800; /* UseEOF = 1 -- SS low until EOF */ out_be32(&psc->sicr, sicr); ccr = in_be32(&psc->ccr); ccr &= 0xFF000000; speed = 1000000; /* default 1MHz */ bclkdiv = (mps->mclk_rate / speed) - 1; ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8)); out_be32(&psc->ccr, ccr); /* Set 2ms DTL delay */ out_8(&psc->ctur, 0x00); out_8(&psc->ctlr, 0x82); /* we don't use the alarms */ out_be32(&fifo->rxalarm, 0xfff); out_be32(&fifo->txalarm, 0); /* Enable FIFO slices for Rx/Tx */ out_be32(&fifo->rxcmd, MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA); out_be32(&fifo->txcmd, MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA); mps->bits_per_word = 8; return 0; } static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id) { struct mpc512x_psc_spi *mps = (struct mpc512x_psc_spi *)dev_id; struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; /* clear interrupt and wake up the rx/tx routine */ if (in_be32(&fifo->txisr) & in_be32(&fifo->tximr) & MPC512x_PSC_FIFO_EMPTY) { out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY); out_be32(&fifo->tximr, 0); complete(&mps->txisrdone); return IRQ_HANDLED; } return IRQ_NONE; } static void mpc512x_spi_cs_control(struct spi_device *spi, bool onoff) { gpio_set_value(spi->cs_gpio, onoff); } /* bus_num is used only for the case dev->platform_data == NULL */ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr, u32 size, unsigned int irq, s16 bus_num) { struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); struct mpc512x_psc_spi *mps; struct spi_master *master; int ret; void *tempp; struct clk *clk; master = spi_alloc_master(dev, sizeof *mps); if (master == NULL) return -ENOMEM; dev_set_drvdata(dev, master); mps = spi_master_get_devdata(master); mps->irq = irq; if (pdata == NULL) { mps->cs_control = mpc512x_spi_cs_control; master->bus_num = bus_num; } else { mps->cs_control = pdata->cs_control; master->bus_num = pdata->bus_num; master->num_chipselect = pdata->max_chipselect; } master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; master->setup = mpc512x_psc_spi_setup; master->prepare_transfer_hardware = mpc512x_psc_spi_prep_xfer_hw; master->transfer_one_message = mpc512x_psc_spi_msg_xfer; master->unprepare_transfer_hardware = mpc512x_psc_spi_unprep_xfer_hw; master->cleanup = mpc512x_psc_spi_cleanup; master->dev.of_node = dev->of_node; 
tempp = devm_ioremap(dev, regaddr, size); if (!tempp) { dev_err(dev, "could not ioremap I/O port range\n"); ret = -EFAULT; goto free_master; } mps->psc = tempp; mps->fifo = (struct mpc512x_psc_fifo *)(tempp + sizeof(struct mpc52xx_psc)); ret = devm_request_irq(dev, mps->irq, mpc512x_psc_spi_isr, IRQF_SHARED, "mpc512x-psc-spi", mps); if (ret) goto free_master; init_completion(&mps->txisrdone); clk = devm_clk_get(dev, "mclk"); if (IS_ERR(clk)) { ret = PTR_ERR(clk); goto free_master; } ret = clk_prepare_enable(clk); if (ret) goto free_master; mps->clk_mclk = clk; mps->mclk_rate = clk_get_rate(clk); clk = devm_clk_get(dev, "ipg"); if (IS_ERR(clk)) { ret = PTR_ERR(clk); goto free_mclk_clock; } ret = clk_prepare_enable(clk); if (ret) goto free_mclk_clock; mps->clk_ipg = clk; ret = mpc512x_psc_spi_port_config(master, mps); if (ret < 0) goto free_ipg_clock; ret = devm_spi_register_master(dev, master); if (ret < 0) goto free_ipg_clock; return ret; free_ipg_clock: clk_disable_unprepare(mps->clk_ipg); free_mclk_clock: clk_disable_unprepare(mps->clk_mclk); free_master: spi_master_put(master); return ret; } static int mpc512x_psc_spi_do_remove(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct mpc512x_psc_spi *mps = spi_master_get_devdata(master); clk_disable_unprepare(mps->clk_mclk); clk_disable_unprepare(mps->clk_ipg); return 0; } static int mpc512x_psc_spi_of_probe(struct platform_device *op) { const u32 *regaddr_p; u64 regaddr64, size64; s16 id = -1; regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL); if (!regaddr_p) { dev_err(&op->dev, "Invalid PSC address\n"); return -EINVAL; } regaddr64 = of_translate_address(op->dev.of_node, regaddr_p); /* get PSC id (0..11, used by port_config) */ id = of_alias_get_id(op->dev.of_node, "spi"); if (id < 0) { dev_err(&op->dev, "no alias id for %s\n", op->dev.of_node->full_name); return id; } return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64, irq_of_parse_and_map(op->dev.of_node, 0), id); } static int mpc512x_psc_spi_of_remove(struct platform_device *op) { return mpc512x_psc_spi_do_remove(&op->dev); } static struct of_device_id mpc512x_psc_spi_of_match[] = { { .compatible = "fsl,mpc5121-psc-spi", }, {}, }; MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match); static struct platform_driver mpc512x_psc_spi_of_driver = { .probe = mpc512x_psc_spi_of_probe, .remove = mpc512x_psc_spi_of_remove, .driver = { .name = "mpc512x-psc-spi", .owner = THIS_MODULE, .of_match_table = mpc512x_psc_spi_of_match, }, }; module_platform_driver(mpc512x_psc_spi_of_driver); MODULE_AUTHOR("John Rigby"); MODULE_DESCRIPTION("MPC512x PSC SPI Driver"); MODULE_LICENSE("GPL");
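From the client side this driver is reached through the generic Linux SPI core. A hedged sketch of a full-duplex transfer that ends up in mpc512x_psc_spi_msg_xfer() is shown below; the spi_device itself would come from platform or device-tree wiring and is assumed here.

/* Hedged client-side sketch: generic SPI core calls that end up in
 * mpc512x_psc_spi_msg_xfer(); the spi_device is assumed to exist. */
static int psc_spi_xfer_example(struct spi_device *spi)
{
	u8 tx[4] = { 0xde, 0xad, 0xbe, 0xef };
	u8 rx[4];
	struct spi_transfer t = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len	= sizeof(tx),
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spi_sync(spi, &m);	/* blocks until the PSC FIFOs drain */
}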
336085.c
/** * \file lwgsm_sms.c * \brief SMS API */ /* * Copyright (c) 2022 Tilen MAJERLE * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE * AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * This file is part of LwGSM - Lightweight GSM-AT library. * * Author: Tilen MAJERLE <[email protected]> * Version: v0.1.1 */ #include "lwgsm/lwgsm_private.h" #include "lwgsm/lwgsm_sms.h" #include "lwgsm/lwgsm_mem.h" #if LWGSM_CFG_SMS || __DOXYGEN__ #define LWGSM_SMS_OPERATION_IDX 0 /*!< Operation index for memory array (read, delete, list) */ #define LWGSM_SMS_SEND_IDX 1 /*!< Send index for memory array */ #define LWGSM_SMS_RECEIVE_IDX 2 /*!< Receive index for memory array */ #if !__DOXYGEN__ #define CHECK_ENABLED() if (!(check_enabled() == lwgsmOK)) { return lwgsmERRNOTENABLED; } #define CHECK_READY() if (!(check_ready() == lwgsmOK)) { return lwgsmERR; } #endif /* !__DOXYGEN__ */ /** * \brief Check if sms is enabled * \return \ref lwgsmOK on success, member of \ref lwgsmr_t otherwise */ static lwgsmr_t check_enabled(void) { lwgsmr_t res; lwgsm_core_lock(); res = lwgsm.m.sms.enabled ? lwgsmOK : lwgsmERR; lwgsm_core_unlock(); return res; } /** * \brief Check if SMS is available * \return \ref lwgsmOK on success, member of \ref lwgsmr_t enumeration otherwise */ static lwgsmr_t check_ready(void) { lwgsmr_t res; lwgsm_core_lock(); res = lwgsm.m.sms.ready ? lwgsmOK : lwgsmERR; lwgsm_core_unlock(); return res; } /** * \brief Check if input memory is available in modem * \param[in] mem: Memory to test * \param[in] can_curr: Flag indicates if \ref LWGSM_MEM_CURRENT option can be used * \return \ref lwgsmOK on success, member of \ref lwgsmr_t otherwise */ static lwgsmr_t check_sms_mem(lwgsm_mem_t mem, uint8_t can_curr) { lwgsmr_t res = lwgsmERRMEM; lwgsm_core_lock(); if ((mem < LWGSM_MEM_END && lwgsm.m.sms.mem[LWGSM_SMS_OPERATION_IDX].mem_available & (1 << (uint32_t)mem)) || (can_curr && mem == LWGSM_MEM_CURRENT)) { res = lwgsmOK; } lwgsm_core_unlock(); return res; } /** * \brief Enable SMS functionality * \param[in] evt_fn: Callback function called when command has finished. 
Set to `NULL` when not used * \param[in] evt_arg: Custom argument for event callback function * \param[in] blocking: Status whether command should be blocking or not * \return \ref lwgsmOK on success, member of \ref lwgsmr_t otherwise */ lwgsmr_t lwgsm_sms_enable(const lwgsm_api_cmd_evt_fn evt_fn, void* const evt_arg, const uint32_t blocking) { LWGSM_MSG_VAR_DEFINE(msg); LWGSM_MSG_VAR_ALLOC(msg, blocking); LWGSM_MSG_VAR_SET_EVT(msg, evt_fn, evt_arg); LWGSM_MSG_VAR_REF(msg).cmd_def = LWGSM_CMD_SMS_ENABLE; LWGSM_MSG_VAR_REF(msg).cmd = LWGSM_CMD_CPMS_GET_OPT; return lwgsmi_send_msg_to_producer_mbox(&LWGSM_MSG_VAR_REF(msg), lwgsmi_initiate_cmd, 60000); } /** * \brief Disable SMS functionality * \param[in] evt_fn: Callback function called when command has finished. Set to `NULL` when not used * \param[in] evt_arg: Custom argument for event callback function * \param[in] blocking: Status whether command should be blocking or not * \return \ref lwgsmOK on success, member of \ref lwgsmr_t otherwise */ lwgsmr_t lwgsm_sms_disable(const lwgsm_api_cmd_evt_fn evt_fn, void* const evt_arg, const uint32_t blocking) { lwgsm_core_lock(); lwgsm.m.sms.enabled = 0; if (evt_fn != NULL) { evt_fn(lwgsmOK, evt_arg); } lwgsm_core_unlock(); LWGSM_UNUSED(blocking); return lwgsmOK; } /** * \brief Send SMS text to phone number * \param[in] num: String number * \param[in] text: Text to send. Maximal `160` characters * \param[in] evt_fn: Callback function called when command has finished. Set to `NULL` when not used * \param[in] evt_arg: Custom argument for event callback function * \param[in] blocking: Status whether command should be blocking or not * \return \ref lwgsmOK on success, member of \ref lwgsmr_t otherwise */ lwgsmr_t lwgsm_sms_send(const char* num, const char* text, const lwgsm_api_cmd_evt_fn evt_fn, void* const evt_arg, const uint32_t blocking) { LWGSM_MSG_VAR_DEFINE(msg); LWGSM_ASSERT("num != NULL && num[0] > 0", num != NULL && num[0] > 0); LWGSM_ASSERT("text != NULL && text[0] > 0 && strlen(text) <= 160", text != NULL && text[0] > 0 && strlen(text) <= 160); CHECK_ENABLED(); /* Check if enabled */ CHECK_READY(); /* Check if ready */ LWGSM_MSG_VAR_ALLOC(msg, blocking); LWGSM_MSG_VAR_SET_EVT(msg, evt_fn, evt_arg); LWGSM_MSG_VAR_REF(msg).cmd_def = LWGSM_CMD_CMGS; LWGSM_MSG_VAR_REF(msg).cmd = LWGSM_CMD_CMGF; LWGSM_MSG_VAR_REF(msg).msg.sms_send.num = num; LWGSM_MSG_VAR_REF(msg).msg.sms_send.text = text; LWGSM_MSG_VAR_REF(msg).msg.sms_send.format = 1; /* Send as plain text */ return lwgsmi_send_msg_to_producer_mbox(&LWGSM_MSG_VAR_REF(msg), lwgsmi_initiate_cmd, 60000); } /** * \brief Read SMS entry at specific memory and position * \param[in] mem: Memory used to read message from * \param[in] pos: Position number in memory to read * \param[out] entry: Pointer to SMS entry structure to fill data to * \param[in] update: Flag indicates update. Set to `1` to change `UNREAD` messages to `READ` or `0` to leave as is * \param[in] evt_fn: Callback function called when command has finished. 
Set to `NULL` when not used * \param[in] evt_arg: Custom argument for event callback function * \param[in] blocking: Status whether command should be blocking or not * \return \ref lwgsmOK on success, member of \ref lwgsmr_t otherwise */ lwgsmr_t lwgsm_sms_read(lwgsm_mem_t mem, size_t pos, lwgsm_sms_entry_t* entry, uint8_t update, const lwgsm_api_cmd_evt_fn evt_fn, void* const evt_arg, const uint32_t blocking) { LWGSM_MSG_VAR_DEFINE(msg); LWGSM_ASSERT("entry != NULL", entry != NULL); CHECK_ENABLED(); /* Check if enabled */ CHECK_READY(); /* Check if ready */ LWGSM_ASSERT("check_sms_mem() == lwgsmOK", check_sms_mem(mem, 1) == lwgsmOK); LWGSM_MSG_VAR_ALLOC(msg, blocking); LWGSM_MSG_VAR_SET_EVT(msg, evt_fn, evt_arg); LWGSM_MEMSET(entry, 0x00, sizeof(*entry)); /* Reset data structure */ entry->mem = mem; /* Set memory */ entry->pos = pos; /* Set device position */ LWGSM_MSG_VAR_REF(msg).cmd_def = LWGSM_CMD_CMGR; if (mem == LWGSM_MEM_CURRENT) { /* Should be always false */ LWGSM_MSG_VAR_REF(msg).cmd = LWGSM_CMD_CPMS_GET;/* First get memory */ } else { LWGSM_MSG_VAR_REF(msg).cmd = LWGSM_CMD_CPMS_SET;/* First set memory */ } LWGSM_MSG_VAR_REF(msg).msg.sms_read.mem = mem; LWGSM_MSG_VAR_REF(msg).msg.sms_read.pos = pos; LWGSM_MSG_VAR_REF(msg).msg.sms_read.entry = entry; LWGSM_MSG_VAR_REF(msg).msg.sms_read.update = update; LWGSM_MSG_VAR_REF(msg).msg.sms_read.format = 1; /* Send as plain text */ return lwgsmi_send_msg_to_producer_mbox(&LWGSM_MSG_VAR_REF(msg), lwgsmi_initiate_cmd, 60000); } /** * \brief Delete SMS entry at specific memory and position * \param[in] mem: Memory used to read message from * \param[in] pos: Position number in memory to read * \param[in] evt_fn: Callback function called when command has finished. Set to `NULL` when not used * \param[in] evt_arg: Custom argument for event callback function * \param[in] blocking: Status whether command should be blocking or not * \return \ref lwgsmOK on success, member of \ref lwgsmr_t otherwise */ lwgsmr_t lwgsm_sms_delete(lwgsm_mem_t mem, size_t pos, const lwgsm_api_cmd_evt_fn evt_fn, void* const evt_arg, const uint32_t blocking) { LWGSM_MSG_VAR_DEFINE(msg); CHECK_ENABLED(); /* Check if enabled */ CHECK_READY(); /* Check if ready */ LWGSM_ASSERT("check_sms_mem() == lwgsmOK", check_sms_mem(mem, 1) == lwgsmOK); LWGSM_MSG_VAR_ALLOC(msg, blocking); LWGSM_MSG_VAR_SET_EVT(msg, evt_fn, evt_arg); LWGSM_MSG_VAR_REF(msg).cmd_def = LWGSM_CMD_CMGD; if (mem == LWGSM_MEM_CURRENT) { /* Should be always false */ LWGSM_MSG_VAR_REF(msg).cmd = LWGSM_CMD_CPMS_GET;/* First get memory */ } else { LWGSM_MSG_VAR_REF(msg).cmd = LWGSM_CMD_CPMS_SET;/* First set memory */ } LWGSM_MSG_VAR_REF(msg).msg.sms_delete.mem = mem; LWGSM_MSG_VAR_REF(msg).msg.sms_delete.pos = pos; return lwgsmi_send_msg_to_producer_mbox(&LWGSM_MSG_VAR_REF(msg), lwgsmi_initiate_cmd, 1000); } /** * \brief Delete all SMS entries with specific status * \param[in] status: SMS status. This parameter can be one of all possible types in \ref lwgsm_sms_status_t enumeration * \param[in] evt_fn: Callback function called when command has finished. 
Set to `NULL` when not used * \param[in] evt_arg: Custom argument for event callback function * \param[in] blocking: Status whether command should be blocking or not * \return \ref lwgsmOK on success, member of \ref lwgsmr_t otherwise */ lwgsmr_t lwgsm_sms_delete_all(lwgsm_sms_status_t status, const lwgsm_api_cmd_evt_fn evt_fn, void* const evt_arg, const uint32_t blocking) { LWGSM_MSG_VAR_DEFINE(msg); CHECK_ENABLED(); /* Check if enabled */ CHECK_READY(); /* Check if ready */ LWGSM_MSG_VAR_ALLOC(msg, blocking); LWGSM_MSG_VAR_SET_EVT(msg, evt_fn, evt_arg); LWGSM_MSG_VAR_REF(msg).cmd_def = LWGSM_CMD_CMGDA; LWGSM_MSG_VAR_REF(msg).cmd = LWGSM_CMD_CMGF;/* By default format = 1 */ LWGSM_MSG_VAR_REF(msg).msg.sms_delete_all.status = status; /* This command may take a while */ return lwgsmi_send_msg_to_producer_mbox(&LWGSM_MSG_VAR_REF(msg), lwgsmi_initiate_cmd, 60000); } /** * \brief List SMS from SMS memory * \param[in] mem: Memory to read entries from. Use \ref LWGSM_MEM_CURRENT to read from current memory * \param[in] stat: SMS status to read, either `read`, `unread`, `sent`, `unsent` or `all` * \param[out] entries: Pointer to array to save SMS entries * \param[in] etr: Number of entries to read * \param[out] er: Pointer to output variable to save number of entries in array * \param[in] update: Flag indicates update. Set to `1` to change `UNREAD` messages to `READ` or `0` to leave as is * \param[in] evt_fn: Callback function called when command has finished. Set to `NULL` when not used * \param[in] evt_arg: Custom argument for event callback function * \param[in] blocking: Status whether command should be blocking or not * \return \ref lwgsmOK on success, member of \ref lwgsmr_t otherwise */ lwgsmr_t lwgsm_sms_list(lwgsm_mem_t mem, lwgsm_sms_status_t stat, lwgsm_sms_entry_t* entries, size_t etr, size_t* er, uint8_t update, const lwgsm_api_cmd_evt_fn evt_fn, void* const evt_arg, const uint32_t blocking) { LWGSM_MSG_VAR_DEFINE(msg); LWGSM_ASSERT("entires != NULL", entries != NULL); LWGSM_ASSERT("etr > 0", etr > 0); CHECK_ENABLED(); /* Check if enabled */ CHECK_READY(); /* Check if ready */ LWGSM_ASSERT("check_sms_mem() == lwgsmOK", check_sms_mem(mem, 1) == lwgsmOK); LWGSM_MSG_VAR_ALLOC(msg, blocking); LWGSM_MSG_VAR_SET_EVT(msg, evt_fn, evt_arg); if (er != NULL) { *er = 0; } LWGSM_MEMSET(entries, 0x00, sizeof(*entries) * etr);/* Reset data structure */ LWGSM_MSG_VAR_REF(msg).cmd_def = LWGSM_CMD_CMGL; if (mem == LWGSM_MEM_CURRENT) { /* Should be always false */ LWGSM_MSG_VAR_REF(msg).cmd = LWGSM_CMD_CPMS_GET;/* First get memory */ } else { LWGSM_MSG_VAR_REF(msg).cmd = LWGSM_CMD_CPMS_SET;/* First set memory */ } LWGSM_MSG_VAR_REF(msg).msg.sms_list.mem = mem; LWGSM_MSG_VAR_REF(msg).msg.sms_list.status = stat; LWGSM_MSG_VAR_REF(msg).msg.sms_list.entries = entries; LWGSM_MSG_VAR_REF(msg).msg.sms_list.etr = etr; LWGSM_MSG_VAR_REF(msg).msg.sms_list.er = er; LWGSM_MSG_VAR_REF(msg).msg.sms_list.update = update; LWGSM_MSG_VAR_REF(msg).msg.sms_list.format = 1; /* Send as plain text */ return lwgsmi_send_msg_to_producer_mbox(&LWGSM_MSG_VAR_REF(msg), lwgsmi_initiate_cmd, 60000); } /** * \brief Set preferred storage for SMS * \param[in] mem1: Preferred memory for read/delete SMS operations. Use \ref LWGSM_MEM_CURRENT to keep it as is * \param[in] mem2: Preferred memory for sent/write SMS operations. Use \ref LWGSM_MEM_CURRENT to keep it as is * \param[in] mem3: Preferred memory for received SMS entries. 
Use \ref LWGSM_MEM_CURRENT to keep it as is * \param[in] evt_fn: Callback function called when command has finished. Set to `NULL` when not used * \param[in] evt_arg: Custom argument for event callback function * \param[in] blocking: Status whether command should be blocking or not * \return \ref lwgsmOK on success, member of \ref lwgsmr_t otherwise */ lwgsmr_t lwgsm_sms_set_preferred_storage(lwgsm_mem_t mem1, lwgsm_mem_t mem2, lwgsm_mem_t mem3, const lwgsm_api_cmd_evt_fn evt_fn, void* const evt_arg, const uint32_t blocking) { LWGSM_MSG_VAR_DEFINE(msg); CHECK_ENABLED(); /* Check if enabled */ CHECK_READY(); /* Check if ready */ LWGSM_ASSERT("check_sms_mem(1) == lwgsmOK", check_sms_mem(mem1, 1) == lwgsmOK); LWGSM_ASSERT("check_sms_mem(2) == lwgsmOK", check_sms_mem(mem2, 1) == lwgsmOK); LWGSM_ASSERT("check_sms_mem(3) == lwgsmOK", check_sms_mem(mem3, 1) == lwgsmOK); LWGSM_MSG_VAR_ALLOC(msg, blocking); LWGSM_MSG_VAR_SET_EVT(msg, evt_fn, evt_arg); LWGSM_MSG_VAR_REF(msg).cmd_def = LWGSM_CMD_CPMS_SET; /* In case any of memories is set to current, read current status first from device */ if (mem1 == LWGSM_MEM_CURRENT || mem2 == LWGSM_MEM_CURRENT || mem3 == LWGSM_MEM_CURRENT) { LWGSM_MSG_VAR_REF(msg).cmd = LWGSM_CMD_CPMS_GET; } LWGSM_MSG_VAR_REF(msg).msg.sms_memory.mem[0] = mem1; LWGSM_MSG_VAR_REF(msg).msg.sms_memory.mem[1] = mem2; LWGSM_MSG_VAR_REF(msg).msg.sms_memory.mem[2] = mem3; return lwgsmi_send_msg_to_producer_mbox(&LWGSM_MSG_VAR_REF(msg), lwgsmi_initiate_cmd, 60000); } #endif /* LWGSM_CFG_SMS || __DOXYGEN__ */
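A short, hedged usage sketch of the API above in blocking mode; the destination number and message text are placeholders, not values from this file.

/* Hedged usage sketch: blocking enable followed by a single SMS send.
 * The phone number below is a placeholder. */
#include "lwgsm/lwgsm_sms.h"

lwgsmr_t example_send_sms(void)
{
    lwgsmr_t res = lwgsm_sms_enable(NULL, NULL, 1);    /* blocking enable */
    if (res != lwgsmOK) {
        return res;
    }

    /* Plain-text send, limited to 160 characters by lwgsm_sms_send() */
    return lwgsm_sms_send("+10000000000", "Hello from LwGSM", NULL, NULL, 1);
}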
197419.c
/* * Copyright 2018 Nikolay Sivov for CodeWeavers * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA */ #define COBJMACROS #include <stdarg.h> #include "windef.h" #include "winbase.h" #include "initguid.h" #include "ole2.h" #include "rpcproxy.h" #include "msopc.h" #include "xmllite.h" #include "wine/debug.h" #include "opc_private.h" WINE_DEFAULT_DEBUG_CHANNEL(msopc); struct opc_filestream { IStream IStream_iface; LONG refcount; HANDLE hfile; }; static inline struct opc_filestream *impl_from_IStream(IStream *iface) { return CONTAINING_RECORD(iface, struct opc_filestream, IStream_iface); } static HRESULT WINAPI opc_filestream_QueryInterface(IStream *iface, REFIID iid, void **out) { TRACE("iface %p, iid %s, out %p.\n", iface, debugstr_guid(iid), out); if (IsEqualIID(iid, &IID_IStream) || IsEqualIID(iid, &IID_ISequentialStream) || IsEqualIID(iid, &IID_IUnknown)) { *out = iface; IStream_AddRef(iface); return S_OK; } *out = NULL; WARN("Unsupported interface %s.\n", debugstr_guid(iid)); return E_NOINTERFACE; } static ULONG WINAPI opc_filestream_AddRef(IStream *iface) { struct opc_filestream *stream = impl_from_IStream(iface); ULONG refcount = InterlockedIncrement(&stream->refcount); TRACE("%p increasing refcount to %u.\n", iface, refcount); return refcount; } static ULONG WINAPI opc_filestream_Release(IStream *iface) { struct opc_filestream *stream = impl_from_IStream(iface); ULONG refcount = InterlockedDecrement(&stream->refcount); TRACE("%p decreasing refcount to %u.\n", iface, refcount); if (!refcount) { CloseHandle(stream->hfile); heap_free(stream); } return refcount; } static HRESULT WINAPI opc_filestream_Read(IStream *iface, void *buff, ULONG size, ULONG *num_read) { struct opc_filestream *stream = impl_from_IStream(iface); DWORD read = 0; TRACE("iface %p, buff %p, size %u, num_read %p.\n", iface, buff, size, num_read); if (!num_read) num_read = &read; *num_read = 0; if (!ReadFile(stream->hfile, buff, size, num_read, NULL)) { WARN("Failed to read file, error %d.\n", GetLastError()); return HRESULT_FROM_WIN32(GetLastError()); } return *num_read == size ? 
S_OK : S_FALSE; } static HRESULT WINAPI opc_filestream_Write(IStream *iface, const void *data, ULONG size, ULONG *num_written) { struct opc_filestream *stream = impl_from_IStream(iface); DWORD written = 0; TRACE("iface %p, data %p, size %u, num_written %p.\n", iface, data, size, num_written); if (!num_written) num_written = &written; *num_written = 0; if (!WriteFile(stream->hfile, data, size, num_written, NULL)) { WARN("Failed to write to file, error %d.\n", GetLastError()); return HRESULT_FROM_WIN32(GetLastError()); } return S_OK; } static HRESULT WINAPI opc_filestream_Seek(IStream *iface, LARGE_INTEGER move, DWORD origin, ULARGE_INTEGER *newpos) { struct opc_filestream *stream = impl_from_IStream(iface); TRACE("iface %p, move %s, origin %d, newpos %p.\n", iface, wine_dbgstr_longlong(move.QuadPart), origin, newpos); if (!SetFilePointerEx(stream->hfile, move, (LARGE_INTEGER *)newpos, origin)) return HRESULT_FROM_WIN32(GetLastError()); return S_OK; } static HRESULT WINAPI opc_filestream_SetSize(IStream *iface, ULARGE_INTEGER size) { FIXME("iface %p, size %s stub!\n", iface, wine_dbgstr_longlong(size.QuadPart)); return E_NOTIMPL; } static HRESULT WINAPI opc_filestream_CopyTo(IStream *iface, IStream *dest, ULARGE_INTEGER size, ULARGE_INTEGER *num_read, ULARGE_INTEGER *written) { FIXME("iface %p, dest %p, size %s, num_read %p, written %p stub!\n", iface, dest, wine_dbgstr_longlong(size.QuadPart), num_read, written); return E_NOTIMPL; } static HRESULT WINAPI opc_filestream_Commit(IStream *iface, DWORD flags) { FIXME("iface %p, flags %#x stub!\n", iface, flags); return E_NOTIMPL; } static HRESULT WINAPI opc_filestream_Revert(IStream *iface) { FIXME("iface %p stub!\n", iface); return E_NOTIMPL; } static HRESULT WINAPI opc_filestream_LockRegion(IStream *iface, ULARGE_INTEGER offset, ULARGE_INTEGER size, DWORD lock_type) { FIXME("iface %p, offset %s, size %s, lock_type %d stub!\n", iface, wine_dbgstr_longlong(offset.QuadPart), wine_dbgstr_longlong(size.QuadPart), lock_type); return E_NOTIMPL; } static HRESULT WINAPI opc_filestream_UnlockRegion(IStream *iface, ULARGE_INTEGER offset, ULARGE_INTEGER size, DWORD lock_type) { FIXME("iface %p, offset %s, size %s, lock_type %d stub!\n", iface, wine_dbgstr_longlong(offset.QuadPart), wine_dbgstr_longlong(size.QuadPart), lock_type); return E_NOTIMPL; } static HRESULT WINAPI opc_filestream_Stat(IStream *iface, STATSTG *statstg, DWORD flag) { struct opc_filestream *stream = impl_from_IStream(iface); BY_HANDLE_FILE_INFORMATION fi; TRACE("iface %p, statstg %p, flag %d.\n", iface, statstg, flag); if (!statstg) return E_POINTER; memset(&fi, 0, sizeof(fi)); GetFileInformationByHandle(stream->hfile, &fi); memset(statstg, 0, sizeof(*statstg)); statstg->type = STGTY_STREAM; statstg->cbSize.u.LowPart = fi.nFileSizeLow; statstg->cbSize.u.HighPart = fi.nFileSizeHigh; statstg->mtime = fi.ftLastWriteTime; statstg->ctime = fi.ftCreationTime; statstg->atime = fi.ftLastAccessTime; return S_OK; } static HRESULT WINAPI opc_filestream_Clone(IStream *iface, IStream **result) { FIXME("iface %p, result %p stub!\n", iface, result); return E_NOTIMPL; } static const IStreamVtbl opc_filestream_vtbl = { opc_filestream_QueryInterface, opc_filestream_AddRef, opc_filestream_Release, opc_filestream_Read, opc_filestream_Write, opc_filestream_Seek, opc_filestream_SetSize, opc_filestream_CopyTo, opc_filestream_Commit, opc_filestream_Revert, opc_filestream_LockRegion, opc_filestream_UnlockRegion, opc_filestream_Stat, opc_filestream_Clone, }; static HRESULT opc_filestream_create(const WCHAR 
*filename, OPC_STREAM_IO_MODE io_mode, SECURITY_ATTRIBUTES *sa, DWORD flags, IStream **out) { struct opc_filestream *stream; DWORD access, creation; if (!filename || !out) return E_POINTER; switch (io_mode) { case OPC_STREAM_IO_READ: access = GENERIC_READ; creation = OPEN_EXISTING; break; case OPC_STREAM_IO_WRITE: access = GENERIC_WRITE; creation = CREATE_ALWAYS; break; default: return E_INVALIDARG; } if (!(stream = heap_alloc_zero(sizeof(*stream)))) return E_OUTOFMEMORY; stream->hfile = CreateFileW(filename, access, 0, sa, creation, flags, NULL); if (stream->hfile == INVALID_HANDLE_VALUE) { HRESULT hr = HRESULT_FROM_WIN32(GetLastError()); heap_free(stream); return hr; } stream->IStream_iface.lpVtbl = &opc_filestream_vtbl; stream->refcount = 1; *out = &stream->IStream_iface; TRACE("Created file stream %p.\n", *out); return S_OK; } static HRESULT WINAPI opc_factory_QueryInterface(IOpcFactory *iface, REFIID iid, void **out) { TRACE("iface %p, iid %s, out %p.\n", iface, debugstr_guid(iid), out); if (IsEqualIID(iid, &IID_IOpcFactory) || IsEqualIID(iid, &IID_IUnknown)) { *out = iface; IOpcFactory_AddRef(iface); return S_OK; } WARN("Unsupported interface %s.\n", debugstr_guid(iid)); return E_NOINTERFACE; } static ULONG WINAPI opc_factory_AddRef(IOpcFactory *iface) { return 2; } static ULONG WINAPI opc_factory_Release(IOpcFactory *iface) { return 1; } static HRESULT WINAPI opc_factory_CreatePackageRootUri(IOpcFactory *iface, IOpcUri **uri) { TRACE("iface %p, uri %p.\n", iface, uri); if (!uri) return E_POINTER; return opc_root_uri_create(uri); } static HRESULT WINAPI opc_factory_CreatePartUri(IOpcFactory *iface, LPCWSTR uri, IOpcPartUri **out) { static const WCHAR rootW[] = {'/',0}; IUri *part_uri, *root_uri, *combined; HRESULT hr; TRACE("iface %p, uri %s, out %p.\n", iface, debugstr_w(uri), out); if (!out) return E_POINTER; *out = NULL; if (FAILED(hr = CreateUri(uri, Uri_CREATE_ALLOW_RELATIVE, 0, &part_uri))) { WARN("Failed to create uri, hr %#x.\n", hr); return hr; } if (FAILED(hr = CreateUri(rootW, Uri_CREATE_ALLOW_RELATIVE, 0, &root_uri))) { WARN("Failed to create root uri, hr %#x.\n", hr); IUri_Release(part_uri); return hr; } hr = CoInternetCombineIUri(root_uri, part_uri, 0, &combined, 0); IUri_Release(root_uri); IUri_Release(part_uri); if (FAILED(hr)) { WARN("Failed to combine URIs, hr %#x.\n", hr); return hr; } hr = opc_part_uri_create(combined, NULL, out); IUri_Release(combined); return hr; } static HRESULT WINAPI opc_factory_CreateStreamOnFile(IOpcFactory *iface, LPCWSTR filename, OPC_STREAM_IO_MODE io_mode, SECURITY_ATTRIBUTES *sa, DWORD flags, IStream **stream) { TRACE("iface %p, filename %s, io_mode %d, sa %p, flags %#x, stream %p.\n", iface, debugstr_w(filename), io_mode, sa, flags, stream); return opc_filestream_create(filename, io_mode, sa, flags, stream); } static HRESULT WINAPI opc_factory_CreatePackage(IOpcFactory *iface, IOpcPackage **package) { TRACE("iface %p, package %p.\n", iface, package); return opc_package_create(iface, package); } static HRESULT WINAPI opc_factory_ReadPackageFromStream(IOpcFactory *iface, IStream *stream, OPC_READ_FLAGS flags, IOpcPackage **package) { FIXME("iface %p, stream %p, flags %#x, package %p stub!\n", iface, stream, flags, package); return E_NOTIMPL; } static HRESULT WINAPI opc_factory_WritePackageToStream(IOpcFactory *iface, IOpcPackage *package, OPC_WRITE_FLAGS flags, IStream *stream) { TRACE("iface %p, package %p, flags %#x, stream %p.\n", iface, package, flags, stream); if (!package || !stream) return E_POINTER; return 
opc_package_write(package, flags, stream); } static HRESULT WINAPI opc_factory_CreateDigitalSignatureManager(IOpcFactory *iface, IOpcPackage *package, IOpcDigitalSignatureManager **signature_manager) { FIXME("iface %p, package %p, signature_manager %p stub!\n", iface, package, signature_manager); return E_NOTIMPL; } static const IOpcFactoryVtbl opc_factory_vtbl = { opc_factory_QueryInterface, opc_factory_AddRef, opc_factory_Release, opc_factory_CreatePackageRootUri, opc_factory_CreatePartUri, opc_factory_CreateStreamOnFile, opc_factory_CreatePackage, opc_factory_ReadPackageFromStream, opc_factory_WritePackageToStream, opc_factory_CreateDigitalSignatureManager, }; static IOpcFactory opc_factory = { &opc_factory_vtbl }; static HRESULT WINAPI opc_class_factory_QueryInterface(IClassFactory *iface, REFIID iid, void **out) { TRACE("iface %p, iid %s, out %p.\n", iface, debugstr_guid(iid), out); if (IsEqualGUID(iid, &IID_IClassFactory) || IsEqualGUID(iid, &IID_IUnknown)) { IClassFactory_AddRef(iface); *out = iface; return S_OK; } *out = NULL; WARN("Unsupported interface %s.\n", debugstr_guid(iid)); return E_NOINTERFACE; } static ULONG WINAPI opc_class_factory_AddRef(IClassFactory *iface) { return 2; } static ULONG WINAPI opc_class_factory_Release(IClassFactory *iface) { return 1; } static HRESULT WINAPI opc_class_factory_CreateInstance(IClassFactory *iface, IUnknown *outer, REFIID iid, void **out) { TRACE("iface %p, outer %p, iid %s, out %p.\n", iface, outer, debugstr_guid(iid), out); if (outer) return CLASS_E_NOAGGREGATION; return IOpcFactory_QueryInterface(&opc_factory, iid, out); } static HRESULT WINAPI opc_class_factory_LockServer(IClassFactory *iface, BOOL dolock) { FIXME("iface %p, dolock %d stub!\n", iface, dolock); return S_OK; } static const struct IClassFactoryVtbl opc_class_factory_vtbl = { opc_class_factory_QueryInterface, opc_class_factory_AddRef, opc_class_factory_Release, opc_class_factory_CreateInstance, opc_class_factory_LockServer }; static IClassFactory opc_class_factory = { &opc_class_factory_vtbl }; HRESULT WINAPI DllGetClassObject(REFCLSID clsid, REFIID iid, void **out) { TRACE("clsid %s, iid %s, out %p\n", debugstr_guid(clsid), debugstr_guid(iid), out); if (IsEqualCLSID(clsid, &CLSID_OpcFactory)) return IClassFactory_QueryInterface(&opc_class_factory, iid, out); WARN("Unsupported class %s.\n", debugstr_guid(clsid)); return E_NOTIMPL; } HRESULT WINAPI DllCanUnloadNow(void) { return S_FALSE; } static HINSTANCE OPC_hInstance; BOOL WINAPI DllMain(HINSTANCE hInstDLL, DWORD reason, void *reserved) { OPC_hInstance = hInstDLL; switch (reason) { case DLL_PROCESS_ATTACH: DisableThreadLibraryCalls(hInstDLL); break; } return TRUE; } HRESULT WINAPI DllRegisterServer(void) { return __wine_register_resources( OPC_hInstance ); } HRESULT WINAPI DllUnregisterServer(void) { return __wine_unregister_resources( OPC_hInstance ); }
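/*
 * Illustrative client-side sketch (not part of the original Wine source): how
 * the factory exposed above is normally reached from application code, i.e.
 * CoCreateInstance() on CLSID_OpcFactory followed by
 * IOpcFactory_CreateStreamOnFile(), which ends up in opc_filestream_create().
 * Assumes COM has already been initialised; the file name is made up for the
 * example.
 */
#define COBJMACROS
#include <windows.h>
#include <msopc.h>

static HRESULT example_open_package_stream(IStream **stream)
{
    IOpcFactory *factory;
    HRESULT hr;

    hr = CoCreateInstance(&CLSID_OpcFactory, NULL, CLSCTX_INPROC_SERVER,
                          &IID_IOpcFactory, (void **)&factory);
    if (FAILED(hr)) return hr;

    /* OPC_STREAM_IO_READ opens an existing file with GENERIC_READ access. */
    hr = IOpcFactory_CreateStreamOnFile(factory, L"example.docx",
                                        OPC_STREAM_IO_READ, NULL,
                                        FILE_ATTRIBUTE_NORMAL, stream);
    IOpcFactory_Release(factory);
    return hr;
}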
571961.c
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include "./str.c"
#include "./server_mod.c"

bool _assert_eq_int(int line, int exp, int real) {
    if (exp != real) {
        printf("line:%d Assert Failed %d != %d\n", line, exp, real);
        return false;
    }
    return true;
}
#define assert_eq_int(e, r) _assert_eq_int(__LINE__, (e), (r))

bool _assert_eq_str(int line, char* exp, char* real) {
    if (strcmp(exp, real) != 0) {
        printf("line:%d Assert Failed %s != %s\n", line, exp, real);
        return false;
    }
    return true;
}
#define assert_eq_str(e, r) _assert_eq_str(__LINE__, (e), (r))

bool _assert_eq_uint8_p(int line, int num, uint8_t* exp, uint8_t* real) {
    for (int i = 0; i < num; i++) {
        /* Compare the bytes themselves, not the array pointers. */
        if (exp[i] != real[i]) {
            printf("line:%d Assert Failed %d != %d at %d\n", line, exp[i], real[i], i);
            return false;
        }
    }
    return true;
}
#define assert_eq_uint8_p(n, e, r) _assert_eq_uint8_p(__LINE__, (n), (e), (r))

int main(void) {
    assert_eq_int(10, c16('A'));

    struct response_t ires = { .get = true, .post = false, .restart = false, .ok = false };
    struct response_t res;

    res = ires;
    exec_http(10, "GET /cmd?q=HOGE", &res);

    res = ires;
    exec_http(10, "POST /prog\r\n\r\nA", &res);
}
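/*
 * Illustrative sketch (not part of the original test file): a minimal, made-up
 * use of the byte-array helper above, showing how a mismatch is reported with
 * the offending index.
 */
static void example_byte_compare(void) {
    uint8_t expected[4] = { 0x01, 0x02, 0x03, 0x04 };
    uint8_t actual[4]   = { 0x01, 0x02, 0xff, 0x04 };

    /* Reports the first mismatch ("... Assert Failed 3 != 255 at 2") and
     * returns false. */
    assert_eq_uint8_p(4, expected, actual);
}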
436798.c
//***************************************************************************** // // kentec320x240x16_ssd2119.c - Display driver for the Kentec K350QVG-V2-F // TFT display attached to the LCD controller via // an 8-bit LIDD interface. // // Copyright (c) 2013-2014 Texas Instruments Incorporated. All rights reserved. // Software License Agreement // // Texas Instruments (TI) is supplying this software for use solely and // exclusively on TI's microcontroller products. The software is owned by // TI and/or its suppliers, and is protected under applicable copyright // laws. You may not combine this software with "viral" open-source // software in order to form a larger program. // // THIS SOFTWARE IS PROVIDED "AS IS" AND WITH ALL FAULTS. // NO WARRANTIES, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT // NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. TI SHALL NOT, UNDER ANY // CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR CONSEQUENTIAL // DAMAGES, FOR ANY REASON WHATSOEVER. // // This is part of revision 2.1.0.12573 of the DK-TM4C129X Firmware Package. // //***************************************************************************** #include <stdbool.h> #include <stdint.h> #include "inc/hw_gpio.h" #include "inc/hw_ints.h" #include "inc/hw_memmap.h" #include "inc/hw_types.h" #include "inc/hw_sysctl.h" #include "driverlib/gpio.h" #include "driverlib/interrupt.h" #include "driverlib/sysctl.h" #include "driverlib/timer.h" #include "driverlib/rom.h" #include "driverlib/lcd.h" #include "grlib/grlib.h" #include "drivers/kentec320x240x16_ssd2119.h" //***************************************************************************** // //! \addtogroup kentec320x240x16_ssd2119_api //! @{ // //***************************************************************************** //***************************************************************************** // // This driver operates in four different screen orientations. They are: // // * Portrait - The screen is taller than it is wide, and the flex connector is // on the left of the display. This is selected by defining // PORTRAIT. // // * Landscape - The screen is wider than it is tall, and the flex connector is // on the bottom of the display. This is selected by defining // LANDSCAPE. // // * Portrait flip - The screen is taller than it is wide, and the flex // connector is on the right of the display. This is // selected by defining PORTRAIT_FLIP. // // * Landscape flip - The screen is wider than it is tall, and the flex // connector is on the top of the display. This is // selected by defining LANDSCAPE_FLIP. // // These can also be imagined in terms of screen rotation; if portrait mode is // 0 degrees of screen rotation, landscape is 90 degrees of counter-clockwise // rotation, portrait flip is 180 degrees of rotation, and landscape flip is // 270 degress of counter-clockwise rotation. // // If no screen orientation is selected, landscape mode will be used. // //***************************************************************************** #if ! defined(PORTRAIT) && ! defined(PORTRAIT_FLIP) && \ ! defined(LANDSCAPE) && ! defined(LANDSCAPE_FLIP) #define LANDSCAPE_FLIP #endif //***************************************************************************** // // Various definitions controlling coordinate space mapping and drawing // direction in the four supported orientations. 
// //***************************************************************************** #ifdef PORTRAIT #define HORIZ_DIRECTION 0x28 #define VERT_DIRECTION 0x20 #define MAPPED_X(x, y) (319 - (y)) #define MAPPED_Y(x, y) (x) #endif #ifdef LANDSCAPE #define HORIZ_DIRECTION 0x00 #define VERT_DIRECTION 0x08 #define MAPPED_X(x, y) (319 - (x)) #define MAPPED_Y(x, y) (239 - (y)) #endif #ifdef PORTRAIT_FLIP #define HORIZ_DIRECTION 0x18 #define VERT_DIRECTION 0x10 #define MAPPED_X(x, y) (y) #define MAPPED_Y(x, y) (239 - (x)) #endif #ifdef LANDSCAPE_FLIP #define HORIZ_DIRECTION 0x30 #define VERT_DIRECTION 0x38 #define MAPPED_X(x, y) (x) #define MAPPED_Y(x, y) (y) #endif //***************************************************************************** // // Various internal SD2119 registers name labels // //***************************************************************************** #define SSD2119_DEVICE_CODE_READ_REG \ 0x00 #define SSD2119_OSC_START_REG 0x00 #define SSD2119_OUTPUT_CTRL_REG 0x01 #define SSD2119_LCD_DRIVE_AC_CTRL_REG \ 0x02 #define SSD2119_PWR_CTRL_1_REG 0x03 #define SSD2119_DISPLAY_CTRL_REG \ 0x07 #define SSD2119_FRAME_CYCLE_CTRL_REG \ 0x0b #define SSD2119_PWR_CTRL_2_REG 0x0c #define SSD2119_PWR_CTRL_3_REG 0x0d #define SSD2119_PWR_CTRL_4_REG 0x0e #define SSD2119_GATE_SCAN_START_REG \ 0x0f #define SSD2119_SLEEP_MODE_1_REG \ 0x10 #define SSD2119_ENTRY_MODE_REG 0x11 #define SSD2119_SLEEP_MODE_2_REG \ 0x12 #define SSD2119_GEN_IF_CTRL_REG 0x15 #define SSD2119_PWR_CTRL_5_REG 0x1e #define SSD2119_RAM_DATA_REG 0x22 #define SSD2119_FRAME_FREQ_REG 0x25 #define SSD2119_ANALOG_SET_REG 0x26 #define SSD2119_VCOM_OTP_1_REG 0x28 #define SSD2119_VCOM_OTP_2_REG 0x29 #define SSD2119_GAMMA_CTRL_1_REG \ 0x30 #define SSD2119_GAMMA_CTRL_2_REG \ 0x31 #define SSD2119_GAMMA_CTRL_3_REG \ 0x32 #define SSD2119_GAMMA_CTRL_4_REG \ 0x33 #define SSD2119_GAMMA_CTRL_5_REG \ 0x34 #define SSD2119_GAMMA_CTRL_6_REG \ 0x35 #define SSD2119_GAMMA_CTRL_7_REG \ 0x36 #define SSD2119_GAMMA_CTRL_8_REG \ 0x37 #define SSD2119_GAMMA_CTRL_9_REG \ 0x3a #define SSD2119_GAMMA_CTRL_10_REG \ 0x3b #define SSD2119_V_RAM_POS_REG 0x44 #define SSD2119_H_RAM_START_REG 0x45 #define SSD2119_H_RAM_END_REG 0x46 #define SSD2119_X_RAM_ADDR_REG 0x4e #define SSD2119_Y_RAM_ADDR_REG 0x4f #define ENTRY_MODE_DEFAULT 0x6830 #define MAKE_ENTRY_MODE(x) ((ENTRY_MODE_DEFAULT & 0xff00) | (x)) //***************************************************************************** // // Read Access Timing // ------------------ // // Direction OOOIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIOOOOOOOOOOOOO // // ~RD ----- -------------------------- // \ / | // ------------------ // < Trdl >< Trdh > // < Tcycle > // < Tacc > // /------------------| // DATA ------------- ------------------ // \------------------/ // < Tdh > // // Delays < Trad >< Tdhd >< Trhd >< Trcd > // // This design keeps CS tied low so pulse width constraints relating to CS // have been transfered to ~RD here. // // Tcycle Read Cycle Time 1000nS // Tacc Data Access Time 100nS // Trdl Read Data Low 500nS // Trdh Read Data High 500nS // Tdh Data Hold Time 100nS // // Trad (READ_DATA_ACCESS_DELAY) controls the delay between asserting ~RD and // reading the data from the bus. // Tdhd (READ_DATA_HOLD_DELAY) controls the delay after reading the data and // before deasserting ~RD. // Trhd (READ_HOLD_DELAY) controls the delay between deasserting ~RD and // switching the data bus direction back to output. // Trcd (READ_DATA_CYCLE_DELAY) controls the delay after switching the // direction of the data bus. 
// //***************************************************************************** //***************************************************************************** // // The delay to impose after setting the state of the read/write line and // before reading the data bus. This is expressed in terms of cycles of a // tight loop whose body performs a single GPIO register access and needs to // comply with the 500nS read cycle pulse width constraint. // //***************************************************************************** #define READ_DATA_ACCESS_DELAY 5 //***************************************************************************** // // The delay to impose after reading the data and before resetting the state of // the read/write line during a read operation. This is expressed in terms of // cycles of a tight loop whose body performs a single GPIO register access and // needs to comply with the 500nS read cycle pulse width constraint. // //***************************************************************************** #define READ_DATA_HOLD_DELAY 5 //***************************************************************************** // // The delay to impose after deasserting ~RD and before setting the bus back to // an output. This is expressed in terms of cycles of a tight loop whose body // performs a single GPIO register access. // //***************************************************************************** #define READ_HOLD_DELAY 5 //***************************************************************************** // // The delay to impose after completing a read cycle and before returning to // the caller. This is expressed in terms of cycles of a tight loop whose body // performs a single GPIO register access and needs to comply with the 1000nS // read cycle pulse width constraint. // //***************************************************************************** #define READ_DATA_CYCLE_DELAY 5 //***************************************************************************** // // The dimensions of the LCD panel. // //***************************************************************************** #define LCD_HORIZONTAL_MAX 320 #define LCD_VERTICAL_MAX 240 //***************************************************************************** // // Translates a 24-bit RGB color to a display driver-specific color. // // \param c is the 24-bit RGB color. The least-significant byte is the blue // channel, the next byte is the green channel, and the third byte is the red // channel. // // This macro translates a 24-bit RGB color into a value that can be written // into the display's frame buffer in order to reproduce that color, or the // closest possible approximation of that color. // // \return Returns the display-driver specific color. // //***************************************************************************** #define DPYCOLORTRANSLATE(c) ((((c) & 0x00f80000) >> 8) | \ (((c) & 0x0000fc00) >> 5) | \ (((c) & 0x000000f8) >> 3)) //***************************************************************************** // // Writes a data word to the SSD2119. // //***************************************************************************** static inline void WriteData(uint16_t ui16Data) { // // Split the write into two bytes and pass them to the LCD controller. // LCDIDDDataWrite(LCD0_BASE, 0, ui16Data >> 8); LCDIDDDataWrite(LCD0_BASE, 0, ui16Data & 0xff); } //***************************************************************************** // // Writes a command to the SSD2119. 
// //***************************************************************************** static inline void WriteCommand(uint8_t ui8Data) { // // Pass the write on to the controller. // LCDIDDCommandWrite(LCD0_BASE, 0, (uint16_t)ui8Data); } //***************************************************************************** // //! Draws a pixel on the screen. //! //! \param pvDisplayData is a pointer to the driver-specific data for this //! display driver. //! \param i32X is the X coordinate of the pixel. //! \param i32Y is the Y coordinate of the pixel. //! \param ui32Value is the color of the pixel. //! //! This function sets the given pixel to a particular color. The coordinates //! of the pixel are assumed to be within the extents of the display. //! //! \return None. // //***************************************************************************** static void Kentec320x240x16_SSD2119PixelDraw(void *pvDisplayData, int32_t i32X, int32_t i32Y, uint32_t ui32Value) { // // Set the X address of the display cursor. // WriteCommand(SSD2119_X_RAM_ADDR_REG); WriteData(MAPPED_X(i32X, i32Y)); // // Set the Y address of the display cursor. // WriteCommand(SSD2119_Y_RAM_ADDR_REG); WriteData(MAPPED_Y(i32X, i32Y)); // // Write the pixel value. // WriteCommand(SSD2119_RAM_DATA_REG); WriteData(ui32Value); } //***************************************************************************** // //! Draws a horizontal sequence of pixels on the screen. //! //! \param pvDisplayData is a pointer to the driver-specific data for this //! display driver. //! \param i32X is the X coordinate of the first pixel. //! \param i32Y is the Y coordinate of the first pixel. //! \param i32X0 is sub-pixel offset within the pixel data, which is valid for //! 1 or 4 bit per pixel formats. //! \param i32Count is the number of pixels to draw. //! \param i32BPP is the number of bits per pixel; must be 1, 4, or 8. //! \param pui8Data is a pointer to the pixel data. For 1 and 4 bit per pixel //! formats, the most significant bit(s) represent the left-most pixel. //! \param pui8Palette is a pointer to the palette used to draw the pixels. //! //! This function draws a horizontal sequence of pixels on the screen, using //! the supplied palette. For 1 bit per pixel format, the palette contains //! pre-translated colors; for 4 and 8 bit per pixel formats, the palette //! contains 24-bit RGB values that must be translated before being written to //! the display. //! //! \return None. // //***************************************************************************** static void Kentec320x240x16_SSD2119PixelDrawMultiple(void *pvDisplayData, int32_t i32X, int32_t i32Y, int32_t i32X0, int32_t i32Count, int32_t i32BPP, const uint8_t *pui8Data, const uint8_t *pui8Palette) { uint32_t ui32Byte; // // Set the cursor increment to left to right, followed by top to bottom. // WriteCommand(SSD2119_ENTRY_MODE_REG); WriteData(MAKE_ENTRY_MODE(HORIZ_DIRECTION)); // // Set the starting X address of the display cursor. // WriteCommand(SSD2119_X_RAM_ADDR_REG); WriteData(MAPPED_X(i32X, i32Y)); // // Set the Y address of the display cursor. // WriteCommand(SSD2119_Y_RAM_ADDR_REG); WriteData(MAPPED_Y(i32X, i32Y)); // // Write the data RAM write command. // WriteCommand(SSD2119_RAM_DATA_REG); // // Determine how to interpret the pixel data based on the number of bits // per pixel. // switch(i32BPP & ~GRLIB_DRIVER_FLAG_NEW_IMAGE) { // // The pixel data is in 1 bit per pixel format. // case 1: { // // Loop while there are more pixels to draw. 
// while(i32Count) { // // Get the next byte of image data. // ui32Byte = *pui8Data++; // // Loop through the pixels in this byte of image data. // for(; (i32X0 < 8) && i32Count; i32X0++, i32Count--) { // // Draw this pixel in the appropriate color. // WriteData(((uint32_t *)pui8Palette)[(ui32Byte >> (7 - i32X0)) & 1]); } // // Start at the beginning of the next byte of image data. // i32X0 = 0; } // // The image data has been drawn. // break; } // // The pixel data is in 4 bit per pixel format. // case 4: { // // Loop while there are more pixels to draw. "Duff's device" is // used to jump into the middle of the loop if the first nibble of // the pixel data should not be used. Duff's device makes use of // the fact that a case statement is legal anywhere within a // sub-block of a switch statement. See // http://en.wikipedia.org/wiki/Duff's_device for detailed // information about Duff's device. // switch(i32X0 & 1) { case 0: while(i32Count) { // // Get the upper nibble of the next byte of pixel data // and extract the corresponding entry from the // palette. // ui32Byte = (*pui8Data >> 4) * 3; ui32Byte = (*(uint32_t *)(pui8Palette + ui32Byte) & 0x00ffffff); // // Translate this palette entry and write it to the // screen. // WriteData(DPYCOLORTRANSLATE(ui32Byte)); // // Decrement the count of pixels to draw. // i32Count--; // // See if there is another pixel to draw. // if(i32Count) { case 1: // // Get the lower nibble of the next byte of pixel // data and extract the corresponding entry from // the palette. // ui32Byte = (*pui8Data++ & 15) * 3; ui32Byte = (*(uint32_t *)(pui8Palette + ui32Byte) & 0x00ffffff); // // Translate this palette entry and write it to the // screen. // WriteData(DPYCOLORTRANSLATE(ui32Byte)); // // Decrement the count of pixels to draw. // i32Count--; } } } // // The image data has been drawn. // break; } // // The pixel data is in 8 bit per pixel format. // case 8: { // // Loop while there are more pixels to draw. // while(i32Count--) { // // Get the next byte of pixel data and extract the // corresponding entry from the palette. // ui32Byte = *pui8Data++ * 3; ui32Byte = *(uint32_t *)(pui8Palette + ui32Byte) & 0x00ffffff; // // Translate this palette entry and write it to the screen. // WriteData(DPYCOLORTRANSLATE(ui32Byte)); } // // The image data has been drawn. // break; } } } //***************************************************************************** // //! Draws a horizontal line. //! //! \param pvDisplayData is a pointer to the driver-specific data for this //! display driver. //! \param i32X1 is the X coordinate of the start of the line. //! \param i32X2 is the X coordinate of the end of the line. //! \param i32Y is the Y coordinate of the line. //! \param ui32Value is the color of the line. //! //! This function draws a horizontal line on the display. The coordinates of //! the line are assumed to be within the extents of the display. //! //! \return None. // //***************************************************************************** static void Kentec320x240x16_SSD2119LineDrawH(void *pvDisplayData, int32_t i32X1, int32_t i32X2, int32_t i32Y, uint32_t ui32Value) { // // Set the cursor increment to left to right, followed by top to bottom. // WriteCommand(SSD2119_ENTRY_MODE_REG); WriteData(MAKE_ENTRY_MODE(HORIZ_DIRECTION)); // // Set the starting X address of the display cursor. // WriteCommand(SSD2119_X_RAM_ADDR_REG); WriteData(MAPPED_X(i32X1, i32Y)); // // Set the Y address of the display cursor. 
// WriteCommand(SSD2119_Y_RAM_ADDR_REG); WriteData(MAPPED_Y(i32X1, i32Y)); // // Write the data RAM write command. // WriteCommand(SSD2119_RAM_DATA_REG); // // Loop through the pixels of this horizontal line. // while(i32X1++ <= i32X2) { // // Write the pixel value. // WriteData(ui32Value); } } //***************************************************************************** // //! Draws a vertical line. //! //! \param pvDisplayData is a pointer to the driver-specific data for this //! display driver. //! \param i32X is the X coordinate of the line. //! \param i32Y1 is the Y coordinate of the start of the line. //! \param i32Y2 is the Y coordinate of the end of the line. //! \param ui32Value is the color of the line. //! //! This function draws a vertical line on the display. The coordinates of the //! line are assumed to be within the extents of the display. //! //! \return None. // //***************************************************************************** static void Kentec320x240x16_SSD2119LineDrawV(void *pvDisplayData, int32_t i32X, int32_t i32Y1, int32_t i32Y2, uint32_t ui32Value) { // // Set the cursor increment to top to bottom, followed by left to right. // WriteCommand(SSD2119_ENTRY_MODE_REG); WriteData(MAKE_ENTRY_MODE(VERT_DIRECTION)); // // Set the X address of the display cursor. // WriteCommand(SSD2119_X_RAM_ADDR_REG); WriteData(MAPPED_X(i32X, i32Y1)); // // Set the starting Y address of the display cursor. // WriteCommand(SSD2119_Y_RAM_ADDR_REG); WriteData(MAPPED_Y(i32X, i32Y1)); // // Write the data RAM write command. // WriteCommand(SSD2119_RAM_DATA_REG); // // Loop through the pixels of this vertical line. // while(i32Y1++ <= i32Y2) { // // Write the pixel value. // WriteData(ui32Value); } } //***************************************************************************** // //! Fills a rectangle. //! //! \param pvDisplayData is a pointer to the driver-specific data for this //! display driver. //! \param psRect is a pointer to the structure describing the rectangle. //! \param ui32Value is the color of the rectangle. //! //! This function fills a rectangle on the display. The coordinates of the //! rectangle are assumed to be within the extents of the display, and the //! rectangle specification is fully inclusive (in other words, both i16XMin //! and i16XMax are drawn, along with i16YMin and i16YMax). //! //! \return None. // //***************************************************************************** static void Kentec320x240x16_SSD2119RectFill(void *pvDisplayData, const tRectangle *psRect, uint32_t ui32Value) { int32_t i32Count; // // Write the Y extents of the rectangle. // WriteCommand(SSD2119_ENTRY_MODE_REG); WriteData(MAKE_ENTRY_MODE(HORIZ_DIRECTION)); // // Write the X extents of the rectangle. 
// WriteCommand(SSD2119_H_RAM_START_REG); #if (defined PORTRAIT) || (defined LANDSCAPE) WriteData(MAPPED_X(psRect->i16XMax, psRect->i16YMax)); #else WriteData(MAPPED_X(psRect->i16XMin, psRect->i16YMin)); #endif WriteCommand(SSD2119_H_RAM_END_REG); #if (defined PORTRAIT) || (defined LANDSCAPE) WriteData(MAPPED_X(psRect->i16XMin, psRect->i16YMin)); #else WriteData(MAPPED_X(psRect->i16XMax, psRect->i16YMax)); #endif // // Write the Y extents of the rectangle // WriteCommand(SSD2119_V_RAM_POS_REG); #if (defined LANDSCAPE_FLIP) || (defined PORTRAIT) WriteData(MAPPED_Y(psRect->i16XMin, psRect->i16YMin) | (MAPPED_Y(psRect->i16XMax, psRect->i16YMax) << 8)); #else WriteData(MAPPED_Y(psRect->i16XMax, psRect->i16YMax) | (MAPPED_Y(psRect->i16XMin, psRect->i16YMin) << 8)); #endif // // Set the display cursor to the upper left of the rectangle (in // application coordinate space). // WriteCommand(SSD2119_X_RAM_ADDR_REG); WriteData(MAPPED_X(psRect->i16XMin, psRect->i16YMin)); WriteCommand(SSD2119_Y_RAM_ADDR_REG); WriteData(MAPPED_Y(psRect->i16XMin, psRect->i16YMin)); // // Tell the controller to write data into its RAM. // WriteCommand(SSD2119_RAM_DATA_REG); // // Loop through the pixels of this filled rectangle. // for(i32Count = ((psRect->i16XMax - psRect->i16XMin + 1) * (psRect->i16YMax - psRect->i16YMin + 1)); i32Count >= 0; i32Count--) { // // Write the pixel value. // WriteData(ui32Value); } // // Reset the X extents to the entire screen. // WriteCommand(SSD2119_H_RAM_START_REG); WriteData(0x0000); WriteCommand(SSD2119_H_RAM_END_REG); WriteData(0x013f); // // Reset the Y extent to the full screen // WriteCommand(SSD2119_V_RAM_POS_REG); WriteData(0xef00); } //***************************************************************************** // //! Translates a 24-bit RGB color to a display driver-specific color. //! //! \param pvDisplayData is a pointer to the driver-specific data for this //! display driver. //! \param ui32Value is the 24-bit RGB color. The least-significant byte is //! the blue channel, the next byte is the green channel, and the third byte is //! the red channel. //! //! This function translates a 24-bit RGB color into a value that can be //! written into the display's frame buffer in order to reproduce that color, //! or the closest possible approximation of that color. //! //! \return Returns the display-driver specific color. // //***************************************************************************** static uint32_t Kentec320x240x16_SSD2119ColorTranslate(void *pvDisplayData, uint32_t ui32Value) { // // Translate from a 24-bit RGB color to a 5-6-5 RGB color. // return(DPYCOLORTRANSLATE(ui32Value)); } //***************************************************************************** // //! Flushes any cached drawing operations. //! //! \param pvDisplayData is a pointer to the driver-specific data for this //! display driver. //! //! This functions flushes any cached drawing operations to the display. This //! is useful when a local frame buffer is used for drawing operations, and the //! flush would copy the local frame buffer to the display. For the SSD2119 //! driver, the flush is a no operation. //! //! \return None. // //***************************************************************************** static void Kentec320x240x16_SSD2119Flush(void *pvDisplayData) { // // There is nothing to be done. // } //***************************************************************************** // //! The display structure that describes the driver for the Kentec K350QVG-V2-F //! 
TFT panel with an SSD2119 controller. // //***************************************************************************** const tDisplay g_sKentec320x240x16_SSD2119 = { sizeof(tDisplay), 0, #if defined(PORTRAIT) || defined(PORTRAIT_FLIP) 240, 320, #else 320, 240, #endif Kentec320x240x16_SSD2119PixelDraw, Kentec320x240x16_SSD2119PixelDrawMultiple, Kentec320x240x16_SSD2119LineDrawH, Kentec320x240x16_SSD2119LineDrawV, Kentec320x240x16_SSD2119RectFill, Kentec320x240x16_SSD2119ColorTranslate, Kentec320x240x16_SSD2119Flush }; //***************************************************************************** // //! Initializes the display driver. //! //! \param ui32SysClock is the frequency of the system clock. //! //! This function initializes the LCD controller and the SSD2119 display //! controller on the panel, preparing it to display data. //! //! \return None. // //***************************************************************************** void Kentec320x240x16_SSD2119Init(uint32_t ui32SysClock) { uint32_t ui32ClockMS, ui32Count; tLCDIDDTiming sTimings; // // Determine the number of system clock cycles in 1mS // ui32ClockMS = CYCLES_FROM_TIME_US(ui32SysClock, 1000); // // Divide by 3 to get the number of SysCtlDelay loops in 1mS. // ui32ClockMS /= 3; // // Enable the LCD controller. // SysCtlPeripheralEnable(SYSCTL_PERIPH_LCD0); // // Assert the LCD reset signal. // GPIOPinWrite(GPIO_PORTF_BASE, GPIO_PIN_6, 0); // // Delay for 50ms. // SysCtlDelay(50 * ui32ClockMS); // // Deassert the LCD reset signal. // GPIOPinWrite(GPIO_PORTF_BASE, GPIO_PIN_6, GPIO_PIN_6); // // Delay for 50ms while the LCD comes out of reset. // SysCtlDelay(50 * ui32ClockMS); // // Configure the LCD controller for LIDD-mode operation. // LCDModeSet(LCD0_BASE, LCD_MODE_LIDD, ui32SysClock, ui32SysClock); // // Configure DMA-related parameters. // LCDDMAConfigSet(LCD0_BASE, LCD_DMA_BURST_4); // // Set control signal parameters and polarities. // LCDIDDConfigSet(LCD0_BASE, LIDD_CONFIG_ASYNC_MPU80); // // Set the LIDD interface timings for the Kentec display. Note that the // inter-transaction delay is set at at 50nS to match the write case. // Software needs to ensure that it delays at least 450nS more between each // read or the read timings will be violated. // sTimings.ui8WSSetup = CYCLES_FROM_TIME_NS(ui32SysClock, 5); sTimings.ui8WSDuration = CYCLES_FROM_TIME_NS(ui32SysClock, 40); sTimings.ui8WSHold = CYCLES_FROM_TIME_NS(ui32SysClock, 5); sTimings.ui8RSSetup = CYCLES_FROM_TIME_NS(ui32SysClock, 0); sTimings.ui8RSDuration = CYCLES_FROM_TIME_NS(ui32SysClock, 500); sTimings.ui8RSHold = CYCLES_FROM_TIME_NS(ui32SysClock, 100); sTimings.ui8DelayCycles = CYCLES_FROM_TIME_NS(ui32SysClock, 50); LCDIDDTimingSet(LCD0_BASE, 0, &sTimings); // // Enter sleep mode (if not already there). // WriteCommand(SSD2119_SLEEP_MODE_1_REG); WriteData(0x0001); // // Set initial power parameters. // WriteCommand(SSD2119_PWR_CTRL_5_REG); WriteData(0x00b2); WriteCommand(SSD2119_VCOM_OTP_1_REG); WriteData(0x0006); // // Start the oscillator. // WriteCommand(SSD2119_OSC_START_REG); WriteData(0x0001); // // Set pixel format and basic display orientation (scanning direction). // WriteCommand(SSD2119_OUTPUT_CTRL_REG); WriteData(0x30ef); WriteCommand(SSD2119_LCD_DRIVE_AC_CTRL_REG); WriteData(0x0600); // // Exit sleep mode. // WriteCommand(SSD2119_SLEEP_MODE_1_REG); WriteData(0x0000); // // Delay 30mS // SysCtlDelay(30 * ui32ClockMS); // // Configure pixel color format and MCU interface parameters. 
// WriteCommand(SSD2119_ENTRY_MODE_REG); WriteData(ENTRY_MODE_DEFAULT); // // Set analog parameters. // WriteCommand(SSD2119_SLEEP_MODE_2_REG); WriteData(0x0999); WriteCommand(SSD2119_ANALOG_SET_REG); WriteData(0x3800); // // Enable the display. // WriteCommand(SSD2119_DISPLAY_CTRL_REG); WriteData(0x0033); // // Set VCIX2 voltage to 6.1V. // WriteCommand(SSD2119_PWR_CTRL_2_REG); WriteData(0x0005); // // Configure gamma correction. // WriteCommand(SSD2119_GAMMA_CTRL_1_REG); WriteData(0x0000); WriteCommand(SSD2119_GAMMA_CTRL_2_REG); WriteData(0x0303); WriteCommand(SSD2119_GAMMA_CTRL_3_REG); WriteData(0x0407); WriteCommand(SSD2119_GAMMA_CTRL_4_REG); WriteData(0x0301); WriteCommand(SSD2119_GAMMA_CTRL_5_REG); WriteData(0x0301); WriteCommand(SSD2119_GAMMA_CTRL_6_REG); WriteData(0x0403); WriteCommand(SSD2119_GAMMA_CTRL_7_REG); WriteData(0x0707); WriteCommand(SSD2119_GAMMA_CTRL_8_REG); WriteData(0x0400); WriteCommand(SSD2119_GAMMA_CTRL_9_REG); WriteData(0x0a00); WriteCommand(SSD2119_GAMMA_CTRL_10_REG); WriteData(0x1000); // // Configure Vlcd63 and VCOMl. // WriteCommand(SSD2119_PWR_CTRL_3_REG); WriteData(0x000a); WriteCommand(SSD2119_PWR_CTRL_4_REG); WriteData(0x2e00); // // Set the display size and ensure that the GRAM window is set to allow // access to the full display buffer. // WriteCommand(SSD2119_V_RAM_POS_REG); WriteData((LCD_VERTICAL_MAX-1) << 8); WriteCommand(SSD2119_H_RAM_START_REG); WriteData(0x0000); WriteCommand(SSD2119_H_RAM_END_REG); WriteData(LCD_HORIZONTAL_MAX-1); WriteCommand(SSD2119_X_RAM_ADDR_REG); WriteData(0x0000); WriteCommand(SSD2119_Y_RAM_ADDR_REG); WriteData(0x0000); // // Clear the contents of the display buffer. // WriteCommand(SSD2119_RAM_DATA_REG); for(ui32Count = 0; ui32Count < (320 * 240); ui32Count++) { WriteData(0x0000); } } //***************************************************************************** // // Close the Doxygen group. //! @} // //*****************************************************************************
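/*
 * Illustrative usage sketch (not part of the original TI driver file): how an
 * application might bring the panel up and draw through TivaWare grlib using
 * the g_sKentec320x240x16_SSD2119 structure defined above. Assumes the LCD
 * controller pins and the reset GPIO (port F pin 6) have already been
 * configured by board init code, and that ui32SysClock is the value returned
 * by the system clock setup call.
 */
#include <stdbool.h>
#include <stdint.h>
#include "grlib/grlib.h"
#include "drivers/kentec320x240x16_ssd2119.h"

static void example_display_bringup(uint32_t ui32SysClock)
{
    tContext sContext;
    tRectangle sRect = { 0, 0, 319, 239 };

    /* Initialise the SSD2119, then attach a grlib drawing context to it. */
    Kentec320x240x16_SSD2119Init(ui32SysClock);
    GrContextInit(&sContext, &g_sKentec320x240x16_SSD2119);

    /* 24-bit colours are converted to the panel's 5-6-5 format by the driver's
     * colour translate routine, e.g. 0x00ff0000 (red) becomes 0xf800. */
    GrContextForegroundSet(&sContext, ClrRed);
    GrRectFill(&sContext, &sRect);
    GrFlush(&sContext);
}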
697304.c
/* * Copyright (C) 2017 Ptarmigan Project * SPDX-License-Identifier: Apache-2.0 * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /** @file ln_commit_tx_util.c * @brief ln_commit_tx_ex */ #include <stdio.h> #include <inttypes.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <stdarg.h> #include <assert.h> #include "mbedtls/sha256.h" //#include "mbedtls/ripemd160.h" //#include "mbedtls/ecp.h" #include "btc_keys.h" #include "ln_commit_tx_util.h" #include "ln_local.h" #include "ln_signer.h" /************************************************************************** * macros **************************************************************************/ #define M_SZ_OBSCURED_COMMIT_NUM (6) #if defined(USE_BITCOIN) // https://github.com/lightningnetwork/lightning-rfc/blob/master/03-transactions.md#fee-calculation #define M_FEE_HTLC_TIMEOUT_WEIGHT ((uint64_t)663) #define M_FEE_HTLC_SUCCESS_WEIGHT ((uint64_t)703) #elif defined(USE_ELEMENTS) // https://github.com/ElementsProject/lightning/blob/a30ee2b7cd08053a2269712150204e9007976b04/common/htlc_tx.h#L22 #define M_FEE_HTLC_TIMEOUT_WEIGHT ((uint64_t)(663 + 330)) #define M_FEE_HTLC_SUCCESS_WEIGHT ((uint64_t)(703 + 330)) #endif /************************************************************************** * prototypes **************************************************************************/ /************************************************************************** * public functions **************************************************************************/ uint64_t HIDDEN ln_commit_tx_calc_obscured_commit_num_mask(const uint8_t *pOpenPayBasePt, const uint8_t *pAcceptPayBasePt) { uint64_t obs = 0; uint8_t base[32]; btc_md_sha256cat(base, pOpenPayBasePt, BTC_SZ_PUBKEY, pAcceptPayBasePt, BTC_SZ_PUBKEY); for (int lp = 0; lp < M_SZ_OBSCURED_COMMIT_NUM; lp++) { obs <<= 8; obs |= base[sizeof(base) - M_SZ_OBSCURED_COMMIT_NUM + lp]; } return obs; } uint64_t HIDDEN ln_commit_tx_calc_obscured_commit_num(uint64_t ObscuredCommitNumBase, uint64_t CommitNum) { return ObscuredCommitNumBase ^ CommitNum; } uint64_t HIDDEN ln_commit_tx_calc_commit_num_from_tx(uint32_t Sequence, uint32_t Locktime, uint64_t ObscuredCommitNumBase) { uint64_t commit_num = ((uint64_t)(Sequence & 0xffffff)) << 24; commit_num |= (uint64_t)(Locktime & 0xffffff); return commit_num ^ ObscuredCommitNumBase; } void HIDDEN ln_commit_tx_htlc_info_init(ln_commit_tx_htlc_info_t *pHtlcInfo) { pHtlcInfo->type = LN_COMMIT_TX_OUTPUT_TYPE_NONE; pHtlcInfo->htlc_idx = (uint16_t)-1; pHtlcInfo->cltv_expiry = 0; pHtlcInfo->amount_msat = 0; pHtlcInfo->payment_hash = NULL; utl_buf_init(&pHtlcInfo->wit_script); } void HIDDEN ln_commit_tx_htlc_info_free(ln_commit_tx_htlc_info_t *pHtlcInfo) { utl_buf_free(&pHtlcInfo->wit_script); } void HIDDEN ln_commit_tx_base_fee_calc( 
ln_commit_tx_base_fee_info_t *pBaseFeeInfo, const ln_commit_tx_htlc_info_t **ppHtlcInfo, int Num) { pBaseFeeInfo->htlc_success_fee = M_FEE_HTLC_SUCCESS_WEIGHT * pBaseFeeInfo->feerate_per_kw / 1000; pBaseFeeInfo->htlc_timeout_fee = M_FEE_HTLC_TIMEOUT_WEIGHT * pBaseFeeInfo->feerate_per_kw / 1000; pBaseFeeInfo->commit_fee = 0; uint64_t commit_fee_weight = LN_FEE_COMMIT_BASE_WEIGHT; uint64_t dust_msat = 0; for (int lp = 0; lp < Num; lp++) { switch (ppHtlcInfo[lp]->type) { case LN_COMMIT_TX_OUTPUT_TYPE_OFFERED: if (LN_MSAT2SATOSHI(ppHtlcInfo[lp]->amount_msat) >= pBaseFeeInfo->dust_limit_satoshi + pBaseFeeInfo->htlc_timeout_fee) { commit_fee_weight += LN_FEE_COMMIT_HTLC_WEIGHT; } else { dust_msat += ppHtlcInfo[lp]->amount_msat; } break; case LN_COMMIT_TX_OUTPUT_TYPE_RECEIVED: if (LN_MSAT2SATOSHI(ppHtlcInfo[lp]->amount_msat) >= pBaseFeeInfo->dust_limit_satoshi + pBaseFeeInfo->htlc_success_fee) { commit_fee_weight += LN_FEE_COMMIT_HTLC_WEIGHT; } else { dust_msat += ppHtlcInfo[lp]->amount_msat; } break; default: break; } } pBaseFeeInfo->commit_fee = commit_fee_weight * pBaseFeeInfo->feerate_per_kw / 1000; LOGD("pBaseFeeInfo->commit_fee= %" PRIu64 "(weight=%" PRIu64 ", feerate_per_kw=%" PRIu32 ")\n", pBaseFeeInfo->commit_fee, commit_fee_weight, pBaseFeeInfo->feerate_per_kw); //XXX: probably not correct // the base fee should be added after it has been calculated (after being divided by 1000) //pBaseFeeInfo->_rough_actual_fee = (commit_fee_weight * pBaseFeeInfo->feerate_per_kw + dust_msat) / 1000; pBaseFeeInfo->_rough_actual_fee = pBaseFeeInfo->commit_fee + dust_msat / 1000; } bool HIDDEN ln_commit_tx_create( btc_tx_t *pTx, utl_buf_t *pSig, const ln_commit_tx_info_t *pCommitTxInfoTrimmed, const ln_derkey_local_keys_t *pLocalKeys, uint64_t AmountInputs) { uint8_t sig[LN_SZ_SIGNATURE]; if (!ln_commit_tx_create_rs(pTx, sig, pCommitTxInfoTrimmed, pLocalKeys, AmountInputs)) return false; if (!btc_sig_rs2der(pSig, sig)) return false; return true; } bool HIDDEN ln_commit_tx_create_rs( btc_tx_t *pTx, uint8_t *pSig, const ln_commit_tx_info_t *pCommitTxInfoTrimmed, const ln_derkey_local_keys_t *pLocalKeys, uint64_t AmountInputs) { assert(pCommitTxInfoTrimmed->b_trimmed); //output // to_local (P2WSH) if (pCommitTxInfoTrimmed->to_local.satoshi) { if (!btc_sw_add_vout_p2wsh_wit( pTx, pCommitTxInfoTrimmed->to_local.satoshi, &pCommitTxInfoTrimmed->to_local.wit_script)) return false; pTx->vout[pTx->vout_cnt - 1].opt = LN_COMMIT_TX_OUTPUT_TYPE_TO_LOCAL; } // to_remote (P2WPKH) if (pCommitTxInfoTrimmed->to_remote.satoshi) { if (!btc_sw_add_vout_p2wpkh_pub( pTx, pCommitTxInfoTrimmed->to_remote.satoshi, pCommitTxInfoTrimmed->to_remote.pubkey)) return false; pTx->vout[pTx->vout_cnt - 1].opt = LN_COMMIT_TX_OUTPUT_TYPE_TO_REMOTE; } // HTLCs for (uint16_t lp = 0; lp < pCommitTxInfoTrimmed->num_htlc_infos; lp++) { if (!pCommitTxInfoTrimmed->pp_htlc_info[lp]->amount_msat) continue; //trimmed if (!btc_sw_add_vout_p2wsh_wit( pTx, LN_MSAT2SATOSHI(pCommitTxInfoTrimmed->pp_htlc_info[lp]->amount_msat), &pCommitTxInfoTrimmed->pp_htlc_info[lp]->wit_script)) return false; pTx->vout[pTx->vout_cnt - 1].opt = lp; } //input btc_vin_t *vin = btc_tx_add_vin(pTx, pCommitTxInfoTrimmed->fund.txid, pCommitTxInfoTrimmed->fund.txid_index); vin->sequence = LN_SEQUENCE(pCommitTxInfoTrimmed->obscured_commit_num); //locktime pTx->locktime = LN_LOCKTIME(pCommitTxInfoTrimmed->obscured_commit_num); //sort vin/vout btc_tx_sort_bip69(pTx); #ifdef USE_ELEMENTS if (pCommitTxInfoTrimmed->base_fee_info.commit_fee > 0) { for (uint16_t lp = 0; lp < 
pTx->vout_cnt; lp++) { AmountInputs -= pTx->vout[lp].value; } if (!btc_tx_add_vout_fee(pTx, AmountInputs)) return false; pTx->vout[pTx->vout_cnt - 1].opt = LN_COMMIT_TX_OUTPUT_TYPE_TO_REMOTE; } else { LOGE("THROUGH: no fee value(bug?)\n"); } #else (void)AmountInputs; #endif //sign uint8_t sighash[BTC_SZ_HASH256]; if (!btc_sw_sighash_p2wsh_wit( pTx, sighash, 0, pCommitTxInfoTrimmed->fund.satoshi, pCommitTxInfoTrimmed->fund.p_wit_script)) { LOGE("fail: calc sighash\n"); return false; } if (!ln_signer_sign_rs(pSig, sighash, pLocalKeys, LN_BASEPOINT_IDX_FUNDING)) { LOGE("fail: sign\n"); return false; } return true; } void HIDDEN ln_commit_tx_info_sub_fee_and_trim_outputs(ln_commit_tx_info_t *pCommitTxInfo, bool ToLocalIsFounder) { assert(!pCommitTxInfo->b_trimmed); uint64_t fee_local = ToLocalIsFounder ? pCommitTxInfo->base_fee_info.commit_fee : 0; uint64_t fee_remote = ToLocalIsFounder ? 0 : pCommitTxInfo->base_fee_info.commit_fee; //to_local if (pCommitTxInfo->to_local.satoshi >= pCommitTxInfo->base_fee_info.dust_limit_satoshi + fee_local) { LOGD(" add local: %" PRIu64 " - %" PRIu64 " sat\n", pCommitTxInfo->to_local.satoshi, fee_local); pCommitTxInfo->to_local.satoshi -= fee_local; //sub fee } else { LOGD(" [local output]below dust: %" PRIu64 " < %" PRIu64 " + %" PRIu64 "\n", pCommitTxInfo->to_local.satoshi, pCommitTxInfo->base_fee_info.dust_limit_satoshi, fee_local); #ifdef USE_ELEMENTS pCommitTxInfo->base_fee_info.commit_fee += pCommitTxInfo->to_local.satoshi; #endif pCommitTxInfo->to_local.satoshi = 0; //trimmed } //to_remote if (pCommitTxInfo->to_remote.satoshi >= pCommitTxInfo->base_fee_info.dust_limit_satoshi + fee_remote) { LOGD(" add P2WPKH remote: %" PRIu64 " sat - %" PRIu64 " sat\n", pCommitTxInfo->to_remote.satoshi, fee_remote); pCommitTxInfo->to_remote.satoshi -= fee_remote; //sub fee } else { LOGD(" [remote output]below dust: %" PRIu64 " < %" PRIu64 " + %" PRIu64 "\n", pCommitTxInfo->to_remote.satoshi, pCommitTxInfo->base_fee_info.dust_limit_satoshi, fee_remote); #ifdef USE_ELEMENTS pCommitTxInfo->base_fee_info.commit_fee += pCommitTxInfo->to_remote.satoshi; #endif pCommitTxInfo->to_remote.satoshi = 0; //trimmed } //HTLCs for (uint16_t lp = 0; lp < pCommitTxInfo->num_htlc_infos; lp++) { uint64_t output_sat = LN_MSAT2SATOSHI(pCommitTxInfo->pp_htlc_info[lp]->amount_msat); uint64_t fee; LOGD("lp=%d\n", lp); switch (pCommitTxInfo->pp_htlc_info[lp]->type) { case LN_COMMIT_TX_OUTPUT_TYPE_OFFERED: fee = pCommitTxInfo->base_fee_info.htlc_timeout_fee; LOGD(" HTLC: offered=%" PRIu64 " sat, fee=%" PRIu64 "\n", output_sat, fee); break; case LN_COMMIT_TX_OUTPUT_TYPE_RECEIVED: fee = pCommitTxInfo->base_fee_info.htlc_success_fee; LOGD(" HTLC: received=%" PRIu64 " sat, fee=%" PRIu64 "\n", output_sat, fee); break; default: LOGE(" HTLC: type=%d ???\n", pCommitTxInfo->pp_htlc_info[lp]->type); assert(0); } if (output_sat >= pCommitTxInfo->base_fee_info.dust_limit_satoshi + fee) { LOGD("script.len=%d\n", pCommitTxInfo->pp_htlc_info[lp]->wit_script.len); //btc_script_print(pCommitTxInfo->pp_htlc_info[lp]->wit_script.buf, pCommitTxInfo->pp_htlc_info[lp]->wit_script.len); } else { LOGD(" [HTLC]below dust: %" PRIu64 " < %" PRIu64 "(dust_limit) + %" PRIu64 "(fee)\n", output_sat, pCommitTxInfo->base_fee_info.dust_limit_satoshi, fee); #ifdef USE_ELEMENTS pCommitTxInfo->base_fee_info.commit_fee += output_sat; #endif pCommitTxInfo->pp_htlc_info[lp]->amount_msat = 0; //trimmed } } pCommitTxInfo->b_trimmed = true; } uint16_t HIDDEN ln_commit_tx_info_get_num_htlc_outputs(ln_commit_tx_info_t 
*pCommitTxInfoTrimmed) { uint16_t num_htlc_outputs = 0; for (uint16_t lp = 0; lp < pCommitTxInfoTrimmed->num_htlc_infos; lp++) { if (!pCommitTxInfoTrimmed->pp_htlc_info[lp]->amount_msat) continue; //trimmed num_htlc_outputs++; } return num_htlc_outputs; } /******************************************************************** * private functions ********************************************************************/
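/*
 * Illustrative sketch (not part of the original file): the weight-based fee
 * arithmetic used by ln_commit_tx_base_fee_calc() above, written out as
 * standalone numbers. The feerate is an arbitrary example value; the weights
 * are the Bitcoin (non-Elements) constants defined in this file.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void example_htlc_fee(void)
{
    const uint32_t feerate_per_kw = 253;   /* example feerate, sat per 1000 weight */
    const uint64_t timeout_weight = 663;   /* M_FEE_HTLC_TIMEOUT_WEIGHT (USE_BITCOIN) */
    const uint64_t success_weight = 703;   /* M_FEE_HTLC_SUCCESS_WEIGHT (USE_BITCOIN) */

    /* fee = weight * feerate_per_kw / 1000, truncating as in the code above. */
    printf("htlc_timeout_fee = %" PRIu64 " sat\n", timeout_weight * feerate_per_kw / 1000); /* 167 */
    printf("htlc_success_fee = %" PRIu64 " sat\n", success_weight * feerate_per_kw / 1000); /* 177 */
}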
339093.c
/* gencode.c -- Motorola 68HC11 & 68HC12 Emulator Generator Copyright 1999-2015 Free Software Foundation, Inc. Written by Stephane Carrez ([email protected]) This file is part of GDB, GAS, and the GNU binutils. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <stdio.h> #include <string.h> #include <stdarg.h> #include <errno.h> #include "ansidecl.h" #include "opcode/m68hc11.h" #define TABLE_SIZE(X) (sizeof(X) / sizeof(X[0])) /* Combination of CCR flags. */ #define M6811_ZC_BIT M6811_Z_BIT|M6811_C_BIT #define M6811_NZ_BIT M6811_N_BIT|M6811_Z_BIT #define M6811_NZV_BIT M6811_N_BIT|M6811_Z_BIT|M6811_V_BIT #define M6811_NZC_BIT M6811_N_BIT|M6811_Z_BIT|M6811_C_BIT #define M6811_NVC_BIT M6811_N_BIT|M6811_V_BIT|M6811_C_BIT #define M6811_ZVC_BIT M6811_Z_BIT|M6811_V_BIT|M6811_C_BIT #define M6811_NZVC_BIT M6811_ZVC_BIT|M6811_N_BIT #define M6811_HNZVC_BIT M6811_NZVC_BIT|M6811_H_BIT #define M6811_HNVC_BIT M6811_NVC_BIT|M6811_H_BIT #define M6811_VC_BIT M6811_V_BIT|M6811_C_BIT /* Flags when the insn only changes some CCR flags. */ #define CHG_NONE 0,0,0 #define CHG_Z 0,0,M6811_Z_BIT #define CHG_C 0,0,M6811_C_BIT #define CHG_ZVC 0,0,M6811_ZVC_BIT #define CHG_NZC 0,0,M6811_NZC_BIT #define CHG_NZV 0,0,M6811_NZV_BIT #define CHG_NZVC 0,0,M6811_NZVC_BIT #define CHG_HNZVC 0,0,M6811_HNZVC_BIT #define CHG_ALL 0,0,0xff /* The insn clears and changes some flags. */ #define CLR_I 0,M6811_I_BIT,0 #define CLR_C 0,M6811_C_BIT,0 #define CLR_V 0,M6811_V_BIT,0 #define CLR_V_CHG_ZC 0,M6811_V_BIT,M6811_ZC_BIT #define CLR_V_CHG_NZ 0,M6811_V_BIT,M6811_NZ_BIT #define CLR_V_CHG_ZVC 0,M6811_V_BIT,M6811_ZVC_BIT #define CLR_N_CHG_ZVC 0,M6811_N_BIT,M6811_ZVC_BIT /* Used by lsr */ #define CLR_VC_CHG_NZ 0,M6811_VC_BIT,M6811_NZ_BIT /* The insn sets some flags. */ #define SET_I M6811_I_BIT,0,0 #define SET_C M6811_C_BIT,0,0 #define SET_V M6811_V_BIT,0,0 #define SET_Z_CLR_NVC M6811_Z_BIT,M6811_NVC_BIT,0 #define SET_C_CLR_V_CHG_NZ M6811_C_BIT,M6811_V_BIT,M6811_NZ_BIT #define SET_Z_CHG_HNVC M6811_Z_BIT,0,M6811_HNVC_BIT #define _M 0xff static int cpu_type; struct m6811_opcode_pattern { const char *name; const char *pattern; const char *ccr_update; }; /* * { "test", M6811_OP_NONE, 1, 0x00, 5, _M, CHG_NONE }, * Name -+ +---- Insn CCR changes * Format ------+ +---------- Max # cycles * Size -----------------+ +--------------- Min # cycles * +-------------------- Opcode */ struct m6811_opcode_pattern m6811_opcode_patterns[] = { /* Move 8 and 16 bits. We need two implementations: one that sets the flags and one that preserve them. */ { "movtst8", "dst8 = src8", "cpu_ccr_update_tst8 (proc, dst8)" }, { "movtst16", "dst16 = src16", "cpu_ccr_update_tst16 (proc, dst16)" }, { "mov8", "dst8 = src8" }, { "mov16", "dst16 = src16" }, { "lea16", "dst16 = addr" }, /* Conditional branches. 'addr' is the address of the branch. 
*/ { "bra", "cpu_set_pc (proc, addr)" }, { "bhi", "if ((cpu_get_ccr (proc) & (M6811_C_BIT|M6811_Z_BIT)) == 0)\n@ \ cpu_set_pc (proc, addr)" }, { "bls", "if ((cpu_get_ccr (proc) & (M6811_C_BIT|M6811_Z_BIT)))\n@ \ cpu_set_pc (proc, addr)" }, { "bcc", "if (!cpu_get_ccr_C (proc))\n@ cpu_set_pc (proc, addr)" }, { "bcs", "if (cpu_get_ccr_C (proc))\n@ cpu_set_pc (proc, addr)" }, { "bne", "if (!cpu_get_ccr_Z (proc))\n@ cpu_set_pc (proc, addr)" }, { "beq", "if (cpu_get_ccr_Z (proc))\n@ cpu_set_pc (proc, addr)" }, { "bvc", "if (!cpu_get_ccr_V (proc))\n@ cpu_set_pc (proc, addr)" }, { "bvs", "if (cpu_get_ccr_V (proc))\n@ cpu_set_pc (proc, addr)" }, { "bpl", "if (!cpu_get_ccr_N (proc))\n@ cpu_set_pc (proc, addr)" }, { "bmi", "if (cpu_get_ccr_N (proc))\n@ cpu_set_pc (proc, addr)" }, { "bge", "if ((cpu_get_ccr_N (proc) ^ cpu_get_ccr_V (proc)) == 0)\n@ cpu_set_pc (proc, addr)" }, { "blt", "if ((cpu_get_ccr_N (proc) ^ cpu_get_ccr_V (proc)))\n@ cpu_set_pc (proc, addr)" }, { "bgt", "if ((cpu_get_ccr_Z (proc) | (cpu_get_ccr_N (proc) ^ cpu_get_ccr_V (proc))) == 0)\n@ \ cpu_set_pc (proc, addr)" }, { "ble", "if ((cpu_get_ccr_Z (proc) | (cpu_get_ccr_N (proc) ^ cpu_get_ccr_V (proc))))\n@ \ cpu_set_pc (proc, addr)" }, /* brclr and brset perform a test and a conditional jump at the same time. Flags are not changed. */ { "brclr8", "if ((src8 & dst8) == 0)\n@ cpu_set_pc (proc, addr)" }, { "brset8", "if (((~src8) & dst8) == 0)\n@ cpu_set_pc (proc, addr)" }, { "rts11", "addr = cpu_m68hc11_pop_uint16 (proc); cpu_set_pc (proc, addr); cpu_return(proc)" }, { "rts12", "addr = cpu_m68hc12_pop_uint16 (proc); cpu_set_pc (proc, addr); cpu_return(proc)" }, { "mul16", "dst16 = ((uint16) src8 & 0x0FF) * ((uint16) dst8 & 0x0FF)", "cpu_set_ccr_C (proc, src8 & 0x80)" }, { "neg8", "dst8 = - src8", "cpu_set_ccr_C (proc, src8 == 0); cpu_ccr_update_tst8 (proc, dst8)" }, { "com8", "dst8 = ~src8", "cpu_set_ccr_C (proc, 1); cpu_ccr_update_tst8 (proc, dst8);" }, { "clr8", "dst8 = 0", "cpu_set_ccr (proc, (cpu_get_ccr (proc) & (M6811_S_BIT|M6811_X_BIT|M6811_H_BIT| \ M6811_I_BIT)) | M6811_Z_BIT)"}, { "clr16","dst16 = 0", "cpu_set_ccr (proc, (cpu_get_ccr (proc) & (M6811_S_BIT|M6811_X_BIT|M6811_H_BIT| \ M6811_I_BIR)) | M6811_Z_BIT)"}, /* 8-bits shift and rotation. */ { "lsr8", "dst8 = src8 >> 1", "cpu_set_ccr_C (proc, src8 & 1); cpu_ccr_update_shift8 (proc, dst8)" }, { "lsl8", "dst8 = src8 << 1", "cpu_set_ccr_C (proc, (src8 & 0x80) >> 7); cpu_ccr_update_shift8 (proc, dst8)" }, { "asr8", "dst8 = (src8 >> 1) | (src8 & 0x80)", "cpu_set_ccr_C (proc, src8 & 1); cpu_ccr_update_shift8 (proc, dst8)" }, { "ror8", "dst8 = (src8 >> 1) | (cpu_get_ccr_C (proc) << 7)", "cpu_set_ccr_C (proc, src8 & 1); cpu_ccr_update_shift8 (proc, dst8)" }, { "rol8", "dst8 = (src8 << 1) | (cpu_get_ccr_C (proc))", "cpu_set_ccr_C (proc, (src8 & 0x80) >> 7); cpu_ccr_update_shift8 (proc, dst8)" }, /* 16-bits shift instructions. 
*/ { "lsl16", "dst16 = src16 << 1", "cpu_set_ccr_C (proc, (src16&0x8000) >> 15); cpu_ccr_update_shift16 (proc, dst16)"}, { "lsr16", "dst16 = src16 >> 1", "cpu_set_ccr_C (proc, src16 & 1); cpu_ccr_update_shift16 (proc, dst16)"}, { "dec8", "dst8 = src8 - 1", "cpu_ccr_update_tst8 (proc, dst8)" }, { "inc8", "dst8 = src8 + 1", "cpu_ccr_update_tst8 (proc, dst8)" }, { "tst8", 0, "cpu_set_ccr_C (proc, 0); cpu_ccr_update_tst8 (proc, src8)" }, { "sub8", "cpu_ccr_update_sub8 (proc, dst8 - src8, dst8, src8);\ dst8 = dst8 - src8", 0 }, { "add8", "cpu_ccr_update_add8 (proc, dst8 + src8, dst8, src8);\ dst8 = dst8 + src8", 0 }, { "sbc8", "if (cpu_get_ccr_C (proc))\n@ \ {\n\ cpu_ccr_update_sub8 (proc, dst8 - src8 - 1, dst8, src8);\n\ dst8 = dst8 - src8 - 1;\n\ }\n\ else\n\ {\n\ cpu_ccr_update_sub8 (proc, dst8 - src8, dst8, src8);\n\ dst8 = dst8 - src8;\n\ }", 0 }, { "adc8", "if (cpu_get_ccr_C (proc))\n@ \ {\n\ cpu_ccr_update_add8 (proc, dst8 + src8 + 1, dst8, src8);\n\ dst8 = dst8 + src8 + 1;\n\ }\n\ else\n\ {\n\ cpu_ccr_update_add8 (proc, dst8 + src8, dst8, src8);\n\ dst8 = dst8 + src8;\n\ }", 0 }, /* 8-bits logical operations. */ { "and8", "dst8 = dst8 & src8", "cpu_ccr_update_tst8 (proc, dst8)" }, { "eor8", "dst8 = dst8 ^ src8", "cpu_ccr_update_tst8 (proc, dst8)" }, { "or8", "dst8 = dst8 | src8", "cpu_ccr_update_tst8 (proc, dst8)" }, { "bclr8","dst8 = (~dst8) & src8", "cpu_ccr_update_tst8 (proc, dst8)" }, /* 16-bits add and subtract instructions. */ { "sub16", "cpu_ccr_update_sub16 (proc, dst16 - src16, dst16, src16);\ dst16 = dst16 - src16", 0 }, { "add16", "cpu_ccr_update_add16 (proc, dst16 + src16, dst16, src16);\ dst16 = dst16 + src16", 0 }, { "inc16", "dst16 = src16 + 1", "cpu_set_ccr_Z (proc, dst16 == 0)" }, { "dec16", "dst16 = src16 - 1", "cpu_set_ccr_Z (proc, dst16 == 0)" }, /* Special increment/decrement for the stack pointer: flags are not changed. */ { "ins16", "dst16 = src16 + 1" }, { "des16", "dst16 = src16 - 1" }, { "jsr_11_16", "cpu_m68hc11_push_uint16 (proc, cpu_get_pc (proc)); cpu_call (proc, addr)"}, { "jsr_12_16", "cpu_m68hc12_push_uint16 (proc, cpu_get_pc (proc)); cpu_call (proc, addr)"}, /* xgdx and xgdx patterns. Flags are not changed. */ { "xgdxy16", "dst16 = cpu_get_d (proc); cpu_set_d (proc, src16)"}, { "stop", "cpu_special (proc, M6811_STOP)"}, /* tsx, tsy, txs, tys don't affect the flags. Sp value is corrected by +/- 1. */ { "tsxy16", "dst16 = src16 + 1;"}, { "txys16", "dst16 = src16 - 1;"}, /* Add b to X or Y with an unsigned extension 8->16. Flags not changed. */ { "abxy16","dst16 = dst16 + (uint16) src8"}, /* After 'daa', the Z flag is undefined. Mark it as changed. */ { "daa8", "cpu_special (proc, M6811_DAA)" }, { "nop", 0 }, /* Integer divide: (parallel (set IX (div D IX)) (set D (mod D IX))) */ { "idiv16", "if (src16 == 0)\n{\n\ dst16 = 0xffff;\ }\nelse\n{\n\ cpu_set_d (proc, dst16 % src16);\ dst16 = dst16 / src16;\ }", "cpu_set_ccr_Z (proc, dst16 == 0); cpu_set_ccr_V (proc, 0);\ cpu_set_ccr_C (proc, src16 == 0)" }, /* Fractional divide: (parallel (set IX (div (mul D 65536) IX) (set D (mod (mul D 65536) IX)))) */ { "fdiv16", "if (src16 <= dst16 )\n{\n\ dst16 = 0xffff;\n\ cpu_set_ccr_Z (proc, 0);\n\ cpu_set_ccr_V (proc, 1);\n\ cpu_set_ccr_C (proc, dst16 == 0);\n\ }\nelse\n{\n\ unsigned long l = (unsigned long) (dst16) << 16;\n\ cpu_set_d (proc, (uint16) (l % (unsigned long) (src16)));\n\ dst16 = (uint16) (l / (unsigned long) (src16));\n\ cpu_set_ccr_V (proc, 0);\n\ cpu_set_ccr_C (proc, 0);\n\ cpu_set_ccr_Z (proc, dst16 == 0);\n\ }", 0 }, /* Operations to get/set the CCR. 
*/ { "clv", 0, "cpu_set_ccr_V (proc, 0)" }, { "sev", 0, "cpu_set_ccr_V (proc, 1)" }, { "clc", 0, "cpu_set_ccr_C (proc, 0)" }, { "sec", 0, "cpu_set_ccr_C (proc, 1)" }, { "cli", 0, "cpu_set_ccr_I (proc, 0)" }, { "sei", 0, "cpu_set_ccr_I (proc, 1)" }, /* Some special instructions are implemented by 'cpu_special'. */ { "rti11", "cpu_special (proc, M6811_RTI)" }, { "rti12", "cpu_special (proc, M6812_RTI)" }, { "wai", "cpu_special (proc, M6811_WAI)" }, { "test", "cpu_special (proc, M6811_TEST)" }, { "swi", "cpu_special (proc, M6811_SWI)" }, { "syscall","cpu_special (proc, M6811_EMUL_SYSCALL)" }, { "page2", "cpu_page2_interp (proc)", 0 }, { "page3", "cpu_page3_interp (proc)", 0 }, { "page4", "cpu_page4_interp (proc)", 0 }, /* 68HC12 special instructions. */ { "bgnd", "cpu_special (proc, M6812_BGND)" }, { "call8", "cpu_special (proc, M6812_CALL)" }, { "call_ind", "cpu_special (proc, M6812_CALL_INDIRECT)" }, { "dbcc8", "cpu_dbcc (proc)" }, { "ediv", "cpu_special (proc, M6812_EDIV)" }, { "emul", "{ uint32 src1 = (uint32) cpu_get_d (proc);\ uint32 src2 = (uint32) cpu_get_y (proc);\ src1 *= src2;\ cpu_set_d (proc, src1);\ cpu_set_y (proc, src1 >> 16);\ cpu_set_ccr_Z (proc, src1 == 0);\ cpu_set_ccr_C (proc, src1 & 0x08000);\ cpu_set_ccr_N (proc, src1 & 0x80000000);}" }, { "emuls", "cpu_special (proc, M6812_EMULS)" }, { "mem", "cpu_special (proc, M6812_MEM)" }, { "rtc", "cpu_special (proc, M6812_RTC)" }, { "emacs", "cpu_special (proc, M6812_EMACS)" }, { "idivs", "cpu_special (proc, M6812_IDIVS)" }, { "edivs", "cpu_special (proc, M6812_EDIVS)" }, { "exg8", "cpu_exg (proc, src8)" }, { "move8", "cpu_move8 (proc, op)" }, { "move16","cpu_move16 (proc, op)" }, { "max8", "cpu_ccr_update_sub8 (proc, dst8 - src8, dst8, src8);\ if (dst8 < src8) dst8 = src8" }, { "min8", "cpu_ccr_update_sub8 (proc, dst8 - src8, dst8, src8);\ if (dst8 > src8) dst8 = src8" }, { "max16", "cpu_ccr_update_sub16 (proc, dst16 - src16, dst16, src16);\ if (dst16 < src16) dst16 = src16" }, { "min16", "cpu_ccr_update_sub16 (proc, dst16 - src16, dst16, src16);\ if (dst16 > src16) dst16 = src16" }, { "rev", "cpu_special (proc, M6812_REV);" }, { "revw", "cpu_special (proc, M6812_REVW);" }, { "wav", "cpu_special (proc, M6812_WAV);" }, { "tbl8", "cpu_special (proc, M6812_ETBL);" }, { "tbl16", "cpu_special (proc, M6812_ETBL);" } }; /* Definition of an opcode of the 68HC11. 
*/ struct m6811_opcode_def { const char *name; const char *operands; const char *insn_pattern; unsigned char insn_size; unsigned char insn_code; unsigned char insn_min_cycles; unsigned char insn_max_cycles; unsigned char set_flags_mask; unsigned char clr_flags_mask; unsigned char chg_flags_mask; }; /* * { "dex", "x->x", "dec16", 1, 0x00, 5, _M, CHG_NONE }, * Name -+ +----- Insn CCR changes * Operands ---+ +------------ Max # cycles * Pattern -----------+ +--------------- Min # cycles * Size -----------------+ +-------------------- Opcode * * Operands Fetch operand Save result * ------- -------------- ------------ * x->x src16 = x x = dst16 * d->d src16 = d d = dst16 * b,a->a src8 = b dst8 = a a = dst8 * sp->x src16 = sp x = dst16 * (sp)->a src8 = pop8 a = dst8 * a->(sp) src8 = a push8 dst8 * (x)->(x) src8 = (IND, X) (IND, X) = dst8 * (y)->a src8 = (IND, Y) a = dst8 * ()->b src8 = (EXT) b = dst8 */ struct m6811_opcode_def m6811_page1_opcodes[] = { { "test", 0, 0, 1, 0x00, 5, _M, CHG_NONE }, { "nop", 0, 0, 1, 0x01, 2, 2, CHG_NONE }, { "idiv", "x,d->x", "idiv16", 1, 0x02, 3, 41, CLR_V_CHG_ZC}, { "fdiv", "x,d->x", "fdiv16", 1, 0x03, 3, 41, CHG_ZVC}, { "lsrd", "d->d", "lsr16", 1, 0x04, 3, 3, CLR_N_CHG_ZVC }, { "asld", "d->d", "lsl16", 1, 0x05, 3, 3, CHG_NZVC }, { "tap", "a->ccr", "mov8", 1, 0x06, 2, 2, CHG_ALL}, { "tpa", "ccr->a", "mov8", 1, 0x07, 2, 2, CHG_NONE }, { "inx", "x->x", "inc16", 1, 0x08, 3, 3, CHG_Z }, { "dex", "x->x", "dec16", 1, 0x09, 3, 3, CHG_Z }, { "clv", 0, 0, 1, 0x0a, 2, 2, CLR_V }, { "sev", 0, 0, 1, 0x0b, 2, 2, SET_V }, { "clc", 0, 0, 1, 0x0c, 2, 2, CLR_C }, { "sec", 0, 0, 1, 0x0d, 2, 2, SET_C }, { "cli", 0, 0, 1, 0x0e, 2, 2, CLR_I }, { "sei", 0, 0, 1, 0x0f, 2, 2, SET_I }, { "sba", "b,a->a", "sub8", 1, 0x10, 2, 2, CHG_NZVC }, { "cba", "b,a", "sub8", 1, 0x11, 2, 2, CHG_NZVC }, { "brset","*,#,r", "brset8", 4, 0x12, 6, 6, CHG_NONE }, { "brclr","*,#,r", "brclr8", 4, 0x13, 6, 6, CHG_NONE }, { "bset", "*,#->*", "or8", 3, 0x14, 6, 6, CLR_V_CHG_NZ }, { "bclr", "*,#->*", "bclr8", 3, 0x15, 6, 6, CLR_V_CHG_NZ }, { "tab", "a->b", "movtst8", 1, 0x16, 2, 2, CLR_V_CHG_NZ }, { "tba", "b->a", "movtst8", 1, 0x17, 2, 2, CLR_V_CHG_NZ }, { "page2", 0, "page2", 1, 0x18, 0, 0, CHG_NONE }, { "page3", 0, "page3", 1, 0x1a, 0, 0, CHG_NONE }, /* After 'daa', the Z flag is undefined. Mark it as changed. */ { "daa", "", "daa8", 1, 0x19, 2, 2, CHG_NZVC }, { "aba", "b,a->a", "add8", 1, 0x1b, 2, 2, CHG_HNZVC}, { "bset", "(x),#->(x)","or8", 3, 0x1c, 7, 7, CLR_V_CHG_NZ }, { "bclr", "(x),#->(x)","bclr8", 3, 0x1d, 7, 7, CLR_V_CHG_NZ }, { "brset","(x),#,r", "brset8", 4, 0x1e, 7, 7, CHG_NONE }, { "brclr","(x),#,r", "brclr8", 4, 0x1f, 7, 7, CHG_NONE }, /* Relative branch. All of them take 3 bytes. Flags not changed. 
*/ { "bra", "r", 0, 2, 0x20, 3, 3, CHG_NONE }, { "brn", "r", "nop", 2, 0x21, 3, 3, CHG_NONE }, { "bhi", "r", 0, 2, 0x22, 3, 3, CHG_NONE }, { "bls", "r", 0, 2, 0x23, 3, 3, CHG_NONE }, { "bcc", "r", 0, 2, 0x24, 3, 3, CHG_NONE }, { "bcs", "r", 0, 2, 0x25, 3, 3, CHG_NONE }, { "bne", "r", 0, 2, 0x26, 3, 3, CHG_NONE }, { "beq", "r", 0, 2, 0x27, 3, 3, CHG_NONE }, { "bvc", "r", 0, 2, 0x28, 3, 3, CHG_NONE }, { "bvs", "r", 0, 2, 0x29, 3, 3, CHG_NONE }, { "bpl", "r", 0, 2, 0x2a, 3, 3, CHG_NONE }, { "bmi", "r", 0, 2, 0x2b, 3, 3, CHG_NONE }, { "bge", "r", 0, 2, 0x2c, 3, 3, CHG_NONE }, { "blt", "r", 0, 2, 0x2d, 3, 3, CHG_NONE }, { "bgt", "r", 0, 2, 0x2e, 3, 3, CHG_NONE }, { "ble", "r", 0, 2, 0x2f, 3, 3, CHG_NONE }, { "tsx", "sp->x", "tsxy16", 1, 0x30, 3, 3, CHG_NONE }, { "ins", "sp->sp", "ins16", 1, 0x31, 3, 3, CHG_NONE }, { "pula", "(sp)->a", "mov8", 1, 0x32, 4, 4, CHG_NONE }, { "pulb", "(sp)->b", "mov8", 1, 0x33, 4, 4, CHG_NONE }, { "des", "sp->sp", "des16", 1, 0x34, 3, 3, CHG_NONE }, { "txs", "x->sp", "txys16", 1, 0x35, 3, 3, CHG_NONE }, { "psha", "a->(sp)", "mov8", 1, 0x36, 3, 3, CHG_NONE }, { "pshb", "b->(sp)", "mov8", 1, 0x37, 3, 3, CHG_NONE }, { "pulx", "(sp)->x", "mov16", 1, 0x38, 5, 5, CHG_NONE }, { "rts", 0, "rts11", 1, 0x39, 5, 5, CHG_NONE }, { "abx", "b,x->x", "abxy16", 1, 0x3a, 3, 3, CHG_NONE }, { "rti", 0, "rti11", 1, 0x3b, 12, 12, CHG_ALL}, { "pshx", "x->(sp)", "mov16", 1, 0x3c, 4, 4, CHG_NONE }, { "mul", "b,a->d", "mul16", 1, 0x3d, 3, 10, CHG_C }, { "wai", 0, 0, 1, 0x3e, 14, _M, CHG_NONE }, { "swi", 0, 0, 1, 0x3f, 14, _M, CHG_NONE }, { "nega", "a->a", "neg8", 1, 0x40, 2, 2, CHG_NZVC }, { "syscall", "", "syscall", 1, 0x41, 2, 2, CHG_NONE }, { "coma", "a->a", "com8", 1, 0x43, 2, 2, SET_C_CLR_V_CHG_NZ }, { "lsra", "a->a", "lsr8", 1, 0x44, 2, 2, CLR_N_CHG_ZVC}, { "rora", "a->a", "ror8", 1, 0x46, 2, 2, CHG_NZVC }, { "asra", "a->a", "asr8", 1, 0x47, 2, 2, CHG_NZVC }, { "asla", "a->a", "lsl8", 1, 0x48, 2, 2, CHG_NZVC }, { "rola", "a->a", "rol8", 1, 0x49, 2, 2, CHG_NZVC }, { "deca", "a->a", "dec8", 1, 0x4a, 2, 2, CHG_NZV }, { "inca", "a->a", "inc8", 1, 0x4c, 2, 2, CHG_NZV }, { "tsta", "a", "tst8", 1, 0x4d, 2, 2, CLR_V_CHG_NZ }, { "clra", "->a", "clr8", 1, 0x4f, 2, 2, SET_Z_CLR_NVC }, { "negb", "b->b", "neg8", 1, 0x50, 2, 2, CHG_NZVC }, { "comb", "b->b", "com8", 1, 0x53, 2, 2, SET_C_CLR_V_CHG_NZ }, { "lsrb", "b->b", "lsr8", 1, 0x54, 2, 2, CLR_N_CHG_ZVC }, { "rorb", "b->b", "ror8", 1, 0x56, 2, 2, CHG_NZVC }, { "asrb", "b->b", "asr8", 1, 0x57, 2, 2, CHG_NZVC }, { "aslb", "b->b", "lsl8", 1, 0x58, 2, 2, CHG_NZVC }, { "rolb", "b->b", "rol8", 1, 0x59, 2, 2, CHG_NZVC }, { "decb", "b->b", "dec8", 1, 0x5a, 2, 2, CHG_NZV }, { "incb", "b->b", "inc8", 1, 0x5c, 2, 2, CHG_NZV }, { "tstb", "b", "tst8", 1, 0x5d, 2, 2, CLR_V_CHG_NZ }, { "clrb", "->b", "clr8", 1, 0x5f, 2, 2, SET_Z_CLR_NVC }, { "neg", "(x)->(x)", "neg8", 2, 0x60, 6, 6, CHG_NZVC }, { "com", "(x)->(x)", "com8", 2, 0x63, 6, 6, SET_C_CLR_V_CHG_NZ }, { "lsr", "(x)->(x)", "lsr8", 2, 0x64, 6, 6, CLR_N_CHG_ZVC }, { "ror", "(x)->(x)", "ror8", 2, 0x66, 6, 6, CHG_NZVC }, { "asr", "(x)->(x)", "asr8", 2, 0x67, 6, 6, CHG_NZVC }, { "asl", "(x)->(x)", "lsl8", 2, 0x68, 6, 6, CHG_NZVC }, { "rol", "(x)->(x)", "rol8", 2, 0x69, 6, 6, CHG_NZVC }, { "dec", "(x)->(x)", "dec8", 2, 0x6a, 6, 6, CHG_NZV }, { "inc", "(x)->(x)", "inc8", 2, 0x6c, 6, 6, CHG_NZV }, { "tst", "(x)", "tst8", 2, 0x6d, 6, 6, CLR_V_CHG_NZ }, { "jmp", "&(x)", "bra", 2, 0x6e, 3, 3, CHG_NONE }, { "clr", "->(x)", "clr8", 2, 0x6f, 6, 6, SET_Z_CLR_NVC }, { "neg", "()->()", "neg8", 3, 0x70, 6, 6, CHG_NZVC }, { 
"com", "()->()", "com8", 3, 0x73, 6, 6, SET_C_CLR_V_CHG_NZ }, { "lsr", "()->()", "lsr8", 3, 0x74, 6, 6, CLR_V_CHG_ZVC }, { "ror", "()->()", "ror8", 3, 0x76, 6, 6, CHG_NZVC }, { "asr", "()->()", "asr8", 3, 0x77, 6, 6, CHG_NZVC }, { "asl", "()->()", "lsl8", 3, 0x78, 6, 6, CHG_NZVC }, { "rol", "()->()", "rol8", 3, 0x79, 6, 6, CHG_NZVC }, { "dec", "()->()", "dec8", 3, 0x7a, 6, 6, CHG_NZV }, { "inc", "()->()", "inc8", 3, 0x7c, 6, 6, CHG_NZV }, { "tst", "()", "tst8", 3, 0x7d, 6, 6, CLR_V_CHG_NZ }, { "jmp", "&()", "bra", 3, 0x7e, 3, 3, CHG_NONE }, { "clr", "->()", "clr8", 3, 0x7f, 6, 6, SET_Z_CLR_NVC }, { "suba", "#,a->a", "sub8", 2, 0x80, 2, 2, CHG_NZVC }, { "cmpa", "#,a", "sub8", 2, 0x81, 2, 2, CHG_NZVC }, { "sbca", "#,a->a", "sbc8", 2, 0x82, 2, 2, CHG_NZVC }, { "subd", "#,d->d", "sub16", 3, 0x83, 4, 4, CHG_NZVC }, { "anda", "#,a->a", "and8", 2, 0x84, 2, 2, CLR_V_CHG_NZ }, { "bita", "#,a", "and8", 2, 0x85, 2, 2, CLR_V_CHG_NZ }, { "ldaa", "#->a", "movtst8", 2, 0x86, 2, 2, CLR_V_CHG_NZ }, { "eora", "#,a->a", "eor8", 2, 0x88, 2, 2, CLR_V_CHG_NZ }, { "adca", "#,a->a", "adc8", 2, 0x89, 2, 2, CHG_HNZVC }, { "oraa", "#,a->a", "or8", 2, 0x8a, 2, 2, CLR_V_CHG_NZ }, { "adda", "#,a->a", "add8", 2, 0x8b, 2, 2, CHG_HNZVC }, { "cmpx", "#,x", "sub16", 3, 0x8c, 4, 4, CHG_NZVC }, { "bsr", "r", "jsr_11_16", 2, 0x8d, 6, 6, CHG_NONE }, { "lds", "#->sp", "movtst16", 3, 0x8e, 3, 3, CLR_V_CHG_NZ }, { "xgdx", "x->x", "xgdxy16", 1, 0x8f, 3, 3, CHG_NONE }, { "suba", "*,a->a", "sub8", 2, 0x90, 3, 3, CHG_NZVC }, { "cmpa", "*,a", "sub8", 2, 0x91, 3, 3, CHG_NZVC }, { "sbca", "*,a->a", "sbc8", 2, 0x92, 3, 3, CHG_NZVC }, { "subd", "*,d->d", "sub16", 2, 0x93, 5, 5, CHG_NZVC }, { "anda", "*,a->a", "and8", 2, 0x94, 3, 3, CLR_V_CHG_NZ }, { "bita", "*,a", "and8", 2, 0x95, 3, 3, CLR_V_CHG_NZ }, { "ldaa", "*->a", "movtst8", 2, 0x96, 3, 3, CLR_V_CHG_NZ }, { "staa", "a->*", "movtst8", 2, 0x97, 3, 3, CLR_V_CHG_NZ }, { "eora", "*,a->a", "eor8", 2, 0x98, 3, 3, CLR_V_CHG_NZ }, { "adca", "*,a->a", "adc8", 2, 0x99, 3, 3, CHG_HNZVC }, { "oraa", "*,a->a", "or8", 2, 0x9a, 3, 3, CLR_V_CHG_NZ }, { "adda", "*,a->a", "add8", 2, 0x9b, 3, 3, CHG_HNZVC }, { "cmpx", "*,x", "sub16", 2, 0x9c, 5, 5, CHG_NZVC }, { "jsr", "*", "jsr_11_16", 2, 0x9d, 5, 5, CHG_NONE }, { "lds", "*->sp", "movtst16", 2, 0x9e, 4, 4, CLR_V_CHG_NZ }, { "sts", "sp->*", "movtst16", 2, 0x9f, 4, 4, CLR_V_CHG_NZ }, { "suba", "(x),a->a", "sub8", 2, 0xa0, 4, 4, CHG_NZVC }, { "cmpa", "(x),a", "sub8", 2, 0xa1, 4, 4, CHG_NZVC }, { "sbca", "(x),a->a", "sbc8", 2, 0xa2, 4, 4, CHG_NZVC }, { "subd", "(x),d->d", "sub16", 2, 0xa3, 6, 6, CHG_NZVC }, { "anda", "(x),a->a", "and8", 2, 0xa4, 4, 4, CLR_V_CHG_NZ }, { "bita", "(x),a", "and8", 2, 0xa5, 4, 4, CLR_V_CHG_NZ }, { "ldaa", "(x)->a", "movtst8", 2, 0xa6, 4, 4, CLR_V_CHG_NZ }, { "staa", "a->(x)", "movtst8", 2, 0xa7, 4, 4, CLR_V_CHG_NZ }, { "eora", "(x),a->a", "eor8", 2, 0xa8, 4, 4, CLR_V_CHG_NZ }, { "adca", "(x),a->a", "adc8", 2, 0xa9, 4, 4, CHG_HNZVC }, { "oraa", "(x),a->a", "or8", 2, 0xaa, 4, 4, CLR_V_CHG_NZ }, { "adda", "(x),a->a", "add8", 2, 0xab, 4, 4, CHG_HNZVC }, { "cmpx", "(x),x", "sub16", 2, 0xac, 6, 6, CHG_NZVC }, { "jsr", "&(x)", "jsr_11_16", 2, 0xad, 6, 6, CHG_NONE }, { "lds", "(x)->sp", "movtst16", 2, 0xae, 5, 5, CLR_V_CHG_NZ }, { "sts", "sp->(x)", "movtst16", 2, 0xaf, 5, 5, CLR_V_CHG_NZ }, { "suba", "(),a->a", "sub8", 3, 0xb0, 4, 4, CHG_NZVC }, { "cmpa", "(),a", "sub8", 3, 0xb1, 4, 4, CHG_NZVC }, { "sbca", "(),a->a", "sbc8", 3, 0xb2, 4, 4, CHG_NZVC }, { "subd", "(),d->d", "sub16", 3, 0xb3, 6, 6, CHG_NZVC }, { "anda", "(),a->a", 
"and8", 3, 0xb4, 4, 4, CLR_V_CHG_NZ }, { "bita", "(),a", "and8", 3, 0xb5, 4, 4, CLR_V_CHG_NZ }, { "ldaa", "()->a", "movtst8", 3, 0xb6, 4, 4, CLR_V_CHG_NZ }, { "staa", "a->()", "movtst8", 3, 0xb7, 4, 4, CLR_V_CHG_NZ }, { "eora", "(),a->a", "eor8", 3, 0xb8, 4, 4, CLR_V_CHG_NZ }, { "adca", "(),a->a", "adc8", 3, 0xb9, 4, 4, CHG_HNZVC }, { "oraa", "(),a->a", "or8", 3, 0xba, 4, 4, CLR_V_CHG_NZ }, { "adda", "(),a->a", "add8", 3, 0xbb, 4, 4, CHG_HNZVC }, { "cmpx", "(),x", "sub16", 3, 0xbc, 5, 5, CHG_NZVC }, { "jsr", "&()", "jsr_11_16", 3, 0xbd, 6, 6, CHG_NONE }, { "lds", "()->sp", "movtst16", 3, 0xbe, 5, 5, CLR_V_CHG_NZ }, { "sts", "sp->()", "movtst16", 3, 0xbf, 5, 5, CLR_V_CHG_NZ }, { "subb", "#,b->b", "sub8", 2, 0xc0, 2, 2, CHG_NZVC }, { "cmpb", "#,b", "sub8", 2, 0xc1, 2, 2, CHG_NZVC }, { "sbcb", "#,b->b", "sbc8", 2, 0xc2, 2, 2, CHG_NZVC }, { "addd", "#,d->d", "add16", 3, 0xc3, 4, 4, CHG_NZVC }, { "andb", "#,b->b", "and8", 2, 0xc4, 2, 2, CLR_V_CHG_NZ }, { "bitb", "#,b", "and8", 2, 0xc5, 2, 2, CLR_V_CHG_NZ }, { "ldab", "#->b", "movtst8", 2, 0xc6, 2, 2, CLR_V_CHG_NZ }, { "eorb", "#,b->b", "eor8", 2, 0xc8, 2, 2, CLR_V_CHG_NZ }, { "adcb", "#,b->b", "adc8", 2, 0xc9, 2, 2, CHG_HNZVC }, { "orab", "#,b->b", "or8", 2, 0xca, 2, 2, CLR_V_CHG_NZ }, { "addb", "#,b->b", "add8", 2, 0xcb, 2, 2, CHG_HNZVC }, { "ldd", "#->d", "movtst16", 3, 0xcc, 3, 3, CLR_V_CHG_NZ }, { "page4",0, "page4", 1, 0xcd, 0, 0, CHG_NONE }, { "ldx", "#->x", "movtst16", 3, 0xce, 3, 3, CLR_V_CHG_NZ }, { "stop", 0, 0, 1, 0xcf, 2, 2, CHG_NONE }, { "subb", "*,b->b", "sub8", 2, 0xd0, 3, 3, CHG_NZVC }, { "cmpb", "*,b", "sub8", 2, 0xd1, 3, 3, CHG_NZVC }, { "sbcb", "*,b->b", "sbc8", 2, 0xd2, 3, 3, CHG_NZVC }, { "addd", "*,d->d", "add16", 2, 0xd3, 5, 5, CHG_NZVC }, { "andb", "*,b->b", "and8", 2, 0xd4, 3, 3, CLR_V_CHG_NZ }, { "bitb", "*,b", "and8", 2, 0xd5, 3, 3, CLR_V_CHG_NZ }, { "ldab", "*->b", "movtst8", 2, 0xd6, 3, 3, CLR_V_CHG_NZ }, { "stab", "b->*", "movtst8", 2, 0xd7, 3, 3, CLR_V_CHG_NZ }, { "eorb", "*,b->b", "eor8", 2, 0xd8, 3, 3, CLR_V_CHG_NZ }, { "adcb", "*,b->b", "adc8", 2, 0xd9, 3, 3, CHG_HNZVC }, { "orab", "*,b->b", "or8", 2, 0xda, 3, 3, CLR_V_CHG_NZ }, { "addb", "*,b->b", "add8", 2, 0xdb, 3, 3, CHG_HNZVC }, { "ldd", "*->d", "movtst16", 2, 0xdc, 4, 4, CLR_V_CHG_NZ }, { "std", "d->*", "movtst16", 2, 0xdd, 4, 4, CLR_V_CHG_NZ }, { "ldx", "*->x", "movtst16", 2, 0xde, 4, 4, CLR_V_CHG_NZ }, { "stx", "x->*", "movtst16", 2, 0xdf, 4, 4, CLR_V_CHG_NZ }, { "subb", "(x),b->b", "sub8", 2, 0xe0, 4, 4, CHG_NZVC }, { "cmpb", "(x),b", "sub8", 2, 0xe1, 4, 4, CHG_NZVC }, { "sbcb", "(x),b->b", "sbc8", 2, 0xe2, 4, 4, CHG_NZVC }, { "addd", "(x),d->d", "add16", 2, 0xe3, 6, 6, CHG_NZVC }, { "andb", "(x),b->b", "and8", 2, 0xe4, 4, 4, CLR_V_CHG_NZ }, { "bitb", "(x),b", "and8", 2, 0xe5, 4, 4, CLR_V_CHG_NZ }, { "ldab", "(x)->b", "movtst8", 2, 0xe6, 4, 4, CLR_V_CHG_NZ }, { "stab", "b->(x)", "movtst8", 2, 0xe7, 4, 4, CLR_V_CHG_NZ }, { "eorb", "(x),b->b", "eor8", 2, 0xe8, 4, 4, CLR_V_CHG_NZ }, { "adcb", "(x),b->b", "adc8", 2, 0xe9, 4, 4, CHG_HNZVC }, { "orab", "(x),b->b", "or8", 2, 0xea, 4, 4, CLR_V_CHG_NZ }, { "addb", "(x),b->b", "add8", 2, 0xeb, 4, 4, CHG_HNZVC }, { "ldd", "(x)->d", "movtst16", 2, 0xec, 5, 5, CLR_V_CHG_NZ }, { "std", "d->(x)", "movtst16", 2, 0xed, 5, 5, CLR_V_CHG_NZ }, { "ldx", "(x)->x", "movtst16", 2, 0xee, 5, 5, CLR_V_CHG_NZ }, { "stx", "x->(x)", "movtst16", 2, 0xef, 5, 5, CLR_V_CHG_NZ }, { "subb", "(),b->b", "sub8", 3, 0xf0, 4, 4, CHG_NZVC }, { "cmpb", "(),b", "sub8", 3, 0xf1, 4, 4, CHG_NZVC }, { "sbcb", "(),b->b", "sbc8", 3, 0xf2, 4, 4, 
CHG_NZVC }, { "addd", "(),d->d", "add16", 3, 0xf3, 6, 6, CHG_NZVC }, { "andb", "(),b->b", "and8", 3, 0xf4, 4, 4, CLR_V_CHG_NZ }, { "bitb", "(),b", "and8", 3, 0xf5, 4, 4, CLR_V_CHG_NZ }, { "ldab", "()->b", "movtst8", 3, 0xf6, 4, 4, CLR_V_CHG_NZ }, { "stab", "b->()", "movtst8", 3, 0xf7, 4, 4, CLR_V_CHG_NZ }, { "eorb", "(),b->b", "eor8", 3, 0xf8, 4, 4, CLR_V_CHG_NZ }, { "adcb", "(),b->b", "eor8", 3, 0xf9, 4, 4, CHG_HNZVC }, { "orab", "(),b->b", "or8", 3, 0xfa, 4, 4, CLR_V_CHG_NZ }, { "addb", "(),b->b", "add8", 3, 0xfb, 4, 4, CHG_HNZVC }, { "ldd", "()->d", "movtst16", 3, 0xfc, 5, 5, CLR_V_CHG_NZ }, { "std", "d->()", "movtst16", 3, 0xfd, 5, 5, CLR_V_CHG_NZ }, { "ldx", "()->x", "movtst16", 3, 0xfe, 5, 5, CLR_V_CHG_NZ }, { "stx", "x->()", "movtst16", 3, 0xff, 5, 5, CLR_V_CHG_NZ } }; /* Page 2 opcodes */ /* * { "dex", "x->x", "dec16", 1, 0x00, 5, _M, CHG_NONE }, * Name -+ +----- Insn CCR changes * Operands ---+ +------------ Max # cycles * Pattern -----------+ +--------------- Min # cycles * Size -----------------+ +-------------------- Opcode */ struct m6811_opcode_def m6811_page2_opcodes[] = { { "iny", "y->y", "inc16", 2, 0x08, 4, 4, CHG_Z }, { "dey", "y->y", "dec16", 2, 0x09, 4, 4, CHG_Z }, { "bset", "(y),#->(y)","or8", 4, 0x1c, 8, 8, CLR_V_CHG_NZ }, { "bclr", "(y),#->(y)","bclr8", 4, 0x1d, 8, 8, CLR_V_CHG_NZ }, { "brset","(y),#,r", "brset8", 5, 0x1e, 8, 8, CHG_NONE }, { "brclr","(y),#,r", "brclr8", 5, 0x1f, 8, 8, CHG_NONE }, { "tsy", "sp->y", "tsxy16", 2, 0x30, 4, 4, CHG_NONE }, { "tys", "y->sp", "txys16", 2, 0x35, 4, 4, CHG_NONE }, { "puly", "(sp)->y", "mov16", 2, 0x38, 6, 6, CHG_NONE }, { "aby", "b,y->y", "abxy16", 2, 0x3a, 4, 4, CHG_NONE }, { "pshy", "y->(sp)", "mov16", 2, 0x3c, 5, 5, CHG_NONE }, { "neg", "(y)->(y)", "neg8", 3, 0x60, 7, 7, CHG_NZVC }, { "com", "(y)->(y)", "com8", 3, 0x63, 7, 7, SET_C_CLR_V_CHG_NZ}, { "lsr", "(y)->(y)", "lsr8", 3, 0x64, 7, 7, CLR_V_CHG_ZVC }, { "ror", "(y)->(y)", "ror8", 3, 0x66, 7, 7, CHG_NZVC }, { "asr", "(y)->(y)", "asr8", 3, 0x67, 7, 7, CHG_NZVC }, { "asl", "(y)->(y)", "lsl8", 3, 0x68, 7, 7, CHG_NZVC }, { "rol", "(y)->(y)", "rol8", 3, 0x69, 7, 7, CHG_NZVC }, { "dec", "(y)->(y)", "dec8", 3, 0x6a, 7, 7, CHG_NZV }, { "inc", "(y)->(y)", "inc8", 3, 0x6c, 7, 7, CHG_NZV }, { "tst", "(y)", "tst8", 3, 0x6d, 7, 7, CLR_V_CHG_NZ }, { "jmp", "&(y)", "bra", 3, 0x6e, 4, 4, CHG_NONE }, { "clr", "->(y)", "clr8", 3, 0x6f, 7, 7, SET_Z_CLR_NVC }, { "cmpy", "#,y", "sub16", 4, 0x8c, 5, 5, CHG_NZVC }, { "xgdy", "y->y", "xgdxy16", 2, 0x8f, 4, 4, CHG_NONE }, { "cmpy", "*,y", "sub16", 3, 0x9c, 6, 6, CHG_NZVC }, { "suba", "(y),a->a", "sub8", 3, 0xa0, 5, 5, CHG_NZVC }, { "cmpa", "(y),a", "sub8", 3, 0xa1, 5, 5, CHG_NZVC }, { "sbca", "(y),a->a", "sbc8", 3, 0xa2, 5, 5, CHG_NZVC }, { "subd", "(y),d->d", "sub16", 3, 0xa3, 7, 7, CHG_NZVC }, { "anda", "(y),a->a", "and8", 3, 0xa4, 5, 5, CLR_V_CHG_NZ }, { "bita", "(y),a", "and8", 3, 0xa5, 5, 5, CLR_V_CHG_NZ }, { "ldaa", "(y)->a", "movtst8", 3, 0xa6, 5, 5, CLR_V_CHG_NZ }, { "staa", "a->(y)", "movtst8", 3, 0xa7, 5, 5, CLR_V_CHG_NZ }, { "eora", "(y),a->a", "eor8", 3, 0xa8, 5, 5, CLR_V_CHG_NZ }, { "adca", "(y),a->a", "adc8", 3, 0xa9, 5, 5, CHG_HNZVC }, { "oraa", "(y),a->a", "or8", 3, 0xaa, 5, 5, CLR_V_CHG_NZ }, { "adda", "(y),a->a", "add8", 3, 0xab, 5, 5, CHG_HNZVC }, { "cmpy", "(y),y", "sub16", 3, 0xac, 7, 7, CHG_NZVC }, { "jsr", "&(y)", "jsr_11_16", 3, 0xad, 6, 6, CHG_NONE }, { "lds", "(y)->sp", "movtst16", 3, 0xae, 6, 6, CLR_V_CHG_NZ }, { "sts", "sp->(y)", "movtst16", 3, 0xaf, 6, 6, CLR_V_CHG_NZ }, { "cmpy", "(),y", "sub16", 4, 0xbc, 7, 7, 
CHG_NZVC }, { "ldy", "#->y", "movtst16", 4, 0xce, 4, 4, CLR_V_CHG_NZ }, { "ldy", "*->y", "movtst16", 3, 0xde, 5, 5, CLR_V_CHG_NZ }, { "sty", "y->*", "movtst16", 3, 0xdf, 5, 5, CLR_V_CHG_NZ }, { "subb", "(y),b->b", "sub8", 3, 0xe0, 5, 5, CHG_NZVC }, { "cmpb", "(y),b", "sub8", 3, 0xe1, 5, 5, CHG_NZVC }, { "sbcb", "(y),b->b", "sbc8", 3, 0xe2, 5, 5, CHG_NZVC }, { "addd", "(y),d->d", "add16", 3, 0xe3, 7, 7, CHG_NZVC }, { "andb", "(y),b->b", "and8", 3, 0xe4, 5, 5, CLR_V_CHG_NZ }, { "bitb", "(y),b", "and8", 3, 0xe5, 5, 5, CLR_V_CHG_NZ }, { "ldab", "(y)->b", "movtst8", 3, 0xe6, 5, 5, CLR_V_CHG_NZ }, { "stab", "b->(y)", "movtst8", 3, 0xe7, 5, 5, CLR_V_CHG_NZ }, { "eorb", "(y),b->b", "eor8", 3, 0xe8, 5, 5, CLR_V_CHG_NZ }, { "adcb", "(y),b->b", "adc8", 3, 0xe9, 5, 5, CHG_HNZVC }, { "orab", "(y),b->b", "or8", 3, 0xea, 5, 5, CLR_V_CHG_NZ }, { "addb", "(y),b->b", "add8", 3, 0xeb, 5, 5, CHG_HNZVC }, { "ldd", "(y)->d", "movtst16", 3, 0xec, 6, 6, CLR_V_CHG_NZ }, { "std", "d->(y)", "movtst16", 3, 0xed, 6, 6, CLR_V_CHG_NZ }, { "ldy", "(y)->y", "movtst16", 3, 0xee, 6, 6, CLR_V_CHG_NZ }, { "sty", "y->(y)", "movtst16", 3, 0xef, 6, 6, CLR_V_CHG_NZ }, { "ldy", "()->y", "movtst16", 4, 0xfe, 6, 6, CLR_V_CHG_NZ }, { "sty", "y->()", "movtst16", 4, 0xff, 6, 6, CLR_V_CHG_NZ } }; /* Page 3 opcodes */ /* * { "dex", "x->x", "dec16", 1, 0x00, 5, _M, CHG_NONE }, * Name -+ +----- Insn CCR changes * Operands ---+ +------------ Max # cycles * Pattern -----------+ +--------------- Min # cycles * Size -----------------+ +-------------------- Opcode */ struct m6811_opcode_def m6811_page3_opcodes[] = { { "cmpd", "#,d", "sub16", 4, 0x83, 5, 5, CHG_NZVC }, { "cmpd", "*,d", "sub16", 3, 0x93, 6, 6, CHG_NZVC }, { "cmpd", "(x),d", "sub16", 3, 0xa3, 7, 7, CHG_NZVC }, { "cmpy", "(x),y", "sub16", 3, 0xac, 7, 7, CHG_NZVC }, { "cmpd", "(),d", "sub16", 4, 0xb3, 7, 7, CHG_NZVC }, { "ldy", "(x)->y", "movtst16", 3, 0xee, 6, 6, CLR_V_CHG_NZ }, { "sty", "y->(x)", "movtst16", 3, 0xef, 6, 6, CLR_V_CHG_NZ } }; /* Page 4 opcodes */ /* * { "dex", "x->x", "dec16", 1, 0x00, 5, _M, CHG_NONE }, * Name -+ +----- Insn CCR changes * Operands ---+ +------------ Max # cycles * Pattern -----------+ +--------------- Min # cycles * Size -----------------+ +-------------------- Opcode */ struct m6811_opcode_def m6811_page4_opcodes[] = { { "syscall", "", "syscall", 2, 0x03, 6, 6, CHG_NONE }, { "cmpd", "(y),d", "sub16", 3, 0xa3, 7, 7, CHG_NZVC }, { "cmpx", "(y),x", "sub16", 3, 0xac, 7, 7, CHG_NZVC }, { "ldx", "(y)->x", "movtst16", 3, 0xee, 6, 6, CLR_V_CHG_NZ }, { "stx", "x->(y)", "movtst16", 3, 0xef, 6, 6, CLR_V_CHG_NZ } }; /* 68HC12 opcodes */ /* * { "dex", "x->x", "dec16", 1, 0x00, 5, _M, CHG_NONE }, * Name -+ +----- Insn CCR changes * Operands ---+ +------------ Max # cycles * Pattern -----------+ +--------------- Min # cycles * Size -----------------+ +-------------------- Opcode */ struct m6811_opcode_def m6812_page1_opcodes[] = { { "adca", "#,a->a", "adc8", 2, 0x89, 1, 1, CHG_HNZVC }, { "adca", "*,a->a", "adc8", 2, 0x99, 3, 3, CHG_HNZVC }, { "adca", "(),a->a", "adc8", 3, 0xb9, 3, 3, CHG_HNZVC }, { "adca", "[],a->a", "adc8", 2, 0xa9, 3, 3, CHG_HNZVC }, { "adcb", "#,b->b", "adc8", 2, 0xc9, 1, 1, CHG_HNZVC }, { "adcb", "*,b->b", "adc8", 3, 0xd9, 3, 3, CHG_HNZVC }, { "adcb", "(),b->b", "adc8", 3, 0xf9, 3, 3, CHG_HNZVC }, { "adcb", "[],b->b", "adc8", 2, 0xe9, 3, 3, CHG_HNZVC }, { "adda", "#,a->a", "add8", 2, 0x8b, 1, 1, CHG_HNZVC }, { "adda", "*,a->a", "add8", 3, 0x9b, 3, 3, CHG_HNZVC }, { "adda", "(),a->a", "add8", 3, 0xbb, 3, 3, CHG_HNZVC }, { "adda", "[],a->a", 
"add8", 2, 0xab, 3, 3, CHG_HNZVC }, { "addb", "#,b->b", "add8", 2, 0xcb, 1, 1, CHG_HNZVC }, { "addb", "*,b->b", "add8", 3, 0xdb, 3, 3, CHG_HNZVC }, { "addb", "(),b->b", "add8", 3, 0xfb, 3, 3, CHG_HNZVC }, { "addb", "[],b->b", "add8", 2, 0xeb, 3, 3, CHG_HNZVC }, { "addd", "#,d->d", "add16", 3, 0xc3, 2, 2, CHG_NZVC }, { "addd", "*,d->d", "add16", 2, 0xd3, 3, 3, CHG_NZVC }, { "addd", "(),d->d", "add16", 3, 0xf3, 3, 3, CHG_NZVC }, { "addd", "[],d->d", "add16", 2, 0xe3, 3, 3, CHG_NZVC }, { "anda", "#,a->a", "and8", 2, 0x84, 1, 1, CLR_V_CHG_NZ }, { "anda", "*,a->a", "and8", 2, 0x94, 3, 3, CLR_V_CHG_NZ }, { "anda", "(),a->a", "and8", 3, 0xb4, 3, 3, CLR_V_CHG_NZ }, { "anda", "[],a->a", "and8", 2, 0xa4, 3, 3, CLR_V_CHG_NZ }, { "andb", "#,b->b", "and8", 2, 0xc4, 1, 1, CLR_V_CHG_NZ }, { "andb", "*,b->b", "and8", 2, 0xd4, 3, 3, CLR_V_CHG_NZ }, { "andb", "(),b->b", "and8", 3, 0xf4, 3, 3, CLR_V_CHG_NZ }, { "andb", "[],b->b", "and8", 2, 0xe4, 3, 3, CLR_V_CHG_NZ }, { "andcc", "#,ccr->ccr", "and8", 2, 0x10, 1, 1, CHG_ALL }, { "asl", "()->()", "lsl8", 3, 0x78, 4, 4, CHG_NZVC }, { "asl", "[]->[]", "lsl8", 2, 0x68, 3, 3, CHG_NZVC }, { "asla", "a->a", "lsl8", 1, 0x48, 1, 1, CHG_NZVC }, { "aslb", "b->b", "lsl8", 1, 0x58, 1, 1, CHG_NZVC }, { "asld", "d->d", "lsl16", 1, 0x59, 1, 1, CHG_NZVC }, { "asr", "()->()", "asr8", 3, 0x77, 4, 4, CHG_NZVC }, { "asr", "[]->[]", "asr8", 2, 0x67, 3, 3, CHG_NZVC }, { "asra", "a->a", "asr8", 1, 0x47, 1, 1, CHG_NZVC }, { "asrb", "b->b", "asr8", 1, 0x57, 1, 1, CHG_NZVC }, { "bcc", "r", 0, 2, 0x24, 1, 3, CHG_NONE }, { "bclr", "*,#->*", "bclr8", 3, 0x4d, 4, 4, CLR_V_CHG_NZ }, { "bclr", "(),#->()", "bclr8", 4, 0x1d, 4, 4, CLR_V_CHG_NZ }, { "bclr", "[],#->[]", "bclr8", 3, 0x0d, 4, 4, CLR_V_CHG_NZ }, { "bcs", "r", 0, 2, 0x25, 1, 3, CHG_NONE }, { "beq", "r", 0, 2, 0x27, 1, 3, CHG_NONE }, { "bge", "r", 0, 2, 0x2c, 1, 3, CHG_NONE }, { "bgnd", 0, 0, 1, 0x00, 5, 5, CHG_NONE }, { "bgt", "r", 0, 2, 0x2e, 1, 3, CHG_NONE }, { "bhi", "r", 0, 2, 0x22, 1, 3, CHG_NONE }, { "bita", "#,a", "and8", 2, 0x85, 1, 1, CLR_V_CHG_NZ }, { "bita", "*,a", "and8", 2, 0x95, 3, 3, CLR_V_CHG_NZ }, { "bita", "(),a", "and8", 3, 0xb5, 3, 3, CLR_V_CHG_NZ }, { "bita", "[],a", "and8", 2, 0xa5, 3, 3, CLR_V_CHG_NZ }, { "bitb", "#,b", "and8", 2, 0xc5, 1, 1, CLR_V_CHG_NZ }, { "bitb", "*,b", "and8", 2, 0xd5, 3, 3, CLR_V_CHG_NZ }, { "bitb", "(),b", "and8", 3, 0xf5, 3, 3, CLR_V_CHG_NZ }, { "bitb", "[],b", "and8", 2, 0xe5, 3, 3, CLR_V_CHG_NZ }, { "ble", "r", 0, 2, 0x2f, 1, 3, CHG_NONE }, { "bls", "r", 0, 2, 0x23, 1, 3, CHG_NONE }, { "blt", "r", 0, 2, 0x2d, 1, 3, CHG_NONE }, { "bmi", "r", 0, 2, 0x2b, 1, 3, CHG_NONE }, { "bne", "r", 0, 2, 0x26, 1, 3, CHG_NONE }, { "bpl", "r", 0, 2, 0x2a, 1, 3, CHG_NONE }, { "bra", "r", 0, 2, 0x20, 1, 3, CHG_NONE }, { "brclr", "*,#,r", "brclr8", 4, 0x4f, 4, 4, CHG_NONE }, { "brclr", "(),#,r", "brclr8", 5, 0x1f, 5, 5, CHG_NONE }, { "brclr", "[],#,r", "brclr8", 4, 0x0f, 4, 4, CHG_NONE }, { "brn", "r", "nop", 2, 0x21, 1, 3, CHG_NONE }, { "brset", "*,#,r", "brset8", 4, 0x4e, 4, 4, CHG_NONE }, { "brset", "(),#,r", "brset8", 5, 0x1e, 5, 5, CHG_NONE }, { "brset", "[],#,r", "brset8", 4, 0x0e, 4, 4, CHG_NONE }, { "bset", "*,#->*", "or8", 3, 0x4c, 4, 4, CLR_V_CHG_NZ }, { "bset", "(),#->()", "or8", 4, 0x1c, 4, 4, CLR_V_CHG_NZ }, { "bset", "[],#->[]", "or8", 3, 0x0c, 4, 4, CLR_V_CHG_NZ }, { "bsr", "r", "jsr_12_16", 2, 0x07, 4, 4, CHG_NONE }, { "bvc", "r", 0, 2, 0x28, 1, 3, CHG_NONE }, { "bvs", "r", 0, 2, 0x29, 1, 3, CHG_NONE }, { "call", "", "call8", 4, 0x4a, 8, 8, CHG_NONE }, { "call", "", "call_ind",2, 0x4b, 
8, 8, CHG_NONE }, { "clr", "->()", "clr8", 3, 0x79, 3, 3, SET_Z_CLR_NVC }, { "clr", "->[]", "clr8", 2, 0x69, 2, 2, SET_Z_CLR_NVC }, { "clra", "->a", "clr8", 1, 0x87, 1, 1, SET_Z_CLR_NVC }, { "clrb", "->b", "clr8", 1, 0xc7, 1, 1, SET_Z_CLR_NVC }, { "cpa", "#,a", "sub8", 2, 0x81, 1, 1, CHG_NZVC }, { "cpa", "*,a", "sub8", 2, 0x91, 3, 3, CHG_NZVC }, { "cpa", "(),a", "sub8", 3, 0xb1, 3, 3, CHG_NZVC }, { "cpa", "[],a", "sub8", 2, 0xa1, 3, 3, CHG_NZVC }, { "cpb", "#,b", "sub8", 2, 0xc1, 1, 1, CHG_NZVC }, { "cpb", "*,b", "sub8", 2, 0xd1, 3, 3, CHG_NZVC }, { "cpb", "(),b", "sub8", 3, 0xf1, 3, 3, CHG_NZVC }, { "cpb", "[],b", "sub8", 2, 0xe1, 3, 3, CHG_NZVC }, { "com", "()->()", "com8", 3, 0x71, 4, 4, SET_C_CLR_V_CHG_NZ }, { "com", "[]->[]", "com8", 2, 0x61, 3, 3, SET_C_CLR_V_CHG_NZ }, { "coma", "a->a", "com8", 1, 0x41, 1, 1, SET_C_CLR_V_CHG_NZ }, { "comb", "b->b", "com8", 1, 0x51, 1, 1, SET_C_CLR_V_CHG_NZ }, { "cpd", "#,d", "sub16", 3, 0x8c, 2, 2, CHG_NZVC }, { "cpd", "*,d", "sub16", 2, 0x9c, 3, 3, CHG_NZVC }, { "cpd", "(),d", "sub16", 3, 0xbc, 3, 3, CHG_NZVC }, { "cpd", "[],d", "sub16", 2, 0xac, 3, 3, CHG_NZVC }, { "cps", "#,sp", "sub16", 3, 0x8f, 2, 2, CHG_NZVC }, { "cps", "*,sp", "sub16", 2, 0x9f, 3, 3, CHG_NZVC }, { "cps", "(),sp", "sub16", 3, 0xbf, 3, 3, CHG_NZVC }, { "cps", "[],sp", "sub16", 2, 0xaf, 3, 3, CHG_NZVC }, { "cpx", "#,x", "sub16", 3, 0x8e, 2, 2, CHG_NZVC }, { "cpx", "*,x", "sub16", 2, 0x9e, 3, 3, CHG_NZVC }, { "cpx", "(),x", "sub16", 3, 0xbe, 3, 3, CHG_NZVC }, { "cpx", "[],x", "sub16", 2, 0xae, 3, 3, CHG_NZVC }, { "cpy", "#,y", "sub16", 3, 0x8d, 2, 2, CHG_NZVC }, { "cpy", "*,y", "sub16", 2, 0x9d, 3, 3, CHG_NZVC }, { "cpy", "(),y", "sub16", 3, 0xbd, 3, 3, CHG_NZVC }, { "cpy", "[],y", "sub16", 2, 0xad, 3, 3, CHG_NZVC }, /* dbeq, dbne, ibeq, ibne, tbeq, tbne */ { "dbeq", 0, "dbcc8", 3, 0x04, 3, 3, CHG_NONE }, { "dec", "()->()", "dec8", 3, 0x73, 4, 4, CHG_NZV }, { "dec", "[]->[]", "dec8", 2, 0x63, 3, 3, CHG_NZV }, { "deca", "a->a", "dec8", 1, 0x43, 1, 1, CHG_NZV }, { "decb", "b->b", "dec8", 1, 0x53, 1, 1, CHG_NZV }, { "dex", "x->x", "dec16", 1, 0x09, 1, 1, CHG_Z }, { "dey", "y->y", "dec16", 1, 0x03, 1, 1, CHG_Z }, { "ediv", 0, 0, 1, 0x11, 11, 11, CHG_NZVC }, { "emul", 0, 0, 1, 0x13, 3, 3, CHG_NZC }, { "eora", "#,a->a", "eor8", 2, 0x88, 1, 1, CLR_V_CHG_NZ }, { "eora", "*,a->a", "eor8", 2, 0x98, 3, 3, CLR_V_CHG_NZ }, { "eora", "(),a->a", "eor8", 3, 0xb8, 3, 3, CLR_V_CHG_NZ }, { "eora", "[],a->a", "eor8", 2, 0xa8, 3, 3, CLR_V_CHG_NZ }, { "eorb", "#,b->b", "eor8", 2, 0xc8, 1, 1, CLR_V_CHG_NZ }, { "eorb", "*,b->b", "eor8", 2, 0xd8, 3, 3, CLR_V_CHG_NZ }, { "eorb", "(),b->b", "eor8", 3, 0xf8, 3, 3, CLR_V_CHG_NZ }, { "eorb", "[],b->b", "eor8", 2, 0xe8, 3, 3, CLR_V_CHG_NZ }, /* exg, sex, tfr */ { "exg", "#", "exg8", 2, 0xb7, 1, 1, CHG_NONE }, { "inc", "()->()", "inc8", 3, 0x72, 4, 4, CHG_NZV }, { "inc", "[]->[]", "inc8", 2, 0x62, 3, 3, CHG_NZV }, { "inca", "a->a", "inc8", 1, 0x42, 1, 1, CHG_NZV }, { "incb", "b->b", "inc8", 1, 0x52, 1, 1, CHG_NZV }, { "inx", "x->x", "inc16", 1, 0x08, 1, 1, CHG_Z }, { "iny", "y->y", "inc16", 1, 0x02, 1, 1, CHG_Z }, { "jmp", "&()", "bra", 3, 0x06, 3, 3, CHG_NONE }, { "jmp", "&[]", "bra", 2, 0x05, 3, 3, CHG_NONE }, { "jsr", "*", "jsr_12_16", 2, 0x17, 4, 4, CHG_NONE }, { "jsr", "&()", "jsr_12_16", 3, 0x16, 4, 4, CHG_NONE }, { "jsr", "&[]", "jsr_12_16", 2, 0x15, 4, 4, CHG_NONE }, { "ldaa", "#->a", "movtst8", 2, 0x86, 1, 1, CLR_V_CHG_NZ }, { "ldaa", "*->a", "movtst8", 2, 0x96, 3, 3, CLR_V_CHG_NZ }, { "ldaa", "()->a", "movtst8", 3, 0xb6, 3, 3, CLR_V_CHG_NZ }, { "ldaa", 
"[]->a", "movtst8", 2, 0xa6, 3, 3, CLR_V_CHG_NZ }, { "ldab", "#->b", "movtst8", 2, 0xc6, 1, 1, CLR_V_CHG_NZ }, { "ldab", "*->b", "movtst8", 2, 0xd6, 3, 3, CLR_V_CHG_NZ }, { "ldab", "()->b", "movtst8", 3, 0xf6, 3, 3, CLR_V_CHG_NZ }, { "ldab", "[]->b", "movtst8", 2, 0xe6, 3, 3, CLR_V_CHG_NZ }, { "ldd", "#->d", "movtst16", 3, 0xcc, 2, 2, CLR_V_CHG_NZ }, { "ldd", "*->d", "movtst16", 2, 0xdc, 3, 3, CLR_V_CHG_NZ }, { "ldd", "()->d", "movtst16", 3, 0xfc, 3, 3, CLR_V_CHG_NZ }, { "ldd", "[]->d", "movtst16", 2, 0xec, 3, 3, CLR_V_CHG_NZ }, { "lds", "#->sp", "movtst16", 3, 0xcf, 2, 2, CLR_V_CHG_NZ }, { "lds", "*->sp", "movtst16", 2, 0xdf, 3, 3, CLR_V_CHG_NZ }, { "lds", "()->sp", "movtst16", 3, 0xff, 3, 3, CLR_V_CHG_NZ }, { "lds", "[]->sp", "movtst16", 2, 0xef, 3, 3, CLR_V_CHG_NZ }, { "ldx", "#->x", "movtst16", 3, 0xce, 2, 2, CLR_V_CHG_NZ }, { "ldx", "*->x", "movtst16", 2, 0xde, 3, 3, CLR_V_CHG_NZ }, { "ldx", "()->x", "movtst16", 3, 0xfe, 3, 3, CLR_V_CHG_NZ }, { "ldx", "[]->x", "movtst16", 2, 0xee, 3, 3, CLR_V_CHG_NZ }, { "ldy", "#->y", "movtst16", 3, 0xcd, 2, 2, CLR_V_CHG_NZ }, { "ldy", "*->y", "movtst16", 2, 0xdd, 3, 3, CLR_V_CHG_NZ }, { "ldy", "()->y", "movtst16", 3, 0xfd, 3, 3, CLR_V_CHG_NZ }, { "ldy", "[]->y", "movtst16", 2, 0xed, 3, 3, CLR_V_CHG_NZ }, { "leas", "&[]->sp", "lea16", 2, 0x1b, 2, 2, CHG_NONE }, { "leax", "&[]->x", "lea16", 2, 0x1a, 2, 2, CHG_NONE }, { "leay", "&[]->y", "lea16", 2, 0x19, 2, 2, CHG_NONE }, { "lsr", "()->()", "lsr8", 3, 0x74, 4, 4, CLR_N_CHG_ZVC }, { "lsr", "[]->[]", "lsr8", 2, 0x64, 3, 3, CLR_N_CHG_ZVC }, { "lsra", "a->a", "lsr8", 1, 0x44, 1, 1, CLR_N_CHG_ZVC }, { "lsrb", "b->b", "lsr8", 1, 0x54, 1, 1, CLR_N_CHG_ZVC }, { "lsrd", "d->d", "lsr16", 1, 0x49, 1, 1, CLR_N_CHG_ZVC }, { "mem", 0, 0, 1, 0x01, 5, 5, CHG_HNZVC }, { "mul", "b,a->d", "mul16", 1, 0x12, 3, 3, CHG_C }, { "neg", "()->()", "neg8", 3, 0x70, 4, 4, CHG_NZVC }, { "neg", "[]->[]", "neg8", 2, 0x60, 3, 3, CHG_NZVC }, { "nega", "a->a", "neg8", 1, 0x40, 1, 1, CHG_NZVC }, { "negb", "b->b", "neg8", 1, 0x50, 1, 1, CHG_NZVC }, { "nop", "", "nop", 1, 0xa7, 1, 1, CHG_NONE }, { "oraa", "#,a->a", "or8", 2, 0x8a, 1, 1, CLR_V_CHG_NZ }, { "oraa", "*,a->a", "or8", 2, 0x9a, 3, 3, CLR_V_CHG_NZ }, { "oraa", "(),a->a", "or8", 3, 0xba, 3, 3, CLR_V_CHG_NZ }, { "oraa", "[],a->a", "or8", 2, 0xaa, 3, 3, CLR_V_CHG_NZ }, { "orab", "#,b->b", "or8", 2, 0xca, 1, 1, CLR_V_CHG_NZ }, { "orab", "*,b->b", "or8", 2, 0xda, 3, 3, CLR_V_CHG_NZ }, { "orab", "(),b->b", "or8", 3, 0xfa, 3, 3, CLR_V_CHG_NZ }, { "orab", "[],b->b", "or8", 2, 0xea, 3, 3, CLR_V_CHG_NZ }, { "orcc", "#,ccr->ccr", "or8", 2, 0x14, 1, 1, CHG_ALL }, { "page2", 0, "page2", 1, 0x18, 0, 0, CHG_NONE }, { "psha", "a->(sp)", "mov8", 1, 0x36, 2, 2, CHG_NONE }, { "pshb", "b->(sp)", "mov8", 1, 0x37, 2, 2, CHG_NONE }, { "pshc", "ccr->(sp)", "mov8", 1, 0x39, 2, 2, CHG_NONE }, { "pshd", "d->(sp)", "mov16", 1, 0x3b, 2, 2, CHG_NONE }, { "pshx", "x->(sp)", "mov16", 1, 0x34, 2, 2, CHG_NONE }, { "pshy", "y->(sp)", "mov16", 1, 0x35, 2, 2, CHG_NONE }, { "pula", "(sp)->a", "mov8", 1, 0x32, 3, 3, CHG_NONE }, { "pulb", "(sp)->b", "mov8", 1, 0x33, 3, 3, CHG_NONE }, { "pulc", "(sp)->ccr", "mov8", 1, 0x38, 3, 3, CHG_ALL }, { "puld", "(sp)->d", "mov16", 1, 0x3a, 3, 3, CHG_NONE }, { "pulx", "(sp)->x", "mov16", 1, 0x30, 3, 3, CHG_NONE }, { "puly", "(sp)->y", "mov16", 1, 0x31, 3, 3, CHG_NONE }, { "rol", "()->()", "rol8", 3, 0x75, 4, 4, CHG_NZVC }, { "rol", "[]->[]", "rol8", 2, 0x65, 3, 3, CHG_NZVC }, { "rola", "a->a", "rol8", 1, 0x45, 1, 1, CHG_NZVC }, { "rolb", "b->b", "rol8", 1, 0x55, 1, 1, CHG_NZVC }, { 
"ror", "()->()", "ror8", 3, 0x76, 4, 4, CHG_NZVC }, { "ror", "[]->[]", "ror8", 2, 0x66, 3, 3, CHG_NZVC }, { "rora", "a->a", "ror8", 1, 0x46, 1, 1, CHG_NZVC }, { "rorb", "b->b", "ror8", 1, 0x56, 1, 1, CHG_NZVC }, { "rtc", 0, 0, 1, 0x0a, 6, 6, CHG_NONE }, { "rti", 0, "rti12", 1, 0x0b, 8, 10, CHG_ALL}, { "rts", 0, "rts12", 1, 0x3d, 5, 5, CHG_NONE }, { "sbca", "#,a->a", "sbc8", 2, 0x82, 1, 1, CHG_NZVC }, { "sbca", "*,a->a", "sbc8", 2, 0x92, 3, 3, CHG_NZVC }, { "sbca", "(),a->a", "sbc8", 3, 0xb2, 3, 3, CHG_NZVC }, { "sbca", "[],a->a", "sbc8", 2, 0xa2, 3, 3, CHG_NZVC }, { "sbcb", "#,b->b", "sbc8", 2, 0xc2, 1, 1, CHG_NZVC }, { "sbcb", "*,b->b", "sbc8", 2, 0xd2, 3, 3, CHG_NZVC }, { "sbcb", "(),b->b", "sbc8", 3, 0xf2, 3, 3, CHG_NZVC }, { "sbcb", "[],b->b", "sbc8", 2, 0xe2, 3, 3, CHG_NZVC }, { "staa", "a->*", "movtst8", 2, 0x5a, 2, 2, CLR_V_CHG_NZ }, { "staa", "a->()", "movtst8", 3, 0x7a, 3, 3, CLR_V_CHG_NZ }, { "staa", "a->[]", "movtst8", 2, 0x6a, 2, 2, CLR_V_CHG_NZ }, { "stab", "b->*", "movtst8", 2, 0x5b, 2, 2, CLR_V_CHG_NZ }, { "stab", "b->()", "movtst8", 3, 0x7b, 3, 3, CLR_V_CHG_NZ }, { "stab", "b->[]", "movtst8", 2, 0x6b, 2, 2, CLR_V_CHG_NZ }, { "std", "d->*", "movtst16", 2, 0x5c, 2, 2, CLR_V_CHG_NZ }, { "std", "d->()", "movtst16", 3, 0x7c, 3, 3, CLR_V_CHG_NZ }, { "std", "d->[]", "movtst16", 2, 0x6c, 2, 2, CLR_V_CHG_NZ }, { "sts", "sp->*", "movtst16", 2, 0x5f, 2, 2, CLR_V_CHG_NZ }, { "sts", "sp->()", "movtst16", 3, 0x7f, 3, 3, CLR_V_CHG_NZ }, { "sts", "sp->[]", "movtst16", 2, 0x6f, 2, 2, CLR_V_CHG_NZ }, { "stx", "x->*", "movtst16", 2, 0x5e, 2, 2, CLR_V_CHG_NZ }, { "stx", "x->()", "movtst16", 3, 0x7e, 3, 3, CLR_V_CHG_NZ }, { "stx", "x->[]", "movtst16", 2, 0x6e, 2, 2, CLR_V_CHG_NZ }, { "sty", "y->*", "movtst16", 2, 0x5d, 2, 2, CLR_V_CHG_NZ }, { "sty", "y->()", "movtst16", 3, 0x7d, 3, 3, CLR_V_CHG_NZ }, { "sty", "y->[]", "movtst16", 2, 0x6d, 2, 2, CLR_V_CHG_NZ }, { "suba", "#,a->a", "sub8", 2, 0x80, 1, 1, CHG_NZVC }, { "suba", "*,a->a", "sub8", 2, 0x90, 3, 3, CHG_NZVC }, { "suba", "(),a->a", "sub8", 3, 0xb0, 3, 3, CHG_NZVC }, { "suba", "[],a->a", "sub8", 2, 0xa0, 3, 3, CHG_NZVC }, { "subb", "#,b->b", "sub8", 2, 0xc0, 1, 1, CHG_NZVC }, { "subb", "*,b->b", "sub8", 2, 0xd0, 3, 3, CHG_NZVC }, { "subb", "(),b->b", "sub8", 3, 0xf0, 3, 3, CHG_NZVC }, { "subb", "[],b->b", "sub8", 2, 0xe0, 3, 3, CHG_NZVC }, { "subd", "#,d->d", "sub16", 3, 0x83, 2, 2, CHG_NZVC }, { "subd", "*,d->d", "sub16", 2, 0x93, 3, 3, CHG_NZVC }, { "subd", "(),d->d", "sub16", 3, 0xb3, 3, 3, CHG_NZVC }, { "subd", "[],d->d", "sub16", 2, 0xa3, 3, 3, CHG_NZVC }, { "swi", 0, 0, 1, 0x3f, 9, 9, CHG_NONE }, { "tst", "()", "tst8", 3, 0xf7, 3, 3, CLR_VC_CHG_NZ }, { "tst", "[]", "tst8", 2, 0xe7, 3, 3, CLR_VC_CHG_NZ }, { "tsta", "a", "tst8", 1, 0x97, 1, 1, CLR_VC_CHG_NZ }, { "tstb", "b", "tst8", 1, 0xd7, 1, 1, CLR_VC_CHG_NZ }, { "wai", 0, 0, 1, 0x3e, 8, _M, CHG_NONE } }; struct m6811_opcode_def m6812_page2_opcodes[] = { { "cba", "b,a", "sub8", 2, 0x17, 2, 2, CHG_NZVC }, /* After 'daa', the Z flag is undefined. Mark it as changed. 
*/ { "daa", 0, "daa8", 2, 0x07, 3, 3, CHG_NZVC }, { "edivs", 0, 0, 2, 0x14, 12, 12, CHG_NZVC }, { "emacs", 0, 0, 2, 0x12, 13, 13, CHG_NZVC }, { "emaxd", "[],d->d", "max16", 3, 0x1a, 4, 4, CHG_NZVC }, { "emaxm", "[],d->[]", "max16", 3, 0x1e, 4, 4, CHG_NZVC }, { "emind", "[],d->d", "min16", 3, 0x1b, 4, 4, CHG_NZVC }, { "eminm", "[],d->[]", "min16", 3, 0x1f, 4, 4, CHG_NZVC }, { "emuls", 0, 0, 2, 0x13, 3, 3, CHG_NZC }, { "etbl", "[]", "tbl16", 3, 0x3f, 10, 10, CHG_NZC }, { "fdiv", "x,d->x", "fdiv16", 2, 0x11, 12, 12, CHG_ZVC }, { "idiv", "x,d->x", "idiv16", 2, 0x10, 12, 12, CLR_V_CHG_ZC }, { "idivs", 0, 0, 2, 0x15, 12, 12, CHG_NZVC }, { "lbcc", "R", "bcc", 4, 0x24, 3, 4, CHG_NONE }, { "lbcs", "R", "bcs", 4, 0x25, 3, 4, CHG_NONE }, { "lbeq", "R", "beq", 4, 0x27, 3, 4, CHG_NONE }, { "lbge", "R", "bge", 4, 0x2c, 3, 4, CHG_NONE }, { "lbgt", "R", "bgt", 4, 0x2e, 3, 4, CHG_NONE }, { "lbhi", "R", "bhi", 4, 0x22, 3, 4, CHG_NONE }, { "lble", "R", "ble", 4, 0x2f, 3, 4, CHG_NONE }, { "lbls", "R", "bls", 4, 0x23, 3, 4, CHG_NONE }, { "lblt", "R", "blt", 4, 0x2d, 3, 4, CHG_NONE }, { "lbmi", "R", "bmi", 4, 0x2b, 3, 4, CHG_NONE }, { "lbne", "R", "bne", 4, 0x26, 3, 4, CHG_NONE }, { "lbpl", "R", "bpl", 4, 0x2a, 3, 4, CHG_NONE }, { "lbra", "R", "bra", 4, 0x20, 4, 4, CHG_NONE }, { "lbrn", "R", "nop", 4, 0x21, 3, 3, CHG_NONE }, { "lbvc", "R", "bvc", 4, 0x28, 3, 4, CHG_NONE }, { "lbvs", "R", "bvs", 4, 0x29, 3, 4, CHG_NONE }, { "maxa", "[],a->a", "max8", 3, 0x18, 4, 4, CHG_NZVC }, { "maxm", "[],a->[]", "max8", 3, 0x1c, 4, 4, CHG_NZVC }, { "mina", "[],a->a", "min8", 3, 0x19, 4, 4, CHG_NZVC }, { "minm", "[],a->[]", "min8", 3, 0x1d, 4, 4, CHG_NZVC }, { "movb", 0, "move8", 5, 0x0b, 4, 4, CHG_NONE }, { "movb", 0, "move8", 4, 0x08, 4, 4, CHG_NONE }, { "movb", 0, "move8", 6, 0x0c, 6, 6, CHG_NONE }, { "movb", 0, "move8", 5, 0x09, 5, 5, CHG_NONE }, { "movb", 0, "move8", 5, 0x0d, 5, 5, CHG_NONE }, { "movb", 0, "move8", 4, 0x0a, 5, 5, CHG_NONE }, { "movw", 0, "move16", 6, 0x03, 5, 5, CHG_NONE }, { "movw", 0, "move16", 5, 0x00, 4, 4, CHG_NONE }, { "movw", 0, "move16", 6, 0x04, 6, 6, CHG_NONE }, { "movw", 0, "move16", 5, 0x01, 5, 5, CHG_NONE }, { "movw", 0, "move16", 5, 0x05, 5, 5, CHG_NONE }, { "movw", 0, "move16", 4, 0x02, 5, 5, CHG_NONE }, { "rev", 0, 0, 2, 0x3a, _M, _M, CHG_HNZVC }, { "revw", 0, 0, 2, 0x3b, _M, _M, CHG_HNZVC }, { "sba", "b,a->a", "sub8", 2, 0x16, 2, 2, CHG_NZVC }, { "stop", 0, 0, 2, 0x3e, 2, 9, CHG_NONE }, { "tab", "a->b", "movtst8", 2, 0x0e, 2, 2, CLR_V_CHG_NZ }, { "tba", "b->a", "movtst8", 2, 0x0f, 2, 2, CLR_V_CHG_NZ }, { "wav", 0, 0, 2, 0x3c, 8, _M, SET_Z_CHG_HNVC } }; void fatal_error (const struct m6811_opcode_def*, const char*, ...); void print (FILE*, int, const char*,...); int gen_fetch_operands (FILE*, int, const struct m6811_opcode_def*, const char*); void gen_save_result (FILE*, int, const struct m6811_opcode_def*, int, const char*); const struct m6811_opcode_pattern* find_opcode_pattern (const struct m6811_opcode_def*); void gen_interp (FILE*, int, const struct m6811_opcode_def*); void gen_interpreter_for_table (FILE*, int, const struct m6811_opcode_def*, int, const char*); void gen_interpreter (FILE*); static int indent_level = 2; static int current_insn_size = 0; /* Fatal error message and exit. This method is called when an inconsistency is detected in the generation table. */ void fatal_error (const struct m6811_opcode_def *opcode, const char *msg, ...) 
{ va_list argp; fprintf (stderr, "Fatal error: "); va_start (argp, msg); vfprintf (stderr, msg, argp); va_end (argp); fprintf (stderr, "\n"); if (opcode) { fprintf (stderr, "Opcode: 0x%02x %s %s\n", opcode->insn_code, opcode->name ? opcode->name : "(null)", opcode->operands ? opcode->operands : "(null)"); } exit (1); } /* Format and pretty print for the code generation. (printf like format). */ void print (FILE *fp, int col, const char *msg, ...) { va_list argp; char buf[1024]; int cur_col = -1; int i; /* Format in a buffer. */ va_start (argp, msg); vsprintf (buf, msg, argp); va_end (argp); /* Basic pretty print: - Every line is indented at column 'col', - Indentation is updated when '{' and '}' are found, - Indentation is incremented by the special character '@' (not displayed). - New lines inserted automatically after ';' */ for (i = 0; buf[i]; i++) { if (buf[i] == '{') col += indent_level; else if (buf[i] == '}') col -= indent_level; else if (buf[i] == '@') { col += indent_level; continue; } if (cur_col == -1 && buf[i] != ' ' && buf[i] != '\t' && buf[i] != '\n') { cur_col = 0; while (cur_col < col) { fputc (' ', fp); cur_col++; } } if (buf[i] == '}') col -= indent_level; else if (buf[i] == '{') col += indent_level; else if (buf[i] == '\n') cur_col = -1; if (cur_col != -1 || buf[i] == '\n') fputc (buf[i], fp); if (buf[i] == ';') { fputc ('\n', fp); cur_col = -1; } } } /* Generate the code to obtain the operands before execution of the instruction. Operands are copied in local variables. This allows to have the same instruction pattern and different operand formats. There is a maximum of 3 variables: 8-bits 16-bits 1st operand: src8 src16 2nd operand: dst8 dst16 alt operand: addr addr The operand string is interpreted as follows: a Copy A register in the local 8-bits variable. b " B " ccr " ccr " d " D " " " 16-bits variable. x " X " y " Y " sp " SP " pc " PC " * 68HC11 page0 memory pointer. Get 8-bits page0 offset from program, set up 'addr' local variable to refer to the location in page0. Copy the 8/16-bits value pointed to by 'addr' in a 8/16-bits variable. (x) 68HC11 indirect access with X register. Get 8-bits unsigned offset from program, set up 'addr' = X + offset. Copy the 8/16-bits value pointed to by 'addr' in a 8/16-bits variable. (y) Same as (x) with Y register. () 68HC11 extended address mode (global variable). Get 16-bits address from program and set 'addr'. Copy the 8/16-bits value pointed to by 'addr' in a 8/16-bits variable. [] 68HC12 indexed addressing mode (sp) Pop Pop a 8/16-bits value from stack and set in a 8/16-bits variable. r Relative branch Get 8-bits relative branch, compute absolute address and set 'addr' # 68HC11 immediate value Get a 8/16-bits value from program and set a 8/16-bits variable. &(x) &(y) &() Similar to (x), (y) and () except that we don't read the value pointed to by 'addr' (ie, only 'addr' is setup). Used by jmp/jsr. &[] Similar to [] but don't read the value pointed to by the address. , Operand separator. - End of input operands. Example: (x),a->a addr = x + (uint16) (fetch8 (proc)); src8 = a *,#,r addr = (uint16) (fetch8 (proc)) <- Temporary 'addr' src8 = read_mem8 (proc, addr) dst8 = fetch8 (proc) addr = fetch_relbranch (proc) <- Final 'addr' Returns 1 if the 'addr' operand is set, 0 otherwise. 
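   A further sketch (same conventions as the examples above, not emitted
   verbatim): for '#,b->b' with 8-bit operands this function generates

      src8 = cpu_fetch8 (proc)          <- '#'
      dst8 = cpu_get_b (proc)           <- 'b'

   and returns 0; the trailing '->b' is handled later by gen_save_result().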
*/ int gen_fetch_operands (FILE *fp, int col, const struct m6811_opcode_def *opcode, const char *operand_size) { static char *vars[2] = { "src", "dst" }; char c; int addr_set = 0; int cur_var = 0; const char *operands = opcode->operands; if (operands == 0) operands = ""; while ((c = *operands++) != 0) { switch (c) { case 'a': if (cur_var >= 2) fatal_error (opcode, "Too many locals"); print (fp, col, "%s8 = cpu_get_a (proc);", vars[cur_var]); break; case 'b': if (cur_var >= 2) fatal_error (opcode, "Too many locals"); print (fp, col, "%s8 = cpu_get_b (proc);", vars[cur_var]); break; case 'd': if (cur_var >= 2) fatal_error (opcode, "Too many locals"); print (fp, col, "%s16 = cpu_get_d (proc);", vars[cur_var]); break; case 'x': if (cur_var >= 2) fatal_error (opcode, "Too many locals"); print (fp, col, "%s16 = cpu_get_x (proc);", vars[cur_var]); break; case 'y': if (cur_var >= 2) fatal_error (opcode, "Too many locals"); print (fp, col, "%s16 = cpu_get_y (proc);", vars[cur_var]); break; case '*': if (cur_var >= 2) fatal_error (opcode, "Too many locals"); if (addr_set) fatal_error (opcode, "Wrong use of '*', 'addr' already used"); addr_set = 1; current_insn_size += 1; print (fp, col, "addr = (uint16) cpu_fetch8 (proc);"); print (fp, col, "%s%s = memory_read%s (proc, addr);", vars[cur_var], operand_size, operand_size); break; case '&': if (addr_set) fatal_error (opcode, "Wrong use of '&', 'addr' already used"); addr_set = 1; if (strncmp (operands, "(x)", 3) == 0) { current_insn_size += 1; print (fp, col, "addr = cpu_get_x (proc) + (uint16) cpu_fetch8 (proc);"); operands += 3; } else if (strncmp (operands, "(y)", 3) == 0) { current_insn_size += 1; print (fp, col, "addr = cpu_get_y (proc) + (uint16) cpu_fetch8 (proc);"); operands += 3; } else if (strncmp (operands, "()", 2) == 0) { current_insn_size += 2; print (fp, col, "addr = cpu_fetch16 (proc);"); operands += 2; } else if (strncmp (operands, "[]", 2) == 0) { current_insn_size += 1; print (fp, col, "addr = cpu_get_indexed_operand_addr (proc, 0);"); operands += 2; } else { fatal_error (opcode, "Unknown operand"); } break; case '(': if (cur_var >= 2) fatal_error (opcode, "Too many locals"); if (addr_set) fatal_error (opcode, "Wrong use of '(', 'addr' already used"); if (strncmp (operands, "x)", 2) == 0) { addr_set = 1; current_insn_size += 1; print (fp, col, "addr = cpu_get_x (proc) + (uint16) cpu_fetch8 (proc);"); print (fp, col, "%s%s = memory_read%s (proc, addr);", vars[cur_var], operand_size, operand_size); operands += 2; } else if (strncmp (operands, "y)", 2) == 0) { addr_set = 1; current_insn_size += 1; print (fp, col, "addr = cpu_get_y (proc) + (uint16) cpu_fetch8 (proc);"); print (fp, col, "%s%s = memory_read%s (proc, addr);", vars[cur_var], operand_size, operand_size); operands += 2; } else if (strncmp (operands, ")", 1) == 0) { addr_set = 1; current_insn_size += 2; print (fp, col, "addr = cpu_fetch16 (proc);"); print (fp, col, "%s%s = memory_read%s (proc, addr);", vars[cur_var], operand_size, operand_size); operands++; } else if (strncmp (operands, "@)", 2) == 0) { current_insn_size += 2; print (fp, col, "addr = cpu_fetch16 (proc);"); print (fp, col, "%s%s = memory_read%s (proc, addr);", vars[cur_var], operand_size, operand_size); operands += 2; } else if (strncmp (operands, "sp)", 3) == 0) { print (fp, col, "%s%s = cpu_%s_pop_uint%s (proc);", vars[cur_var], operand_size, cpu_type == cpu6811 ? 
"m68hc11" : "m68hc12", operand_size); operands += 3; } else { fatal_error (opcode, "Unknown operand"); } break; case '[': if (cur_var >= 2) fatal_error (opcode, "Too many locals"); if (addr_set) fatal_error (opcode, "Wrong use of '[', 'addr' already used"); if (strncmp (operands, "]", 1) == 0) { addr_set = 1; current_insn_size += 1; print (fp, col, "addr = cpu_get_indexed_operand_addr (proc,0);"); print (fp, col, "%s%s = memory_read%s (proc, addr);", vars[cur_var], operand_size, operand_size); operands += 1; } else if (strncmp (operands, "]", 1) == 0) { current_insn_size += 1; print (fp, col, "%s%s = cpu_get_indexed_operand%s (proc,0);", vars[cur_var], operand_size, operand_size); operands += 1; } else { fatal_error (opcode, "Unknown operand"); } break; case '{': if (cur_var >= 2) fatal_error (opcode, "Too many locals"); if (addr_set) fatal_error (opcode, "Wrong use of '{', 'addr' already used"); if (strncmp (operands, "}", 1) == 0) { current_insn_size += 1; print (fp, col, "%s%s = cpu_get_indexed_operand%s (proc, 1);", vars[cur_var], operand_size, operand_size); operands += 1; } else { fatal_error (opcode, "Unknown operand"); } break; case 's': if (cur_var >= 2) fatal_error (opcode, "Too many locals"); if (strncmp (operands, "p", 1) == 0) { print (fp, col, "%s16 = cpu_get_sp (proc);", vars[cur_var]); operands++; } else { fatal_error (opcode, "Unknown operands"); } break; case 'c': if (strncmp (operands, "cr", 2) == 0) { print (fp, col, "%s8 = cpu_get_ccr (proc);", vars[cur_var]); operands += 2; } else { fatal_error (opcode, "Unknown operands"); } break; case 'r': if (addr_set && cur_var != 2) fatal_error (opcode, "Wrong use of 'r'"); addr_set = 1; current_insn_size += 1; print (fp, col, "addr = cpu_fetch_relbranch (proc);"); break; case 'R': if (addr_set && cur_var != 2) fatal_error (opcode, "Wrong use of 'R'"); addr_set = 1; current_insn_size += 2; print (fp, col, "addr = cpu_fetch_relbranch16 (proc);"); break; case '#': if (strcmp (operand_size, "8") == 0) { current_insn_size += 1; } else { current_insn_size += 2; } print (fp, col, "%s%s = cpu_fetch%s (proc);", vars[cur_var], operand_size, operand_size); break; case ',': cur_var ++; break; case '-': return addr_set; default: fatal_error (opcode, "Invalid operands"); break; } } return addr_set; } /* Generate the code to save the instruction result. The result is in a local variable: either 'dst8' or 'dst16'. There may be only one result. Instructions with 2 results (ie idiv and fdiv), take care of saving the first value. The operand string is the same as for 'gen_fetch_operands'. Everything before '->' is ignored. If the '->' is not found, it is assumed that there is nothing to save. After '->', the operand string is interpreted as follows: a Save 'dst8' in A register b " B " ccr " CCR " d " 'dst16' D " x " X " y " Y " sp " SP " * 68HC11 page0 memory pointer. (x) 68HC11 indirect access with X register. (y) Same as (x) with Y register. () 68HC11 extended address mode (global variable). For these modes, if they were used as an input operand, the 'addr' variable contains the address of memory where the result must be saved. If they were not used an input operand, 'addr' is computed (as in gen_fetch_operands()), and the result is saved. [] 68HC12 indexed indirect (sp) Push Push the 8/16-bits result on the stack. 
*/ void gen_save_result (FILE *fp, int col, const struct m6811_opcode_def *opcode, int addr_set, const char *operand_size) { char c; const char *operands = opcode->operands; /* When the result is saved, 'result_size' is a string which indicates the size of the saved result ("8" or "16"). This is a sanity check with 'operand_size' to detect inconsistencies in the different tables. */ const char *result_size = 0; if (operands == 0) operands = ""; operands = strchr (operands, '-'); if (operands == 0) return; operands++; if (*operands++ != '>') { fatal_error (opcode, "Invalid operand"); } c = *operands++; switch (c) { case 'a': result_size = "8"; print (fp, col, "cpu_set_a (proc, dst8);"); break; case 'b': result_size = "8"; print (fp, col, "cpu_set_b (proc, dst8);"); break; case 'd': result_size = "16"; print (fp, col, "cpu_set_d (proc, dst16);"); break; case 'x': result_size = "16"; print (fp, col, "cpu_set_x (proc, dst16);"); break; case 'y': result_size = "16"; print (fp, col, "cpu_set_y (proc, dst16);"); break; case '*': if (addr_set == 0) { current_insn_size += 1; print (fp, col, "addr = (uint16) cpu_fetch8 (proc);"); } result_size = operand_size; print (fp, col, "memory_write%s (proc, addr, dst%s);", operand_size, operand_size); break; case '(': if (strncmp (operands, "x)", 2) == 0) { if (addr_set == 0) { current_insn_size += 1; print (fp, col, "addr = cpu_get_x (proc) + cpu_fetch8 (proc);"); } print (fp, col, "memory_write%s (proc, addr, dst%s);", operand_size, operand_size); operands += 2; result_size = operand_size; } else if (strncmp (operands, "y)", 2) == 0) { if (addr_set == 0) { current_insn_size += 1; print (fp, col, "addr = cpu_get_y (proc) + cpu_fetch8 (proc);"); } print (fp, col, "memory_write%s (proc, addr, dst%s);", operand_size, operand_size); operands += 2; result_size = operand_size; } else if (strncmp (operands, ")", 1) == 0) { if (addr_set == 0) { current_insn_size += 2; print (fp, col, "addr = cpu_fetch16 (proc);"); } print (fp, col, "memory_write%s (proc, addr, dst%s);", operand_size, operand_size); operands++; result_size = operand_size; } else if (strncmp (operands, "sp)", 3) == 0) { print (fp, col, "cpu_%s_push_uint%s (proc, dst%s);", cpu_type == cpu6811 ? 
"m68hc11" : "m68hc12", operand_size, operand_size); operands += 3; result_size = operand_size; } else { fatal_error (opcode, "Invalid operand"); } break; case '[': if (strncmp (operands, "]", 1) == 0) { if (addr_set == 0) { current_insn_size += 1; print (fp, col, "addr = cpu_get_indexed_operand_addr (proc,0);"); } print (fp, col, "memory_write%s (proc, addr, dst%s);", operand_size, operand_size); operands++; result_size = operand_size; } else { fatal_error (opcode, "Invalid operand"); } break; case '{': if (strncmp (operands, "}", 1) == 0) { current_insn_size += 1; print (fp, col, "addr = cpu_get_indexed_operand_addr (proc, 1);"); print (fp, col, "memory_write%s (proc, addr, dst%s);", operand_size, operand_size); operands++; result_size = operand_size; } else { fatal_error (opcode, "Invalid operand"); } break; case 's': if (strncmp (operands, "p", 1) == 0) { print (fp, col, "cpu_set_sp (proc, dst16);"); operands++; result_size = "16"; } else { fatal_error (opcode, "Invalid operand"); } break; case 'c': if (strncmp (operands, "cr", 2) == 0) { print (fp, col, "cpu_set_ccr (proc, dst8);"); operands += 2; result_size = "8"; } else { fatal_error (opcode, "Invalid operand"); } break; default: fatal_error (opcode, "Invalid operand"); break; } if (*operands != 0) fatal_error (opcode, "Garbage at end of operand"); if (result_size == 0) fatal_error (opcode, "? No result seems to be saved"); if (strcmp (result_size, operand_size) != 0) fatal_error (opcode, "Result saved different than pattern size"); } /* Find the instruction pattern for a given instruction. */ const struct m6811_opcode_pattern* find_opcode_pattern (const struct m6811_opcode_def *opcode) { int i; const char *pattern = opcode->insn_pattern; if (pattern == 0) { pattern = opcode->name; } for (i = 0; i < TABLE_SIZE(m6811_opcode_patterns); i++) { if (strcmp (m6811_opcode_patterns[i].name, pattern) == 0) { return &m6811_opcode_patterns[i]; } } fatal_error (opcode, "Unknown instruction pattern"); return 0; } /* Generate the code for interpretation of instruction 'opcode'. */ void gen_interp (FILE *fp, int col, const struct m6811_opcode_def *opcode) { const char *operands = opcode->operands; int addr_set; const char *pattern = opcode->insn_pattern; const struct m6811_opcode_pattern *op; const char *operand_size; if (pattern == 0) { pattern = opcode->name; } /* Find out the size of the operands: 8 or 16-bits. */ if (strcmp(&pattern[strlen(pattern) - 1], "8") == 0) { operand_size = "8"; } else if (strcmp (&pattern[strlen(pattern) - 2], "16") == 0) { operand_size = "16"; } else { operand_size = ""; } if (operands == 0) operands = ""; /* Generate entry point for the instruction. */ print (fp, col, "case 0x%02x: /* %s %s */\n", opcode->insn_code, opcode->name, operands); col += indent_level; /* Generate the code to get the instruction operands. */ addr_set = gen_fetch_operands (fp, col, opcode, operand_size); /* Generate instruction interpretation. */ op = find_opcode_pattern (opcode); if (op->pattern) { print (fp, col, "%s;", op->pattern); } /* Generate the code to save the result. */ gen_save_result (fp, col, opcode, addr_set, operand_size); /* For some instructions, generate the code to update the flags. */ if (op && op->ccr_update) { print (fp, col, "%s;", op->ccr_update); } print (fp, col, "break;"); } /* Generate the interpretor for a given 68HC11 page set. 
*/ void gen_interpreter_for_table (FILE *fp, int col, const struct m6811_opcode_def *table, int size, const char *cycles_table_name) { int i; int init_size; init_size = table == m6811_page1_opcodes || table == m6812_page1_opcodes? 1 : 2; /* Get the opcode and dispatch directly. */ print (fp, col, "op = cpu_fetch8 (proc);"); print (fp, col, "cpu_add_cycles (proc, %s[op]);", cycles_table_name); print (fp, col, "switch (op)\n"); col += indent_level; print (fp, col, "{\n"); for (i = 0; i < size; i++) { /* The table contains duplicate entries (ie, instruction aliases). */ if (i > 0 && table[i].insn_code == table[i - 1].insn_code) continue; current_insn_size = init_size; gen_interp (fp, col, &table[i]); #if 0 if (current_insn_size != table[i].insn_size) { fatal_error (&table[i], "Insn size %ld inconsistent with %ld", current_insn_size, table[i].insn_size); } #endif } print (fp, col, "default:\n"); print (fp, col + indent_level, "cpu_special (proc, M6811_ILLEGAL);"); print (fp, col + indent_level, "break;"); print (fp, col, "}\n"); } /* Generate the table of instruction cycle. These tables are indexed by the opcode number to allow a fast cycle time computation. */ void gen_cycle_table (FILE *fp, const char *name, const struct m6811_opcode_def *table, int size) { int i; char cycles[256]; int page1; page1 = table == m6811_page1_opcodes; /* Build the cycles table. The table is indexed by the opcode. */ memset (cycles, 0, sizeof (cycles)); while (--size >= 0) { if (table->insn_min_cycles > table->insn_max_cycles) fatal_error (table, "Wrong insn cycles"); if (table->insn_max_cycles == _M) cycles[table->insn_code] = table->insn_min_cycles; else cycles[table->insn_code] = table->insn_max_cycles; table++; } /* Some check: for the page1 opcode, the cycle type of the page2/3/4 opcode must be 0. */ if (page1 && (cycles[M6811_OPCODE_PAGE2] != 0 || cycles[M6811_OPCODE_PAGE3] != 0 || cycles[M6811_OPCODE_PAGE4] != 0)) fatal_error (0, "Invalid cycle table"); /* Generates the cycles table. */ print (fp, 0, "static const unsigned char %s[256] = {\n", name); for (i = 0; i < 256; i++) { if ((i % 16) == 0) { print (fp, indent_level, "/* %3d */ ", i); } fprintf (fp, "%2d", cycles[i]); if (i != 255) fprintf (fp, ","); if ((i % 16) != 15) fprintf (fp, " "); else fprintf (fp, "\n"); } print (fp, 0, "};\n\n"); } #define USE_SRC8 1 #define USE_DST8 2 void gen_function_entry (FILE *fp, const char *name, int locals) { /* Generate interpretor entry point. */ print (fp, 0, "%s (proc)\n", name); print (fp, indent_level, "struct _sim_cpu* proc;"); print (fp, indent_level, "{\n"); /* Interpretor local variables. 
*/ print (fp, indent_level, "unsigned char op;"); print (fp, indent_level, "uint16 addr, src16, dst16;"); if (locals & USE_SRC8) print (fp, indent_level, "uint8 src8;\n"); if (locals & USE_DST8) print (fp, indent_level, "uint8 dst8;\n"); } void gen_function_close (FILE *fp) { print (fp, 0, "}\n"); } int cmp_opcode (void* e1, void* e2) { struct m6811_opcode_def* op1 = (struct m6811_opcode_def*) e1; struct m6811_opcode_def* op2 = (struct m6811_opcode_def*) e2; return (int) (op1->insn_code) - (int) (op2->insn_code); } void prepare_table (struct m6811_opcode_def* table, int size) { int i; qsort (table, size, sizeof (table[0]), cmp_opcode); for (i = 1; i < size; i++) { if (table[i].insn_code == table[i-1].insn_code) { fprintf (stderr, "Two insns with code 0x%02x\n", table[i].insn_code); } } } void gen_interpreter (FILE *fp) { int col = 0; prepare_table (m6811_page1_opcodes, TABLE_SIZE (m6811_page1_opcodes)); prepare_table (m6811_page2_opcodes, TABLE_SIZE (m6811_page2_opcodes)); prepare_table (m6811_page3_opcodes, TABLE_SIZE (m6811_page3_opcodes)); prepare_table (m6811_page4_opcodes, TABLE_SIZE (m6811_page4_opcodes)); prepare_table (m6812_page1_opcodes, TABLE_SIZE (m6812_page1_opcodes)); prepare_table (m6812_page2_opcodes, TABLE_SIZE (m6812_page2_opcodes)); /* Generate header of interpretor. */ print (fp, col, "/* File generated automatically by gencode. */\n"); print (fp, col, "#include \"sim-main.h\"\n\n"); if (cpu_type & cpu6811) { gen_cycle_table (fp, "cycles_page1", m6811_page1_opcodes, TABLE_SIZE (m6811_page1_opcodes)); gen_cycle_table (fp, "cycles_page2", m6811_page2_opcodes, TABLE_SIZE (m6811_page2_opcodes)); gen_cycle_table (fp, "cycles_page3", m6811_page3_opcodes, TABLE_SIZE (m6811_page3_opcodes)); gen_cycle_table (fp, "cycles_page4", m6811_page4_opcodes, TABLE_SIZE (m6811_page4_opcodes)); gen_function_entry (fp, "static void\ncpu_page3_interp", 0); gen_interpreter_for_table (fp, indent_level, m6811_page3_opcodes, TABLE_SIZE(m6811_page3_opcodes), "cycles_page3"); gen_function_close (fp); gen_function_entry (fp, "static void\ncpu_page4_interp", 0); gen_interpreter_for_table (fp, indent_level, m6811_page4_opcodes, TABLE_SIZE(m6811_page4_opcodes), "cycles_page4"); gen_function_close (fp); /* Generate the page 2, 3 and 4 handlers. */ gen_function_entry (fp, "static void\ncpu_page2_interp", USE_SRC8 | USE_DST8); gen_interpreter_for_table (fp, indent_level, m6811_page2_opcodes, TABLE_SIZE(m6811_page2_opcodes), "cycles_page2"); gen_function_close (fp); /* Generate the interpretor entry point. */ gen_function_entry (fp, "void\ncpu_interp_m6811", USE_SRC8 | USE_DST8); gen_interpreter_for_table (fp, indent_level, m6811_page1_opcodes, TABLE_SIZE(m6811_page1_opcodes), "cycles_page1"); gen_function_close (fp); } else { gen_cycle_table (fp, "cycles_page1", m6812_page1_opcodes, TABLE_SIZE (m6812_page1_opcodes)); gen_cycle_table (fp, "cycles_page2", m6812_page2_opcodes, TABLE_SIZE (m6812_page2_opcodes)); gen_function_entry (fp, "static void\ncpu_page2_interp", USE_SRC8 | USE_DST8); gen_interpreter_for_table (fp, indent_level, m6812_page2_opcodes, TABLE_SIZE(m6812_page2_opcodes), "cycles_page2"); gen_function_close (fp); /* Generate the interpretor entry point. 
*/ gen_function_entry (fp, "void\ncpu_interp_m6812", USE_SRC8 | USE_DST8); gen_interpreter_for_table (fp, indent_level, m6812_page1_opcodes, TABLE_SIZE(m6812_page1_opcodes), "cycles_page1"); gen_function_close (fp); } } void usage (char* prog) { fprintf (stderr, "Usage: %s {-m6811|-m6812}\n", prog); exit (2); } int main (int argc, char *argv[]) { int i; for (i = 1; i < argc; i++) { if (strcmp (argv[i], "-m6811") == 0) cpu_type = cpu6811; else if (strcmp (argv[i], "-m6812") == 0) cpu_type = cpu6812; else { usage (argv[0]); } } if (cpu_type == 0) usage (argv[0]); gen_interpreter (stdout); if (fclose (stdout) != 0) { fprintf (stderr, "Error while generating the interpreter: %d\n", errno); return 1; } return 0; }
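/* Illustrative sketch, not part of the original generator above: a tiny,
   self-contained model of the kind of interpreter that gen_cycle_table() and
   gen_interpreter_for_table() emit -- a per-opcode cycle table plus a
   fetch/dispatch switch whose cases fetch operands, apply the opcode pattern
   and save the result.  Every name, opcode and cycle count below is invented
   for the example; the real generated code calls the simulator's cpu_*
   helpers instead.  */
#include <stdint.h>
#include <stdio.h>

struct demo_cpu { uint8_t a; uint16_t pc; uint32_t cycles; const uint8_t *mem; };

static uint8_t demo_fetch8 (struct demo_cpu *proc) { return proc->mem[proc->pc++]; }

/* One cycle count per opcode, like the tables gen_cycle_table() prints.  */
static const uint8_t demo_cycles[256] = { [0x86] = 2 };

static void demo_interp_step (struct demo_cpu *proc)
{
  uint8_t op = demo_fetch8 (proc);

  proc->cycles += demo_cycles[op];
  switch (op)
    {
    case 0x86:                        /* "load A immediate"-style example */
      proc->a = demo_fetch8 (proc);   /* fetch operand, apply pattern, save result */
      break;
    default:                          /* the generated code calls cpu_special () here */
      break;
    }
}

int main (void)
{
  static const uint8_t prog[] = { 0x86, 0x2a };
  struct demo_cpu cpu = { 0, 0, 0, prog };

  demo_interp_step (&cpu);
  printf ("A=0x%02x cycles=%u\n", cpu.a, (unsigned) cpu.cycles);
  return 0;
}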
478914.c
/* BLIS An object-based framework for developing high-performance BLAS-like libraries. Copyright (C) 2014, The University of Texas at Austin Copyright (C) 2018, Advanced Micro Devices, Inc. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name(s) of the copyright holder(s) nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "blis.h" #ifdef BLIS_ENABLE_OPENMP // Define a dummy function bli_l3_thread_entry(), which is needed in the // pthreads version, so that when building Windows DLLs (with OpenMP enabled // or no multithreading) we don't risk having an unresolved symbol. void* bli_l3_thread_entry( void* data_void ) { return NULL; } //#define PRINT_THRINFO void bli_l3_thread_decorator ( l3int_t func, opid_t family, const obj_t* alpha, const obj_t* a, const obj_t* b, const obj_t* beta, const obj_t* c, const cntx_t* cntx, rntm_t* rntm, cntl_t* cntl ) { // Query the total number of threads from the rntm_t object. const dim_t n_threads = bli_rntm_num_threads( rntm ); #ifdef PRINT_THRINFO err_t r_val; thrinfo_t** threads = bli_malloc_intl( n_threads * sizeof( thrinfo_t* ), &r_val ); #endif // NOTE: The sba was initialized in bli_init(). // Check out an array_t from the small block allocator. This is done // with an internal lock to ensure only one application thread accesses // the sba at a time. bli_sba_checkout_array() will also automatically // resize the array_t, if necessary. array_t* array = bli_sba_checkout_array( n_threads ); // Access the pool_t* for thread 0 and embed it into the rntm. We do // this up-front only so that we have the rntm_t.sba_pool field // initialized and ready for the global communicator creation below. bli_sba_rntm_set_pool( 0, array, rntm ); // Set the packing block allocator field of the rntm. This will be // inherited by all of the child threads when they make local copies of // the rntm below. bli_pba_rntm_set_pba( rntm ); // Allocate a global communicator for the root thrinfo_t structures. thrcomm_t* gl_comm = bli_thrcomm_create( rntm, n_threads ); _Pragma( "omp parallel num_threads(n_threads)" ) { // Create a thread-local copy of the master thread's rntm_t. 
This is // necessary since we want each thread to be able to track its own // small block pool_t as it executes down the function stack. rntm_t rntm_l = *rntm; rntm_t* rntm_p = &rntm_l; // Query the thread's id from OpenMP. const dim_t tid = omp_get_thread_num(); // Check for a somewhat obscure OpenMP thread-mistmatch issue. bli_l3_thread_decorator_thread_check( n_threads, tid, gl_comm, rntm_p ); // Use the thread id to access the appropriate pool_t* within the // array_t, and use it to set the sba_pool field within the rntm_t. // If the pool_t* element within the array_t is NULL, it will first // be allocated/initialized. bli_sba_rntm_set_pool( tid, array, rntm_p ); obj_t a_t, b_t, c_t; cntl_t* cntl_use; thrinfo_t* thread; // Alias thread-local copies of A, B, and C. These will be the objects // we pass down the algorithmic function stack. Making thread-local // aliases is highly recommended in case a thread needs to change any // of the properties of an object without affecting other threads' // objects. bli_obj_alias_to( a, &a_t ); bli_obj_alias_to( b, &b_t ); bli_obj_alias_to( c, &c_t ); // This is part of a hack to support mixed domain in bli_gemm_front(). // Sometimes we need to specify a non-standard schema for A and B, and // we decided to transmit them via the schema field in the obj_t's // rather than pass them in as function parameters. Once the values // have been read, we immediately reset them back to their expected // values for unpacked objects. pack_t schema_a = bli_obj_pack_schema( &a_t ); pack_t schema_b = bli_obj_pack_schema( &b_t ); bli_obj_set_pack_schema( BLIS_NOT_PACKED, &a_t ); bli_obj_set_pack_schema( BLIS_NOT_PACKED, &b_t ); // Create a default control tree for the operation, if needed. bli_l3_cntl_create_if( family, schema_a, schema_b, &a_t, &b_t, &c_t, rntm_p, cntl, &cntl_use ); // Create the root node of the current thread's thrinfo_t structure. bli_l3_thrinfo_create_root( tid, gl_comm, rntm_p, cntl_use, &thread ); #if 1 func ( alpha, &a_t, &b_t, beta, &c_t, cntx, rntm_p, cntl_use, thread ); #else bli_thrinfo_grow_tree ( rntm_p, cntl_use, thread ); #endif // Free the thread's local control tree. bli_l3_cntl_free( rntm_p, cntl_use, thread ); #ifdef PRINT_THRINFO threads[tid] = thread; #else // Free the current thread's thrinfo_t structure. bli_l3_thrinfo_free( rntm_p, thread ); #endif } // We shouldn't free the global communicator since it was already freed // by the global communicator's chief thread in bli_l3_thrinfo_free() // (called above). #ifdef PRINT_THRINFO if ( family != BLIS_TRSM ) bli_l3_thrinfo_print_gemm_paths( threads ); else bli_l3_thrinfo_print_trsm_paths( threads ); exit(1); #endif // Check the array_t back into the small block allocator. Similar to the // check-out, this is done using a lock embedded within the sba to ensure // mutual exclusion. bli_sba_checkin_array( array ); } // ----------------------------------------------------------------------------- void bli_l3_thread_decorator_thread_check ( dim_t n_threads, dim_t tid, thrcomm_t* gl_comm, rntm_t* rntm ) { dim_t n_threads_real = omp_get_num_threads(); // Check if the number of OpenMP threads created within this parallel // region is different from the number of threads that were requested // of BLIS. 
This inequality may trigger when, for example, the // following conditions are satisfied: // - an application is executing an OpenMP parallel region in which // BLIS is invoked, // - BLIS is configured for multithreading via OpenMP, // - OMP_NUM_THREADS = t > 1, // - the number of threads requested of BLIS (regardless of method) // is p <= t, // - OpenMP nesting is disabled. // In this situation, the application spawns t threads. Each application // thread calls gemm (for example). Each gemm will attempt to spawn p // threads via OpenMP. However, since nesting is disabled, the OpenMP // implementation finds that t >= p threads are already spawned, and // thus it doesn't spawn *any* additional threads for each gemm. if ( n_threads_real != n_threads ) { // If the number of threads active in the current region is not // equal to the number requested of BLIS, we then only continue // if the number of threads in the current region is 1. If, for // example, BLIS requested 4 threads but only got 3, then we // abort(). //if ( tid == 0 ) //{ if ( n_threads_real != 1 ) { bli_print_msg( "A different number of threads was " "created than was requested.", __FILE__, __LINE__ ); bli_abort(); } //n_threads = 1; // not needed since it has no effect? bli_thrcomm_init( 1, gl_comm ); bli_rntm_set_num_threads_only( 1, rntm ); bli_rntm_set_ways_only( 1, 1, 1, 1, 1, rntm ); //} // Synchronize all threads and continue. _Pragma( "omp barrier" ) } } #endif
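/*
 * Illustrative sketch, not part of the original BLIS file above: a minimal,
 * self-contained demonstration of the thread-count check performed by
 * bli_l3_thread_decorator_thread_check() -- compare the number of threads
 * actually granted inside the parallel region with the number requested,
 * and react when they differ.  Compile with OpenMP enabled (e.g. -fopenmp);
 * the variable names and the requested count are invented, and this sketch
 * only reports the mismatch instead of aborting.
 */
#include <omp.h>
#include <stdio.h>

int main( void )
{
    const int n_requested = 4;

    #pragma omp parallel num_threads(n_requested)
    {
        const int n_real = omp_get_num_threads();
        const int tid    = omp_get_thread_num();

        if ( n_real != n_requested && tid == 0 )
        {
            /* BLIS aborts unless it was granted exactly one thread;
               here we just report what happened. */
            printf( "requested %d threads, got %d\n", n_requested, n_real );
        }
    }
    return 0;
}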
350959.c
/*- * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD: releng/9.3/sys/dev/sfxge/common/efx_filter.c 228100 2011-11-28 20:28:23Z philip $"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" #if EFSYS_OPT_FILTER /* "Fudge factors" - difference between programmed value and actual depth. * Due to pipelined implementation we need to program H/W with a value that * is larger than the hop limit we want. */ #define FILTER_CTL_SRCH_FUDGE_WILD 3 #define FILTER_CTL_SRCH_FUDGE_FULL 1 /* Hard maximum hop limit. Hardware will time-out beyond 200-something. * We also need to avoid infinite loops in efx_filter_search() when the * table is full. */ #define FILTER_CTL_SRCH_MAX 200 /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit * key derived from the n-tuple. */ static uint16_t efx_filter_tbl_hash( __in uint32_t key) { uint16_t tmp; /* First 16 rounds */ tmp = 0x1fff ^ (uint16_t)(key >> 16); tmp = tmp ^ tmp >> 3 ^ tmp >> 6; tmp = tmp ^ tmp >> 9; /* Last 16 rounds */ tmp = tmp ^ tmp << 13 ^ (uint16_t)(key & 0xffff); tmp = tmp ^ tmp >> 3 ^ tmp >> 6; tmp = tmp ^ tmp >> 9; return (tmp); } /* To allow for hash collisions, filter search continues at these * increments from the first possible entry selected by the hash. 
*/ static uint16_t efx_filter_tbl_increment( __in uint32_t key) { return ((uint16_t)(key * 2 - 1)); } static __checkReturn boolean_t efx_filter_test_used( __in efx_filter_tbl_t *eftp, __in unsigned int index) { EFSYS_ASSERT3P(eftp->eft_bitmap, !=, NULL); return ((eftp->eft_bitmap[index / 32] & (1 << (index % 32))) != 0); } static void efx_filter_set_used( __in efx_filter_tbl_t *eftp, __in unsigned int index) { EFSYS_ASSERT3P(eftp->eft_bitmap, !=, NULL); eftp->eft_bitmap[index / 32] |= (1 << (index % 32)); ++eftp->eft_used; } static void efx_filter_clear_used( __in efx_filter_tbl_t *eftp, __in unsigned int index) { EFSYS_ASSERT3P(eftp->eft_bitmap, !=, NULL); eftp->eft_bitmap[index / 32] &= ~(1 << (index % 32)); --eftp->eft_used; EFSYS_ASSERT3U(eftp->eft_used, >=, 0); } static efx_filter_tbl_id_t efx_filter_tbl_id( __in efx_filter_type_t type) { efx_filter_tbl_id_t tbl_id; switch (type) { case EFX_FILTER_RX_TCP_FULL: case EFX_FILTER_RX_TCP_WILD: case EFX_FILTER_RX_UDP_FULL: case EFX_FILTER_RX_UDP_WILD: tbl_id = EFX_FILTER_TBL_RX_IP; break; #if EFSYS_OPT_SIENA case EFX_FILTER_RX_MAC_FULL: case EFX_FILTER_RX_MAC_WILD: tbl_id = EFX_FILTER_TBL_RX_MAC; break; case EFX_FILTER_TX_TCP_FULL: case EFX_FILTER_TX_TCP_WILD: case EFX_FILTER_TX_UDP_FULL: case EFX_FILTER_TX_UDP_WILD: tbl_id = EFX_FILTER_TBL_TX_IP; break; case EFX_FILTER_TX_MAC_FULL: case EFX_FILTER_TX_MAC_WILD: tbl_id = EFX_FILTER_TBL_RX_MAC; break; #endif /* EFSYS_OPT_SIENA */ default: EFSYS_ASSERT(B_FALSE); break; } return (tbl_id); } static void efx_filter_reset_search_depth( __inout efx_filter_t *efp, __in efx_filter_tbl_id_t tbl_id) { switch (tbl_id) { case EFX_FILTER_TBL_RX_IP: efp->ef_depth[EFX_FILTER_RX_TCP_FULL] = 0; efp->ef_depth[EFX_FILTER_RX_TCP_WILD] = 0; efp->ef_depth[EFX_FILTER_RX_UDP_FULL] = 0; efp->ef_depth[EFX_FILTER_RX_UDP_WILD] = 0; break; #if EFSYS_OPT_SIENA case EFX_FILTER_TBL_RX_MAC: efp->ef_depth[EFX_FILTER_RX_MAC_FULL] = 0; efp->ef_depth[EFX_FILTER_RX_MAC_WILD] = 0; break; case EFX_FILTER_TBL_TX_IP: efp->ef_depth[EFX_FILTER_TX_TCP_FULL] = 0; efp->ef_depth[EFX_FILTER_TX_TCP_WILD] = 0; efp->ef_depth[EFX_FILTER_TX_UDP_FULL] = 0; efp->ef_depth[EFX_FILTER_TX_UDP_WILD] = 0; break; case EFX_FILTER_TBL_TX_MAC: efp->ef_depth[EFX_FILTER_TX_MAC_FULL] = 0; efp->ef_depth[EFX_FILTER_TX_MAC_WILD] = 0; break; #endif /* EFSYS_OPT_SIENA */ default: EFSYS_ASSERT(B_FALSE); break; } } static void efx_filter_push_rx_limits( __in efx_nic_t *enp) { efx_filter_t *efp = &enp->en_filter; efx_oword_t oword; EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_FULL_SRCH_LIMIT, efp->ef_depth[EFX_FILTER_RX_TCP_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_WILD_SRCH_LIMIT, efp->ef_depth[EFX_FILTER_RX_TCP_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_FULL_SRCH_LIMIT, efp->ef_depth[EFX_FILTER_RX_UDP_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_WILD_SRCH_LIMIT, efp->ef_depth[EFX_FILTER_RX_UDP_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); #if EFSYS_OPT_SIENA if (efp->ef_tbl[EFX_FILTER_TBL_RX_MAC].eft_size) { EFX_SET_OWORD_FIELD(oword, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, efp->ef_depth[EFX_FILTER_RX_MAC_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(oword, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, efp->ef_depth[EFX_FILTER_RX_MAC_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); } #endif /* EFSYS_OPT_SIENA */ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); } static void efx_filter_push_tx_limits( __in efx_nic_t *enp) { efx_filter_t *efp = 
&enp->en_filter; efx_oword_t oword; if (efp->ef_tbl[EFX_FILTER_TBL_TX_IP].eft_size == 0) return; EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE, efp->ef_depth[EFX_FILTER_TX_TCP_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(oword, FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE, efp->ef_depth[EFX_FILTER_TX_TCP_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); EFX_SET_OWORD_FIELD(oword, FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE, efp->ef_depth[EFX_FILTER_TX_UDP_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(oword, FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE, efp->ef_depth[EFX_FILTER_TX_UDP_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword); } /* Build a filter entry and return its n-tuple key. */ static __checkReturn uint32_t efx_filter_build( __out efx_oword_t *filter, __in efx_filter_spec_t *spec) { uint32_t dword3; uint32_t key; uint8_t type = spec->efs_type; uint8_t flags = spec->efs_flags; switch (efx_filter_tbl_id(type)) { case EFX_FILTER_TBL_RX_IP: { boolean_t is_udp = (type == EFX_FILTER_RX_UDP_FULL || type == EFX_FILTER_RX_UDP_WILD); EFX_POPULATE_OWORD_7(*filter, FRF_BZ_RSS_EN, (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0, FRF_BZ_SCATTER_EN, (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0, FRF_AZ_TCP_UDP, is_udp, FRF_AZ_RXQ_ID, spec->efs_dmaq_id, EFX_DWORD_2, spec->efs_dword[2], EFX_DWORD_1, spec->efs_dword[1], EFX_DWORD_0, spec->efs_dword[0]); dword3 = is_udp; break; } #if EFSYS_OPT_SIENA case EFX_FILTER_TBL_RX_MAC: { boolean_t is_wild = (type == EFX_FILTER_RX_MAC_WILD); EFX_POPULATE_OWORD_8(*filter, FRF_CZ_RMFT_RSS_EN, (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0, FRF_CZ_RMFT_SCATTER_EN, (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0, FRF_CZ_RMFT_IP_OVERRIDE, (flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) ? 
1 : 0, FRF_CZ_RMFT_RXQ_ID, spec->efs_dmaq_id, FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, FRF_CZ_RMFT_DEST_MAC_DW1, spec->efs_dword[2], FRF_CZ_RMFT_DEST_MAC_DW0, spec->efs_dword[1], FRF_CZ_RMFT_VLAN_ID, spec->efs_dword[0]); dword3 = is_wild; break; } #endif /* EFSYS_OPT_SIENA */ case EFX_FILTER_TBL_TX_IP: { boolean_t is_udp = (type == EFX_FILTER_TX_UDP_FULL || type == EFX_FILTER_TX_UDP_WILD); EFX_POPULATE_OWORD_5(*filter, FRF_CZ_TIFT_TCP_UDP, is_udp, FRF_CZ_TIFT_TXQ_ID, spec->efs_dmaq_id, EFX_DWORD_2, spec->efs_dword[2], EFX_DWORD_1, spec->efs_dword[1], EFX_DWORD_0, spec->efs_dword[0]); dword3 = is_udp | spec->efs_dmaq_id << 1; break; } #if EFSYS_OPT_SIENA case EFX_FILTER_TBL_TX_MAC: { boolean_t is_wild = (type == EFX_FILTER_TX_MAC_WILD); EFX_POPULATE_OWORD_5(*filter, FRF_CZ_TMFT_TXQ_ID, spec->efs_dmaq_id, FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, FRF_CZ_TMFT_SRC_MAC_DW1, spec->efs_dword[2], FRF_CZ_TMFT_SRC_MAC_DW0, spec->efs_dword[1], FRF_CZ_TMFT_VLAN_ID, spec->efs_dword[0]); dword3 = is_wild | spec->efs_dmaq_id << 1; break; } #endif /* EFSYS_OPT_SIENA */ default: EFSYS_ASSERT(B_FALSE); } key = spec->efs_dword[0] ^ spec->efs_dword[1] ^ spec->efs_dword[2] ^ dword3; return (key); } static __checkReturn int efx_filter_push_entry( __inout efx_nic_t *enp, __in efx_filter_type_t type, __in int index, __in efx_oword_t *eop) { int rc; switch (type) { case EFX_FILTER_RX_TCP_FULL: case EFX_FILTER_RX_TCP_WILD: case EFX_FILTER_RX_UDP_FULL: case EFX_FILTER_RX_UDP_WILD: EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_FILTER_TBL0, index, eop); break; #if EFSYS_OPT_SIENA case EFX_FILTER_RX_MAC_FULL: case EFX_FILTER_RX_MAC_WILD: EFX_BAR_TBL_WRITEO(enp, FR_CZ_RX_MAC_FILTER_TBL0, index, eop); break; case EFX_FILTER_TX_TCP_FULL: case EFX_FILTER_TX_TCP_WILD: case EFX_FILTER_TX_UDP_FULL: case EFX_FILTER_TX_UDP_WILD: EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_FILTER_TBL0, index, eop); break; case EFX_FILTER_TX_MAC_FULL: case EFX_FILTER_TX_MAC_WILD: EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_MAC_FILTER_TBL0, index, eop); break; #endif /* EFSYS_OPT_SIENA */ default: rc = ENOTSUP; goto fail1; } return (0); fail1: return (rc); } static __checkReturn boolean_t efx_filter_equal( __in const efx_filter_spec_t *left, __in const efx_filter_spec_t *right) { efx_filter_tbl_id_t tbl_id = efx_filter_tbl_id(left->efs_type); if (left->efs_type != right->efs_type) return (B_FALSE); if (memcmp(left->efs_dword, right->efs_dword, sizeof(left->efs_dword))) return (B_FALSE); if ((tbl_id == EFX_FILTER_TBL_TX_IP || tbl_id == EFX_FILTER_TBL_TX_MAC) && left->efs_dmaq_id != right->efs_dmaq_id) return (B_FALSE); return (B_TRUE); } static __checkReturn int efx_filter_search( __in efx_filter_tbl_t *eftp, __in efx_filter_spec_t *spec, __in uint32_t key, __in boolean_t for_insert, __out int *filter_index, __out int *depth_required) { unsigned hash, incr, filter_idx, depth; hash = efx_filter_tbl_hash(key); incr = efx_filter_tbl_increment(key); filter_idx = hash & (eftp->eft_size - 1); depth = 1; for (;;) { /* Return success if entry is used and matches this spec * or entry is unused and we are trying to insert. */ if (efx_filter_test_used(eftp, filter_idx) ? efx_filter_equal(spec, &eftp->eft_spec[filter_idx]) : for_insert) { *filter_index = filter_idx; *depth_required = depth; return (0); } /* Return failure if we reached the maximum search depth */ if (depth == FILTER_CTL_SRCH_MAX) return for_insert ? 
EBUSY : ENOENT; filter_idx = (filter_idx + incr) & (eftp->eft_size - 1); ++depth; } } __checkReturn int efx_filter_insert_filter( __in efx_nic_t *enp, __in efx_filter_spec_t *spec, __in boolean_t replace) { efx_filter_t *efp = &enp->en_filter; efx_filter_tbl_id_t tbl_id = efx_filter_tbl_id(spec->efs_type); efx_filter_tbl_t *eftp = &efp->ef_tbl[tbl_id]; efx_filter_spec_t *saved_spec; efx_oword_t filter; int filter_idx; unsigned int depth; int state; uint32_t key; int rc; if (eftp->eft_size == 0) return (EINVAL); key = efx_filter_build(&filter, spec); EFSYS_LOCK(enp->en_eslp, state); rc = efx_filter_search(eftp, spec, key, B_TRUE, &filter_idx, &depth); if (rc != 0) goto done; EFSYS_ASSERT3U(filter_idx, <, eftp->eft_size); saved_spec = &eftp->eft_spec[filter_idx]; if (efx_filter_test_used(eftp, filter_idx)) { if (replace == B_FALSE) { rc = EEXIST; goto done; } } efx_filter_set_used(eftp, filter_idx); *saved_spec = *spec; if (efp->ef_depth[spec->efs_type] < depth) { efp->ef_depth[spec->efs_type] = depth; if (tbl_id == EFX_FILTER_TBL_TX_IP || tbl_id == EFX_FILTER_TBL_TX_MAC) efx_filter_push_tx_limits(enp); else efx_filter_push_rx_limits(enp); } efx_filter_push_entry(enp, spec->efs_type, filter_idx, &filter); done: EFSYS_UNLOCK(enp->en_eslp, state); return (rc); } static void efx_filter_clear_entry( __in efx_nic_t *enp, __in efx_filter_tbl_t *eftp, __in int index) { efx_oword_t filter; if (efx_filter_test_used(eftp, index)) { efx_filter_clear_used(eftp, index); EFX_ZERO_OWORD(filter); efx_filter_push_entry(enp, eftp->eft_spec[index].efs_type, index, &filter); memset(&eftp->eft_spec[index], 0, sizeof(eftp->eft_spec[0])); } } __checkReturn int efx_filter_remove_filter( __in efx_nic_t *enp, __in efx_filter_spec_t *spec) { efx_filter_t *efp = &enp->en_filter; efx_filter_tbl_id_t tbl_id = efx_filter_tbl_id(spec->efs_type); efx_filter_tbl_t *eftp = &efp->ef_tbl[tbl_id]; efx_filter_spec_t *saved_spec; efx_oword_t filter; int filter_idx, depth; int state; uint32_t key; int rc; key = efx_filter_build(&filter, spec); EFSYS_LOCK(enp->en_eslp, state); rc = efx_filter_search(eftp, spec, key, B_FALSE, &filter_idx, &depth); if (rc != 0) goto out; saved_spec = &eftp->eft_spec[filter_idx]; efx_filter_clear_entry(enp, eftp, filter_idx); if (eftp->eft_used == 0) efx_filter_reset_search_depth(efp, tbl_id); rc = 0; out: EFSYS_UNLOCK(enp->en_eslp, state); return (rc); } void efx_filter_remove_index( __inout efx_nic_t *enp, __in efx_filter_type_t type, __in int index) { efx_filter_t *efp = &enp->en_filter; enum efx_filter_tbl_id tbl_id = efx_filter_tbl_id(type); efx_filter_tbl_t *eftp = &efp->ef_tbl[tbl_id]; int state; if (index < 0) return; EFSYS_LOCK(enp->en_eslp, state); efx_filter_clear_entry(enp, eftp, index); if (eftp->eft_used == 0) efx_filter_reset_search_depth(efp, tbl_id); EFSYS_UNLOCK(enp->en_eslp, state); } void efx_filter_tbl_clear( __inout efx_nic_t *enp, __in efx_filter_tbl_id_t tbl_id) { efx_filter_t *efp = &enp->en_filter; efx_filter_tbl_t *eftp = &efp->ef_tbl[tbl_id]; int index; int state; EFSYS_LOCK(enp->en_eslp, state); for (index = 0; index < eftp->eft_size; ++index) { efx_filter_clear_entry(enp, eftp, index); } if (eftp->eft_used == 0) efx_filter_reset_search_depth(efp, tbl_id); EFSYS_UNLOCK(enp->en_eslp, state); } /* Restore filter state after a reset */ void efx_filter_restore( __in efx_nic_t *enp) { efx_filter_t *efp = &enp->en_filter; efx_filter_tbl_id_t tbl_id; efx_filter_tbl_t *eftp; efx_filter_spec_t *spec; efx_oword_t filter; int filter_idx; int state; EFSYS_LOCK(enp->en_eslp, state); for 
(tbl_id = 0; tbl_id < EFX_FILTER_NTBLS; tbl_id++) { eftp = &efp->ef_tbl[tbl_id]; for (filter_idx = 0; filter_idx < eftp->eft_size; filter_idx++) { if (!efx_filter_test_used(eftp, filter_idx)) continue; spec = &eftp->eft_spec[filter_idx]; efx_filter_build(&filter, spec); efx_filter_push_entry(enp, spec->efs_type, filter_idx, &filter); } } efx_filter_push_rx_limits(enp); efx_filter_push_tx_limits(enp); EFSYS_UNLOCK(enp->en_eslp, state); } void efx_filter_redirect_index( __inout efx_nic_t *enp, __in efx_filter_type_t type, __in int filter_index, __in int rxq_index) { efx_filter_t *efp = &enp->en_filter; efx_filter_tbl_t *eftp = &efp->ef_tbl[efx_filter_tbl_id(type)]; efx_filter_spec_t *spec; efx_oword_t filter; int state; EFSYS_LOCK(enp->en_eslp, state); spec = &eftp->eft_spec[filter_index]; spec->efs_dmaq_id = (uint16_t)rxq_index; efx_filter_build(&filter, spec); efx_filter_push_entry(enp, spec->efs_type, filter_index, &filter); EFSYS_UNLOCK(enp->en_eslp, state); } __checkReturn int efx_filter_init( __in efx_nic_t *enp) { efx_filter_t *efp = &enp->en_filter; efx_filter_tbl_t *eftp; int tbl_id; int rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_FILTER)); switch (enp->en_family) { #if EFSYS_OPT_FALCON case EFX_FAMILY_FALCON: eftp = &efp->ef_tbl[EFX_FILTER_TBL_RX_IP]; eftp->eft_size = FR_AZ_RX_FILTER_TBL0_ROWS; break; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: eftp = &efp->ef_tbl[EFX_FILTER_TBL_RX_IP]; eftp->eft_size = FR_AZ_RX_FILTER_TBL0_ROWS; eftp = &efp->ef_tbl[EFX_FILTER_TBL_RX_MAC]; eftp->eft_size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; eftp = &efp->ef_tbl[EFX_FILTER_TBL_TX_IP]; eftp->eft_size = FR_CZ_TX_FILTER_TBL0_ROWS; eftp = &efp->ef_tbl[EFX_FILTER_TBL_TX_MAC]; eftp->eft_size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS; break; #endif /* EFSYS_OPT_SIENA */ default: rc = ENOTSUP; goto fail1; } for (tbl_id = 0; tbl_id < EFX_FILTER_NTBLS; tbl_id++) { unsigned int bitmap_size; eftp = &efp->ef_tbl[tbl_id]; if (eftp->eft_size == 0) continue; EFX_STATIC_ASSERT(sizeof(eftp->eft_bitmap[0]) == sizeof(uint32_t)); bitmap_size = (eftp->eft_size + (sizeof(uint32_t) * 8) - 1) / 8; EFSYS_KMEM_ALLOC(enp->en_esip, bitmap_size, eftp->eft_bitmap); if (!eftp->eft_bitmap) { rc = ENOMEM; goto fail2; } EFSYS_KMEM_ALLOC(enp->en_esip, eftp->eft_size * sizeof(*eftp->eft_spec), eftp->eft_spec); if (!eftp->eft_spec) { rc = ENOMEM; goto fail2; } memset(eftp->eft_spec, 0, eftp->eft_size * sizeof(*eftp->eft_spec)); } enp->en_mod_flags |= EFX_MOD_FILTER; return (0); fail2: EFSYS_PROBE(fail2); efx_filter_fini(enp); fail1: EFSYS_PROBE1(fail1, int, rc); return (rc); } void efx_filter_fini( __in efx_nic_t *enp) { efx_filter_t *efp = &enp->en_filter; efx_filter_tbl_id_t tbl_id; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); for (tbl_id = 0; tbl_id < EFX_FILTER_NTBLS; tbl_id++) { efx_filter_tbl_t *eftp = &efp->ef_tbl[tbl_id]; unsigned int bitmap_size; EFX_STATIC_ASSERT(sizeof(eftp->eft_bitmap[0]) == sizeof(uint32_t)); bitmap_size = (eftp->eft_size + (sizeof(uint32_t) * 8) - 1) / 8; EFSYS_KMEM_FREE(enp->en_esip, bitmap_size, eftp->eft_bitmap); eftp->eft_bitmap = NULL; EFSYS_KMEM_FREE(enp->en_esip, eftp->eft_size * sizeof(*eftp->eft_spec), eftp->eft_spec); eftp->eft_spec = NULL; } enp->en_mod_flags &= ~EFX_MOD_FILTER; } extern void efx_filter_spec_rx_ipv4_tcp_full( __inout efx_filter_spec_t *spec, __in unsigned int flags, __in uint32_t src_ip, __in uint16_t 
src_tcp, __in uint32_t dest_ip, __in uint16_t dest_tcp) { EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS | EFX_FILTER_FLAG_RX_SCATTER)) == 0); spec->efs_type = EFX_FILTER_RX_TCP_FULL; spec->efs_flags = (uint8_t)flags; spec->efs_dword[0] = src_tcp | src_ip << 16; spec->efs_dword[1] = dest_tcp << 16 | src_ip >> 16; spec->efs_dword[2] = dest_ip; } extern void efx_filter_spec_rx_ipv4_tcp_wild( __inout efx_filter_spec_t *spec, __in unsigned int flags, __in uint32_t dest_ip, __in uint16_t dest_tcp) { EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS | EFX_FILTER_FLAG_RX_SCATTER)) == 0); spec->efs_type = EFX_FILTER_RX_TCP_WILD; spec->efs_flags = (uint8_t)flags; spec->efs_dword[0] = 0; spec->efs_dword[1] = dest_tcp << 16; spec->efs_dword[2] = dest_ip; } extern void efx_filter_spec_rx_ipv4_udp_full( __inout efx_filter_spec_t *spec, __in unsigned int flags, __in uint32_t src_ip, __in uint16_t src_udp, __in uint32_t dest_ip, __in uint16_t dest_udp) { EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS | EFX_FILTER_FLAG_RX_SCATTER)) == 0); spec->efs_type = EFX_FILTER_RX_UDP_FULL; spec->efs_flags = (uint8_t)flags; spec->efs_dword[0] = src_udp | src_ip << 16; spec->efs_dword[1] = dest_udp << 16 | src_ip >> 16; spec->efs_dword[2] = dest_ip; } extern void efx_filter_spec_rx_ipv4_udp_wild( __inout efx_filter_spec_t *spec, __in unsigned int flags, __in uint32_t dest_ip, __in uint16_t dest_udp) { EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS | EFX_FILTER_FLAG_RX_SCATTER)) == 0); spec->efs_type = EFX_FILTER_RX_UDP_WILD; spec->efs_flags = (uint8_t)flags; spec->efs_dword[0] = dest_udp; spec->efs_dword[1] = 0; spec->efs_dword[2] = dest_ip; } #if EFSYS_OPT_SIENA extern void efx_filter_spec_rx_mac_full( __inout efx_filter_spec_t *spec, __in unsigned int flags, __in uint16_t vlan_id, __in uint8_t *dest_mac) { EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT3P(dest_mac, !=, NULL); EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS | EFX_FILTER_FLAG_RX_SCATTER | EFX_FILTER_FLAG_RX_OVERRIDE_IP)) == 0); spec->efs_type = EFX_FILTER_RX_MAC_FULL; spec->efs_flags = (uint8_t)flags; spec->efs_dword[0] = vlan_id; spec->efs_dword[1] = dest_mac[2] << 24 | dest_mac[3] << 16 | dest_mac[4] << 8 | dest_mac[5]; spec->efs_dword[2] = dest_mac[0] << 8 | dest_mac[1]; } #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_SIENA extern void efx_filter_spec_rx_mac_wild( __inout efx_filter_spec_t *spec, __in unsigned int flags, __in uint8_t *dest_mac) { EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT3P(dest_mac, !=, NULL); EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS | EFX_FILTER_FLAG_RX_SCATTER | EFX_FILTER_FLAG_RX_OVERRIDE_IP)) == 0); spec->efs_type = EFX_FILTER_RX_MAC_WILD; spec->efs_flags = (uint8_t)flags; spec->efs_dword[0] = 0; spec->efs_dword[1] = dest_mac[2] << 24 | dest_mac[3] << 16 | dest_mac[4] << 8 | dest_mac[5]; spec->efs_dword[2] = dest_mac[0] << 8 | dest_mac[1]; } #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_SIENA extern void efx_filter_spec_tx_ipv4_tcp_full( __inout efx_filter_spec_t *spec, __in uint32_t src_ip, __in uint16_t src_tcp, __in uint32_t dest_ip, __in uint16_t dest_tcp) { EFSYS_ASSERT3P(spec, !=, NULL); spec->efs_type = EFX_FILTER_TX_TCP_FULL; spec->efs_flags = 0; spec->efs_dword[0] = src_tcp | src_ip << 16; spec->efs_dword[1] = dest_tcp << 16 | src_ip >> 16; spec->efs_dword[2] = dest_ip; } #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_SIENA extern void efx_filter_spec_tx_ipv4_tcp_wild( __inout efx_filter_spec_t *spec, 
__in uint32_t src_ip, __in uint16_t src_tcp) { EFSYS_ASSERT3P(spec, !=, NULL); spec->efs_type = EFX_FILTER_TX_TCP_WILD; spec->efs_flags = 0; spec->efs_dword[0] = 0; spec->efs_dword[1] = src_tcp << 16; spec->efs_dword[2] = src_ip; } #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_SIENA extern void efx_filter_spec_tx_ipv4_udp_full( __inout efx_filter_spec_t *spec, __in uint32_t src_ip, __in uint16_t src_udp, __in uint32_t dest_ip, __in uint16_t dest_udp) { EFSYS_ASSERT3P(spec, !=, NULL); spec->efs_type = EFX_FILTER_TX_UDP_FULL; spec->efs_flags = 0; spec->efs_dword[0] = src_udp | src_ip << 16; spec->efs_dword[1] = dest_udp << 16 | src_ip >> 16; spec->efs_dword[2] = dest_ip; } #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_SIENA extern void efx_filter_spec_tx_ipv4_udp_wild( __inout efx_filter_spec_t *spec, __in uint32_t src_ip, __in uint16_t src_udp) { EFSYS_ASSERT3P(spec, !=, NULL); spec->efs_type = EFX_FILTER_TX_UDP_WILD; spec->efs_flags = 0; spec->efs_dword[0] = src_udp; spec->efs_dword[1] = 0; spec->efs_dword[2] = src_ip; } #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_SIENA extern void efx_filter_spec_tx_mac_full( __inout efx_filter_spec_t *spec, __in uint16_t vlan_id, __in uint8_t *src_mac) { EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT3P(src_mac, !=, NULL); spec->efs_type = EFX_FILTER_TX_MAC_FULL; spec->efs_flags = 0; spec->efs_dword[0] = vlan_id; spec->efs_dword[1] = src_mac[2] << 24 | src_mac[3] << 16 | src_mac[4] << 8 | src_mac[5]; spec->efs_dword[2] = src_mac[0] << 8 | src_mac[1]; } #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_SIENA extern void efx_filter_spec_tx_mac_wild( __inout efx_filter_spec_t *spec, __in uint8_t *src_mac) { EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT3P(src_mac, !=, NULL); spec->efs_type = EFX_FILTER_TX_MAC_WILD; spec->efs_flags = 0; spec->efs_dword[0] = 0; spec->efs_dword[1] = src_mac[2] << 24 | src_mac[3] << 16 | src_mac[4] << 8 | src_mac[5]; spec->efs_dword[2] = src_mac[0] << 8 | src_mac[1]; } #endif /* EFSYS_OPT_SIENA */ #endif /* EFSYS_OPT_FILTER */
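/*
 * Illustrative sketch, not part of the original driver source above: a
 * self-contained model of the open-addressed probing that
 * efx_filter_search() performs -- start at a hash of the key masked by the
 * power-of-two table size, then step by an odd, key-derived increment so
 * every slot is eventually visited, giving up after a fixed depth.  The
 * hash below is a stand-in (not the real LFSR hash) and the table contents
 * are invented.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_TBL_SIZE	8u	/* must be a power of two */
#define DEMO_SRCH_MAX	8u	/* like FILTER_CTL_SRCH_MAX */

static int
demo_search(const uint32_t *tbl, uint32_t key, unsigned int *indexp)
{
	unsigned int idx = (key ^ (key >> 16)) & (DEMO_TBL_SIZE - 1);
	unsigned int incr = key * 2 - 1;	/* always odd */
	unsigned int depth;

	for (depth = 1; depth <= DEMO_SRCH_MAX; depth++) {
		/* A zero entry stands in for "unused", a match for "found". */
		if (tbl[idx] == 0 || tbl[idx] == key) {
			*indexp = idx;
			return (0);
		}
		idx = (idx + incr) & (DEMO_TBL_SIZE - 1);
	}
	return (-1);	/* no match or free slot within the search depth */
}

int
main(void)
{
	uint32_t tbl[DEMO_TBL_SIZE] = { 0 };
	unsigned int idx;

	if (demo_search(tbl, 0x1234u, &idx) == 0)
		printf("key 0x1234 -> slot %u\n", idx);
	return (0);
}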
965268.c
/* * Various input/output functions * * @(#)io.c 4.32 (Berkeley) 02/05/99 */ #include <stdarg.h> #include "i_curses.h" #include <ctype.h> #include <string.h> #include "rogue.h" /* * msg: * Display a message at the top of the screen. */ #define MAXMSG (NUMCOLS - sizeof "--More--") static char msgbuf[2 * MAXMSG + 1]; static int newpos = 0; /* VARARGS1 */ int msg(char *fmt, ...) { va_list args; /* * if the string is "", just clear the line */ if (*fmt == '\0') { move(0, 0); clrtoeol(); /* * need to refresh, otherwise if we do inventory or options * the first line won't show. */ refresh(); mpos = 0; return ~ESCAPE; } /* * otherwise add to the message and flush it out */ va_start(args, fmt); doadd(fmt, args); va_end(args); return endmsg(); } /* * addmsg: * Add things to the current message */ /* VARARGS1 */ void addmsg(char *fmt, ...) { va_list args; va_start(args, fmt); doadd(fmt, args); va_end(args); } /* * endmsg: * Display a new msg (giving him a chance to see the previous one * if it is up there with the --More--) */ int endmsg() { char ch; if (save_msg) strcpy(huh, msgbuf); // if (mpos) // { // look(FALSE); // mvaddstr(0, mpos, "--More--"); // refresh(); // if (!msg_esc) // wait_for(' '); // else // { // while ((ch = readchar()) != ' ') // if (ch == ESCAPE) // { // msgbuf[0] = '\0'; // mpos = 0; // newpos = 0; // msgbuf[0] = '\0'; // return ESCAPE; // } // } // } /* * All messages should start with uppercase, except ones that * start with a pack addressing character */ if (islower(msgbuf[0]) && !lower_msg && msgbuf[1] != ')') msgbuf[0] = (char)toupper(msgbuf[0]); mvaddstr(0, 0, msgbuf); clrtoeol(); mpos = newpos; newpos = 0; msgbuf[0] = '\0'; refresh(); return ~ESCAPE; } /* * doadd: * Perform an add onto the message buffer */ void doadd(char *fmt, va_list args) { static char buf[MAXSTR]; /* * Do the printf into buf */ vsprintf(buf, fmt, args); if (strlen(buf) + newpos >= MAXMSG) endmsg(); strcat(msgbuf, buf); newpos = (int)strlen(msgbuf); } /* * step_ok: * Returns true if it is ok to step on ch */ int step_ok(int ch) { switch (ch) { case ' ': case '|': case '-': return FALSE; default: return (!isalpha(ch)); } } /* * readchar: * Reads and returns a character, checking for gross input errors */ char readchar() { char ch; return '.'; // ch = (char) md_readchar(); // if (ch == 3) // { // quit(0); // return(27); // } // return(ch); } /* * status: * Display the important stats line. Keep the cursor where it was. */ void status() { register int oy, ox, temp; static int hpwidth = 0; static int s_hungry = 0; static int s_lvl = 0; static int s_pur = -1; static int s_hp = 0; static int s_arm = 0; static str_t s_str = 0; static int s_exp = 0; static char *state_name[] = { "", "Hungry", "Weak", "Faint"}; /* * If nothing has changed since the last status, don't * bother. */ temp = (cur_armor != NULL ? 
cur_armor->o_arm : pstats.s_arm); if (s_hp == pstats.s_hpt && s_exp == pstats.s_exp && s_pur == purse && s_arm == temp && s_str == pstats.s_str && s_lvl == level && s_hungry == hungry_state && !stat_msg) return; s_arm = temp; getyx(stdscr, oy, ox); if (s_hp != max_hp) { temp = max_hp; s_hp = max_hp; for (hpwidth = 0; temp; hpwidth++) temp /= 10; } /* * Save current status */ s_lvl = level; s_pur = purse; s_hp = pstats.s_hpt; s_str = pstats.s_str; s_exp = pstats.s_exp; s_hungry = hungry_state; if (stat_msg) { move(0, 0); msg("Level: %d Gold: %-5d Hp: %*d(%*d) Str: %2d(%d) Arm: %-2d Exp: %d/%ld %c%c %s", level, purse, hpwidth, pstats.s_hpt, hpwidth, max_hp, pstats.s_str, max_stats.s_str, 10 - s_arm, pstats.s_lvl, pstats.s_exp, (rookie_mode ? 'r' : 'w'), rogue_version, state_name[hungry_state]); } else { move(STATLINE, 0); printw("Level: %d Gold: %-5d Hp: %*d(%*d) Str: %2d(%d) Arm: %-2d Exp: %d/%d %c%c %s", level, purse, hpwidth, pstats.s_hpt, hpwidth, max_hp, pstats.s_str, max_stats.s_str, 10 - s_arm, pstats.s_lvl, pstats.s_exp, (rookie_mode ? 'r' : 'w'), rogue_version, state_name[hungry_state]); } clrtoeol(); move(oy, ox); } /* * wait_for * Sit around until the guy types the right key */ void wait_for(int ch) { register char c; if (ch == '\n') while ((c = readchar()) != '\n' && c != '\r') continue; else while (readchar() != ch) continue; } /* * fatal: * Exit the program, printing a message. */ /* VARARGS */ void fatal(char *fmt, ...) { va_list args; static char fatalbuf[MAXSTR]; /* fake zero coords for show_win */ strcpy(fatalbuf, "GAME ERROR: "); va_start(args, fmt); vsprintf(&fatalbuf[strlen(fatalbuf)], fmt, args); va_end(args); strcat(&fatalbuf[strlen(fatalbuf)], " --Quit--"); hero.y = 0; hero.x = strlen(fatalbuf); show_win(fatalbuf); endwin(); my_exit(3); } /* * show_win: * Function used to display a window and wait before returning */ void show_win(char *message) { // WINDOW *win; // win = hw; wmove(win, 0, 0); waddstr(win, message); touchwin(win); wmove(win, hero.y, hero.x); wrefresh(win); wait_for(' '); clearok(curscr, TRUE); touchwin(stdscr); }
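/*
 * Illustrative sketch, not part of the original io.c above: a minimal,
 * self-contained model of the msg()/addmsg()/endmsg() pattern -- printf-style
 * fragments accumulate in a static buffer and the buffer is flushed before
 * the next fragment would overflow it.  The buffer size, the names and the
 * use of stdout instead of the curses screen are invented for the example.
 */
#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#define DEMO_MAXMSG 72

static char demo_msgbuf[DEMO_MAXMSG + 1];

static void demo_flush(void)
{
    if (demo_msgbuf[0] != '\0')
        puts(demo_msgbuf);      /* the game writes this to the top screen line */
    demo_msgbuf[0] = '\0';
}

static void demo_addmsg(const char *fmt, ...)
{
    char buf[DEMO_MAXMSG + 1];
    va_list args;

    va_start(args, fmt);
    vsnprintf(buf, sizeof buf, fmt, args);
    va_end(args);

    /* Flush first if appending would exceed the display width. */
    if (strlen(demo_msgbuf) + strlen(buf) >= DEMO_MAXMSG)
        demo_flush();
    strcat(demo_msgbuf, buf);
}

int main(void)
{
    demo_addmsg("You hit the %s", "kobold");
    demo_addmsg(" for %d damage", 3);
    demo_flush();
    return 0;
}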
49415.c
/*
 * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2020 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2015      Research Organization for Information Science
 *                         and Technology (RIST). All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"

#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/info/info.h"

#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_Info_free = PMPI_Info_free
#endif
#define MPI_Info_free PMPI_Info_free
#endif

static const char FUNC_NAME[] = "MPI_Info_free";

/**
 * MPI_Info_free - Free an 'MPI_Info' object.
 *
 * @param info pointer to info object to be freed (handle)
 *
 * @retval MPI_SUCCESS
 * @retval MPI_ERR_INFO
 *
 * Upon successful completion, 'info' will be set to 'MPI_INFO_NULL'.
 */
int MPI_Info_free(MPI_Info *info)
{
    int err;

    /*
     * Free all the alloced items from MPI_Info info.
     * Make sure the items are freed in an orderly
     * fashion so that there are no dangling pointers.
     */
    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (NULL == info || MPI_INFO_NULL == *info ||
            ompi_info_is_freed(*info)) {
            return OMPI_ERRHANDLER_NOHANDLE_INVOKE(MPI_ERR_INFO,
                                                   FUNC_NAME);
        }
    }

    OPAL_CR_ENTER_LIBRARY();

    err = ompi_info_free(info);
    OMPI_ERRHANDLER_NOHANDLE_RETURN(err, err, FUNC_NAME);
}
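/*
 * Illustrative usage sketch, not part of the Open MPI source above: the
 * create/set/free lifecycle of an MPI_Info handle as an application sees
 * it.  After a successful MPI_Info_free() the handle is reset to
 * MPI_INFO_NULL, which is exactly the state the parameter check above
 * rejects on a second free.  The key/value pair is an arbitrary example.
 */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Info info;

    MPI_Init(&argc, &argv);

    MPI_Info_create(&info);
    MPI_Info_set(info, "cb_nodes", "4");   /* example key/value pair */

    MPI_Info_free(&info);                  /* info becomes MPI_INFO_NULL */
    if (info == MPI_INFO_NULL)
        printf("info handle released\n");

    MPI_Finalize();
    return 0;
}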
216428.c
/* * standby driver for allwinnertech * * Copyright (C) 2015 allwinnertech Ltd. * Author: Ming Li <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include "standby.h" #include "main.h" static struct aw_pm_info pm_info; static extended_standby_t extended_standby_para_info; static struct pll_factor_t orig_pll; static struct pll_factor_t local_pll; static struct standby_clk_div_t clk_div; static struct standby_clk_div_t tmp_clk_div; static int dram_enter_selfresh(extended_standby_t *para); static int dram_exit_selfresh(void); static int cpu_enter_lowfreq(void); static int cpu_freq_resume(void); static int bus_enter_lowfreq(extended_standby_t *para); static int bus_freq_resume(extended_standby_t *para); static void query_wakeup_source(struct aw_pm_info *arg); int standby_main(struct aw_pm_info *arg) { save_mem_status(STANDBY_START | 0X01); /* copy standby parameter from dram */ standby_memcpy(&pm_info, arg, sizeof(pm_info)); /* copy extended standby info */ if(0 != pm_info.standby_para.pextended_standby) { standby_memcpy(&extended_standby_para_info, (void *)(pm_info.standby_para.pextended_standby), sizeof(extended_standby_para_info)); } mem_clk_init(1); /* init uart for print */ if(unlikely(pm_info.standby_para.debug_mask&PM_STANDBY_PRINT_STANDBY)){ serial_init_manager(); } save_mem_status(STANDBY_START | 0X02); /* enable dram enter into self-refresh */ dram_enter_selfresh(&extended_standby_para_info); save_mem_status(STANDBY_START | 0X03); /* cpu reduce frequency */ cpu_enter_lowfreq(); save_mem_status(STANDBY_START | 0X04); /* power domain suspend */ #ifdef CONFIG_AW_AXP standby_twi_init(pm_info.pmu_arg.twi_port); if (SUPER_STANDBY_FLAG == extended_standby_para_info.id) power_enter_super(&pm_info, &extended_standby_para_info); dm_suspend(&pm_info, &extended_standby_para_info); #endif printk("test printk...\n"); save_mem_status(STANDBY_START | 0X05); /* bus reduce frequency */ bus_enter_lowfreq(&extended_standby_para_info); /* cpu enter sleep, wait wakeup by interrupt */ asm("WFI"); /* bus freq resume */ bus_freq_resume(&extended_standby_para_info); save_mem_status(RESUME0_START | 0X01); /* cpu freq resume */ cpu_freq_resume(); save_mem_status(RESUME0_START | 0X02); /* power domain resume */ #ifdef CONFIG_AW_AXP dm_resume(&extended_standby_para_info); standby_twi_exit(); #endif save_mem_status(RESUME0_START | 0X03); /* dram out self-refresh */ dram_exit_selfresh(); save_mem_status(RESUME0_START | 0X04); if(unlikely(pm_info.standby_para.debug_mask&PM_STANDBY_PRINT_STANDBY)){ serial_exit_manager(); } return 0; } static int dram_enter_selfresh(extended_standby_t *para) { s32 ret = -1; return ret; } static int dram_exit_selfresh(void) { s32 ret = -1; return ret; } static int cpu_enter_lowfreq(void) { standby_clk_init(); /* backup cpu freq */ standby_clk_get_pll_factor(&orig_pll); /* backup bus src */ standby_clk_bus_src_backup(); /*lower freq from 1008M to 408M*/ local_pll.FactorN = 16; local_pll.FactorK = 0; local_pll.FactorM = 0; local_pll.FactorP = 0; standby_clk_set_pll_factor(&local_pll); delay_ms(10); /* switch cpu clock to HOSC, and disable pll */ standby_clk_core2hosc(); delay_us(1); return 0; } static int cpu_freq_resume(void) { /* switch cpu clock to core pll */ standby_clk_core2pll(); change_runtime_env(); delay_ms(10); /*restore freq from 384 to 1008M*/ standby_clk_set_pll_factor(&orig_pll); change_runtime_env(); delay_ms(5); 
return 0; } static int bus_enter_lowfreq(extended_standby_t *para) { /* change ahb src to axi? losc?*/ standby_clk_bus_src_set(); standby_clk_getdiv(&clk_div); /* set clock division cpu:axi:ahb:apb = 2:2:2:1 */ tmp_clk_div.axi_div = 0; tmp_clk_div.ahb_div = 0; tmp_clk_div.ahb_pre_div = 0; tmp_clk_div.apb_div = 0; tmp_clk_div.apb_pre_div = 0; standby_clk_setdiv(&tmp_clk_div); /* swtich apb2 to losc */ standby_clk_apb2losc(); change_runtime_env(); //delay_ms(1); standby_clk_plldisable(); /* switch cpu to 32k */ standby_clk_core2losc(); if(1 == para->soc_dram_state.selfresh_flag){ // disable HOSC, and disable LDO standby_clk_hoscdisable(); standby_clk_ldodisable(); } return 0; } static int bus_freq_resume(extended_standby_t *para) { if(1 == para->soc_dram_state.selfresh_flag){ /* enable LDO, enable HOSC */ standby_clk_ldoenable(); /* delay 1ms for power be stable */ //3ms standby_delay_cycle(1); standby_clk_hoscenable(); //3ms standby_delay_cycle(1); } /* switch clock to hosc */ standby_clk_core2hosc(); /* swtich apb2 to hosc */ standby_clk_apb2hosc(); /* restore clock division */ standby_clk_setdiv(&clk_div); /* enable pll */ standby_clk_pllenable(); delay_ms(10); standby_clk_bus_src_restore(); return 0; }
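/*
 * Illustrative sketch, not part of the original standby code above: the
 * suspend path above runs its steps in one order (dram self-refresh, cpu
 * low frequency, power domains, bus low frequency, then WFI) and the resume
 * path undoes them in the reverse order.  This self-contained model
 * expresses that mirrored ordering with a table of enter/exit callbacks;
 * the stage names are invented and nothing here touches real hardware.
 */
#include <stdio.h>

struct stage {
	const char *name;
	void (*enter)(void);
	void (*exit)(void);
};

static void noop(void) { }

static const struct stage stages[] = {
	{ "dram self-refresh", noop, noop },
	{ "cpu low frequency", noop, noop },
	{ "power domains",     noop, noop },
	{ "bus low frequency", noop, noop },
};

int main(void)
{
	const int n = (int)(sizeof(stages) / sizeof(stages[0]));
	int i;

	for (i = 0; i < n; i++) {	/* suspend: forward order */
		stages[i].enter();
		printf("enter %s\n", stages[i].name);
	}
	/* ... WFI / wait for the wakeup interrupt would happen here ... */
	for (i = n - 1; i >= 0; i--) {	/* resume: reverse order */
		stages[i].exit();
		printf("exit  %s\n", stages[i].name);
	}
	return 0;
}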
519323.c
#include <mpi.h>
#include <stddef.h>
#include <stdio.h>

#define MSG_TAG_A 124
#define MSG_TAG_B 1234
#define N 1000
#define N2 1204

/*
 * Illegal rank in mpi send, different communicators. (line 27)
 */

int main(int argc, char **argv)
{
    int buffer[N] = {0};
    int buffer2[N2] = {0};

    MPI_Init(&argc, &argv);

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int color = 1;
    if (argc == 1) {
        color = rank / 1;
    } else {
        color = 1;
    }

    MPI_Comm row_comm;
    MPI_Comm_split(MPI_COMM_WORLD, color, rank, &row_comm);

    if (rank == 0) {
        MPI_Send(buffer, 1, MPI_INT, 1, MSG_TAG_A, row_comm);
    } else if (rank == 1) {
        MPI_Recv(buffer2, N2, MPI_INT, 0, MSG_TAG_A, row_comm,
                 MPI_STATUS_IGNORE);
    }

    MPI_Finalize();
    return 0;
}
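/*
 * Illustrative sketch, not part of the test case above: that test is
 * deliberately broken -- with color = rank, every process ends up alone in
 * its row_comm, so destination rank 1 does not exist there.  A correct
 * pattern queries the size and rank of the *new* communicator before
 * addressing peers in it, as below.  The color function and payload are
 * invented for the example.
 */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int world_rank, row_rank, row_size;
    int payload = 42;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    MPI_Comm row_comm;
    MPI_Comm_split(MPI_COMM_WORLD, world_rank / 2, world_rank, &row_comm);

    MPI_Comm_rank(row_comm, &row_rank);
    MPI_Comm_size(row_comm, &row_size);

    /* Only talk to rank 1 of row_comm if it actually exists. */
    if (row_rank == 0 && row_size > 1) {
        MPI_Send(&payload, 1, MPI_INT, 1, 0, row_comm);
    } else if (row_rank == 1) {
        MPI_Recv(&payload, 1, MPI_INT, 0, 0, row_comm, MPI_STATUS_IGNORE);
    }

    MPI_Comm_free(&row_comm);
    MPI_Finalize();
    return 0;
}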
759171.c
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>

#include "create_socket.h"

/* Creates a socket and initializes it
 * @source_addr: if !NULL, the source address that should be bound to this socket
 * @src_port: if >0, the port on which the socket is listening
 * @dest_addr: if !NULL, the destination address to which the socket should send data
 * @dst_port: if >0, the destination port to which the socket should be connected
 * @return: a file descriptor number representing the socket,
 *          or -1 in case of error (explanation will be printed on stderr)
 */
int create_socket(struct sockaddr_in6 *source_addr, int src_port,
                  struct sockaddr_in6 *dest_addr, int dst_port)
{
    if (dst_port > 0) {
        dest_addr->sin6_port = (in_port_t) htons(dst_port);
    }
    if (src_port > 0) {
        source_addr->sin6_port = (in_port_t) htons(src_port);
    }

    int sfd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDP); /* Type: maybe SOCK_STREAM, 0 */
    if (sfd < 0) {
        fprintf(stderr, "%s\n", "Error: socket() call failed");
        return -1;
    }

    if (source_addr != NULL) {
        if (bind(sfd, (struct sockaddr *) source_addr,
                 (socklen_t) sizeof(struct sockaddr_in6)) != 0) {
            fprintf(stderr, "%s\n", "Error: could not bind the source address to the socket");
            return -1;
        }
    }

    if (dest_addr != NULL) {
        if (connect(sfd, (struct sockaddr *) dest_addr,
                    (socklen_t) sizeof(struct sockaddr_in6)) != 0) {
            fprintf(stderr, "%s\n", "Error: could not connect the socket to the destination");
            return -1;
        }
    }

    /* Accept? */
    return sfd;
}
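/*
 * Illustrative usage sketch, not part of the original file: build an IPv6
 * destination address and open a connected UDP socket with create_socket()
 * as defined above.  The loopback address and port number are arbitrary
 * example values.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include "create_socket.h"

int main(void)
{
    struct sockaddr_in6 dest;

    memset(&dest, 0, sizeof(dest));
    dest.sin6_family = AF_INET6;
    if (inet_pton(AF_INET6, "::1", &dest.sin6_addr) != 1) {
        fprintf(stderr, "invalid address\n");
        return 1;
    }

    /* No source address to bind: pass NULL and a non-positive source port. */
    int sfd = create_socket(NULL, -1, &dest, 12345);
    if (sfd < 0)
        return 1;

    close(sfd);
    return 0;
}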
650104.c
#include "/daemon/rumours/sources.h" #define RUMOURS_D "/daemon/rumours_d.c" string subj; int num; void help() { write( // "123456789012345678901234567890123456789012345678901234567890123456789012345" "Syntax: <gossip> - Opens an editor from which you can make choices about" +"\n finding out about rumours, or starting some new ones\n" +"\nOptions available in the editor are: " + list_commands("short") ); write( "\n%^BOLD%^%^YELLOW%^Note: %^RESET%^%^RED%^Starting a background rumour about yourself may be considered an OOC action, to help establish the known history of your character. However, starting a rumour about someone else, or digging for information about them are definitely IC actions. There is some chance of word getting out about what you've been up to." +"\nYou can start as many background rumours for your character as you like. However, you will be limited in how many rumours you can start or circulate using the 'start rumour' and 'circulate rumour' options, depending on your influence. Once you have exhausted all your gossipping potential, you'll have to wait a while before starting or circulating more."); } varargs cmd_gossip(string str){ tell_object(TP, list_commands()); ///////////////////////////////////////////////////////////////// // REMOVE THIS SECTION TO IMPLEMENT THE COMMAND FOR PLAYERS // // // // if (!avatarp(TP) && !TP->query_true_invis() && TPQN != "noob") // // { // // tell_object(TP, "NOTE: " // // +"%^BOLD%^%^YELLOW%^The gossip command is not yet" // // +" available to players, but we hope it will be soon." // // +" Do get your character rumours ready to go, for when" // // +" it is available. %^BOLD%^%^GREEN%^Lujke, Jan 2018"); // // return; // // } // ///////////////////////////////////////////////////////////////// // input_to("enter_editor", 0); return 1; } void report(string str){ //"/daemon/reporter_d.c"->report("lujke", str); not there anymore -H "/d/atoyatl/reporter_d.c"->report("lujke", str); } varargs string list_commands(string str){ string result; if (!stringp(str) || str != "short") { result = "%^BOLD%^%^RED%^Here's a list of commands you can use," +"\n%^RESET%^%^ORANGE%^OR%^BOLD%^%^RED%^ enter anything else to exit%^RESET%^"; } else { result = ""; } result += "\n%^ORANGE%^ gossip at random%^RESET%^ - listen out for random rumours" +"\n from around the Realms" +"\n%^ORANGE%^ dig dirt on [%^RESET%^name%^ORANGE%^]%^RESET%^ - ask around after rumours" +"\n about the named person" +"\n%^ORANGE%^ start rumour%^RESET%^ - start a rumour about" +"\n yourself or someone you know" +"\n%^ORANGE%^ background%^RESET%^ - create a rumour about your character that" +"\n will form part of the background information" +"\n that might be generally discoverable about" +"\n them" +"\n%^ORANGE%^ recall rumours%^RESET%^ - lists all the people you" +"\n have heard a rumour about" +"\n%^ORANGE%^ recall rumours about [%^RESET%^name%^ORANGE%^]%^RESET%^ - lists all the rumours you" +"\n have heard about the named" +"\n person" +"\n%^ORANGE%^recall rumour # about [%^RESET%^name%^ORANGE%^]%^RESET%^ - gives all the details you" +"\n have heard about the rumour" +"\n in question" +"\n%^ORANGE%^dig further into rumour # about [name]%^RESET%^ - seek out more details" +"\n about a rumour you have" +"\n already heard about" +"\n someone" +"\n%^ORANGE%^ circulate%^RESET%^ - circulate a rumour that" +"\n you have heard about someone" +"\n%^ORANGE%^ quash%^RESET%^ - try to quash a rumour that" +"\n you have heard about someone" +"\n (This is harder than spreading" +"\n a rumour around, 
and could" +"\n backfire)" +"\n%^ORANGE%^ check%^RESET%^ - check what sort of rumours" +"\n might be available in the place you are in," +"\n and whether you are currently engaged in any" +"\n gossip"; if (avatarp(TP)||TP->query_true_invis()){ result +="\n%^BOLD%^%^WHITE%^Your additional Imm commands:\n" +" <remove rumour # for [name]> - deletes the rumour in question" +"\n completely. For judicious use, to" +"\n prevent the rumour system being abused" +"\n <plant rumour about [name]> - creates a rumour about the person in" +"\n question, but also allows you to" +"\n specify someone else as instigator of" +"\n the rumour. Handy for putting in some" +"\n consequences when PCs are blabbing" +"\n about secret stuff in public locations," +"\n for example." +"\n <list rumours about [name] - Lists all the rumours in circulation" +"\n the named person." +"\n <edit rumour # about [name] - Puts you into an editor where you can" +"\n choose to change various aspects of" +"\n the rumour specified"; } return result; } string list_heard_rumour_subjects(string hearer){ mapping rumours, subject_rumours; int * nums, num, width, linelen, sublen, remainder, i; string * subjects, subject, result, line; report("list_heard_rumour_subjects: %^CYAN%^Starting"); rumours = RUMOURS_D->query_heard_rumours(hearer); if (!mapp(rumours) || sizeof(rumours)<1) return "%^BOLD%^%^WHITE%^Sorry, you have not heard any rumours yet.\n\n%^RESET%^%^ORANGE%^How sad. \n%^RESET%^Don't you talk to people? \n\n%^BOLD%^%^WHITE%^Check 'help gossip' for more on how to go about hearing some juicy tidbits of information."; subjects = keys(rumours); // report("Setting up result"); result = "%^RESET%^%^ORANGE%^You have heard rumours about the following subjects:\n"; line = ""; width = atoi(TP->getenv("SCREEN")); if (width <15) width = 70; // report ("Checking subjects"); foreach(subject in subjects) { // report("Subject: " + subject); sublen = strlen(subject); linelen = strlen(line); if (linelen + sublen>width) { result += line + "\n"; line = ""; } // report("Adding subject to line"); line += capitalize(subject); if (width > sublen + linelen + 6) { remainder = sublen % 6; if (remainder > 0) { for (i = remainder; i<6;i++) line += " "; } line += " "; } else { result +=line + "\n"; line = ""; } } result += line; return result; } mixed * get_random_rumour(){ mixed * rumour; string surface; rumour = RUMOURS_D->get_rumour(); return rumour; } int get_random_source_number(){ return random(sizeof(SOURCES)); } string get_source(int num){ int num_sources; num_sources = sizeof(SOURCES); if (num >= num_sources) return ""; return SOURCES[num]; } varargs void confirm_unknown(string str, string name, string inst){ if (lower_case(str) == "yes") { tell_object(TP, "%^BOLD%^%^WHITE%^Okay, going ahead with the name %^RESET%^%^CYAN%^" + capitalize(name) + "%^BOLD%^%^WHITE%^." +"\n%^BOLD%^%^WHITE%^Now, where would someone be most likely to hear the rumour you are starting?" 
+"\n%^ORANGE%^Options are:" +"\n%^BOLD%^%^WHITE%^City %^RESET%^-%^ORANGE%^ Rumours circulating in human, elf and dwarf cities" +"\n%^GREEN%^Wild %^RESET%^-%^ORANGE%^ Rumours circulating among the people of the wilds - rangers, druids, barbarians and the like" +"\n%^ORANGE%^B%^BOLD%^%^BLACK%^e%^RESET%^%^MAGENTA%^a%^GREEN%^s%^ORANGE%^t %^RESET%^-%^ORANGE%^ Rumours circulating among the beast races of the Realms" +"\n%^RESET%^Or enter 'Q' to quit"); input_to("rumour_type", 0, name, inst); return; } tell_object(TP, "%^ORANGE%^Very well, what is the name you would like the rumour to be about instead?"); input_to("start_rumour", 0, inst); } varargs void start_rumour(string str, string inst){ mapping rels; string name, myname, * nick_options, nick_option; mixed * nicks, nick ; int flag; if (!RUMOURS_D->can_gossip(TP->query_name()) ) { if (avatarp(TP) || TP->query_true_invis()) { tell_object("As an Imm, you can gossip as much as you want. Otherwise, you'd be out of tries about now"); } else { tell_object(TP, "You have exhausted your gossipping contacts for the time being. Try again later."); return; } } if (str == "me" || str == "myself") { myname = TP->query_name(); if (TP->isKnown(myname)) { str = TP->knownAs(myname); tell_object(TP, "%^CYAN%^Okay, you're starting a rumour about yourself"); } else { tell_object(TP, "You haven't recognized yourself yet. You'll need to do that before you can start a rumour about yourself"); return; } } if(!Does_x_know_y(TP, str)) { if (avatarp(TP) || TP->query_true_invs()) { tell_object(TP, "%^CYAN%^You don't know anyone by that name. Are you SURE you want to go ahead? Enter 'yes' to proceed or anything else to try again"); input_to("confirm_unknown", 0, str, inst); } tell_object(TP, "%^CYAN%^You don't know anyone by that name"); return; } name = str; tell_object(TP, "%^BOLD%^%^WHITE%^Where would someone be most likely to hear the rumour you are starting?" +"\n%^ORANGE%^Options are:" +"\n%^BOLD%^%^WHITE%^City %^RESET%^-%^ORANGE%^ Rumours circulating in human, elf and dwarf cities" +"\n%^GREEN%^Wild %^RESET%^-%^ORANGE%^ Rumours circulating among the people of the wilds - rangers, druids, barbarians and the like" +"\n%^ORANGE%^B%^BOLD%^%^BLACK%^e%^RESET%^%^MAGENTA%^a%^GREEN%^s%^ORANGE%^t %^RESET%^-%^ORANGE%^ Rumours circulating among the beast races of the Realms" +"\n%^RESET%^Or enter 'Q' to quit"); input_to("rumour_type", 0, name, inst); } varargs void rumour_type(string str, string name, string inst){ int type; string location; if (!stringp(str)) { tell_object(TP, "No rumour type entered. Defaulting to CITY - you will get a chance to amend this before you finalise the rumour"); type = CITY_SOURCE; input_to("surface_rumour", 0, name, type, inst); return; } switch(lower_case(str)) { case "city": tell_object(TP, "%^ORANGE%^Okay, it's going to be a city rumour"); type = CITY_SOURCE; location = "in the %^ORANGE%^city%^RESET%^"; break; case "wild": tell_object(TP, "%^ORANGE%^Okay, it's going to be a rumour passing among the wild folk"); type = WILD_SOURCE; location = "among the people of the %^GREEN%^Wild%^RESET%^"; break; case "beast": tell_object(TP, "%^ORANGE%^Okay, it's going to be a rumour passing among the beast races"); type = BEAST_SOURCE; location = "among the %^BOLD%^%^BLACK%^B%^RESET%^%^ORANGE%^e%^MAGENTA%^a%^GREEN%^s%^BOLD%^%^BLACK%^t%^RESET%^ races"; break; case "q": tell_object(TP, "%^ORANGE%^Abandoning rumour"); return; default: tell_object(TP, "%^BOLD%^%^WHITE%^Sorry, that's not an available option. 
Please try again to specify the type of rumour you want to start" +"\n%^ORANGE%^Options are:" +"\n%^BOLD%^%^WHITE%^City %^RESET%^-%^ORANGE%^ Rumours circulating in human, elf and dwarf cities" +"\n%^GREEN%^Wild %^RESET%^-%^ORANGE%^ Rumours circulating among the people of the wilds - rangers, druids, barbarians and the like" +"\n%^BOLD%^%^BLACK%^B%^RESET%^%^ORANGE%^e%^MAGENTA%^a%^GREEN%^s%^BOLD%^%^BLACK%^t %^RESET%^-%^ORANGE%^ Rumours circulating among the beast races of the Realms" +"\n%^RESET%^Or enter 'Q' to quit"); location = "in the %^ORANGE%^city%^RESET%^"; input_to("rumour_type", 0, name, inst); return; break; } tell_object(TP, "What rumour about " + name + " would you like to start " + location + "?\n" +"%^RESET%^%^CYAN%^Note: your rumour can have a maximum of 300 characters" +" (excluding colour codes) - about 50 words." +"\n%^RESET%^Or enter 'Q' to quit"); report ("About to do surface rumour. Type = " + type); input_to("surface_rumour", 0, name, type, inst); } string get_location_name(int type){ string location; switch(type){ case CITY_SOURCE: location = "in the %^ORANGE%^city%^RESET%^"; break; case WILD_SOURCE: location = "among the people of the %^GREEN%^Wild%^RESET%^"; break; case BEAST_SOURCE: location = "among the %^BOLD%^%^BLACK%^B%^RESET%^%^ORANGE%^e%^MAGENTA%^a%^GREEN%^s%^BOLD%^%^BLACK%^t%^RESET%^ races"; break; } return location; } varargs void surface_rumour(string str, string name, int type, string inst){ string plainstr; report("surface rumour: type = " + type); plainstr = strip_colors(str); if (lower_case(plainstr) == "q") { tell_object(TP, "%^ORANGE%^Rumour edit abandoned"); return; } if (strlen(plainstr)>300) { tell_object(TP, "Sorry, that rumour is too long. Try again, keeping it to 300 characters or fewer"); input_to("surface_rumour", 0, name, type, inst); return; } if (strlen(plainstr)<1){ tell_object(TP, "%^BOLD%^%^WHITE%^There needs to be some content to a rumour, to make it worth gossipping about. Try again, or enter 'q' to abandon this rumour"); input_to("surface_rumour",0,name, type); return; } tell_object(TP, "%^BOLD%^%^WHITE%^Got it.\n%^BOLD%^%^WHITE%^Is there any more detail someone might find out with a little more digging? You can enter another 600 characters or so, if the rumour needs fleshing out, or hit <enter> to skip"); input_to("detailed_rumour", 0, name, type, str, inst); } varargs void detailed_rumour(string str, string name, int type, string surface, string inst){ string plainstr; report("detailed rumour: type = " + type); if (!stringp(str) || strlen(str)<1) { str = ""; tell_object(TP, "%^BOLD%^%^WHITE%^Okay, so there's no further general details about this rumour. Are there any %^RED%^secret%^WHITE%^ details that could be discovered with serious investigation?\nYou can enter another 600 characters or so, if there are secrets to discover, or hit <enter> to skip"); input_to("secret_info", 0, name, type, surface, str, inst); return; } plainstr = strip_colors(str); if (lower_case(plainstr) == "q"){ tell_object(TP, "%^ORANGE%^Rumour edit abandoned"); return; } if (strlen(plainstr)>600) { tell_object(TP, "Sorry, that detailed information must have too much detail. 
Try again, keeping it to 600 characters or fewer"); input_to("detailed_rumour", 0, name, type, surface, inst); return; } tell_object(TP, "%^BOLD%^%^WHITE%^Got it.\n%^BOLD%^%^WHITE%^Are there any %^RED%^secret%^WHITE%^ details that could be discovered with serious investigation?\nYou can enter another 600 characters or so, if there are secrets to discover, or hit <enter> to skip"); input_to("secret_info", 0, name, type, surface, str, inst); return; } varargs void secret_info(string str, string name, int type, string surface, string detail, string inst ){ string secret; report("secret rumour: type = " + type); if (!stringp(str) || strlen(str)<1) { secret = ""; tell_object(TP, "%^BOLD%^%^WHITE%^Okay, so there are no secrets to be found about this rumour at the moment. Now let's finalise the details"); } else { tell_object(TP, "%^BOLD%^%^WHITE%^Okay, got the secret information. Now let's finalise the details"); secret = str; } call_out("confirm_rumour", 1, name, type, surface, detail, secret, TP, inst); return; } varargs void confirm_rumour(string name, int type, string surface, string detail, string secret, object ob, string inst){ string location; report("confirm rumour: type = " + type); location = get_location_name(type); if (!objectp(ob)) return; tell_object(ob, "%^BOLD%^%^WHITE%^Please confirm the details of the rumour you wish to start"); tell_object(ob, "\n%^ORANGE%^Subject name:%^RESET%^ " + name ); tell_object(ob, "Rumour circulating: " + location); tell_object(ob, "\n%^ORANGE%^Surface details of rumour: %^RESET%^"); tell_object(ob, surface); tell_object(ob, "\n%^ORANGE%^Deeper details of rumour: %^RESET%^"); tell_object(ob, detail); tell_object(ob, "\n%^ORANGE%^Secret details of rumour (these will be hard to find out)"); tell_object(ob, secret); tell_object(ob, "\n%^BOLD%^%^WHITE%^Are these details correct? 
Enter 'Yes' to go ahead and start the rumour, or use one of the following commands:"); tell_object(ob, "%^CYAN%^Amend subject name"); tell_object(ob, "%^CYAN%^Amend rumour type%^RESET%^ (Selects between city, wild or beast-race rumours)"); tell_object(ob, "%^CYAN%^Amend surface details"); tell_object(ob, "%^CYAN%^Amend deeper details"); tell_object(ob, "%^CYAN%^Amend secret details"); tell_object(ob, "%^CYAN%^Amend traction"); tell_object(ob, "%^CYAN%^OR 'Q' to abort the rumour altogether"); input_to("final_confirmation", 0, name, type, surface, detail, secret, ob, inst); } int calculate_traction(int type, object ob){ int racetype, traction; if (!objectp(ob)) return 0; switch(lower_case(ob->query_race())) { case "beastman": case "firbolg": case "half-elf": case "halfling": case "human": case "centaur": case "voadkyn": case "wemic": case "gnome": racetype = 1; break; case "half-orc": switch (ob->query("subrace")) { case "gray orc": case "mountain orc": case "orog": case "tanarukk": racetype = 2; break; default: racetype = 1; break; } break; case "elf": switch (ob->query("subrace")) { case "fey'ri": racetype = 2; break; default: racetype = 1; break; } break; case "dwarf": switch (ob->query("subrace")) { case "gray dwarf": racetype = 2; break; default: racetype = 1; break; } break; case "half-drow": case "bugbear": case "drow": case "gnoll": case "goblin": case "half-ogre": case "hobgoblin": case "kobold": case "minotaur": case "orc": case "ogre": case "ogre-mage": case "yuan-ti": racetype = 2; break; } switch(type) { case CITY_SOURCE: if (racetype ==1) { traction = ob->query_skill("influence") *2; } else { traction = ob->query_skill("influence")/5; } break; case WILD_SOURCE: if (racetype ==1) { traction = ob->query_skill("influence") *2; } else { traction = ob->query_skill("influence")/5; } break; case BEAST_SOURCE: if (racetype ==2) { traction = ob->query_skill("influence") *2; } else { traction = ob->query_skill("influence")/5; } break; } return traction; } void final_confirmation(string str,string name, int type, string surface, string detail, string secret, object ob, string inst){ string tn, instigator, * altnames, altname; int traction, flag, rn; mixed truename, * truenames; mapping rels; if (!objectp (ob)) return; if (stringp(inst) && inst !="") instigator = inst; else instigator = ob->query_name(); traction = calculate_traction(type, ob); switch(lower_case(str)) { case "yes": tn = "no-one"; rels = ob->getRelationships(); truenames = keys(rels); flag = 0; foreach(truename in truenames) { if (mapp(truename)){ altnames = values(truename); foreach(altname in altnames) { if (ob->knownAs(altname)==name) { tn = altname; flag = 1; break; } if (flag == 1) break; } } if (stringp(truename)){ if (ob->knownAs(truename)==name) { tn = truename; break; } } } rn = RUMOURS_D->add_rumour(name, tn, ({instigator, instigator, instigator}), surface, detail, secret, type, traction, ob->query_short()); RUMOURS_D->hear_rumour(ob->query_true_name(), RUMOURS_D->query_rumour( name, rn), SECRET_LVL); tell_object(ob, "%^ORANGE%^Very good, your rumour has been started. 
Let's see if it gets any traction"); if (RUMOURS_D->can_gossip(TPQN, TP->query_skill("influence")) && instigator != "background") RUMOURS_D->add_gossip_time(instigator); break; case "amend subject name": tell_object(ob, "%^BOLD%^%^WHITE%^Very well, what is the name you wish this rumour to be attached to?"); input_to("amend_name", 0, type, surface, detail, secret, ob); break; case "amend rumour type": tell_object(ob, "%^BOLD%^%^WHITE%^Very well, where do you want to start this rumour circulating?" +"\n%^ORANGE%^Options are:" +"\n%^BOLD%^%^WHITE%^City %^RESET%^-%^ORANGE%^ Rumours circulating in human, elf and dwarf cities" +"\n%^GREEN%^Wild %^RESET%^-%^ORANGE%^ Rumours circulating among the people of the wilds - rangers, druids, barbarians and the like" +"\n%^BOLD%^%^BLACK%^B%^RESET%^%^ORANGE%^e%^MAGENTA%^a%^GREEN%^s%^BOLD%^%^BLACK%^t %^RESET%^-%^ORANGE%^ Rumours circulating among the beast races of the Realms"); input_to("amend_type", 0, name, surface, detail, secret, ob); break; case "amend traction": tell_object("%^BOLD%^%^WHITE%^Very well, what would you like the new traction score to be?"); input_to("amend_type", 0, name, type, surface, detail, secret, ob); break; case "amend surface details": tell_object(ob, "%^BOLD%^%^WHITE%^Very well, please re-enter the surface details of the rumour"); input_to("amend_surface", 0, name, type, detail, secret, ob); break; case "amend deeper details": tell_object(ob, "%^BOLD%^%^WHITE%^Very well, please re-enter the deeper details of the rumour"); input_to("amend_detail", 0, name, type, surface, secret, ob); break; case "amend secret details": tell_object(ob, "%^BOLD%^%^WHITE%^Very well, please re-enter the secret details of the rumour"); input_to("amend_secret", 0, name, type, surface, detail, ob); break; case "q": break; default: tell_object(TP, "I'm sorry, that was not one of your options."); confirm_rumour(name, type, surface, detail, secret, ob, inst); break; } return; } int Does_x_know_y(object x, string y){ mapping rels; string * nick_options, nick_option; mixed * nicks, nick ; int flag; nicks = ({}); rels = x->getRelationships(); if (mapp(rels) && sizeof(rels)>0) { nicks = values(rels); } if (sizeof(nicks)<1) { return 0; } else { flag = 0; foreach(nick in nicks) { if (stringp(nick) && nick == y) { flag = 1; break; } if (mapp(nick) && sizeof(nick)>0) { nick_options = values(nick); foreach(nick_option in nick_options) { if (nick_option == y) { flag = 1; break; } } } } return flag; } } void amend_name(string str, int type, string surface, string detail, string secret, object ob){ mapping rels; string * nicks; if (!objectp(ob)) return; if (!stringp(str) || strlen(str)<1) { tell_object(ob, "%^BOLD%^%^Please enter the name you want to use - or 'Q' if you want to abandon the rumour you were writing completely"); input_to("amend_name", 0, surface, detail, secret, ob); return; } if (lower_case(str)== "q") { tell_object(ob, "%^ORANGE%^Rumour abandoned. Feel free to start over"); return; } nicks = ({}); rels = TP->getRelationships(); if (mapp(rels) && sizeof(rels)>0) { nicks = values(rels); } if (sizeof(nicks)<1 || member_array(str, nicks)==-1) { tell_object(TP, "%^CYAN%^You don't know anyone by that name. 
Please try again"); tell_object(ob, "%^BOLD%^%^WHITE%^What is the name you wish this rumour to be attached to?"); input_to("amend_name", 0, surface, detail, secret, ob); return; } tell_object(ob, "%^BOLD%^%^WHITE%^Amending name to " + str); call_out("confirm_rumour",1, str, type, surface, detail, secret, ob); } void amend_type(string str, string name, string surface, string detail, string secret, object ob){ int type; if (!objectp(ob)) return; if (!stringp(str) || strlen(str)<1) { tell_object(ob, "%^BOLD%^%^WHITE%^Please enter the type of rumour you want to start." +"\n%^ORANGE%^Options are:" +"\n%^BOLD%^%^WHITE%^City %^RESET%^-%^ORANGE%^ Rumours circulating in human, elf and dwarf cities" +"\n%^GREEN%^Wild %^RESET%^-%^ORANGE%^ Rumours circulating among the people of the wilds - rangers, druids, barbarians and the like" +"\n%^BOLD%^%^BLACK%^B%^RESET%^%^ORANGE%^e%^MAGENTA%^a%^GREEN%^s%^BOLD%^%^BLACK%^t %^RESET%^-%^ORANGE%^ Rumours circulating among the beast races of the Realms"); input_to("amend_type", 0, name, surface, detail, secret, ob); return; } if (lower_case(str)== "q") { tell_object(ob, "%^ORANGE%^Rumour abandoned. Feel free to start over"); return; } switch(lower_case(str)) { case "city": tell_object(ob, "%^ORANGE%^Okay, it's going to be a city rumour"); type = CITY_SOURCE; break; case "wild": tell_object(ob, "%^ORANGE%^Okay, it's going to be a rumour passing among the wild folk"); type = WILD_SOURCE; break; case "beast": tell_object(ob, "%^ORANGE%^Okay, it's going to be a rumour passing among the beast races"); type = BEAST_SOURCE; break; default: tell_object(ob, "%^ORANGE%^I didn't understand that. Defaulting to a city rumour"); type = CITY_SOURCE; break; } call_out("confirm_rumour",1, name, type, surface, detail, secret, ob); } void amend_surface(string str, string name, int type, string detail, string secret, object ob){ string plainstr; if (!objectp(ob)) return; if (!stringp(str) || strlen(str)<1) { tell_object(ob, "%^BOLD%^%^WHITE%^Please enter the basic details of the rumour you want to start about " + name + " - or 'Q' if you want to abandon the rumour you were writing completely"); input_to("amend_surface", 0, name, detail, secret, ob); return; } if (lower_case(str)== "q") { tell_object(ob, "%^ORANGE%^Rumour abandoned. Feel free to start over"); return; } plainstr = strip_colors(str); if (strlen(plainstr)>300) { tell_object(TP, "%^BOLD%^%^WHITE%^Sorry, that's too much information. Please try again, using 300 characters or fewer"); input_to("amend_surface", 0, name, detail, secret, ob); return; } tell_object(ob, "%^BOLD%^%^WHITE%^Got it. Amending the surface details of the rumour"); call_out("confirm_rumour",1, name, type, str, detail, secret, ob); } void amend_detail(string str, string name, int type, string surface, string secret, object ob){ string plainstr; if (!objectp(ob)) return; if (!stringp(str) || strlen(str)<1) { tell_object(ob, "%^BOLD%^%^WHITE%^Got it. No further details about the rumour available at this time."); call_out("confirm_rumour",1, name, type, surface, str, secret, ob); return; } if (lower_case(str)== "q") { tell_object(ob, "%^ORANGE%^Rumour abandoned. Feel free to start over"); return; } plainstr = strip_colors(str); if (strlen(plainstr)>600) { tell_object(TP, "%^BOLD%^%^WHITE%^Sorry, that's too much information, even for the deeper details of the rumour. Please try again, using 600 characters or fewer"); input_to("amend_detail", 0, name, surface, secret, ob); return; } tell_object(ob, "%^BOLD%^%^WHITE%^Got it. 
Amending the deeper details of the rumour"); call_out("confirm_rumour",1, name, type, surface, str, secret, ob); } void amend_secret(string str, string name, int type, string surface, string detail, object ob){ string plainstr; if (!objectp(ob)) return; if (!stringp(str) || strlen(str)<1) { tell_object(ob, "%^BOLD%^%^WHITE%^Got it. No secret information about the rumour available at this time."); call_out("confirm_rumour",1, name, type, surface, detail, str, ob); return; } if (lower_case(str)== "q") { tell_object(ob, "%^ORANGE%^Rumour abandoned. Feel free to start over"); return; } plainstr = strip_colors(str); if (strlen(plainstr)>600) { tell_object(TP, "%^BOLD%^%^WHITE%^Sorry, that's too much information, even for the secret details of the rumour. Please try again, using 600 characters or fewer"); input_to("amend_secret", 0, name, type, surface, detail, ob); return; } tell_object(ob, "%^BOLD%^%^WHITE%^Got it. Amending the secret information about this rumour"); call_out("confirm_rumour",1, name, type, surface, detail, str, ob); } void enter_editor(string str){ string word1, rest, name, list, response, rum_type, tracing, investigating, source_type, result; int num, rumour_no, room_type, gossipping; mixed * rum; num = sscanf(str, "%s %s", word1, rest); if (num<2) { rest = ""; word1 = str; } report("Entering editor. str = " + str + " word1 = " + word1); switch(lower_case(word1)){ case "q": tell_object(TP, "%^ORANGE%^OK, exiting gossip commands"); return; case "remove": if (!avatarp(TP)){ break; } num = sscanf(rest, "rumour %d for %s", rumour_no, name); if (num <2) { num = sscanf(rest, "rumor %d for %s", rumour_no, name); if (num <2) { num = sscanf(rest, "rumor %d about %s", rumour_no, name); if (num <2) { tell_object(TP, "%^BOLD%^%^WHITE%^I didn't understand that. The syntax to remove a rumour is: %^RESET%^'remove rumour # for [name]'"); return; } } } num = RUMOURS_D->remove_rumour(name, rumour_no); switch(num) { case -2: case -1: //deliberately falling through tell_object("Sorry, there do not seem to be any rumours for " + name); return; break; case 1: tell_object(TP, "Rumour successfully removed"); return; break; } break; case "plant": if (!avatarp(TP) && !TP->query_true_invis()) break; tell_object(TP, "%^BOLD%^%^WHITE%^Ooh sneaky - who would you like this rumour to seem to have been started by?"); input_to("plant_rumour",0); return; break; case "background": report("Background check. Checking whether TP knows themselves"); if (!TP->isKnown(TPQN)) { tell_object(TP, "Sorry, you have not yet recognized yourself. As rumours" +" about yourself work on the name you have recognized yourself by," +" you'll have to do that first. "); return; } name = TP->knownAs(TPQN); report("TP Does indeed know themselves, as " + name); tell_object(TP, "%^BOLD%^%^WHITE%^Okay, let's get started with a%^RESET%^%^ORANGE%^" +" background%^BOLD%^%^WHITE%^ rumour for your character. Since you have" +" recognized yourself as %^RESET%^" + capitalize(name) + "%^BOLD%^%^WHITE%^," +" that is the name this rumour will be attributed to." +"\n%^BOLD%^%^WHITE%^Now, where would someone be most likely to hear about this bit of your" +" background?" 
+"\n%^BOLD%^%^WHITE%^City %^RESET%^-%^ORANGE%^ In human, elf and dwarf cities" +"\n%^GREEN%^Wild %^RESET%^-%^ORANGE%^ Among the people of the wilds - rangers, druids, barbarians and the like" +"\n%^ORANGE%^B%^BOLD%^%^BLACK%^e%^RESET%^%^MAGENTA%^a%^GREEN%^s%^ORANGE%^t %^RESET%^-%^ORANGE%^ Among the beast races of the Realms" +"\n%^RESET%^Or enter 'Q' to quit"); input_to("rumour_type", 0, name, "background"); return; break; case "check": room_type = RUMOURS_D->check_room_source_type(ETP); switch (room_type) { case CITY_SOURCE: source_type = "rumours circulating in the %^ORANGE%^cities%^RESET%^ of %^MAGENTA%^humans%^RESET%^ and their allies"; break; case WILD_SOURCE: source_type = "rumours circulating among the %^BOLD%^%^GREEN%^w%^RESET%^%^GREEN%^i%^BOLD%^ld%^RESET%^%^GREEN%^ folk"; break; case BEAST_SOURCE: source_type = "rumours circulating among the %^BOLD%^%^BLACK%^b%^RESET%^%^MAGENTA%^e%^BLUE%^a%^BOLD%^%^BLACK%^st races"; break; default: source_type = "none"; } if (source_type == "none") { result = "You think you are unlikely to come across rumours of any type in this place."; } else { result = "You think that this is the sort of place you might come across " + source_type; } if (TP->query_property("tracing")) { tracing = TP->query_property("tracing"); num = sscanf(tracing, "%s_%d", word1, rumour_no); result += "\nYou are currently trying to find out who might have started rumour number " + rumour_no + " about " + capitalize(word1); } if (TP->query_property("investigating")) { investigating = TP->query_property("investigating"); num = sscanf(investigating, "%s_%d", word1, rumour_no); if (num ==2) { result += "\nYou are currently trying to find out more detail about rumour number " + rumour_no + " about " + capitalize(word1); } else { result += "\nYou are currently trying to dig up some gossip about " + capitalize(investigating); } } if (!TP->query_property("investigating") && !TP->query_property("tracing")) { if (TP->query_property("gossipping")) { result += "\nYou are currently gossipping at random"; } else { report("Query_property('gossipping') is not valid"); result += "\nYou are not currently gossipping"; } } tell_object(TP, result); return 1; break; case "edit": if (!avatarp(TP) && !TP->query_true_invis()) break; num = sscanf(rest, "rumour %d about %s", rumour_no, name); if (num !=2) { num = sscanf(rest, "rumor %d about %s", rumour_no, name); if (num !=2) { tell_object(TP, "%^BOLD%^%^WHITE%^Which rumour do you want to edit? %^RESET%^Syntax is: %^CYAN%^'edit rumour # about [name]'"); input_to("enter_editor", 0); } } rum = RUMOURS_D->query_rumour(name, rumour_no); if (!arrayp(rum) || sizeof(rum)<1) { tell_object(TP, "Sorry, unable to retrieve rumour number " + rumour_no + " about " + capitalize(name) + ". Please use the list command to check you have used the right number. If you are sure you have, please make a bug report, reminding Lujke about the importance of mental stability in a genius"); return; } display_rumour(TP, rum); tell_object(TP, "%^WHITE%^Which bit of the rumour would you like to edit? Options are:\n"); tell_object(TP, "%^RESET%^Subject True name(of subject) Rumour type (City, wild or beast)\n" +"Instigator(s) Traction Date\n" +"Surface Detail Secret"); input_to ("edit_rumour", 0, rum); return; break; case "gossip": if (rest != "at random") { tell_object(TP, "I didn't understand that. 
Try again - perhaps you meant 'gossip at random'?"); input_to("enter_editor", 0); return; } ////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////Remove this section to make gossip completely live for players ///////////////////// // // // if (!avatarp(TP) && !TP->query_true_invis()) // // { // // tell_object(TP, "%^BOLD%^%^CYAN%^Sorry, that gossip function is not open for players yet\n" // // +"At the moment, you can only use 'start rumour' and 'background' to \n" // // +"set rumours up. You won't be able to find any out until there are enough" // // +"recorded to make the system viable"); // // return; // // } // ////////////////////////////////////////////////////////////////////////////////////////////////////// room_type = RUMOURS_D->check_room_source_type(ETP); switch(room_type) { case CITY_SOURCE: rum_type = "circulating in the cities of humans and their allies"; break; case WILD_SOURCE: rum_type = "circulating amongst the wild folk"; break; case BEAST_SOURCE: rum_type = "circulating among the beast races of the realms"; break; case -1: tell_object(TP, "Unfortunately, this does not seem like the sort of place you are likely to pick up any rumours"); return; break; } tell_object (TP, "You set about gossipping and casually hearing what stories are " + rum_type); if (TP->query_property("tracing")) TP->remove_property("tracing"); if (TP->query_property("investigating")) TP->remove_property("investigating"); if (TP->query_property("gossipping")) TP->remove_property("gossipping"); TP->set_property("gossipping", room_type); return; break; case "start": tell_object(TP, "%^BOLD%^%^WHITE%^Who are you starting a rumour about? Give" +" a name you have recognized" +" someone by, or 'myself' if the rumour is about you.%^RESET%^\nNote: if you" +" are starting the rumour about yourself, it will be set for the name you" +" have recognized yourself by"); input_to("start_rumour",0); return; break; case "recall": if (rest == "rumours") { RUMOURS_D->recall_rumours(TP); return; } num = sscanf(rest, "rumours about %s", name); if (num <1) { num = sscanf(rest, "rumors about %s", name); if (num <1){ num = sscanf(rest, "rumour %d about %s", rumour_no, name); if (num == 2){ recall_rumour_about(name, rumour_no, TP->query_true_name()); return; } else { num = sscanf(rest, "rumor %d about %s", rumour_no, name); if (num == 2){ recall_rumour_about(name, rumour_no, TP->query_true_name()); return; } } } else { recall_rumours_about(name); return; } } else { recall_rumours_about(name); return; break; } tell_object(TP, "%^BOLD%^%^WHITE%^What do you want to recall? %^RESET%^%^ORANGE%^Options are:\n" +" 'recall rumours about [name]' - lists all the rumours you've heard about that person\n" +" 'recall rumour # about [name' - gives the full details of a particular rumour\n\n" +"OR type 'Q' to quit"); input_to("enter_editor", 0); return; break; case "list": report("Case: list"); if (rest == "rumour subjects" || rest == "rumor subjects") { report("Trying to list heard rumour subjects"); list = list_heard_rumour_subjects(TPQN); tell_object(TP, list); return; break; } if (!avatarp(TP) && !TP->query_true_invis()) { report("Someone not an avatar is trying to list all the rumours. Naughty!"); break; } num = sscanf(rest, "rumours about %s", name); if (num <1) { num = sscanf(rest, "rumors about %s", name); if (num <1){ response = "%^BOLD%^%^WHITE%^What are you trying to list? 
Options are:\n" +" 'list rumour subjects'\n"; if (avatarp(TP) || TP->query_true_invis()) { response += "%^ORANGE%^Avatar option: %^RESET%^'list rumours about [name]'\n"; } response += "Or type 'q' to quit"; tell_object(TP, response); input_to("enter_editor", 0); return; break; } } report("Generating list of rumours about " + name); response = list_rumours_about(name); tell_object(TP, response); return; break; case "trace": if (!stringp(rest) || strlen(rest)<1) { tell_object(TP, "Trace what?"); tell_object(TP, list_commands()); return; } ////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////Remove this section to make gossip completely live for players ///////////////////// // // // if (!avatarp(TP) && !TP->query_true_invis()) // // { // // tell_object(TP, "%^BOLD%^%^CYAN%^Sorry, that gossip function is not open for players yet\n" // // +"At the moment, you can only use 'start rumour' and 'background' to \n" // // +"set rumours up. You won't be able to find any out until there are enough" // // +"recorded to make the system viable"); // // return; // // } // ////////////////////////////////////////////////////////////////////////////////////////////////////// num = sscanf(rest, "rumour %d about %s", rumour_no, name); if (num ==2) { trace_rumour(name, rumour_no, TP); return; break; } else { num = sscanf(rest, "rumor %d about %s", rumour_no, name); if (num ==2) { trace_rumour(name, rumour_no, TP); return; break; } } case "dig": if (!stringp(rest) || strlen(rest)<1) { tell_object(TP, "Dig what?"); tell_object(TP, list_commands()); return; } ////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////Remove this section to make gossip completely live for players ///////////////////// // // // if (!avatarp(TP) && !TP->query_true_invis()) // // { // // tell_object(TP, "%^BOLD%^%^CYAN%^Sorry, that gossip function is not open for players yet\n" // // +"At the moment, you can only use 'start rumour' and 'background' to \n" // // +"set rumours up. You won't be able to find any out until there are enough" // // +"recorded to make the system viable"); // // return; // // } // ////////////////////////////////////////////////////////////////////////////////////////////////////// num = sscanf(rest, "further into rumour %d about %s", rumour_no, name); if (num ==2) { dig_further_into(rumour_no, name, TP); return; break; } else { num = sscanf(rest, "further into rumor %d about %s", rumour_no, name); if (num ==2) { dig_further_into(rumour_no, name, TP); return; break; } } num = sscanf(rest, "dirt on %s", name); if (num < 1) { tell_object(TP, "Dig what?"); cmd_gossip(); input_to("enter_editor",0); return; } dig_dirt_on(name, TP); return; break; case "circulate": ////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////Remove this section to make gossip completely live for players ///////////////////// // // // if (!avatarp(TP) && !TP->query_true_invis()) // // { // // tell_object(TP, "%^BOLD%^%^CYAN%^Sorry, that gossip function is not open for players yet\n" // // +"At the moment, you can only use 'start rumour' and 'background' to \n" // // +"set rumours up. 
You won't be able to find any out until there are enough" // // +"recorded to make the system viable"); // // return; // // } // ////////////////////////////////////////////////////////////////////////////////////////////////////// num = sscanf(rest, "rumour %d about %s", rumour_no, name); if (num ==2) { if (!RUMOURS_D->can_gossip(TPQN, TP->query_skill("influence"))) { if (avatarp(TP) || TP->query_true_invis()) { tell_object(TP, "As an imm, you can circulate as many rumours as you want. Otherwise you'd be out of tries by now"); } else { tell_object(TP, "You've done all the muck spreading you can manage with your level of influence for the moment. Try again later"); return; } } circulate_rumour(rumour_no, name, TP); return; } else { tell_object(TP, "%^BOLD%^%^WHITE%^Which rumour do you want to circulate?\n%^RESET%^%^ORANGE%^Syntax is: %^RESET%^'circulate rumour # about [name]'"); input_to("enter_editor", 0); return; } break; case "quash": ////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////Remove this section to make gossip completely live for players ///////////////////// // // // if (!avatarp(TP) && !TP->query_true_invis()) // // { // // tell_object(TP, "%^BOLD%^%^CYAN%^Sorry, that gossip function is not open for players yet\n" // // +"At the moment, you can only use 'start rumour' and 'background' to \n" // // +"set rumours up. You won't be able to find any out until there are enough" // // +"recorded to make the system viable"); // // return; // // } // ////////////////////////////////////////////////////////////////////////////////////////////////////// num = sscanf(rest, "rumour %d about %s", rumour_no, name); if (num ==2) { if (!RUMOURS_D->can_gossip(TPQN, TP->query_skill("influence"))) { if (avatarp(TP) || TP->query_true_invis()) { tell_object(TP, "As an imm, you can quash as many rumours as you want. Otherwise you'd be out of tries by now"); } else { tell_object(TP, "You've done all the intervening in gossip you can manage with your level of influence for the moment. Try again later"); return; } } quash_rumour(rumour_no, name, TP); return; } else { tell_object(TP, "%^BOLD%^%^WHITE%^Which rumour do you want to quash?\n%^RESET%^%^ORANGE%^Syntax is: %^RESET%^'quash rumour # about [name]'"); input_to("enter_editor", 0); return; } break; } tell_object(TP, "I'm sorry, that's not one of your options for gossipping. Please try again. 
See 'help gossip' for available commands"); return; } void display_rumour(object ob, mixed * rum){ string subject, source_type, surface, detail, secret, * insts, inst, true_name, instigators, date_string; int traction, rumour_no, when; subject = rum[RUMOUR_SUBJECT]; when = rum[TIME]; true_name = rum[TRUENAME]; insts = rum[INSTIGATOR]; rumour_no = rum[RUMOUR_NUMBER]; traction = rum[TRACTION]; surface = rum[SURFACE]; detail = rum[DETAIL]; secret = rum[SECRET]; instigators = ""; foreach(inst in insts){ instigators += inst + ", "; } if (strlen(instigators) > 2) instigators = instigators[0..strlen(instigators)-3]; switch(rum[SOURCE_TYPE]) { case CITY_SOURCE: source_type = "%^BOLD%^%^WHITE%^City%^RESET%^"; break; case WILD_SOURCE: source_type = "%^RESET%^%^GREEN%^W%^BOLD%^%^GREEN%^i%^RESET%^%^GREEN%^ld%^RESET%^"; break; case BEAST_SOURCE: source_type = "%^RESET%^%^MAGENTA%^B%^BLUE%^e%^ORANGE%^a%^MAGENTA%^st%^RESET%^"; break; } date_string = "%^BOLD%^%^RED%^" + hour(when)+"%^RESET%^:%^BOLD%^%^RED%^"+(minutes(when)/10); date_string +=(minutes(when)%10) +" %^BOLD%^%^BLUE%^"+day(when)+", "; date_string += date(when)+" "+month(when)+"%^RESET%^, %^BOLD%^%^GREEN%^"+year(when)+" SG%^RESET%^"; tell_object(ob, "%^RESET%^%^CYAN%^Subject: %^RESET%^" + capitalize(subject) + " %^CYAN%^Rumour Number: %^RESET%^" + rumour_no + " %^CYAN%^Subject's true name: %^RESET%^" + capitalize(true_name) ); tell_object(ob, "%^RESET%^%^CYAN%^Rumour type: %^RESET%^" + source_type + " %^CYAN%^Traction score: %^RESET%^"+ traction + "\n%^CYAN%^When created: %^RESET%^" + date_string); tell_object(ob, "%^RESET%^%^CYAN%^Rumour instigators: %^RESET%^" + instigators + "\n\n"); tell_object(ob, "%^RESET%^%^CYAN%^Surface level of rumour:%^RESET%^\n" + surface + "\n\n"); tell_object(ob, "%^RESET%^%^CYAN%^Further details of rumour:%^RESET%^\n" + detail + "\n\n"); tell_object(ob, "%^RESET%^%^CYAN%^Secret information about rumour:%^RESET%^\n" + secret + "\n\n"); } varargs void edit_rumour2(string str, mixed * rum, string aspect_changed, string previous_subject){ int ASPECT, trac; mixed what; what = str; report("edit_rumour2 aspect_changed: " + aspect_changed); switch(aspect_changed) { case "subject": ASPECT = RUMOUR_SUBJECT; break; case "true name": ASPECT = TRUENAME; break; case "rumour type": case "rumor type": ASPECT = SOURCE_TYPE; switch(lower_case(str)) { case "city": what = CITY_SOURCE; break; case "wild": what = WILD_SOURCE; break; case "beast": what = BEAST_SOURCE; break; } break; case "instigator": case "instigators": ASPECT = INSTIGATOR; what = explode(str, " "); return; break; case "traction": ASPECT = TRACTION; report("amending traction.str:'" + str +"'"); trac = atoi(str); break; case "date": ASPECT = TIME; what = rum[TIME] + atoi(str)*24000; break; case "surface": ASPECT = SURFACE; break; case "detail": ASPECT = DETAIL; break; case "secret": ASPECT = SECRET; break; } if (ASPECT == TRACTION) { report ("%^BOLD%^%^GREEN%^Setting rumour[" + ASPECT + "] to " + trac); rum[ASPECT] = trac; } else { report ("%^BOLD%^%^MAGENTA%^ASPECT: " + ASPECT + " TRACTION: " + TRACTION); rum[ASPECT] = what; } RUMOURS_D->save_rumour(rum); tell_object(TP, "%^BOLD%^%^WHITE%^Okay, the rumour now looks like this:"); display_rumour(TP, rum); tell_object(TP, "%^WHITE%^Do you want to edit another part of the rumour? 
Options are:\n"); tell_object(TP, "%^RESET%^Subject True name(of subject) Rumour type (City, wild or beast)\n" +"Instigator(s) Traction Date\n" +"Surface Detail Secret\n" +"%^BOLD%^%^WHITE%^Or type anything else to exit"); input_to ("edit_rumour", 0, rum, previous_subject); } varargs void edit_rumour(string str, mixed * rum, string previous_subject){ switch(lower_case(str)) { case "subject": if(stringp(previous_subject) && strlen(previous_subject)>0 && previous_subject != rum[RUMOUR_SUBJECT]) { tell_object(TP, "You have already amended the subject to " + rum[RUMOUR_SUBJECT] + ". Changing it again will confuse the editor. Please finish this edit, then use the edit rumour command again using the new subject name if you want to change it again."); break; } tell_object(TP, "%^BOLD%^%^WHITE%^Okay, who would you like the new subject of the rumour to be? - This should be the name that the person starting the rumour knows them by"); input_to("edit_rumour2",0,rum, str, rum[RUMOUR_SUBJECT]); return; break; case "true name": tell_object(TP, "%^BOLD%^%^WHITE%^Okay, who would you like the new truename of the subject of the rumour to be? - This is the actual character name, the one they log in with"); input_to("edit_rumour2",0,rum, str, previous_subject); return; break; case "rumour type": case "rumor type": tell_object(TP, "%^BOLD%^%^WHITE%^Okay, what sort of rumour would you like this to be? - City, Wild or Beast"); input_to("edit_rumour2",0,rum, str, previous_subject); return; break; case "instigator": case "instigators": tell_object(TP, "%^BOLD%^%^WHITE%^Okay, who would you to appear to have started the rumour? You can enter one name, or several separated by spaces ('lujke kassius nienne octothorpe')"); input_to("edit_rumour2",0,rum, str, previous_subject); return; break; case "traction": tell_object(TP, "%^BOLD%^%^WHITE%^Okay, what would you like the new traction score of the rumour to be? (current score is " + rum[TRACTION] +")"); input_to("edit_rumour2",0,rum, lower_case(str), previous_subject); return; break; case "date": tell_object(TP, "%^BOLD%^%^WHITE%^Okay, how many days would you like to adjust the rumour start date by? (Enter a positive integer to bring the date forward, or a negative to push it back"); input_to("edit_rumour2",0,rum, str, previous_subject); return; break; case "surface": tell_object(TP, "%^BOLD%^%^WHITE%^Okay, who would you like the %^RESET%^%^ORANGE%^surface%^BOLD%^%^WHITE%^ level of the rumour to be? It currently reads: \n" + rum[SURFACE]); input_to("edit_rumour2",0,rum, str, previous_subject); return; break; case "detail": tell_object(TP, "%^BOLD%^%^WHITE%^Okay, who would you like the %^BLUE%^%^detailed%^WHITE%^ information of the rumour to be? It currently reads: \n" + rum[DETAIL]); input_to("edit_rumour2",0,rum, str, previous_subject); return; break; case "secret": tell_object(TP, "%^BOLD%^%^WHITE%^Okay, who would you like the %^CYAN%^secret%^WHITE%^ level of the rumour to be? It currently reads: \n" + rum[SECRET]); input_to("edit_rumour2",0,rum, str, previous_subject); return; break; default: if (!stringp(str) || strlen(str)<1) str = "That"; tell_object(TP, str + " is not a valid aspect of the rumour. Exiting editor"); if(stringp(previous_subject) && strlen(previous_subject)>0 && previous_subject != rum[RUMOUR_SUBJECT]) { report("subject has changed. Getting rid of previous rumour. Rumour name: " + previous_subject + " Rumour number: " + rum[RUMOUR_NUMBER]); RUMOURS_D->remove_rumour(previous_subject, rum[RUMOUR_NUMBER]); } else { report("exiting editor. 
Rumour subject has not changed"); } return; } } void plant_rumour(string str){ if (!stringp(str)) { tell_object(TP, "%^ORANGE%^No really, who would you like the rumour to seem to have been started by?"); input_to("plant_rumour", 0); } tell_object(TP, "%^BOLD%^%^WHITE%^Okay, so it will seem like %^RESET%^%^ORANGE%^"+ capitalize(str) + " %^BOLD%^%^WHITE%^started this rumour. Who will the rumour be about?"); input_to("start_rumour", 0, str); } void circulate_rumour(int num, string name, object ob){ report ("About to circulate rumour. Num: " + num + " name: " + name + " Circulator: " + ob->QCN); RUMOURS_D->circulate_rumour(num, name, ob); } void quash_rumour(int num, string name, object ob){ report ("About to quash rumour. Num: " + num + " name: " + name + " Circulator: " + ob->QCN); RUMOURS_D->quash_rumour(num, name, ob); } void dig_further_into(int rumour_no, string name, object ob){ string investigation_target, * subjects; int nums; mapping my_rumours, my_rumours_about_subject; my_rumours = RUMOURS_D->query_heard_rumours(ob->query_name()); if (!mapp(my_rumours) || sizeof(my_rumours)<1) { tell_object(ob, "You haven't heard any rumours, so there's nothing for you to dig any further into. You need to get out and listen to some basic gossip first"); return; } subjects = keys(my_rumours); if (member_array(name, subjects)==-1) { tell_object(ob, "You haven't heard any rumours about " + capitalize(name) + ", so there's nothing for you to dig any further into. Wait until you've started to hear things about them."); return; } my_rumours_about_subject = my_rumours[name]; if (!mapp(my_rumours_about_subject) || sizeof(my_rumours_about_subject)<1) { tell_object(ob, "You haven't heard any rumours about " + capitalize(name) + ", so there's nothing for you to dig any further into. Wait until you've started to hear things about them."); return; } nums = keys(my_rumours_about_subject); if (sizeof(nums)<1 || member_array(rumour_no, nums)==-1) { tell_object(ob, "You haven't heard a rumour numbered " + rumour_no + " in relation to " + capitalize(name) + ", so you can't dig further into it. Check your list of heard rumours about them for a valid number, and try again."); return; } investigation_target = name + "_" + rumour_no; tell_object(ob, "%^CYAN%^You set about investigating more information about rumour " + rumour_no + " in relation to %^BOLD%^%^WHITE%^" + capitalize(name)); if (ob->query_property("tracing")) ob->remove_property("tracing"); if (ob->query_property("investigating")) ob->remove_property("investigating"); if (ob->query_property("gossipping")) ob->remove_property("gossipping"); ob->remove_property("gossipping"); ob->set_property("investigating", investigation_target); ob->set_property("gossipping",investigation_target); } string list_rumours_about(string subject){ string result, surface, line, col; mapping rums, rum; int * nums, num, i, type, count; rums = RUMOURS_D->query_rumours(subject); if (!mapp(rums) || sizeof(rums)<1) return "There are no rumours in circulation about anyone called " + subject; result = ""; nums = keys(rums); foreach(num in nums) { rum = RUMOURS_D->query_rumour(subject, num); count = sizeof(rum); if (count>0) { report("Loaded a rumour. 
Elements are: "); for (i=0;i<count;i++){ if (stringp(rum[i])){ report("Element " + i + ": " + rum[i]); } else { report("element " + i + " is not a string"); } } } else { report("%^BOLD%^%^CYAN%^Rumour loaded is not valid"); } type = rum[SOURCE_TYPE]; switch(type) { case CITY_SOURCE: col = "%^BOLD%^%^WHITE%^"; break; case WILD_SOURCE: col = "%^RESET%^%^GREEN%^"; break; case BEAST_SOURCE: col = "%^RESET%^%^MAGENTA%^"; break; } line = col; line += num; if (num<1000) line += " "; if (num<100) line += " "; if (num<10) line += " "; line += " "; surface = rum[SURFACE]; if (strlen(surface) >29) { line += surface[0..28]; } else { line += surface; num = 29 - strlen(surface); for(i=0;i<num;i++) { line += " "; } } line += " "; if (strlen(rum[DETAIL])>0) { line += "Y"; } else { line += " "; } line += " "; if (strlen(rum[SECRET])>0) { line += "Y"; } else { line += " "; } line += " "; switch(type) { case CITY_SOURCE: line += "%^BOLD%^%^WHITE%^City"; break; case WILD_SOURCE: line += "%^BOLD%^%^GREEN%^Wild"; break; case BEAST_SOURCE: line += "%^MAGENTA%^Beast"; break; } line += "%^RESET%^\n"; if (strlen(surface) >29) { line += col + " "; if (strlen(surface)>58) { line += surface[29..57]; } else { // line += surface; } if (strlen(surface)<59) line += "\n"; } line += "%^RESET%^\n"; if (strlen(surface) >58) { line += col + " "; if (strlen(surface)>87) { line += surface[58..86]; } else { // line += surface; } line += "\n"; } result = line + result; } result = "%^ORANGE%^Rumours currently in circulation about %^BOLD%^%^WHITE%^" + capitalize(subject) + "%^RESET%^%^ORANGE%^:\n" +"Rumour no Surface details Further info? Secret info? Type\n" + result; return result; } varargs void recall_rumours_about(string name, string recaller){ mapping heard_rums, rums; mixed * heard_rumour, * rumour; string * recallers, * subjects, result, surface; int * nums, num, lvl, width, rumour_num; width = atoi(TP->getenv("SCREEN")); if (width<15) width = 70; if (!stringp(recaller)) recaller = TPQN; heard_rums = RUMOURS_D->get_my_heard_rumours(recaller); if(!mapp(heard_rums) || sizeof(heard_rums)<1){ report(recaller + "'s list of heard rumours is empty"); tell_object(TP, "%^BOLD%^%^WHITE%^Sorry, you can't recall hearing any rumours. Get out there and talk to some people. See 'help gossip' for information about how to stumble across rumours."); return; } subjects = keys(heard_rums); if (member_array(name, subjects)==-1){ report (recaller + "'s list of heard rumours does not include any about " + name); tell_object(TP, "%^BOLD%^%^WHITE%^Sorry, you can't recall hearing any rumours about " + name + ". See 'help gossip' for information about how to dig dirt on a particular person, if you want to find out more about them"); return; } rums = heard_rums[name]; if (!mapp(rums) || sizeof(rums) < 1){ report (recaller + "'s list of heard rumours about " + name + " is empty"); tell_object(TP, "%^BOLD%^%^WHITE%^Sorry, you can't recall hearing any rumours about " + name + ". 
See 'help gossip' for information about how to dig dirt on a particular person, if you want to find out more about them"); return; } nums = keys(rums); result = "%^ORANGE%^You know the following rumours about %^RESET%^" +name + "%^ORANGE%^:\n\n" +"%^ORANGE%^Rumour no Details\n"; foreach(num in nums) { heard_rumour = rums[num]; report("Checking heard rumour number " + num + " sizeof heard rumour: " + sizeof(heard_rumour)); rumour = RUMOURS_D->query_rumour(name, heard_rumour[1]); surface = rumour[SURFACE]; report("Rumour surface: " + surface); result += "" + num; if (num>100) result += " "; if (num<10) result += " "; if (strlen(surface) <= width-16) { result += " " + surface +"\n"; } else { result += " " + surface[0..width - 18] + "...\n"; } } tell_object(TP, result); } varargs void recall_rumour_about(string name, int no, string recaller){ mapping heard_rums, rums; mixed * rum, * heard_rumour; string * recallers, * subjects, result, detail; int * nums, lvl, rumour_num; if (!stringp(recaller)) recaller = TP->query_true_name(); report ("Trying to recall rumour number " + no +" about " + name + ", as recalled by " + recaller); heard_rums = RUMOURS_D->get_my_heard_rumours(recaller); if(!mapp(heard_rums) || sizeof(heard_rums)<1){ report(recaller + "'s list of heard rumours is empty"); tell_object(TP, "%^BOLD%^%^WHITE%^Sorry, you can't recall hearing any rumours. Get out there and talk to some people. See 'help gossip' for information about how to stumble across rumours."); return; } report ("Got some heard rumours"); subjects = keys(heard_rums); if (member_array(lower_case(name), subjects)==-1){ report (recaller + "'s list of heard rumours does not include any about " + name); tell_object(TP, "%^BOLD%^%^WHITE%^Sorry, you can't recall hearing any rumours about " + name + ". See 'help gossip' for information about how to dig dirt on a particular person, if you want to find out more about them"); return; } rums = heard_rums[name]; if (!mapp(rums) || sizeof(rums)<1) { report("%^BOLD%^%^CYAN%^Error 1 in void recall_rumour. Name used for recaller: " + recaller + "Name used for subject: " + name); tell_object(TP, "%^BOLD%^%^WHITE%^Sorry, you can't recall hearing any rumours about " + name + ". See 'help gossip' for information about how to dig dirt on a particular person, if you want to find out more about them"); return; } nums = keys(rums); if (sizeof(nums)<1) { tell_object(TP, "%^BOLD%^%^WHITE%^Sorry, you can't recall hearing any rumours about " + name + ". See 'help gossip' for information about how to dig dirt on a particular person, if you want to find out more about them"); return; } report ("Got some heard rumours about " + name); if (member_array(no, nums)==-1) { tell_object(TP, "%^BOLD%^%^WHITE%^Sorry, you haven't heard a rumour number " + no + " about " + name + ". Try typing 'gossip' then 'recall rumours about " + name + "' to see a list of the rumours you have heard about them."); return; } heard_rumour = rums[no]; rum = RUMOURS_D->query_rumour(name, heard_rumour[1]); if (sizeof(rum)<1) { tell_object(TP, "Sorry, your memory is a bit fuzzy and you can't recall rumour number " + no + " about " + name + ". It might be worth making a bug report, in case something is going wrong with the rumour system, despite Lujke's very stable genius-ness"); return; } result = ""; lvl = heard_rumour[2]; report ("Found a rumour. 
Rumour lvl heard is: " + lvl); switch(lvl) { case 3: result = "\n%^RESET%^%^ORANGE%^You have also discovered some interesting, secretive and hard to come across information, suggesting that:\n%^RESET%^" + rum[SECRET]; case 2: //deliberately falling through; detail = rum[DETAIL]; if (stringp(detail) && detail != ""){ result = "\n%^RESET%^%^ORANGE%^Further gossip has reached your ears that\n%^RESET%^" + rum[DETAIL] + "\n" + result; } case 1: //deliberately falling through result = "%^RESET%^%^ORANGE%^You have heard that:\n%^RESET%^" + rum[SURFACE] + "\n" + result + "\n"; } tell_object(TP, result); } void dig_dirt_on(string name, object ob){ int s_type; object room; string message; if (!objectp(ob)) return; room = environment(ob); if (!objectp(room)) { tell_object(ob, "Error with the gossip command - you are not in a valid room. Please make a bug report"); return; } s_type = RUMOURS_D->check_room_source_type(room); switch(s_type) { case CITY_SOURCE: message = "%^BOLD%^%^WHITE%^You set your ear to the %^RESET%^%^ORANGE%^rumour mill %^BOLD%^%^WHITE%^ to see if you can dig up any %^BOLD%^%^BLACK%^dirt%^WHITE%^ on "; break; case WILD_SOURCE: message = "%^BOLD%^%^GREEN%^You set your ear to the %^BOLD%^%^WHITE%^%^winds %^BOLD%^%^GREEN%^and check with the peoples of the %^RESET%^%^GREEN%^wild places%^BOLD%^%^GREEN%^to see if you can dig up any %^BOLD%^%^BLACK%^dirt%^GREEN%^ on "; break; case BEAST_SOURCE: message = "%^MAGENTA%^You set your ear to the %^RESET%^%^ORANGE%^rumour mill %^MAGENTA%^%^to see if you can dig up any %^BOLD%^%^BLACK%^dirt%^RESET%^%^MAGENTA%^ on "; break; default: tell_object(ob, "This is not a place where you'll have much luck picking up rumours. Try in urban areas, or in the wilds"); return; break; } tell_object(ob, message + capitalize(name)); if (ob->query_property("tracing")) ob->remove_property("tracing"); if (ob->query_property("investigating")) ob->remove_property("investigating"); if (ob->query_property("gossipping")) ob->remove_property("gossipping"); ob->set_property("investigating",lower_case(name)); ob->set_property("gossipping", s_type); } void trace_rumour(string name, int num, object ob){ int s_type, * nums, rumour_num, rum_type; object room; string message, * subjects; mixed * heard_rum, rum; mapping my_heard_rumours, rumours_about_subject; if (!objectp(ob)) return; room = environment(ob); my_heard_rumours = RUMOURS_D->query_heard_rumours(ob->query_true_name()); if (!mapp(my_heard_rumours) ||sizeof(my_heard_rumours)<1) { tell_object(ob, "You have not heard any rumours yet, so there's nothing for you to try to trace the source of"); return; } subjects = keys(my_heard_rumours); if (member_array(name, subjects) == -1) { tell_object(ob, "You have not heard any rumours about " + capitalize(name) + ", so there's nothing for you to try to trace the source of"); return; } rumours_about_subject = my_heard_rumours[name]; if (!mapp(rumours_about_subject) ||sizeof(rumours_about_subject)<1) { tell_object(ob, "You have not heard any rumours about " + capitalize(name) + ", so there's nothing for you to try to trace the source of"); return; } nums = keys(rumours_about_subject); if (member_array(num, nums) ==-1) { tell_object(ob, "You have not heard a rumour number " + num + " about " + capitalize(name) + ", so there's nothing for you to try to trace the source of"); return; } if (!objectp(room)) { tell_object(ob, "Error with the gossip command - you are not in a valid room. 
Please make a bug report"); return; } s_type = RUMOURS_D->check_room_source_type(room); heard_rum = rumours_about_subject[num]; rum = RUMOURS_D->query_rumour(name, heard_rum[1]); if (sizeof(rum)<1) { tell_object(ob, "Sorry, there was an error finding the rumour that you are trying to trace. Please make a bug report"); return; } rum_type = rum[SOURCE_TYPE]; if (rum_type != s_type) { report_wrong_type(s_type, rum_type, ob); return; } switch(s_type) { case CITY_SOURCE: message = "%^BOLD%^%^WHITE%^You set about the task of asking around to try to trace the source of the rumour "; break; case WILD_SOURCE: message = "%^BOLD%^%^GREEN%^You use your connections in the wild to try to track the rumour back to its source "; break; case BEAST_SOURCE: message = "%^MAGENTA%^You set about the task of tracing the source of the rumour "; break; default: tell_object(ob, "This is not a place where you'll have much luck picking up rumours. Try in urban areas, or in the wilds"); return; break; } tell_object(ob, message); if (ob->query_property("tracing")) ob->remove_property("tracing"); if (ob->query_property("investigating")) ob->remove_property("investigating"); if (ob->query_property("gossipping")) ob->remove_property("gossipping"); ob->set_property("tracing",lower_case(name) + "_" + num); ob->set_property("gossipping", s_type); } report_wrong_type(int s_type, int rum_type, ob){ switch(rum_type) { case CITY_SOURCE: tell_object(ob, "%^BOLD%^%^WHITE%^This rumour seems to have originated among the %^RESET%^city folk%^BOLD%^%^WHITE%^ of humans and their allies. If you want to trace its source, you'll have to check there"); break; case BEAST_SOURCE: tell_object(ob, "%^BOLD%^%^WHITE%^This rumour seems to have originated among the %^RESET%^%^MAGENTA%^beast races%^BOLD%^%^WHITE%^ of the realms. To trace its source, you'll probably have to check in one of their cities"); break; case WILD_SOURCE: tell_object(ob, "%^GREEN%^This rumour seems to have originated among the %^BOLD%^%^GREEN%^wild folk%^RESET%^%^GREEN%^ of the realms. To trace its source, you'll probably have to check in one of their cities"); break; } }
178149.c
/* * PgBouncer - Lightweight connection pooler for PostgreSQL. * * Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * Admin console commands. */ #include "bouncer.h" #include <usual/regex.h> #include <usual/netdb.h> #include <usual/endian.h> /* regex elements */ #define WS0 "[ \t\n\r]*" #define WS1 "[ \t\n\r]+" #define WORD "(\"([^\"]+|\"\")*\"|[0-9a-z_]+)" #define STRING "('([^']|'')*')" /* possible max + 1 */ #define MAX_GROUPS 10 /* group numbers */ #define CMD_NAME 1 #define CMD_ARG 4 #define SET_KEY 1 #define SET_VAL 4 typedef bool (*cmd_func_t)(PgSocket *admin, const char *arg); struct cmd_lookup { const char *word; cmd_func_t func; }; /* CMD [arg]; */ static const char cmd_normal_rx[] = "^" WS0 WORD "(" WS1 WORD ")?" WS0 "(;" WS0 ")?$"; /* SET with simple value */ static const char cmd_set_word_rx[] = "^" WS0 "set" WS1 WORD WS0 "(=|to)" WS0 WORD WS0 "(;" WS0 ")?$"; /* SET with quoted value */ static const char cmd_set_str_rx[] = "^" WS0 "set" WS1 WORD WS0 "(=|to)" WS0 STRING WS0 "(;" WS0 ")?$"; /* compiled regexes */ static regex_t rc_cmd; static regex_t rc_set_word; static regex_t rc_set_str; static PgPool *admin_pool; /* only valid during processing */ static const char *current_query; void admin_cleanup(void) { regfree(&rc_cmd); regfree(&rc_set_str); regfree(&rc_set_word); admin_pool = NULL; } static bool syntax_error(PgSocket *admin) { return admin_error(admin, "invalid command '%s', use SHOW HELP;", current_query ? current_query : "<no query>"); } static bool exec_cmd(struct cmd_lookup *lookup, PgSocket *admin, const char *cmd, const char *arg) { for (; lookup->word; lookup++) { if (strcasecmp(lookup->word, cmd) == 0) return lookup->func(admin, arg); } return syntax_error(admin); } bool admin_error(PgSocket *admin, const char *fmt, ...) 
{ char str[1024]; va_list ap; bool res = true; va_start(ap, fmt); vsnprintf(str, sizeof(str), fmt, ap); va_end(ap); log_error("%s", str); if (admin) res = send_pooler_error(admin, true, str); return res; } static int count_paused_databases(void) { struct List *item; PgDatabase *db; int cnt = 0; statlist_for_each(item, &database_list) { db = container_of(item, PgDatabase, head); cnt += db->db_paused; } return cnt; } static int count_db_active(PgDatabase *db) { struct List *item; PgPool *pool; int cnt = 0; statlist_for_each(item, &pool_list) { pool = container_of(item, PgPool, head); if (pool->db != db) continue; cnt += pool_server_count(pool); } return cnt; } bool admin_flush(PgSocket *admin, PktBuf *buf, const char *desc) { pktbuf_write_CommandComplete(buf, desc); pktbuf_write_ReadyForQuery(buf); return pktbuf_send_queued(buf, admin); } bool admin_ready(PgSocket *admin, const char *desc) { PktBuf buf; uint8_t tmp[512]; pktbuf_static(&buf, tmp, sizeof(tmp)); pktbuf_write_CommandComplete(&buf, desc); pktbuf_write_ReadyForQuery(&buf); return pktbuf_send_immediate(&buf, admin); } /* * some silly clients start actively messing with server parameters * without checking if thats necessary. Fake some env for them. */ struct FakeParam { const char *name; const char *value; }; static const struct FakeParam fake_param_list[] = { { "client_encoding", "UTF-8" }, { "default_transaction_isolation", "read committed" }, { "standard_conforming_strings", "on" }, { "datestyle", "ISO" }, { "timezone", "GMT" }, { NULL }, }; /* fake result send, returns if handled */ static bool fake_show(PgSocket *admin, const char *name) { PktBuf *buf; const struct FakeParam *p; bool got = false; for (p = fake_param_list; p->name; p++) { if (strcasecmp(name, p->name) == 0) { got = true; break; } } if (got) { buf = pktbuf_dynamic(256); if (buf) { pktbuf_write_RowDescription(buf, "s", p->name); pktbuf_write_DataRow(buf, "s", p->value); admin_flush(admin, buf, "SHOW"); } else admin_error(admin, "no mem"); } return got; } static bool fake_set(PgSocket *admin, const char *key, const char *val) { PktBuf *buf; const struct FakeParam *p; bool got = false; for (p = fake_param_list; p->name; p++) { if (strcasecmp(key, p->name) == 0) { got = true; break; } } if (got) { buf = pktbuf_dynamic(256); if (buf) { pktbuf_write_Notice(buf, "SET ignored"); admin_flush(admin, buf, "SET"); } else admin_error(admin, "no mem"); } return got; } /* Command: SET key = val; */ static bool admin_set(PgSocket *admin, const char *key, const char *val) { char tmp[512]; bool ok; if (fake_set(admin, key, val)) return true; if (admin->admin_user) { ok = set_config_param(key, val); if (ok) { snprintf(tmp, sizeof(tmp), "SET %s=%s", key, val); return admin_ready(admin, tmp); } else { return admin_error(admin, "SET failed"); } } else return admin_error(admin, "admin access needed"); } /* send a row with sendmsg, optionally attaching a fd */ static bool send_one_fd(PgSocket *admin, int fd, const char *task, const char *user, const char *db, const char *addr, int port, uint64_t ckey, int link, const char *client_enc, const char *std_strings, const char *datestyle, const char *timezone, const char *password) { struct msghdr msg; struct cmsghdr *cmsg; struct iovec iovec; int res; uint8_t cntbuf[CMSG_SPACE(sizeof(int))]; struct PktBuf *pkt = pktbuf_temp(); pktbuf_write_DataRow(pkt, "issssiqisssss", fd, task, user, db, addr, port, ckey, link, client_enc, std_strings, datestyle, timezone, password); if (pkt->failed) return false; iovec.iov_base = pkt->buf; iovec.iov_len = 
pktbuf_written(pkt); /* sending fds */ memset(&msg, 0, sizeof(msg)); msg.msg_iov = &iovec; msg.msg_iovlen = 1; /* attach a fd */ if (pga_is_unix(&admin->remote_addr) && admin->own_user && !admin->sbuf.tls) { msg.msg_control = cntbuf; msg.msg_controllen = sizeof(cntbuf); cmsg = CMSG_FIRSTHDR(&msg); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; cmsg->cmsg_len = CMSG_LEN(sizeof(int)); memcpy(CMSG_DATA(cmsg), &fd, sizeof(int)); msg.msg_controllen = cmsg->cmsg_len; } slog_debug(admin, "sending socket list: fd=%d, len=%d", fd, (int)msg.msg_controllen); if (msg.msg_controllen) { res = safe_sendmsg(sbuf_socket(&admin->sbuf), &msg, 0); } else { res = sbuf_op_send(&admin->sbuf, pkt->buf, pktbuf_written(pkt)); } if (res < 0) { log_error("send_one_fd: sendmsg error: %s", strerror(errno)); return false; } else if ((size_t)res != iovec.iov_len) { log_error("send_one_fd: partial sendmsg"); return false; } return true; } /* send a row with sendmsg, optionally attaching a fd */ static bool show_one_fd(PgSocket *admin, PgSocket *sk) { PgAddr *addr = &sk->remote_addr; struct MBuf tmp; VarCache *v = &sk->vars; uint64_t ckey; const struct PStr *client_encoding = v->var_list[VClientEncoding]; const struct PStr *std_strings = v->var_list[VStdStr]; const struct PStr *datestyle = v->var_list[VDateStyle]; const struct PStr *timezone = v->var_list[VTimeZone]; char addrbuf[PGADDR_BUF]; const char *password = NULL; /* Skip TLS sockets */ if (sk->sbuf.tls || (sk->link && sk->link->sbuf.tls)) return true; mbuf_init_fixed_reader(&tmp, sk->cancel_key, 8); if (!mbuf_get_uint64be(&tmp, &ckey)) return false; if (sk->pool && sk->pool->db->auth_user && sk->auth_user && !find_user(sk->auth_user->name)) password = sk->auth_user->passwd; /* PAM requires passwords as well since they are not stored externally */ if (cf_auth_type == AUTH_PAM && !find_user(sk->auth_user->name)) password = sk->auth_user->passwd; return send_one_fd(admin, sbuf_socket(&sk->sbuf), is_server_socket(sk) ? "server" : "client", sk->auth_user ? sk->auth_user->name : NULL, sk->pool ? sk->pool->db->name : NULL, pga_ntop(addr, addrbuf, sizeof(addrbuf)), pga_port(addr), ckey, sk->link ? sbuf_socket(&sk->link->sbuf) : 0, client_encoding ? client_encoding->str : NULL, std_strings ? std_strings->str : NULL, datestyle ? datestyle->str : NULL, timezone ? 
timezone->str : NULL, password); } static bool show_pooler_cb(void *arg, int fd, const PgAddr *a) { char buf[PGADDR_BUF]; return send_one_fd(arg, fd, "pooler", NULL, NULL, pga_ntop(a, buf, sizeof(buf)), pga_port(a), 0, 0, NULL, NULL, NULL, NULL, NULL); } /* send a row with sendmsg, optionally attaching a fd */ static bool show_pooler_fds(PgSocket *admin) { return for_each_pooler_fd(show_pooler_cb, admin); } static bool show_fds_from_list(PgSocket *admin, struct StatList *list) { struct List *item; PgSocket *sk; bool res = true; statlist_for_each(item, list) { sk = container_of(item, PgSocket, head); res = show_one_fd(admin, sk); if (!res) break; } return res; } static PgDatabase *find_or_register_database(PgSocket *admin, const char *name) { PgDatabase *db = find_database(name); if (db == NULL) { db = register_auto_database(name); if (db != NULL) { slog_info(admin, "registered new auto-database: %s", name); } } return db; } /* * Command: SHOW FDS * * If privileged connection, send also actual fds */ static bool admin_show_fds(PgSocket *admin, const char *arg) { struct List *item; PgPool *pool; bool res; /* * Dangerous to show to everybody: * - can lock pooler as code flips async option * - show cancel keys for all users * - shows passwords (md5) for dynamic users */ if (!admin->admin_user) return admin_error(admin, "admin access needed"); /* * It's very hard to send it reliably over in async manner, * so turn async off for this resultset. */ socket_set_nonblocking(sbuf_socket(&admin->sbuf), 0); /* * send resultset */ SEND_RowDescription(res, admin, "issssiqisssss", "fd", "task", "user", "database", "addr", "port", "cancel", "link", "client_encoding", "std_strings", "datestyle", "timezone", "password"); if (res) res = show_pooler_fds(admin); if (res) res = show_fds_from_list(admin, &login_client_list); statlist_for_each(item, &pool_list) { pool = container_of(item, PgPool, head); if (pool->db->admin) continue; res = res && show_fds_from_list(admin, &pool->active_client_list); res = res && show_fds_from_list(admin, &pool->waiting_client_list); res = res && show_fds_from_list(admin, &pool->active_server_list); res = res && show_fds_from_list(admin, &pool->idle_server_list); res = res && show_fds_from_list(admin, &pool->used_server_list); res = res && show_fds_from_list(admin, &pool->tested_server_list); res = res && show_fds_from_list(admin, &pool->new_server_list); if (!res) break; } if (res) res = admin_ready(admin, "SHOW"); /* turn async back on */ socket_set_nonblocking(sbuf_socket(&admin->sbuf), 1); return res; } /* Command: SHOW DATABASES */ static bool admin_show_databases(PgSocket *admin, const char *arg) { PgDatabase *db; struct List *item; const char *f_user; PktBuf *buf; struct CfValue cv; const char *pool_mode_str; cv.extra = pool_mode_map; buf = pktbuf_dynamic(256); if (!buf) { admin_error(admin, "no mem"); return true; } pktbuf_write_RowDescription(buf, "ssissiisiiii", "name", "host", "port", "database", "force_user", "pool_size", "reserve_pool", "pool_mode", "max_connections", "current_connections", "paused", "disabled"); statlist_for_each(item, &database_list) { db = container_of(item, PgDatabase, head); f_user = db->forced_user ? 
db->forced_user->name : NULL; pool_mode_str = NULL; cv.value_p = &db->pool_mode; if (db->pool_mode != POOL_INHERIT) pool_mode_str = cf_get_lookup(&cv); pktbuf_write_DataRow(buf, "ssissiisiiii", db->name, db->host, db->port, db->dbname, f_user, db->pool_size, db->res_pool_size, pool_mode_str, database_max_connections(db), db->connection_count, db->db_paused, db->db_disabled); } admin_flush(admin, buf, "SHOW"); return true; } /* Command: SHOW LISTS */ static bool admin_show_lists(PgSocket *admin, const char *arg) { PktBuf *buf = pktbuf_dynamic(256); if (!buf) { admin_error(admin, "no mem"); return true; } pktbuf_write_RowDescription(buf, "si", "list", "items"); #define SENDLIST(name, size) pktbuf_write_DataRow(buf, "si", (name), (size)) SENDLIST("databases", statlist_count(&database_list)); SENDLIST("users", statlist_count(&user_list)); SENDLIST("pools", statlist_count(&pool_list)); SENDLIST("free_clients", slab_free_count(client_cache)); SENDLIST("used_clients", slab_active_count(client_cache)); SENDLIST("login_clients", statlist_count(&login_client_list)); SENDLIST("free_servers", slab_free_count(server_cache)); SENDLIST("used_servers", slab_active_count(server_cache)); { int names, zones, qry, pend; adns_info(adns, &names, &zones, &qry, &pend); SENDLIST("dns_names", names); SENDLIST("dns_zones", zones); SENDLIST("dns_queries", qry); SENDLIST("dns_pending", pend); } admin_flush(admin, buf, "SHOW"); return true; } /* Command: SHOW USERS */ static bool admin_show_users(PgSocket *admin, const char *arg) { PgUser *user; struct List *item; PktBuf *buf = pktbuf_dynamic(256); struct CfValue cv; const char *pool_mode_str; if (!buf) { admin_error(admin, "no mem"); return true; } cv.extra = pool_mode_map; pktbuf_write_RowDescription(buf, "ss", "name", "pool_mode"); statlist_for_each(item, &user_list) { user = container_of(item, PgUser, head); pool_mode_str = NULL; cv.value_p = &user->pool_mode; if (user->pool_mode != POOL_INHERIT) pool_mode_str = cf_get_lookup(&cv); pktbuf_write_DataRow(buf, "ss", user->name, pool_mode_str); } admin_flush(admin, buf, "SHOW"); return true; } #define SKF_STD "sssssisiTTiiissis" #define SKF_DBG "sssssisiTTiiissisiiiiiii" static void socket_header(PktBuf *buf, bool debug) { pktbuf_write_RowDescription(buf, debug ? SKF_DBG : SKF_STD, "type", "user", "database", "state", "addr", "port", "local_addr", "local_port", "connect_time", "request_time", "wait", "wait_us", "close_needed", "ptr", "link", "remote_pid", "tls", /* debug follows */ "recv_pos", "pkt_pos", "pkt_remain", "send_pos", "send_remain", "pkt_avail", "send_avail"); } static void adr2txt(const PgAddr *adr, char *dst, unsigned dstlen) { pga_ntop(adr, dst, dstlen); } static void socket_row(PktBuf *buf, PgSocket *sk, const char *state, bool debug) { int pkt_avail = 0, send_avail = 0; int remote_pid; char ptrbuf[128], linkbuf[128]; char l_addr[PGADDR_BUF], r_addr[PGADDR_BUF]; IOBuf *io = sk->sbuf.io; char infobuf[96] = ""; usec_t now = get_cached_time(); usec_t wait_time = sk->query_start ? 
now - sk->query_start : 0; if (io) { pkt_avail = iobuf_amount_parse(sk->sbuf.io); send_avail = iobuf_amount_pending(sk->sbuf.io); } adr2txt(&sk->remote_addr, r_addr, sizeof(r_addr)); adr2txt(&sk->local_addr, l_addr, sizeof(l_addr)); snprintf(ptrbuf, sizeof(ptrbuf), "%p", sk); if (sk->link) snprintf(linkbuf, sizeof(linkbuf), "%p", sk->link); else linkbuf[0] = 0; /* get pid over unix socket */ if (pga_is_unix(&sk->remote_addr)) remote_pid = sk->remote_addr.scred.pid; else remote_pid = 0; /* if that failed, get it from cancel key */ if (is_server_socket(sk) && remote_pid == 0) remote_pid = be32dec(sk->cancel_key); if (sk->sbuf.tls) tls_get_connection_info(sk->sbuf.tls, infobuf, sizeof infobuf); pktbuf_write_DataRow(buf, debug ? SKF_DBG : SKF_STD, is_server_socket(sk) ? "S" :"C", sk->auth_user ? sk->auth_user->name : "(nouser)", sk->pool ? sk->pool->db->name : "(nodb)", state, r_addr, pga_port(&sk->remote_addr), l_addr, pga_port(&sk->local_addr), sk->connect_time, sk->request_time, (int)(wait_time / USEC), (int)(wait_time % USEC), sk->close_needed, ptrbuf, linkbuf, remote_pid, infobuf, /* debug */ io ? io->recv_pos : 0, io ? io->parse_pos : 0, sk->sbuf.pkt_remain, io ? io->done_pos : 0, 0, pkt_avail, send_avail); } /* Helper for SHOW CLIENTS/SERVERS/SOCKETS */ static void show_socket_list(PktBuf *buf, struct StatList *list, const char *state, bool debug) { struct List *item; PgSocket *sk; statlist_for_each(item, list) { sk = container_of(item, PgSocket, head); socket_row(buf, sk, state, debug); } } /* Command: SHOW CLIENTS */ static bool admin_show_clients(PgSocket *admin, const char *arg) { struct List *item; PgPool *pool; PktBuf *buf = pktbuf_dynamic(256); if (!buf) { admin_error(admin, "no mem"); return true; } socket_header(buf, false); statlist_for_each(item, &pool_list) { pool = container_of(item, PgPool, head); show_socket_list(buf, &pool->active_client_list, "active", false); show_socket_list(buf, &pool->waiting_client_list, "waiting", false); } admin_flush(admin, buf, "SHOW"); return true; } /* Command: SHOW SERVERS */ static bool admin_show_servers(PgSocket *admin, const char *arg) { struct List *item; PgPool *pool; PktBuf *buf; buf = pktbuf_dynamic(256); if (!buf) { admin_error(admin, "no mem"); return true; } socket_header(buf, false); statlist_for_each(item, &pool_list) { pool = container_of(item, PgPool, head); show_socket_list(buf, &pool->active_server_list, "active", false); show_socket_list(buf, &pool->idle_server_list, "idle", false); show_socket_list(buf, &pool->used_server_list, "used", false); show_socket_list(buf, &pool->tested_server_list, "tested", false); show_socket_list(buf, &pool->new_server_list, "new", false); } admin_flush(admin, buf, "SHOW"); return true; } /* Command: SHOW SOCKETS */ static bool admin_show_sockets(PgSocket *admin, const char *arg) { struct List *item; PgPool *pool; PktBuf *buf; buf = pktbuf_dynamic(256); if (!buf) { admin_error(admin, "no mem"); return true; } socket_header(buf, true); statlist_for_each(item, &pool_list) { pool = container_of(item, PgPool, head); show_socket_list(buf, &pool->active_client_list, "cl_active", true); show_socket_list(buf, &pool->waiting_client_list, "cl_waiting", true); show_socket_list(buf, &pool->active_server_list, "sv_active", true); show_socket_list(buf, &pool->idle_server_list, "sv_idle", true); show_socket_list(buf, &pool->used_server_list, "sv_used", true); show_socket_list(buf, &pool->tested_server_list, "sv_tested", true); show_socket_list(buf, &pool->new_server_list, "sv_login", true); } 
show_socket_list(buf, &login_client_list, "cl_login", true); admin_flush(admin, buf, "SHOW"); return true; } static void show_active_socket_list(PktBuf *buf, struct StatList *list, const char *state) { struct List *item; statlist_for_each(item, list) { PgSocket *sk = container_of(item, PgSocket, head); if (!sbuf_is_empty(&sk->sbuf)) socket_row(buf, sk, state, true); } } /* Command: SHOW ACTIVE_SOCKETS */ static bool admin_show_active_sockets(PgSocket *admin, const char *arg) { struct List *item; PgPool *pool; PktBuf *buf; buf = pktbuf_dynamic(256); if (!buf) { admin_error(admin, "no mem"); return true; } socket_header(buf, true); statlist_for_each(item, &pool_list) { pool = container_of(item, PgPool, head); show_active_socket_list(buf, &pool->active_client_list, "cl_active"); show_active_socket_list(buf, &pool->waiting_client_list, "cl_waiting"); show_active_socket_list(buf, &pool->active_server_list, "sv_active"); show_active_socket_list(buf, &pool->idle_server_list, "sv_idle"); show_active_socket_list(buf, &pool->used_server_list, "sv_used"); show_active_socket_list(buf, &pool->tested_server_list, "sv_tested"); show_active_socket_list(buf, &pool->new_server_list, "sv_login"); } show_active_socket_list(buf, &login_client_list, "cl_login"); admin_flush(admin, buf, "SHOW"); return true; } /* Command: SHOW POOLS */ static bool admin_show_pools(PgSocket *admin, const char *arg) { struct List *item; PgPool *pool; PktBuf *buf; PgSocket *waiter; usec_t now = get_cached_time(); usec_t max_wait; struct CfValue cv; int pool_mode; cv.extra = pool_mode_map; cv.value_p = &pool_mode; buf = pktbuf_dynamic(256); if (!buf) { admin_error(admin, "no mem"); return true; } pktbuf_write_RowDescription(buf, "ssiiiiiiiiis", "database", "user", "cl_active", "cl_waiting", "sv_active", "sv_idle", "sv_used", "sv_tested", "sv_login", "maxwait", "maxwait_us", "pool_mode"); statlist_for_each(item, &pool_list) { pool = container_of(item, PgPool, head); waiter = first_socket(&pool->waiting_client_list); max_wait = (waiter && waiter->query_start) ? 
now - waiter->query_start : 0; pool_mode = pool_pool_mode(pool); pktbuf_write_DataRow(buf, "ssiiiiiiiiis", pool->db->name, pool->user->name, statlist_count(&pool->active_client_list), statlist_count(&pool->waiting_client_list), statlist_count(&pool->active_server_list), statlist_count(&pool->idle_server_list), statlist_count(&pool->used_server_list), statlist_count(&pool->tested_server_list), statlist_count(&pool->new_server_list), /* how long is the oldest client waited */ (int)(max_wait / USEC), (int)(max_wait % USEC), cf_get_lookup(&cv)); } admin_flush(admin, buf, "SHOW"); return true; } static void slab_stat_cb(void *arg, const char *slab_name, unsigned size, unsigned free, unsigned total) { PktBuf *buf = arg; unsigned alloc = total * size; pktbuf_write_DataRow(buf, "siiii", slab_name, size, total - free, free, alloc); } /* Command: SHOW MEM */ static bool admin_show_mem(PgSocket *admin, const char *arg) { PktBuf *buf; buf = pktbuf_dynamic(256); if (!buf) { admin_error(admin, "no mem"); return true; } pktbuf_write_RowDescription(buf, "siiii", "name", "size", "used", "free", "memtotal"); slab_stats(slab_stat_cb, buf); admin_flush(admin, buf, "SHOW"); return true; } /* Command: SHOW DNS_HOSTS */ static void dns_name_cb(void *arg, const char *name, const struct addrinfo *ai, usec_t ttl) { PktBuf *buf = arg; char *s, *end; char adrs[1024]; usec_t now = get_cached_time(); end = adrs + sizeof(adrs) - 2; for (s = adrs; ai && s < end; ai = ai->ai_next) { if (s != adrs) *s++ = ','; sa2str(ai->ai_addr, s, end - s); s += strlen(s); } *s = 0; /* * Ttl can be smaller than now if we are waiting for dns reply for long. * * It's better to show 0 in that case as otherwise it confuses users into * thinking that there is large ttl for the name. */ pktbuf_write_DataRow(buf, "sqs", name, ttl < now ? 0 : (ttl - now) / USEC, adrs); } static bool admin_show_dns_hosts(PgSocket *admin, const char *arg) { PktBuf *buf; buf = pktbuf_dynamic(256); if (!buf) { admin_error(admin, "no mem"); return true; } pktbuf_write_RowDescription(buf, "sqs", "hostname", "ttl", "addrs"); adns_walk_names(adns, dns_name_cb, buf); admin_flush(admin, buf, "SHOW"); return true; } /* Command: SHOW DNS_ZONES */ static void dns_zone_cb(void *arg, const char *name, uint32_t serial, int nhosts) { PktBuf *buf = arg; pktbuf_write_DataRow(buf, "sqi", name, (uint64_t)serial, nhosts); } static bool admin_show_dns_zones(PgSocket *admin, const char *arg) { PktBuf *buf; buf = pktbuf_dynamic(256); if (!buf) { admin_error(admin, "no mem"); return true; } pktbuf_write_RowDescription(buf, "sqi", "zonename", "serial", "count"); adns_walk_zones(adns, dns_zone_cb, buf); admin_flush(admin, buf, "SHOW"); return true; } /* Command: SHOW CONFIG */ static void show_one_param(void *arg, const char *name, const char *val, bool reloadable) { PktBuf *buf = arg; pktbuf_write_DataRow(buf, "sss", name, val, reloadable ? 
"yes" : "no"); } static bool admin_show_config(PgSocket *admin, const char *arg) { PktBuf *buf; buf = pktbuf_dynamic(256); if (!buf) { admin_error(admin, "no mem"); return true; } pktbuf_write_RowDescription(buf, "sss", "key", "value", "changeable"); config_for_each(show_one_param, buf); admin_flush(admin, buf, "SHOW"); return true; } /* Command: RELOAD */ static bool admin_cmd_reload(PgSocket *admin, const char *arg) { if (arg && *arg) return syntax_error(admin); if (!admin->admin_user) return admin_error(admin, "admin access needed"); log_info("RELOAD command issued"); load_config(); return admin_ready(admin, "RELOAD"); } /* Command: SHUTDOWN */ static bool admin_cmd_shutdown(PgSocket *admin, const char *arg) { if (arg && *arg) return syntax_error(admin); if (!admin->admin_user) return admin_error(admin, "admin access needed"); /* * note: new pooler expects unix socket file gone when it gets * event from fd. Currently atexit() cleanup should be called * before closing open sockets. */ log_info("SHUTDOWN command issued"); cf_shutdown = 2; event_loopbreak(); return true; } static void full_resume(void) { int tmp_mode = cf_pause_mode; cf_pause_mode = P_NONE; if (tmp_mode == P_SUSPEND) resume_all(); /* avoid surprise later if cf_shutdown stays set */ if (cf_shutdown) { log_info("canceling shutdown"); cf_shutdown = 0; } } /* Command: RESUME */ static bool admin_cmd_resume(PgSocket *admin, const char *arg) { if (!admin->admin_user) return admin_error(admin, "admin access needed"); if (!arg[0]) { log_info("RESUME command issued"); if (cf_pause_mode != P_NONE) full_resume(); else return admin_error(admin, "pooler is not paused/suspended"); } else { PgDatabase *db = find_database(arg); log_info("RESUME '%s' command issued", arg); if (db == NULL) return admin_error(admin, "no such database: %s", arg); if (!db->db_paused) return admin_error(admin, "database %s is not paused", arg); db->db_paused = 0; } return admin_ready(admin, "RESUME"); } /* Command: SUSPEND */ static bool admin_cmd_suspend(PgSocket *admin, const char *arg) { if (arg && *arg) return syntax_error(admin); if (!admin->admin_user) return admin_error(admin, "admin access needed"); if (cf_pause_mode) return admin_error(admin, "already suspended/paused"); /* suspend needs to be able to flush buffers */ if (count_paused_databases() > 0) return admin_error(admin, "cannot suspend with paused databases"); log_info("SUSPEND command issued"); cf_pause_mode = P_SUSPEND; admin->wait_for_response = 1; suspend_pooler(); g_suspend_start = get_cached_time(); return true; } /* Command: PAUSE */ static bool admin_cmd_pause(PgSocket *admin, const char *arg) { if (!admin->admin_user) return admin_error(admin, "admin access needed"); if (cf_pause_mode) return admin_error(admin, "already suspended/paused"); if (!arg[0]) { log_info("PAUSE command issued"); cf_pause_mode = P_PAUSE; admin->wait_for_response = 1; } else { PgDatabase *db; log_info("PAUSE '%s' command issued", arg); db = find_or_register_database(admin, arg); if (db == NULL) return admin_error(admin, "no such database: %s", arg); if (db == admin->pool->db) return admin_error(admin, "cannot pause admin db: %s", arg); db->db_paused = 1; if (count_db_active(db) > 0) admin->wait_for_response = 1; else return admin_ready(admin, "PAUSE"); } return true; } /* Command: RECONNECT */ static bool admin_cmd_reconnect(PgSocket *admin, const char *arg) { if (!admin->admin_user) return admin_error(admin, "admin access needed"); if (!arg[0]) { struct List *item; PgPool *pool; log_info("RECONNECT command 
issued"); statlist_for_each(item, &pool_list) { pool = container_of(item, PgPool, head); if (pool->db->admin) continue; tag_database_dirty(pool->db); } } else { PgDatabase *db; log_info("RECONNECT '%s' command issued", arg); db = find_or_register_database(admin, arg); if (db == NULL) return admin_error(admin, "no such database: %s", arg); if (db == admin->pool->db) return admin_error(admin, "cannot reconnect admin db: %s", arg); tag_database_dirty(db); } return admin_ready(admin, "RECONNECT"); } /* Command: DISABLE */ static bool admin_cmd_disable(PgSocket *admin, const char *arg) { PgDatabase *db; if (!admin->admin_user) return admin_error(admin, "admin access needed"); if (!arg[0]) return admin_error(admin, "a database is required"); log_info("DISABLE '%s' command issued", arg); db = find_or_register_database(admin, arg); if (db == NULL) return admin_error(admin, "no such database: %s", arg); if (db->admin) return admin_error(admin, "cannot disable admin db: %s", arg); db->db_disabled = 1; return admin_ready(admin, "DISABLE"); } /* Command: ENABLE */ static bool admin_cmd_enable(PgSocket *admin, const char *arg) { PgDatabase *db; if (!admin->admin_user) return admin_error(admin, "admin access needed"); if (!arg[0]) return admin_error(admin, "a database is required"); log_info("ENABLE '%s' command issued", arg); db = find_database(arg); if (db == NULL) return admin_error(admin, "no such database: %s", arg); if (db->admin) return admin_error(admin, "cannot disable admin db: %s", arg); db->db_disabled = 0; return admin_ready(admin, "ENABLE"); } /* Command: KILL */ static bool admin_cmd_kill(PgSocket *admin, const char *arg) { struct List *item, *tmp; PgDatabase *db; PgPool *pool; if (!admin->admin_user) return admin_error(admin, "admin access needed"); if (cf_pause_mode) return admin_error(admin, "already suspended/paused"); if (!arg[0]) return admin_error(admin, "a database is required"); log_info("KILL '%s' command issued", arg); db = find_or_register_database(admin, arg); if (db == NULL) return admin_error(admin, "no such database: %s", arg); if (db == admin->pool->db) return admin_error(admin, "cannot kill admin db: %s", arg); db->db_paused = 1; statlist_for_each_safe(item, &pool_list, tmp) { pool = container_of(item, PgPool, head); if (pool->db == db) kill_pool(pool); } return admin_ready(admin, "KILL"); } /* Command: WAIT_CLOSE */ static bool admin_cmd_wait_close(PgSocket *admin, const char *arg) { if (!admin->admin_user) return admin_error(admin, "admin access needed"); if (!arg[0]) { struct List *item; PgPool *pool; int active = 0; log_info("WAIT_CLOSE command issued"); statlist_for_each(item, &pool_list) { PgDatabase *db; pool = container_of(item, PgPool, head); db = pool->db; db->db_wait_close = 1; active += count_db_active(db); } if (active > 0) admin->wait_for_response = 1; else return admin_ready(admin, "WAIT_CLOSE"); } else { PgDatabase *db; log_info("WAIT_CLOSE '%s' command issued", arg); db = find_or_register_database(admin, arg); if (db == NULL) return admin_error(admin, "no such database: %s", arg); if (db == admin->pool->db) return admin_error(admin, "cannot wait in admin db: %s", arg); db->db_wait_close = 1; if (count_db_active(db) > 0) admin->wait_for_response = 1; else return admin_ready(admin, "WAIT_CLOSE"); } return true; } /* extract substring from regex group */ static bool copy_arg(const char *src, regmatch_t *glist, int gnum, char *dst, unsigned dstmax, char qchar) { regmatch_t *g = &glist[gnum]; unsigned len; const char *s; char *d = dst; unsigned i; /* no 
match, if regex allows, it must be fine */ if (g->rm_so < 0 || g->rm_eo < 0) { dst[0] = 0; return true; } len = g->rm_eo - g->rm_so; s = src + g->rm_so; /* too big value */ if (len >= dstmax) { dst[0] = 0; return false; } /* copy and unquote */ if (*s == qchar) { for (i = 1; i < len - 1; i++) { if (s[i] == qchar && s[i+1] == qchar) i++; *d++ = s[i]; } len = d - dst; } else { memcpy(dst, s, len); } dst[len] = 0; return true; } static bool admin_show_help(PgSocket *admin, const char *arg) { bool res; SEND_generic(res, admin, 'N', "sssss", "SNOTICE", "C00000", "MConsole usage", "D\n\tSHOW HELP|CONFIG|DATABASES" "|POOLS|CLIENTS|SERVERS|USERS|VERSION\n" "\tSHOW FDS|SOCKETS|ACTIVE_SOCKETS|LISTS|MEM\n" "\tSHOW DNS_HOSTS|DNS_ZONES\n" "\tSHOW STATS|STATS_TOTALS|STATS_AVERAGES|TOTALS\n" "\tSET key = arg\n" "\tRELOAD\n" "\tPAUSE [<db>]\n" "\tRESUME [<db>]\n" "\tDISABLE <db>\n" "\tENABLE <db>\n" "\tRECONNECT [<db>]\n" "\tKILL <db>\n" "\tSUSPEND\n" "\tSHUTDOWN\n", "\tWAIT_CLOSE [<db>]", ""); if (res) res = admin_ready(admin, "SHOW"); return res; } static bool admin_show_version(PgSocket *admin, const char *arg) { PktBuf *buf; buf = pktbuf_dynamic(128); if (!buf) { admin_error(admin, "no mem"); return true; } pktbuf_write_RowDescription(buf, "s", "version"); pktbuf_write_DataRow(buf, "s", PACKAGE_STRING); admin_flush(admin, buf, "SHOW"); return true; } static bool admin_show_stats(PgSocket *admin, const char *arg) { return admin_database_stats(admin, &pool_list); } static bool admin_show_stats_totals(PgSocket *admin, const char *arg) { return admin_database_stats_totals(admin, &pool_list); } static bool admin_show_stats_averages(PgSocket *admin, const char *arg) { return admin_database_stats_averages(admin, &pool_list); } static bool admin_show_totals(PgSocket *admin, const char *arg) { return show_stat_totals(admin, &pool_list); } static struct cmd_lookup show_map [] = { {"clients", admin_show_clients}, {"config", admin_show_config}, {"databases", admin_show_databases}, {"fds", admin_show_fds}, {"help", admin_show_help}, {"lists", admin_show_lists}, {"pools", admin_show_pools}, {"servers", admin_show_servers}, {"sockets", admin_show_sockets}, {"active_sockets", admin_show_active_sockets}, {"stats", admin_show_stats}, {"stats_totals", admin_show_stats_totals}, {"stats_averages", admin_show_stats_averages}, {"users", admin_show_users}, {"version", admin_show_version}, {"totals", admin_show_totals}, {"mem", admin_show_mem}, {"dns_hosts", admin_show_dns_hosts}, {"dns_zones", admin_show_dns_zones}, {NULL, NULL} }; static bool admin_cmd_show(PgSocket *admin, const char *arg) { if (fake_show(admin, arg)) return true; return exec_cmd(show_map, admin, arg, NULL); } static struct cmd_lookup cmd_list [] = { {"disable", admin_cmd_disable}, {"enable", admin_cmd_enable}, {"kill", admin_cmd_kill}, {"pause", admin_cmd_pause}, {"reconnect", admin_cmd_reconnect}, {"reload", admin_cmd_reload}, {"resume", admin_cmd_resume}, {"select", admin_cmd_show}, {"show", admin_cmd_show}, {"shutdown", admin_cmd_shutdown}, {"suspend", admin_cmd_suspend}, {"wait_close", admin_cmd_wait_close}, {NULL, NULL} }; /* handle user query */ static bool admin_parse_query(PgSocket *admin, const char *q) { regmatch_t grp[MAX_GROUPS]; char cmd[16]; char arg[64]; char val[256]; bool res; bool ok; current_query = q; if (regexec(&rc_cmd, q, MAX_GROUPS, grp, 0) == 0) { ok = copy_arg(q, grp, CMD_NAME, cmd, sizeof(cmd), '"'); if (!ok) goto failed; ok = copy_arg(q, grp, CMD_ARG, arg, sizeof(arg), '"'); if (!ok) goto failed; res = exec_cmd(cmd_list, admin, 
cmd, arg); } else if (regexec(&rc_set_str, q, MAX_GROUPS, grp, 0) == 0) { ok = copy_arg(q, grp, SET_KEY, arg, sizeof(arg), '"'); if (!ok || !arg[0]) goto failed; ok = copy_arg(q, grp, SET_VAL, val, sizeof(val), '\''); if (!ok) goto failed; res = admin_set(admin, arg, val); } else if (regexec(&rc_set_word, q, MAX_GROUPS, grp, 0) == 0) { ok = copy_arg(q, grp, SET_KEY, arg, sizeof(arg), '"'); if (!ok || !arg[0]) goto failed; ok = copy_arg(q, grp, SET_VAL, val, sizeof(val), '"'); if (!ok) goto failed; res = admin_set(admin, arg, val); } else res = syntax_error(admin); done: current_query = NULL; if (!res) disconnect_client(admin, true, "failure"); return res; failed: res = admin_error(admin, "bad arguments"); goto done; } /* handle packets */ bool admin_handle_client(PgSocket *admin, PktHdr *pkt) { const char *q; bool res; /* don't tolerate partial packets */ if (incomplete_pkt(pkt)) { disconnect_client(admin, true, "incomplete pkt"); return false; } switch (pkt->type) { case 'Q': if (!mbuf_get_string(&pkt->data, &q)) { disconnect_client(admin, true, "incomplete query"); return false; } log_debug("got admin query: %s", q); res = admin_parse_query(admin, q); if (res) sbuf_prepare_skip(&admin->sbuf, pkt->len); return res; case 'X': disconnect_client(admin, false, "close req"); break; default: admin_error(admin, "unsupported pkt type: %d", pkt_desc(pkt)); disconnect_client(admin, true, "bad pkt"); break; } return false; } /** * Client is unauthenticated, look if it wants to connect * to special "pgbouncer" user. */ bool admin_pre_login(PgSocket *client, const char *username) { uid_t peer_uid = -1; gid_t peer_gid = -1; int res; client->admin_user = 0; client->own_user = 0; /* tag same uid as special */ if (pga_is_unix(&client->remote_addr)) { res = getpeereid(sbuf_socket(&client->sbuf), &peer_uid, &peer_gid); if (res >= 0 && peer_uid == getuid() && strcmp("pgbouncer", username) == 0) { client->auth_user = admin_pool->db->forced_user; client->own_user = 1; client->admin_user = 1; slog_info(client, "pgbouncer access from unix socket"); return true; } } /* * auth_type=any does not keep original username around, * so username based check has to take place here */ if (cf_auth_type == AUTH_ANY) { if (strlist_contains(cf_admin_users, username)) { client->auth_user = admin_pool->db->forced_user; client->admin_user = 1; return true; } else if (strlist_contains(cf_stats_users, username)) { client->auth_user = admin_pool->db->forced_user; return true; } } return false; } bool admin_post_login(PgSocket *client) { const char *username = client->auth_user->name; if (cf_auth_type == AUTH_ANY) return true; if (client->admin_user || strlist_contains(cf_admin_users, username)) { client->admin_user = 1; return true; } else if (strlist_contains(cf_stats_users, username)) { return true; } disconnect_client(client, true, "not allowed"); return false; } /* init special database and query parsing */ void admin_setup(void) { PgDatabase *db; PgPool *pool; PgUser *user; PktBuf *msg; int res; /* fake database */ db = add_database("pgbouncer"); if (!db) fatal("no memory for admin database"); db->port = cf_listen_port; db->pool_size = 2; db->admin = 1; db->pool_mode = POOL_STMT; if (!force_user(db, "pgbouncer", "")) fatal("no mem on startup - cannot alloc pgbouncer user"); /* fake pool */ pool = get_pool(db, db->forced_user); if (!pool) fatal("cannot create admin pool?"); admin_pool = pool; /* user */ user = find_user("pgbouncer"); if (!user) { /* fake user with disabled psw */ user = add_user("pgbouncer", ""); if (!user) 
fatal("cannot create admin user?"); } /* prepare welcome */ msg = pktbuf_dynamic(128); if (!msg) fatal("cannot create admin welcome"); pktbuf_write_AuthenticationOk(msg); pktbuf_write_ParameterStatus(msg, "server_version", PACKAGE_VERSION "/bouncer"); pktbuf_write_ParameterStatus(msg, "client_encoding", "UTF8"); pktbuf_write_ParameterStatus(msg, "server_encoding", "UTF8"); pktbuf_write_ParameterStatus(msg, "DateStyle", "ISO"); pktbuf_write_ParameterStatus(msg, "TimeZone", "GMT"); pktbuf_write_ParameterStatus(msg, "standard_conforming_strings", "on"); pktbuf_write_ParameterStatus(msg, "is_superuser", "on"); if (msg->failed) fatal("admin welcome failed"); pool->welcome_msg = msg; pool->welcome_msg_ready = 1; msg = pktbuf_dynamic(128); if (!msg) fatal("cannot create admin startup pkt"); db->startup_params = msg; pktbuf_put_string(msg, "database"); db->dbname = "pgbouncer"; pktbuf_put_string(msg, db->dbname); /* initialize regexes */ res = regcomp(&rc_cmd, cmd_normal_rx, REG_EXTENDED | REG_ICASE); if (res != 0) fatal("cmd regex compilation error"); res = regcomp(&rc_set_word, cmd_set_word_rx, REG_EXTENDED | REG_ICASE); if (res != 0) fatal("set/word regex compilation error"); res = regcomp(&rc_set_str, cmd_set_str_rx, REG_EXTENDED | REG_ICASE); if (res != 0) fatal("set/str regex compilation error"); } void admin_pause_done(void) { struct List *item, *tmp; PgSocket *admin; bool res; statlist_for_each_safe(item, &admin_pool->active_client_list, tmp) { admin = container_of(item, PgSocket, head); if (!admin->wait_for_response) continue; res = false; switch (cf_pause_mode) { case P_PAUSE: res = admin_ready(admin, "PAUSE"); break; case P_SUSPEND: res = admin_ready(admin, "SUSPEND"); break; default: if (count_paused_databases() > 0) res = admin_ready(admin, "PAUSE"); else /* FIXME */ fatal("admin_pause_done: bad state"); } if (!res) disconnect_client(admin, false, "dead admin"); else admin->wait_for_response = 0; } if (statlist_empty(&admin_pool->active_client_list) && cf_pause_mode == P_SUSPEND) { log_info("admin disappeared when suspended, doing RESUME"); cf_pause_mode = P_NONE; resume_all(); } } void admin_wait_close_done(void) { struct List *item, *tmp; PgSocket *admin; bool res; statlist_for_each_safe(item, &admin_pool->active_client_list, tmp) { admin = container_of(item, PgSocket, head); if (!admin->wait_for_response) continue; res = admin_ready(admin, "WAIT_CLOSE"); if (!res) disconnect_client(admin, false, "dead admin"); else admin->wait_for_response = 0; } } /* admin on console has pressed ^C */ void admin_handle_cancel(PgSocket *admin) { /* weird, but no reason to fail */ if (!admin->wait_for_response) slog_warning(admin, "admin cancel request for non-waiting client?"); if (cf_pause_mode != P_NONE) full_resume(); }
677642.c
#ifndef BUILD_LK #include <linux/string.h> #endif #include "lcm_drv.h" #ifdef BUILD_LK #include <platform/mt_gpio.h> #elif defined(BUILD_UBOOT) #include <asm/arch/mt_gpio.h> #else #include <mach/mt_gpio.h> #endif // --------------------------------------------------------------------------- // Local Constants // --------------------------------------------------------------------------- #define FRAME_WIDTH (540) #define FRAME_HEIGHT (960) #define LCM_ID (0x69) #define REGFLAG_DELAY 0xAB #define REGFLAG_END_OF_TABLE 0xAA // END OF REGISTERS MARKER #define LCM_ID1 0x00 #define LCM_ID2 0x00 #define LCM_ID3 0x00 #ifndef TRUE #define TRUE 1 #endif #ifndef FALSE #define FALSE 0 #endif //static unsigned int lcm_esd_test = FALSE; ///only for ESD test #define LCM_DSI_CMD_MODE 1 // --------------------------------------------------------------------------- // Local Variables // --------------------------------------------------------------------------- static LCM_UTIL_FUNCS lcm_util = {0}; #define SET_RESET_PIN(v) (lcm_util.set_reset_pin((v))) #define UDELAY(n) (lcm_util.udelay(n)) #define MDELAY(n) (lcm_util.mdelay(n)) // --------------------------------------------------------------------------- // Local Functions // --------------------------------------------------------------------------- #define dsi_set_cmdq_V2(cmd, count, ppara, force_update) lcm_util.dsi_set_cmdq_V2(cmd, count, ppara, force_update) #define dsi_set_cmdq(pdata, queue_size, force_update) lcm_util.dsi_set_cmdq(pdata, queue_size, force_update) #define wrtie_cmd(cmd) lcm_util.dsi_write_cmd(cmd) #define write_regs(addr, pdata, byte_nums) lcm_util.dsi_write_regs(addr, pdata, byte_nums) #define read_reg(cmd) lcm_util.dsi_dcs_read_lcm_reg(cmd) #define read_reg_v2(cmd, buffer, buffer_size) lcm_util.dsi_dcs_read_lcm_reg_v2(cmd, buffer, buffer_size) struct LCM_setting_table { unsigned char cmd; unsigned char count; unsigned char para_list[64]; }; #if 0 static struct LCM_setting_table lcm_initialization_setting[] = { /* Note : Data ID will depends on the following rule. count of parameters > 1 => Data ID = 0x39 count of parameters = 1 => Data ID = 0x15 count of parameters = 0 => Data ID = 0x05 Structure Format : {DCS command, count of parameters, {parameter list}} {REGFLAG_DELAY, milliseconds of time, {}}, ... Setting ending by predefined flag {REGFLAG_END_OF_TABLE, 0x00, {}} */ {0xC2, 1, {0x08}}, {0xFF, 1, {0x00}}, {0xBA, 1, {0x02}}, // 3lane {0x11, 1, {0x00}}, {REGFLAG_DELAY, 120, {}}, // Display ON {0x29, 1, {0x00}}, {REGFLAG_END_OF_TABLE, 0x00, {}}, // Note // Strongly recommend not to set Sleep out / Display On here. That will cause messed frame to be shown as later the backlight is on. 
// Setting ending by predefined flag {REGFLAG_END_OF_TABLE, 0x00, {}} }; #endif #if 0 static struct LCM_setting_table lcm_set_window[] = { {0x2A, 4, {0x00, 0x00, (FRAME_WIDTH>>8), (FRAME_WIDTH&0xFF)}}, {0x2B, 4, {0x00, 0x00, (FRAME_HEIGHT>>8), (FRAME_HEIGHT&0xFF)}}, {REGFLAG_END_OF_TABLE, 0x00, {}} }; static struct LCM_setting_table lcm_sleep_out_setting[] = { // Sleep Out {0x11, 1, {0x00}}, {REGFLAG_DELAY, 120, {}}, // Display ON {0x29, 1, {0x00}}, {REGFLAG_END_OF_TABLE, 0x00, {}} }; static struct LCM_setting_table lcm_deep_sleep_mode_in_setting[] = { // Display off sequence {0x28, 1, {0x00}}, {REGFLAG_DELAY, 50, {}}, // Sleep Mode On {0x10, 1, {0x00}}, {REGFLAG_DELAY, 100, {}}, {0x4F, 1, {0x01}}, {REGFLAG_END_OF_TABLE, 0x00, {}} }; #endif /* static struct LCM_setting_table lcm_compare_id_setting[] = { // Display off sequence {0xB9, 3, {0xFF, 0x83, 0x69}}, {REGFLAG_DELAY, 10, {}}, // Sleep Mode On // {0xC3, 1, {0xFF}}, {REGFLAG_END_OF_TABLE, 0x00, {}} }; */ #if 0 static struct LCM_setting_table lcm_backlight_level_setting[] = { {0x51, 1, {0xFF}}, {REGFLAG_END_OF_TABLE, 0x00, {}} }; static void push_table(struct LCM_setting_table *table, unsigned int count, unsigned char force_update) { unsigned int i; for(i = 0; i < count; i++) { unsigned cmd; cmd = table[i].cmd; switch (cmd) { case REGFLAG_DELAY : MDELAY(table[i].count); break; case REGFLAG_END_OF_TABLE : break; default: dsi_set_cmdq_V2(cmd, table[i].count, table[i].para_list, force_update); if (cmd != 0xFF && cmd != 0x2C && cmd != 0x3C) { //#if defined(BUILD_UBOOT) // printf("[DISP] - uboot - REG_R(0x%x) = 0x%x. \n", cmd, table[i].para_list[0]); //#endif while(read_reg(cmd) != table[i].para_list[0]); } } } } #endif // --------------------------------------------------------------------------- // LCM Driver Implementations // --------------------------------------------------------------------------- static void lcm_set_util_funcs(const LCM_UTIL_FUNCS *util) { memcpy((void*)&lcm_util, (void*)util, (size_t)sizeof(LCM_UTIL_FUNCS)); } static void lcm_get_params(LCM_PARAMS *params) { memset((void*)params, (void*)0, (size_t)sizeof(LCM_PARAMS)); params->type = LCM_TYPE_DSI; params->width = FRAME_WIDTH; params->height = FRAME_HEIGHT; #if (LCM_DSI_CMD_MODE) params->dsi.mode = CMD_MODE; #else params->dsi.mode = BURST_VDO_MODE; #endif // DSI /* Command mode setting */ params->dsi.LANE_NUM = LCM_THREE_LANE; //The following defined the fomat for data coming from LCD engine. 
params->dsi.data_format.format = LCM_DSI_FORMAT_RGB888; params->dsi.PS=LCM_PACKED_PS_24BIT_RGB888; params->dsi.CLK_HS_POST=26; params->dsi.PLL_CLOCK = 286;//dsi clock customization: should config clock value directly } static void lcm_init(void) { //int i; //unsigned char buffer[10]; //unsigned int array[16]; unsigned int data_array[16]; MDELAY(40); SET_RESET_PIN(1); MDELAY(5); data_array[0] = 0x00023902; data_array[1] = 0x0000EEFF; dsi_set_cmdq(data_array, 2, 1); MDELAY(2); data_array[0] = 0x00023902; data_array[1] = 0x00000826; dsi_set_cmdq(data_array, 2, 1); MDELAY(2); data_array[0] = 0x00023902; data_array[1] = 0x00000026; dsi_set_cmdq(data_array, 2, 1); MDELAY(2); data_array[0] = 0x00023902; data_array[1] = 0x000000FF; dsi_set_cmdq(data_array, 2, 1); MDELAY(20); SET_RESET_PIN(0); MDELAY(1); SET_RESET_PIN(1); MDELAY(40); data_array[0]=0x00023902; data_array[1]=0x000008C2;//cmd mode dsi_set_cmdq(data_array, 2, 1); data_array[0]=0x00023902; data_array[1]=0x000002BA;//MIPI lane dsi_set_cmdq(data_array, 2, 1); //{0x44, 2, {((FRAME_HEIGHT/2)>>8), ((FRAME_HEIGHT/2)&0xFF)}}, data_array[0] = 0x00033902; data_array[1] = (((FRAME_HEIGHT/2)&0xFF) << 16) | (((FRAME_HEIGHT/2)>>8) << 8) | 0x44; dsi_set_cmdq(data_array, 2, 1); data_array[0] = 0x00351500;// TE ON dsi_set_cmdq(data_array, 1, 1); //MDELAY(10); data_array[0]=0x00110500; dsi_set_cmdq(data_array, 1, 1); MDELAY(120); data_array[0]=0x00290500; dsi_set_cmdq(data_array, 1, 1); //MDELAY(50); // push_table(lcm_initialization_setting, sizeof(lcm_initialization_setting) / sizeof(struct LCM_setting_table), 1); } static void lcm_suspend(void) { unsigned int data_array[16]; data_array[0]=0x00280500; dsi_set_cmdq(data_array, 1, 1); MDELAY(120); data_array[0]=0x00100500; dsi_set_cmdq(data_array, 1, 1); MDELAY(50); data_array[0]=0x00023902; data_array[1]=0x0000014F; dsi_set_cmdq(data_array, 2, 1); } static void lcm_resume(void) { unsigned int data_array[16]; SET_RESET_PIN(1); MDELAY(10); SET_RESET_PIN(0); MDELAY(10); SET_RESET_PIN(1); MDELAY(50); data_array[0] = 0x00023902; data_array[1] = 0x0000EEFF; dsi_set_cmdq(data_array, 2, 1); MDELAY(2); data_array[0] = 0x00023902; data_array[1] = 0x00000826; dsi_set_cmdq(data_array, 2, 1); MDELAY(2); data_array[0] = 0x00023902; data_array[1] = 0x00000026; dsi_set_cmdq(data_array, 2, 1); MDELAY(2); data_array[0] = 0x00023902; data_array[1] = 0x000000FF; dsi_set_cmdq(data_array, 2, 1); MDELAY(20); SET_RESET_PIN(0); MDELAY(1); SET_RESET_PIN(1); MDELAY(40); data_array[0]=0x00023902; data_array[1]=0x000008C2;//cmd mode //data_array[1]=0x000003C2;//vdo mode dsi_set_cmdq(data_array, 2, 1); data_array[0]=0x00023902; data_array[1]=0x000002BA;//MIPI lane dsi_set_cmdq(data_array, 2, 1); //{0x44, 2, {((FRAME_HEIGHT/2)>>8), ((FRAME_HEIGHT/2)&0xFF)}}, data_array[0] = 0x00033902; data_array[1] = (((FRAME_HEIGHT/2)&0xFF) << 16) | (((FRAME_HEIGHT/2)>>8) << 8) | 0x44; dsi_set_cmdq(data_array, 2, 1); data_array[0] = 0x00351500;// TE ON dsi_set_cmdq(data_array, 1, 1); //MDELAY(10); data_array[0]=0x00110500; dsi_set_cmdq(data_array, 1, 1); MDELAY(120); data_array[0]=0x00290500; dsi_set_cmdq(data_array, 1, 1); } static void lcm_update(unsigned int x, unsigned int y, unsigned int width, unsigned int height) { unsigned int x0 = x; unsigned int y0 = y; unsigned int x1 = x0 + width - 1; unsigned int y1 = y0 + height - 1; unsigned char x0_MSB = ((x0>>8)&0xFF); unsigned char x0_LSB = (x0&0xFF); unsigned char x1_MSB = ((x1>>8)&0xFF); unsigned char x1_LSB = (x1&0xFF); unsigned char y0_MSB = ((y0>>8)&0xFF); unsigned char y0_LSB = (y0&0xFF); 
unsigned char y1_MSB = ((y1>>8)&0xFF); unsigned char y1_LSB = (y1&0xFF); unsigned int data_array[16]; data_array[0]= 0x00053902; data_array[1]= (x1_MSB<<24)|(x0_LSB<<16)|(x0_MSB<<8)|0x2a; data_array[2]= (x1_LSB); dsi_set_cmdq(data_array, 3, 1); data_array[0]= 0x00053902; data_array[1]= (y1_MSB<<24)|(y0_LSB<<16)|(y0_MSB<<8)|0x2b; data_array[2]= (y1_LSB); dsi_set_cmdq(data_array, 3, 1); data_array[0]= 0x002c3909; dsi_set_cmdq(data_array, 1, 0); } #if 0 static void lcm_setbacklight(unsigned int level) { unsigned int default_level = 145; unsigned int mapped_level = 0; //for LGE backlight IC mapping table if(level > 255) level = 255; if(level >0) mapped_level = default_level+(level)*(255-default_level)/(255); else mapped_level=0; // Refresh value of backlight level. lcm_backlight_level_setting[0].para_list[0] = mapped_level; push_table(lcm_backlight_level_setting, sizeof(lcm_backlight_level_setting) / sizeof(struct LCM_setting_table), 1); } static unsigned int lcm_esd_check(void) { #ifndef BUILD_UBOOT if(lcm_esd_test) { lcm_esd_test = FALSE; return TRUE; } /// please notice: the max return packet size is 1 /// if you want to change it, you can refer to the following marked code /// but read_reg currently only support read no more than 4 bytes.... /// if you need to read more, please let BinHan knows. /* unsigned int data_array[16]; unsigned int max_return_size = 1; data_array[0]= 0x00003700 | (max_return_size << 16); dsi_set_cmdq(&data_array, 1, 1); */ if(read_reg(0xB6) == 0x42) { return FALSE; } else { return TRUE; } #endif } static unsigned int lcm_esd_recover(void) { unsigned char para = 0; SET_RESET_PIN(1); SET_RESET_PIN(0); MDELAY(1); SET_RESET_PIN(1); MDELAY(120); push_table(lcm_initialization_setting, sizeof(lcm_initialization_setting) / sizeof(struct LCM_setting_table), 1); MDELAY(10); push_table(lcm_sleep_out_setting, sizeof(lcm_sleep_out_setting) / sizeof(struct LCM_setting_table), 1); MDELAY(10); dsi_set_cmdq_V2(0x35, 1, &para, 1); ///enable TE MDELAY(10); return TRUE; } /* static unsigned int lcm_compare_id(void) { unsigned int id1, id2, id3; unsigned char buffer[2]; unsigned int array[16]; SET_RESET_PIN(1); SET_RESET_PIN(0); MDELAY(10); SET_RESET_PIN(1); MDELAY(10); // Set Maximum return byte = 1 array[0] = 0x00013700; dsi_set_cmdq(array, 1, 1); id1 = read_reg(0xDA); id2 = read_reg(0xDB); id2 = read_reg(0xDC); #if defined(BUILD_UBOOT) printf("%s, Module ID = {%x, %x, %x} \n", __func__, id1, id2, id3); #endif return (LCM_ID1 == id1 && LCM_ID2 == id2)?1:0; } */ #endif void lcm_read_fb(unsigned char *buffer) { unsigned int array[2]; array[0] = 0x000A3700;// read size dsi_set_cmdq(array, 1, 1); read_reg_v2(0x2E,buffer,10); read_reg_v2(0x3E,buffer+10,10); read_reg_v2(0x3E,buffer+10*2,10); read_reg_v2(0x3E,buffer+10*3,10); read_reg_v2(0x3E,buffer+10*4,10); read_reg_v2(0x3E,buffer+10*5,10); } // --------------------------------------------------------------------------- // Get LCM Driver Hooks // --------------------------------------------------------------------------- LCM_DRIVER nt35590_hd720_dsi_cmd_auo_qhd_lcm_drv = { .name = "nt35590_AUO", .set_util_funcs = lcm_set_util_funcs, .get_params = lcm_get_params, .init = lcm_init, .suspend = lcm_suspend, .resume = lcm_resume, #if (LCM_DSI_CMD_MODE) .update = lcm_update, //.set_backlight = lcm_setbacklight, // .set_pwm = lcm_setpwm, // .get_pwm = lcm_getpwm, //.esd_check = lcm_esd_check, //.esd_recover = lcm_esd_recover, //.compare_id = lcm_compare_id, .read_fb = lcm_read_fb, #endif };
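/*
 * Illustrative helper, not part of the original driver: packs a DCS command
 * plus up to three parameter bytes into the data_array[] layout that
 * dsi_set_cmdq() is fed above.  The layout is read off the literals in
 * lcm_init()/lcm_update(): word 0 carries the payload length (command byte
 * plus parameters) in bits 16..31 above the constant 0x3902 used by every
 * long write in this file, and word 1 carries the DCS command in its lowest
 * byte followed by the parameters, least significant byte first.
 */
static void pack_dcs_long_write(unsigned int data_array[2], unsigned char cmd,
                                const unsigned char *para, unsigned int count)
{
    unsigned int payload = cmd;
    unsigned int i;

    for (i = 0; i < count && i < 3; i++)
        payload |= (unsigned int)para[i] << (8 * (i + 1));

    data_array[0] = ((count + 1) << 16) | 0x3902; /* length | long-write marker */
    data_array[1] = payload;                      /* cmd + parameter bytes */
}

/*
 * Example: the 0x44 (set tear scanline) write from lcm_init() could be built as
 *   unsigned char para[2] = { (FRAME_HEIGHT/2) >> 8, (FRAME_HEIGHT/2) & 0xFF };
 *   unsigned int data_array[16];
 *   pack_dcs_long_write(data_array, 0x44, para, 2);
 *   dsi_set_cmdq(data_array, 2, 1);
 * which reproduces the data_array[0] = 0x00033902 value used above.
 */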
199787.c
/* ChibiOS/HAL - Copyright (C) 2006,2007,2008,2009,2010, 2011,2012,2013,2014 Giovanni Di Sirio. This file is part of ChibiOS/HAL ChibiOS/HAL is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. ChibiOS/RT is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* Concepts and parts of this file have been contributed by Uladzimir Pylinsky aka barthess. */ /** * @file nand.c * @brief NAND Driver code. * * @addtogroup NAND * @{ */ #include "hal.h" #if HAL_USE_NAND || defined(__DOXYGEN__) #include "string.h" /* for memset */ /*===========================================================================*/ /* Driver local definitions. */ /*===========================================================================*/ /*===========================================================================*/ /* Driver exported variables. */ /*===========================================================================*/ /*===========================================================================*/ /* Driver local types. */ /*===========================================================================*/ /*===========================================================================*/ /* Driver local variables. */ /*===========================================================================*/ /*===========================================================================*/ /* Driver local functions. */ /*===========================================================================*/ /** * @brief Check page size. * * @param[in] page_data_size size of page data area * * @notapi */ static void pagesize_check(size_t page_data_size){ /* Page size out of bounds.*/ osalDbgCheck((page_data_size >= NAND_MIN_PAGE_SIZE) && (page_data_size <= NAND_MAX_PAGE_SIZE)); /* Page size must be power of 2.*/ osalDbgCheck(((page_data_size - 1) & page_data_size) == 0); } /** * @brief Translate block-page-offset scheme to NAND internal address. * * @param[in] cfg pointer to the @p NANDConfig from * corresponding NAND driver * @param[in] block block number * @param[in] page page number related to begin of block * @param[in] offset data offset related to begin of page * @param[out] addr buffer to store calculated address * @param[in] addr_len length of address buffer * * @notapi */ static void calc_addr(const NANDConfig *cfg, uint32_t block, uint32_t page, uint32_t offset, uint8_t *addr, size_t addr_len){ size_t i = 0; uint32_t row = 0; /* Incorrect buffer length.*/ osalDbgCheck(cfg->rowcycles + cfg->colcycles == addr_len); osalDbgCheck((block < cfg->blocks) && (page < cfg->pages_per_block) && (offset < cfg->page_data_size + cfg->page_spare_size)); /* convert address to NAND specific */ memset(addr, 0, addr_len); row = (block * cfg->pages_per_block) + page; for (i=0; i<cfg->colcycles; i++){ addr[i] = offset & 0xFF; offset = offset >> 8; } for (; i<addr_len; i++){ addr[i] = row & 0xFF; row = row >> 8; } } /** * @brief Translate block number to NAND internal address. * @note This function designed for erasing purpose. 
* * @param[in] cfg pointer to the @p NANDConfig from * corresponding NAND driver * @param[in] block block number * @param[out] addr buffer to store calculated address * @param[in] addr_len length of address buffer * * @notapi */ static void calc_blk_addr(const NANDConfig *cfg, uint32_t block, uint8_t *addr, size_t addr_len){ size_t i = 0; uint32_t row = 0; /* Incorrect buffer length.*/ osalDbgCheck(cfg->rowcycles == addr_len); osalDbgCheck((block < cfg->blocks)); /* convert address to NAND specific */ memset(addr, 0, addr_len); row = block * cfg->pages_per_block; for (i=0; i<addr_len; i++){ addr[i] = row & 0xFF; row = row >> 8; } } #if NAND_USE_BAD_MAP /** * @brief Add new bad block to map. * * @param[in] nandp pointer to the @p NANDDriver object * @param[in] block block number * @param[in] map pointer to bad block map */ static void bad_map_update(NANDDriver *nandp, size_t block) { uint32_t *map = nandp->config->bb_map; const size_t BPMC = sizeof(uint32_t) * 8; /* bits per map claster */ size_t i; size_t shift; /* Nand device overflow.*/ osalDbgCheck(nandp->config->blocks > block); i = block / BPMC; shift = block % BPMC; /* This block already mapped.*/ osalDbgCheck(((map[i] >> shift) & 1) != 1); map[i] |= (uint32_t)1 << shift; } /** * @brief Scan for bad blocks and fill map with their numbers. * * @param[in] nandp pointer to the @p NANDDriver object */ static void scan_bad_blocks(NANDDriver *nandp) { const size_t blocks = nandp->config->blocks; const size_t maplen = blocks / 32; size_t b; uint8_t m0; uint8_t m1; /* clear map just to be safe */ for (b=0; b<maplen; b++) nandp->config->bb_map[b] = 0; /* now write numbers of bad block to map */ for (b=0; b<blocks; b++){ m0 = nandReadBadMark(nandp, b, 0); m1 = nandReadBadMark(nandp, b, 1); if ((0xFF != m0) || (0xFF != m1)){ bad_map_update(nandp, b); } } } #endif /* NAND_USE_BAD_MAP */ /*===========================================================================*/ /* Driver exported functions. */ /*===========================================================================*/ /** * @brief NAND Driver initialization. * @note This function is implicitly invoked by @p halInit(), there is * no need to explicitly initialize the driver. * * @init */ void nandInit(void) { nand_lld_init(); } /** * @brief Initializes the standard part of a @p NANDDriver structure. * * @param[out] nandp pointer to the @p NANDDriver object * * @init */ void nandObjectInit(NANDDriver *nandp) { #if NAND_USE_MUTUAL_EXCLUSION #if CH_CFG_USE_MUTEXES chMtxObjectInit(&nandp->mutex); #else chSemObjectInit(&nandp->semaphore, 1); #endif /* CH_CFG_USE_MUTEXES */ #endif /* NAND_USE_MUTUAL_EXCLUSION */ nandp->state = NAND_STOP; nandp->config = NULL; } /** * @brief Configures and activates the NAND peripheral. * * @param[in] nandp pointer to the @p NANDDriver object * @param[in] config pointer to the @p NANDConfig object * * @api */ void nandStart(NANDDriver *nandp, const NANDConfig *config) { osalDbgCheck((nandp != NULL) && (config != NULL)); osalDbgAssert((nandp->state == NAND_STOP) || (nandp->state == NAND_READY), "invalid state"); nandp->config = config; pagesize_check(nandp->config->page_data_size); nand_lld_start(nandp); nandp->state = NAND_READY; #if NAND_USE_BAD_MAP scan_bad_blocks(nandp); #endif /* NAND_USE_BAD_MAP */ } /** * @brief Deactivates the NAND peripheral. 
* * @param[in] nandp pointer to the @p NANDDriver object * * @api */ void nandStop(NANDDriver *nandp) { osalDbgCheck(nandp != NULL); osalDbgAssert((nandp->state == NAND_STOP) || (nandp->state == NAND_READY), "invalid state"); nand_lld_stop(nandp); nandp->state = NAND_STOP; } /** * @brief Read whole page. * * @param[in] nandp pointer to the @p NANDDriver object * @param[in] block block number * @param[in] page page number related to begin of block * @param[out] data buffer to store data * @param[in] datalen length of data buffer * * @api */ void nandReadPageWhole(NANDDriver *nandp, uint32_t block, uint32_t page, uint8_t *data, size_t datalen) { const NANDConfig *cfg = nandp->config; uint8_t addrbuf[8]; size_t addrlen = cfg->rowcycles + cfg->colcycles; osalDbgCheck((nandp != NULL) && (data != NULL)); osalDbgCheck((datalen <= (cfg->page_data_size + cfg->page_spare_size))); osalDbgAssert(nandp->state == NAND_READY, "invalid state"); calc_addr(cfg, block, page, 0, addrbuf, addrlen); nand_lld_read_data(nandp, data, datalen, addrbuf, addrlen, NULL); } /** * @brief Write whole page. * * @param[in] nandp pointer to the @p NANDDriver object * @param[in] block block number * @param[in] page page number related to begin of block * @param[in] data buffer with data to be written * @param[in] datalen length of data buffer * * @return The operation status reported by NAND IC (0x70 command). * * @api */ uint8_t nandWritePageWhole(NANDDriver *nandp, uint32_t block, uint32_t page, const uint8_t *data, size_t datalen) { uint8_t retval; const NANDConfig *cfg = nandp->config; uint8_t addr[8]; size_t addrlen = cfg->rowcycles + cfg->colcycles; osalDbgCheck((nandp != NULL) && (data != NULL)); osalDbgCheck((datalen <= (cfg->page_data_size + cfg->page_spare_size))); osalDbgAssert(nandp->state == NAND_READY, "invalid state"); calc_addr(cfg, block, page, 0, addr, addrlen); retval = nand_lld_write_data(nandp, data, datalen, addr, addrlen, NULL); return retval; } /** * @brief Read page data without spare area. * * @param[in] nandp pointer to the @p NANDDriver object * @param[in] block block number * @param[in] page page number related to begin of block * @param[out] data buffer to store data * @param[in] datalen length of data buffer * @param[out] ecc pointer to calculated ECC. Ignored when NULL. * * @api */ void nandReadPageData(NANDDriver *nandp, uint32_t block, uint32_t page, uint8_t *data, size_t datalen, uint32_t *ecc) { const NANDConfig *cfg = nandp->config; uint8_t addrbuf[8]; size_t addrlen = cfg->rowcycles + cfg->colcycles; osalDbgCheck((nandp != NULL) && (data != NULL)); osalDbgCheck((datalen <= cfg->page_data_size)); osalDbgAssert(nandp->state == NAND_READY, "invalid state"); calc_addr(cfg, block, page, 0, addrbuf, addrlen); nand_lld_read_data(nandp, data, datalen, addrbuf, addrlen, ecc); } /** * @brief Write page data without spare area. * * @param[in] nandp pointer to the @p NANDDriver object * @param[in] block block number * @param[in] page page number related to begin of block * @param[in] data buffer with data to be written * @param[in] datalen length of data buffer * @param[out] ecc pointer to calculated ECC. Ignored when NULL. * * @return The operation status reported by NAND IC (0x70 command). 
* * @api */ uint8_t nandWritePageData(NANDDriver *nandp, uint32_t block, uint32_t page, const uint8_t *data, size_t datalen, uint32_t *ecc) { uint8_t retval; const NANDConfig *cfg = nandp->config; uint8_t addr[8]; size_t addrlen = cfg->rowcycles + cfg->colcycles; osalDbgCheck((nandp != NULL) && (data != NULL)); osalDbgCheck((datalen <= cfg->page_data_size)); osalDbgAssert(nandp->state == NAND_READY, "invalid state"); calc_addr(cfg, block, page, 0, addr, addrlen); retval = nand_lld_write_data(nandp, data, datalen, addr, addrlen, ecc); return retval; } /** * @brief Read page spare area. * * @param[in] nandp pointer to the @p NANDDriver object * @param[in] block block number * @param[in] page page number related to begin of block * @param[out] spare buffer to store data * @param[in] sparelen length of data buffer * * @api */ void nandReadPageSpare(NANDDriver *nandp, uint32_t block, uint32_t page, uint8_t *spare, size_t sparelen) { const NANDConfig *cfg = nandp->config; uint8_t addr[8]; size_t addrlen = cfg->rowcycles + cfg->colcycles; osalDbgCheck((NULL != spare) && (nandp != NULL)); osalDbgCheck(sparelen <= cfg->page_spare_size); osalDbgAssert(nandp->state == NAND_READY, "invalid state"); calc_addr(cfg, block, page, cfg->page_data_size, addr, addrlen); nand_lld_read_data(nandp, spare, sparelen, addr, addrlen, NULL); } /** * @brief Write page spare area. * * @param[in] nandp pointer to the @p NANDDriver object * @param[in] block block number * @param[in] page page number related to begin of block * @param[in] spare buffer with spare data to be written * @param[in] sparelen length of data buffer * * @return The operation status reported by NAND IC (0x70 command). * * @api */ uint8_t nandWritePageSpare(NANDDriver *nandp, uint32_t block, uint32_t page, const uint8_t *spare, size_t sparelen) { uint8_t retVal; const NANDConfig *cfg = nandp->config; uint8_t addr[8]; size_t addrlen = cfg->rowcycles + cfg->colcycles; osalDbgCheck((NULL != spare) && (nandp != NULL)); osalDbgCheck(sparelen <= cfg->page_spare_size); osalDbgAssert(nandp->state == NAND_READY, "invalid state"); calc_addr(cfg, block, page, cfg->page_data_size, addr, addrlen); retVal = nand_lld_write_data(nandp, spare, sparelen, addr, addrlen, NULL); return retVal; } /** * @brief Mark block as bad. * * @param[in] nandp pointer to the @p NANDDriver object * @param[in] block block number * * @api */ void nandMarkBad(NANDDriver *nandp, uint32_t block) { uint8_t bb_mark[2] = {0, 0}; uint8_t op_status; op_status = nandWritePageSpare(nandp, block, 0, bb_mark, sizeof(bb_mark)); osalDbgCheck(0 == (op_status & 1)); /* operation failed*/ op_status = nandWritePageSpare(nandp, block, 1, bb_mark, sizeof(bb_mark)); osalDbgCheck(0 == (op_status & 1)); /* operation failed*/ #if NAND_USE_BAD_MAP bad_map_update(nandp, block); #endif } /** * @brief Read bad mark out. * * @param[in] nandp pointer to the @p NANDDriver object * @param[in] block block number * @param[in] page page number related to begin of block * * @return Bad mark. * * @api */ uint8_t nandReadBadMark(NANDDriver *nandp, uint32_t block, uint32_t page) { uint8_t bb_mark[1]; nandReadPageSpare(nandp, block, page, bb_mark, sizeof(bb_mark)); return bb_mark[0]; } /** * @brief Erase block. * * @param[in] nandp pointer to the @p NANDDriver object * @param[in] block block number * * @return The operation status reported by NAND IC (0x70 command). 
* * @api */ uint8_t nandErase(NANDDriver *nandp, uint32_t block){ uint8_t retVal; const NANDConfig *cfg = nandp->config; uint8_t addr[4]; size_t addrlen = cfg->rowcycles; osalDbgCheck(nandp != NULL); osalDbgAssert(nandp->state == NAND_READY, "invalid state"); calc_blk_addr(cfg, block, addr, addrlen); retVal = nand_lld_erase(nandp, addr, addrlen); return retVal; } /** * @brief Report block badness. * * @param[in] nandp pointer to the @p NANDDriver object * @param[in] block block number * * @return block condition * @retval true if the block is bad. * @retval false if the block is good. * * @api */ bool nandIsBad(NANDDriver *nandp, uint32_t block){ osalDbgCheck(nandp != NULL); osalDbgAssert(nandp->state == NAND_READY, "invalid state"); #if NAND_USE_BAD_MAP uint32_t *map = nandp->config->bb_map; const size_t BPMC = sizeof(uint32_t) * 8; /* bits per map cluster */ size_t i; size_t shift; i = block / BPMC; shift = block % BPMC; if (((map[i] >> shift) & 1) == 1) return true; else return false; #else uint8_t m0, m1; m0 = nandReadBadMark(nandp, block, 0); m1 = nandReadBadMark(nandp, block, 1); if ((0xFF != m0) || (0xFF != m1)) return true; else return false; #endif /* NAND_USE_BAD_MAP */ } #if NAND_USE_MUTUAL_EXCLUSION || defined(__DOXYGEN__) /** * @brief Gains exclusive access to the NAND bus. * @details This function tries to gain ownership of the NAND bus, if the bus * is already being used then the invoking thread is queued. * @pre In order to use this function the option * @p NAND_USE_MUTUAL_EXCLUSION must be enabled. * * @param[in] nandp pointer to the @p NANDDriver object * * @api */ void nandAcquireBus(NANDDriver *nandp) { osalDbgCheck(nandp != NULL); #if CH_CFG_USE_MUTEXES chMtxLock(&nandp->mutex); #elif CH_CFG_USE_SEMAPHORES chSemWait(&nandp->semaphore); #endif } /** * @brief Releases exclusive access to the NAND bus. * @pre In order to use this function the option * @p NAND_USE_MUTUAL_EXCLUSION must be enabled. * * @param[in] nandp pointer to the @p NANDDriver object * * @api */ void nandReleaseBus(NANDDriver *nandp) { osalDbgCheck(nandp != NULL); #if CH_CFG_USE_MUTEXES chMtxUnlock(&nandp->mutex); #elif CH_CFG_USE_SEMAPHORES chSemSignal(&nandp->semaphore); #endif } #endif /* NAND_USE_MUTUAL_EXCLUSION */ #endif /* HAL_USE_NAND */ /** @} */
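/*
 * Illustrative usage sketch appended by the editor -- not part of the
 * original driver. It shows one plausible erase/program/read cycle through
 * the high-level API above. "NANDD1" and "nandcfg" are hypothetical names
 * standing in for a low-level driver instance and a board-specific
 * NANDConfig; the 2048-byte page size is likewise only an assumption.
 */
#if 0 /* example only, intentionally excluded from the build */
static uint8_t pagebuf[2048];

static void nand_usage_example(void) {
  const uint32_t blk = 100;            /* arbitrary block for the example */
  uint32_t ecc;

  nandStart(&NANDD1, &nandcfg);        /* activate with the board config  */
  nandAcquireBus(&NANDD1);             /* needs NAND_USE_MUTUAL_EXCLUSION */

  if (!nandIsBad(&NANDD1, blk)) {      /* never touch retired blocks      */
    if ((nandErase(&NANDD1, blk) & 1) == 0) {   /* status bit 0 = failure */
      memset(pagebuf, 0xAA, sizeof(pagebuf));
      nandWritePageData(&NANDD1, blk, 0, pagebuf, sizeof(pagebuf), &ecc);
      nandReadPageData(&NANDD1, blk, 0, pagebuf, sizeof(pagebuf), &ecc);
    }
    else {
      nandMarkBad(&NANDD1, blk);       /* erase failed: retire the block  */
    }
  }

  nandReleaseBus(&NANDD1);
  nandStop(&NANDD1);
}
#endif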
437410.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #include "channels/rail.h" #include "plugins/channels.h" #include "rdp.h" #include "settings.h" #include <freerdp/client/rail.h> #include <freerdp/event.h> #include <freerdp/freerdp.h> #include <freerdp/rail.h> #include <guacamole/client.h> #include <winpr/wtypes.h> #include <winpr/wtsapi.h> #include <stddef.h> #include <string.h> #ifdef FREERDP_RAIL_CALLBACKS_REQUIRE_CONST /** * FreeRDP 2.0.0-rc4 and newer requires the final argument for all RAIL * callbacks to be const. */ #define RAIL_CONST const #else /** * FreeRDP 2.0.0-rc3 and older requires the final argument for all RAIL * callbacks to NOT be const. */ #define RAIL_CONST #endif /** * Completes initialization of the RemoteApp session, responding to the server * handshake, sending client status and system parameters, and executing the * desired RemoteApp command. This is accomplished using the Handshake PDU, * Client Information PDU, one or more Client System Parameters Update PDUs, * and the Client Execute PDU respectively. These PDUs MUST be sent for the * desired RemoteApp to run, and MUST NOT be sent until after a Handshake or * HandshakeEx PDU has been received. See: * * https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-rdperp/cec4eb83-b304-43c9-8378-b5b8f5e7082a (Handshake PDU) * https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-rdperp/743e782d-f59b-40b5-a0f3-adc74e68a2ff (Client Information PDU) * https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-rdperp/60344497-883f-4711-8b9a-828d1c580195 (System Parameters Update PDU) * https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-rdperp/98a6e3c3-c2a9-42cc-ad91-0d9a6c211138 (Client Execute PDU) * * @param rail * The RailClientContext structure used by FreeRDP to handle the RAIL * channel for the current RDP session. * * @return * CHANNEL_RC_OK (zero) if the PDUs were sent successfully, an error code * (non-zero) otherwise. */ static UINT guac_rdp_rail_complete_handshake(RailClientContext* rail) { UINT status; guac_client* client = (guac_client*) rail->custom; guac_rdp_client* rdp_client = (guac_rdp_client*) client->data; RAIL_HANDSHAKE_ORDER handshake = { /* Build number 7600 (0x1DB0) apparently represents Windows 7 and * compatibility with RDP 7.0. As of this writing, this is the same * build number sent for RAIL connections by xfreerdp. 
*/ .buildNumber = 7600 }; /* Send client handshake response */ status = rail->ClientHandshake(rail, &handshake); if (status != CHANNEL_RC_OK) return status; RAIL_CLIENT_STATUS_ORDER client_status = { .flags = 0x00 }; /* Send client status */ status = rail->ClientInformation(rail, &client_status); if (status != CHANNEL_RC_OK) return status; RAIL_SYSPARAM_ORDER sysparam = { .dragFullWindows = FALSE, .highContrast = { .flags = HCF_AVAILABLE | HCF_CONFIRMHOTKEY | HCF_HOTKEYACTIVE | HCF_HOTKEYAVAILABLE | HCF_HOTKEYSOUND | HCF_INDICATOR, .colorScheme = { .string = NULL, .length = 0 } }, .keyboardCues = FALSE, .keyboardPref = FALSE, .mouseButtonSwap = FALSE, .workArea = { .left = 0, .top = 0, .right = rdp_client->settings->width, .bottom = rdp_client->settings->height }, .params = SPI_MASK_SET_DRAG_FULL_WINDOWS | SPI_MASK_SET_HIGH_CONTRAST | SPI_MASK_SET_KEYBOARD_CUES | SPI_MASK_SET_KEYBOARD_PREF | SPI_MASK_SET_MOUSE_BUTTON_SWAP | SPI_MASK_SET_WORK_AREA }; /* Send client system parameters */ status = rail->ClientSystemParam(rail, &sysparam); if (status != CHANNEL_RC_OK) return status; RAIL_EXEC_ORDER exec = { .flags = RAIL_EXEC_FLAG_EXPAND_ARGUMENTS, .RemoteApplicationProgram = rdp_client->settings->remote_app, .RemoteApplicationWorkingDir = rdp_client->settings->remote_app_dir, .RemoteApplicationArguments = rdp_client->settings->remote_app_args, }; /* Execute desired RemoteApp command */ return rail->ClientExecute(rail, &exec); } /** * Callback which is invoked when a Handshake PDU is received from the RDP * server. No communication for RemoteApp may occur until the Handshake PDU * (or, alternatively, the HandshakeEx PDU) is received. See: * * https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-rdperp/cec4eb83-b304-43c9-8378-b5b8f5e7082a * * @param rail * The RailClientContext structure used by FreeRDP to handle the RAIL * channel for the current RDP session. * * @param handshake * The RAIL_HANDSHAKE_ORDER structure representing the Handshake PDU that * was received. * * @return * CHANNEL_RC_OK (zero) if the PDU was handled successfully, an error code * (non-zero) otherwise. */ static UINT guac_rdp_rail_handshake(RailClientContext* rail, RAIL_CONST RAIL_HANDSHAKE_ORDER* handshake) { return guac_rdp_rail_complete_handshake(rail); } /** * Callback which is invoked when a HandshakeEx PDU is received from the RDP * server. No communication for RemoteApp may occur until the HandshakeEx PDU * (or, alternatively, the Handshake PDU) is received. See: * * https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-rdperp/5cec5414-27de-442e-8d4a-c8f8b41f3899 * * @param rail * The RailClientContext structure used by FreeRDP to handle the RAIL * channel for the current RDP session. * * @param handshake_ex * The RAIL_HANDSHAKE_EX_ORDER structure representing the HandshakeEx PDU * that was received. * * @return * CHANNEL_RC_OK (zero) if the PDU was handled successfully, an error code * (non-zero) otherwise. */ static UINT guac_rdp_rail_handshake_ex(RailClientContext* rail, RAIL_CONST RAIL_HANDSHAKE_EX_ORDER* handshake_ex) { return guac_rdp_rail_complete_handshake(rail); } /** * Callback which associates handlers specific to Guacamole with the * RailClientContext instance allocated by FreeRDP to deal with received * RAIL (RemoteApp) messages. * * This function is called whenever a channel connects via the PubSub event * system within FreeRDP, but only has any effect if the connected channel is * the RAIL channel. 
This specific callback is registered with the PubSub * system of the relevant rdpContext when guac_rdp_rail_load_plugin() is * called. * * @param context * The rdpContext associated with the active RDP session. * * @param e * Event-specific arguments, mainly the name of the channel, and a * reference to the associated plugin loaded for that channel by FreeRDP. */ static void guac_rdp_rail_channel_connected(rdpContext* context, ChannelConnectedEventArgs* e) { guac_client* client = ((rdp_freerdp_context*) context)->client; /* Ignore connection event if it's not for the RAIL channel */ if (strcmp(e->name, RAIL_SVC_CHANNEL_NAME) != 0) return; /* The structure pointed to by pInterface is guaranteed to be a * RailClientContext if the channel is RAIL */ RailClientContext* rail = (RailClientContext*) e->pInterface; /* Init FreeRDP RAIL context, ensuring the guac_client can be accessed from * within any RAIL-specific callbacks */ rail->custom = client; rail->ServerHandshake = guac_rdp_rail_handshake; rail->ServerHandshakeEx = guac_rdp_rail_handshake_ex; guac_client_log(client, GUAC_LOG_DEBUG, "RAIL (RemoteApp) channel " "connected."); } void guac_rdp_rail_load_plugin(rdpContext* context) { guac_client* client = ((rdp_freerdp_context*) context)->client; /* Attempt to load FreeRDP support for the RAIL channel */ if (guac_freerdp_channels_load_plugin(context, "rail", context->settings)) { guac_client_log(client, GUAC_LOG_WARNING, "Support for the RAIL channel (RemoteApp) could not be " "loaded. This support normally takes the form of a plugin " "which is built into FreeRDP. Lacking this support, " "RemoteApp will not work."); return; } /* Complete RDP side of initialization when channel is connected */ PubSub_SubscribeChannelConnected(context->pubSub, (pChannelConnectedEventHandler) guac_rdp_rail_channel_connected); guac_client_log(client, GUAC_LOG_DEBUG, "Support for RAIL (RemoteApp) " "registered. Awaiting channel connection."); }
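/*
 * Illustrative sketch added by the editor -- not part of the original file.
 * One natural place to call guac_rdp_rail_load_plugin() is the FreeRDP
 * pre-connect stage, and only when a RemoteApp program was actually
 * requested. The surrounding function name below is hypothetical; the
 * types and the settings->remote_app field come from the code above.
 */
#if 0 /* example only */
static BOOL example_pre_connect(freerdp* instance) {

    rdpContext* context = instance->context;
    guac_client* client = ((rdp_freerdp_context*) context)->client;
    guac_rdp_client* rdp_client = (guac_rdp_client*) client->data;

    /* RAIL is only meaningful when a RemoteApp session was requested */
    if (rdp_client->settings->remote_app != NULL)
        guac_rdp_rail_load_plugin(context);

    return TRUE;
}
#endif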
111883.c
#include "SimpleDNS.h" int get_A_Record(uint8_t addr[4], const char domain_name[]){ if (strcmp("foo.bar.com", domain_name) == 0){ addr[0] = 192; addr[1] = 168; addr[2] = 1; addr[3] = 1; return 0; }else{ return -1; } } int get_AAAA_Record(uint8_t addr[16], const char domain_name[]){ if (strcmp("foo.bar.com", domain_name) == 0){ addr[0] = 0xfe; addr[1] = 0x80; addr[2] = 0x00; addr[3] = 0x00; addr[4] = 0x00; addr[5] = 0x00; addr[6] = 0x00; addr[7] = 0x00; addr[8] = 0x00; addr[9] = 0x00; addr[10] = 0x00; addr[11] = 0x00; addr[12] = 0x00; addr[13] = 0x00; addr[14] = 0x00; addr[15] = 0x01; return 0; }else{ return -1; } } void print_hex(uint8_t* buf, size_t len){ size_t i; printf("%zu bytes:\n", len); for(i = 0; i < len; ++i) printf("%02x ", buf[i]); printf("\n"); } void print_resource_record(struct ResourceRecord* rr){ int i; while (rr){ printf(" ResourceRecord { name '%s', type %u, class %u, ttl %u, rd_length %u, ", rr->name, rr->type, rr->class, rr->ttl, rr->rd_length ); union ResourceData *rd = &rr->rd_data; switch (rr->type){ case A_Resource_RecordType: printf("Address Resource Record { address "); for(i = 0; i < 4; ++i) printf("%s%u", (i ? "." : ""), rd->a_record.addr[i]); printf(" }"); break; case NS_Resource_RecordType: printf("Name Server Resource Record { name %s }", rd->name_server_record.name ); break; case CNAME_Resource_RecordType: printf("Canonical Name Resource Record { name '%s' }", rd->cname_record.name ); break; case SOA_Resource_RecordType: printf("SOA { MName '%s', RName '%s', serial %u, refresh %u, retry %u, expire %u, minimum %u }", rd->soa_record.MName, rd->soa_record.RName, rd->soa_record.serial, rd->soa_record.refresh, rd->soa_record.retry, rd->soa_record.expire, rd->soa_record.minimum ); break; case PTR_Resource_RecordType: printf("Pointer Resource Record { name '%s' }", rd->ptr_record.name ); break; case MX_Resource_RecordType: printf("Mail Exchange Record { preference %u, exchange '%s' }", rd->mx_record.preference, rd->mx_record.exchange ); break; case TXT_Resource_RecordType: printf("Text Resource Record { txt_data '%s' }", rd->txt_record.txt_data ); break; case AAAA_Resource_RecordType: printf("AAAA Resource Record { address "); for(i = 0; i < 16; ++i) printf("%s%02x", (i ? ":" : ""), rd->aaaa_record.addr[i]); printf(" }"); break; default: printf("Unknown Resource Record { ??? }"); } printf("}\n"); rr = rr->next; } } void print_query(struct Message* msg){ printf("QUERY { ID: %02x", msg->id); printf(". FIELDS: [ QR: %u, OpCode: %u ]", msg->qr, msg->opcode); printf(", QDcount: %u", msg->qdCount); printf(", ANcount: %u", msg->anCount); printf(", NScount: %u", msg->nsCount); printf(", ARcount: %u,\n", msg->arCount); struct Question* q = msg->questions; while (q){ printf(" Question { qName '%s', qType %u, qClass %u }\n", q->qName, q->qType, q->qClass ); q = q->next; } print_resource_record(msg->answers); print_resource_record(msg->authorities); print_resource_record(msg->additionals); printf("}\n"); } size_t get16bits(const uint8_t** buffer){ uint16_t value; memcpy(&value, *buffer, 2); *buffer += 2; return ntohs(value); } void put8bits(uint8_t** buffer, uint8_t value){ memcpy(*buffer, &value, 1); *buffer += 1; } void put16bits(uint8_t** buffer, uint16_t value){ value = htons(value); memcpy(*buffer, &value, 2); *buffer += 2; } void put32bits(uint8_t** buffer, uint32_t value){ value = htonl(value); /* 32-bit values need htonl, not htons */ memcpy(*buffer, &value, 4); *buffer += 4; } // 3foo3bar3com0 => foo.bar.com (No full validation is done!) 
char *decode_domain_name(const uint8_t **buf, size_t len){ char domain[256]; for (int i = 1; i < MIN(256, len); i += 1) { uint8_t c = (*buf)[i]; if (c == 0) { domain[i - 1] = 0; *buf += i + 1; return strdup(domain); } else if (c <= 63) { domain[i - 1] = '.'; } else { domain[i - 1] = c; } } return NULL; } // foo.bar.com => 3foo3bar3com0 void encode_domain_name(uint8_t** buffer, const char* domain){ uint8_t* buf = *buffer; const char* beg = domain; const char* pos; int len = 0; int i = 0; while ((pos = strchr(beg, '.'))){ len = pos - beg; buf[i] = len; i += 1; memcpy(buf+i, beg, len); i += len; beg = pos + 1; } len = strlen(domain) - (beg - domain); buf[i] = len; i += 1; memcpy(buf + i, beg, len); i += len; buf[i] = 0; i += 1; *buffer += i; } void decode_header(struct Message* msg, const uint8_t** buffer){ msg->id = get16bits(buffer); uint32_t fields = get16bits(buffer); msg->qr = (fields & QR_MASK) >> 15; msg->opcode = (fields & OPCODE_MASK) >> 11; msg->aa = (fields & AA_MASK) >> 10; msg->tc = (fields & TC_MASK) >> 9; msg->rd = (fields & RD_MASK) >> 8; msg->ra = (fields & RA_MASK) >> 7; msg->rcode = (fields & RCODE_MASK) >> 0; msg->qdCount = get16bits(buffer); msg->anCount = get16bits(buffer); msg->nsCount = get16bits(buffer); msg->arCount = get16bits(buffer); } void encode_header(struct Message* msg, uint8_t** buffer){ put16bits(buffer, msg->id); int fields = 0; fields |= (msg->qr << 15) & QR_MASK; fields |= (msg->rcode << 0) & RCODE_MASK; // TODO: insert the rest of the fields put16bits(buffer, fields); put16bits(buffer, msg->qdCount); put16bits(buffer, msg->anCount); put16bits(buffer, msg->nsCount); put16bits(buffer, msg->arCount); } int decode_msg(struct Message* msg, const uint8_t* buffer, int size){ int i; decode_header(msg, &buffer); if (msg->anCount != 0 || msg->nsCount != 0){ printf("Only questions expected!\n"); return -1; } // parse questions uint32_t qcount = msg->qdCount; for (i = 0; i < qcount; ++i){ struct Question* q = malloc(sizeof(struct Question)); q->qName = decode_domain_name(&buffer, size); q->qType = get16bits(&buffer); q->qClass = get16bits(&buffer); // prepend question to questions list (using the current head so that // earlier questions are kept when the message carries more than one) q->next = msg->questions; msg->questions = q; } // We do not expect any resource records to parse here. return 0; } // For every question in the message add an appropriate resource record // in either section 'answers', 'authorities' or 'additionals'. void resolver_process(struct Message* msg){ struct ResourceRecord* beg; struct ResourceRecord* rr; struct Question* q; int rc; // leave most values intact for response msg->qr = 1; // this is a response msg->aa = 1; // this server is authoritative msg->ra = 0; // no recursion available msg->rcode = Ok_ResponseType; // should already be 0 msg->anCount = 0; msg->nsCount = 0; msg->arCount = 0; // for every question append resource records q = msg->questions; while (q){ rr = malloc(sizeof(struct ResourceRecord)); memset(rr, 0, sizeof(struct ResourceRecord)); rr->name = strdup(q->qName); rr->type = q->qType; rr->class = q->qClass; rr->ttl = 60*60; // in seconds; 0 means no caching // printf("Query for '%s'\n", q->qName); // We can only answer two question types so far // and the answer (resource records) will be all put // into the answers list. // This behavior is probably non-standard! 
switch (q->qType){ case A_Resource_RecordType: rr->rd_length = 4; rc = get_A_Record(rr->rd_data.a_record.addr, q->qName); if (rc < 0){ free(rr->name); free(rr); goto next; } break; case AAAA_Resource_RecordType: rr->rd_length = 16; rc = get_AAAA_Record(rr->rd_data.aaaa_record.addr, q->qName); if (rc < 0){ free(rr->name); free(rr); goto next; } break; /* case NS_Resource_RecordType: case CNAME_Resource_RecordType: case SOA_Resource_RecordType: case PTR_Resource_RecordType: case MX_Resource_RecordType: case TXT_Resource_RecordType: */ default: free(rr->name); free(rr); msg->rcode = NotImplemented_ResponseType; printf("Cannot answer question of type %d.\n", q->qType); goto next; } msg->anCount++; // prepend resource record to answers list beg = msg->answers; msg->answers = rr; rr->next = beg; // jump here to omit question next: // process next question q = q->next; } } /* @return 0 upon success, 1 upon failure */ int encode_resource_records(struct ResourceRecord* rr, uint8_t** buffer){ int i; while (rr){ // Answer questions by attaching resource sections. encode_domain_name(buffer, rr->name); put16bits(buffer, rr->type); put16bits(buffer, rr->class); put32bits(buffer, rr->ttl); put16bits(buffer, rr->rd_length); switch (rr->type){ case A_Resource_RecordType: for(i = 0; i < 4; ++i) put8bits(buffer, rr->rd_data.a_record.addr[i]); break; case AAAA_Resource_RecordType: for(i = 0; i < 16; ++i) put8bits(buffer, rr->rd_data.aaaa_record.addr[i]); break; default: fprintf(stderr, "Unknown type %u. => Ignore resource record.\n", rr->type); return 1; } rr = rr->next; } return 0; } /* @return 0 upon success, 1 upon failure */ int encode_msg(struct Message* msg, uint8_t** buffer){ struct Question* q; int rc; encode_header(msg, buffer); q = msg->questions; while (q){ encode_domain_name(buffer, q->qName); put16bits(buffer, q->qType); put16bits(buffer, q->qClass); q = q->next; } rc = 0; rc |= encode_resource_records(msg->answers, buffer); rc |= encode_resource_records(msg->authorities, buffer); rc |= encode_resource_records(msg->additionals, buffer); return rc; } void free_resource_records(struct ResourceRecord* rr){ struct ResourceRecord* next; while (rr) { free(rr->name); next = rr->next; free(rr); rr = next; } } void free_questions(struct Question* qq){ struct Question* next; while (qq) { free(qq->qName); next = qq->next; free(qq); qq = next; } }
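/*
 * Illustrative sketch added by the editor -- not part of the original file.
 * It shows one plausible way to drive the routines above as a tiny UDP
 * responder: receive a datagram, decode it, let resolver_process() attach
 * answers, then encode and send the reply. The port, buffer size and error
 * handling are arbitrary choices, and <sys/socket.h>/<netinet/in.h> are
 * assumed to be available (directly or through SimpleDNS.h).
 */
#if 0 /* example only */
int example_serve(uint16_t port) {
  uint8_t buffer[512]; /* classic DNS-over-UDP payload limit */
  struct sockaddr_in addr = { 0 };
  addr.sin_family = AF_INET;
  addr.sin_addr.s_addr = INADDR_ANY;
  addr.sin_port = htons(port);

  int sock = socket(AF_INET, SOCK_DGRAM, 0);
  if (sock < 0 || bind(sock, (struct sockaddr*) &addr, sizeof(addr)) < 0)
    return -1;

  for (;;) {
    struct sockaddr_in client;
    socklen_t client_len = sizeof(client);
    ssize_t nbytes = recvfrom(sock, buffer, sizeof(buffer), 0,
                              (struct sockaddr*) &client, &client_len);
    if (nbytes < 0)
      continue;

    struct Message msg;
    memset(&msg, 0, sizeof(msg));

    if (decode_msg(&msg, buffer, (int) nbytes) == 0) {
      print_query(&msg);       /* optional: dump the parsed request */
      resolver_process(&msg);

      uint8_t* p = buffer;     /* reuse the receive buffer for the reply */
      if (encode_msg(&msg, &p) == 0)
        sendto(sock, buffer, p - buffer, 0,
               (struct sockaddr*) &client, client_len);
    }

    free_questions(msg.questions);
    free_resource_records(msg.answers);
    free_resource_records(msg.authorities);
    free_resource_records(msg.additionals);
  }
}
#endif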
410356.c
/* * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <config.h> #include "dpif-netlink.h" #include <ctype.h> #include <errno.h> #include <fcntl.h> #include <inttypes.h> #include <net/if.h> #include <linux/types.h> #include <linux/pkt_sched.h> #include <poll.h> #include <stdlib.h> #include <strings.h> #include <sys/epoll.h> #include <sys/stat.h> #include <unistd.h> #include "bitmap.h" #include "dpif-provider.h" #include "dynamic-string.h" #include "flow.h" #include "fat-rwlock.h" #include "netdev.h" #include "netdev-linux.h" #include "netdev-vport.h" #include "netlink-notifier.h" #include "netlink-socket.h" #include "netlink.h" #include "odp-util.h" #include "ofpbuf.h" #include "packets.h" #include "poll-loop.h" #include "random.h" #include "shash.h" #include "sset.h" #include "timeval.h" #include "unaligned.h" #include "util.h" #include "vlog.h" VLOG_DEFINE_THIS_MODULE(dpif_netlink); #ifdef _WIN32 enum { WINDOWS = 1 }; #else enum { WINDOWS = 0 }; #endif enum { MAX_PORTS = USHRT_MAX }; /* This ethtool flag was introduced in Linux 2.6.24, so it might be * missing if we have old headers. */ #define ETH_FLAG_LRO (1 << 15) /* LRO is enabled */ struct dpif_netlink_dp { /* Generic Netlink header. */ uint8_t cmd; /* struct ovs_header. */ int dp_ifindex; /* Attributes. */ const char *name; /* OVS_DP_ATTR_NAME. */ const uint32_t *upcall_pid; /* OVS_DP_ATTR_UPCALL_PID. */ uint32_t user_features; /* OVS_DP_ATTR_USER_FEATURES */ const struct ovs_dp_stats *stats; /* OVS_DP_ATTR_STATS. */ const struct ovs_dp_megaflow_stats *megaflow_stats; /* OVS_DP_ATTR_MEGAFLOW_STATS.*/ }; static void dpif_netlink_dp_init(struct dpif_netlink_dp *); static int dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *, const struct ofpbuf *); static void dpif_netlink_dp_dump_start(struct nl_dump *); static int dpif_netlink_dp_transact(const struct dpif_netlink_dp *request, struct dpif_netlink_dp *reply, struct ofpbuf **bufp); static int dpif_netlink_dp_get(const struct dpif *, struct dpif_netlink_dp *reply, struct ofpbuf **bufp); struct dpif_netlink_flow { /* Generic Netlink header. */ uint8_t cmd; /* struct ovs_header. */ unsigned int nlmsg_flags; int dp_ifindex; /* Attributes. * * The 'stats' member points to 64-bit data that might only be aligned on * 32-bit boundaries, so get_unaligned_u64() should be used to access its * values. * * If 'actions' is nonnull then OVS_FLOW_ATTR_ACTIONS will be included in * the Netlink version of the command, even if actions_len is zero. */ const struct nlattr *key; /* OVS_FLOW_ATTR_KEY. */ size_t key_len; const struct nlattr *mask; /* OVS_FLOW_ATTR_MASK. */ size_t mask_len; const struct nlattr *actions; /* OVS_FLOW_ATTR_ACTIONS. */ size_t actions_len; const struct ovs_flow_stats *stats; /* OVS_FLOW_ATTR_STATS. */ const uint8_t *tcp_flags; /* OVS_FLOW_ATTR_TCP_FLAGS. */ const ovs_32aligned_u64 *used; /* OVS_FLOW_ATTR_USED. */ bool clear; /* OVS_FLOW_ATTR_CLEAR. */ bool probe; /* OVS_FLOW_ATTR_PROBE. 
*/ }; static void dpif_netlink_flow_init(struct dpif_netlink_flow *); static int dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *, const struct ofpbuf *); static void dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *, struct ofpbuf *); static int dpif_netlink_flow_transact(struct dpif_netlink_flow *request, struct dpif_netlink_flow *reply, struct ofpbuf **bufp); static void dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *, struct dpif_flow_stats *); static void dpif_netlink_flow_to_dpif_flow(struct dpif *, struct dpif_flow *, const struct dpif_netlink_flow *); /* One of the dpif channels between the kernel and userspace. */ struct dpif_channel { struct nl_sock *sock; /* Netlink socket. */ long long int last_poll; /* Last time this channel was polled. */ }; #ifdef _WIN32 #define VPORT_SOCK_POOL_SIZE 1 /* On Windows, there is no native support for epoll. There are equivalent * interfaces though, that are not used currently. For simpicity, a pool of * netlink sockets is used. Each socket is represented by 'struct * dpif_windows_vport_sock'. Since it is a pool, multiple OVS ports may be * sharing the same socket. In the future, we can add a reference count and * such fields. */ struct dpif_windows_vport_sock { struct nl_sock *nl_sock; /* netlink socket. */ }; #endif struct dpif_handler { struct dpif_channel *channels;/* Array of channels for each handler. */ struct epoll_event *epoll_events; int epoll_fd; /* epoll fd that includes channel socks. */ int n_events; /* Num events returned by epoll_wait(). */ int event_offset; /* Offset into 'epoll_events'. */ #ifdef _WIN32 /* Pool of sockets. */ struct dpif_windows_vport_sock *vport_sock_pool; size_t last_used_pool_idx; /* Index to aid in allocating a socket in the pool to a port. */ #endif }; /* Datapath interface for the openvswitch Linux kernel module. */ struct dpif_netlink { struct dpif dpif; int dp_ifindex; /* Upcall messages. */ struct fat_rwlock upcall_lock; struct dpif_handler *handlers; uint32_t n_handlers; /* Num of upcall handlers. */ int uc_array_size; /* Size of 'handler->channels' and */ /* 'handler->epoll_events'. */ /* Change notification. */ struct nl_sock *port_notifier; /* vport multicast group subscriber. */ bool refresh_channels; }; static void report_loss(struct dpif_netlink *, struct dpif_channel *, uint32_t ch_idx, uint32_t handler_id); static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(9999, 5); /* Generic Netlink family numbers for OVS. * * Initialized by dpif_netlink_init(). */ static int ovs_datapath_family; static int ovs_vport_family; static int ovs_flow_family; static int ovs_packet_family; /* Generic Netlink multicast groups for OVS. * * Initialized by dpif_netlink_init(). 
*/ static unsigned int ovs_vport_mcgroup; static int dpif_netlink_init(void); static int open_dpif(const struct dpif_netlink_dp *, struct dpif **); static uint32_t dpif_netlink_port_get_pid(const struct dpif *, odp_port_t port_no, uint32_t hash); static void dpif_netlink_handler_uninit(struct dpif_handler *handler); static int dpif_netlink_refresh_channels(struct dpif_netlink *, uint32_t n_handlers); static void dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *, struct ofpbuf *); static int dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *, const struct ofpbuf *); static struct dpif_netlink * dpif_netlink_cast(const struct dpif *dpif) { dpif_assert_class(dpif, &dpif_netlink_class); return CONTAINER_OF(dpif, struct dpif_netlink, dpif); } static int dpif_netlink_enumerate(struct sset *all_dps, const struct dpif_class *dpif_class OVS_UNUSED) { struct nl_dump dump; uint64_t reply_stub[NL_DUMP_BUFSIZE / 8]; struct ofpbuf msg, buf; int error; error = dpif_netlink_init(); if (error) { return error; } ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub); dpif_netlink_dp_dump_start(&dump); while (nl_dump_next(&dump, &msg, &buf)) { struct dpif_netlink_dp dp; if (!dpif_netlink_dp_from_ofpbuf(&dp, &msg)) { sset_add(all_dps, dp.name); } } ofpbuf_uninit(&buf); return nl_dump_done(&dump); } static int dpif_netlink_open(const struct dpif_class *class OVS_UNUSED, const char *name, bool create, struct dpif **dpifp) { struct dpif_netlink_dp dp_request, dp; struct ofpbuf *buf; uint32_t upcall_pid; int error; error = dpif_netlink_init(); if (error) { return error; } /* Create or look up datapath. */ dpif_netlink_dp_init(&dp_request); if (create) { dp_request.cmd = OVS_DP_CMD_NEW; upcall_pid = 0; dp_request.upcall_pid = &upcall_pid; } else { /* Use OVS_DP_CMD_SET to report user features */ dp_request.cmd = OVS_DP_CMD_SET; } dp_request.name = name; dp_request.user_features |= OVS_DP_F_UNALIGNED; dp_request.user_features |= OVS_DP_F_VPORT_PIDS; error = dpif_netlink_dp_transact(&dp_request, &dp, &buf); if (error) { return error; } error = open_dpif(&dp, dpifp); ofpbuf_delete(buf); return error; } static int open_dpif(const struct dpif_netlink_dp *dp, struct dpif **dpifp) { struct dpif_netlink *dpif; dpif = xzalloc(sizeof *dpif); dpif->port_notifier = NULL; fat_rwlock_init(&dpif->upcall_lock); dpif_init(&dpif->dpif, &dpif_netlink_class, dp->name, dp->dp_ifindex, dp->dp_ifindex); dpif->dp_ifindex = dp->dp_ifindex; *dpifp = &dpif->dpif; return 0; } /* Destroys the netlink sockets pointed by the elements in 'socksp' * and frees the 'socksp'. */ static void vport_del_socksp__(struct nl_sock **socksp, uint32_t n_socks) { size_t i; for (i = 0; i < n_socks; i++) { nl_sock_destroy(socksp[i]); } free(socksp); } /* Creates an array of netlink sockets. Returns an array of the * corresponding pointers. Records the error in 'error'. 
*/ static struct nl_sock ** vport_create_socksp__(uint32_t n_socks, int *error) { struct nl_sock **socksp = xzalloc(n_socks * sizeof *socksp); size_t i; for (i = 0; i < n_socks; i++) { *error = nl_sock_create(NETLINK_GENERIC, &socksp[i]); if (*error) { goto error; } } return socksp; error: vport_del_socksp__(socksp, n_socks); return NULL; } #ifdef _WIN32 static void vport_delete_sock_pool(struct dpif_handler *handler) OVS_REQ_WRLOCK(dpif->upcall_lock) { if (handler->vport_sock_pool) { uint32_t i; struct dpif_windows_vport_sock *sock_pool = handler->vport_sock_pool; for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) { if (sock_pool[i].nl_sock) { nl_sock_unsubscribe_packets(sock_pool[i].nl_sock); nl_sock_destroy(sock_pool[i].nl_sock); sock_pool[i].nl_sock = NULL; } } free(handler->vport_sock_pool); handler->vport_sock_pool = NULL; } } static int vport_create_sock_pool(struct dpif_handler *handler) OVS_REQ_WRLOCK(dpif->upcall_lock) { struct dpif_windows_vport_sock *sock_pool; size_t i; int error = 0; sock_pool = xzalloc(VPORT_SOCK_POOL_SIZE * sizeof *sock_pool); for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) { error = nl_sock_create(NETLINK_GENERIC, &sock_pool[i].nl_sock); if (error) { goto error; } /* Enable the netlink socket to receive packets. This is equivalent to * calling nl_sock_join_mcgroup() to receive events. */ error = nl_sock_subscribe_packets(sock_pool[i].nl_sock); if (error) { goto error; } } handler->vport_sock_pool = sock_pool; handler->last_used_pool_idx = 0; return 0; error: vport_delete_sock_pool(handler); return error; } /* Returns an array pointers to netlink sockets. The sockets are picked from a * pool. Records the error in 'error'. */ static struct nl_sock ** vport_create_socksp_windows(struct dpif_netlink *dpif, int *error) OVS_REQ_WRLOCK(dpif->upcall_lock) { uint32_t n_socks = dpif->n_handlers; struct nl_sock **socksp; size_t i; ovs_assert(n_socks <= 1); socksp = xzalloc(n_socks * sizeof *socksp); /* Pick netlink sockets to use in a round-robin fashion from each * handler's pool of sockets. */ for (i = 0; i < n_socks; i++) { struct dpif_handler *handler = &dpif->handlers[i]; struct dpif_windows_vport_sock *sock_pool = handler->vport_sock_pool; size_t index = handler->last_used_pool_idx; /* A pool of sockets is allocated when the handler is initialized. */ if (sock_pool == NULL) { free(socksp); *error = EINVAL; return NULL; } ovs_assert(index < VPORT_SOCK_POOL_SIZE); socksp[i] = sock_pool[index].nl_sock; socksp[i] = sock_pool[index].nl_sock; ovs_assert(socksp[i]); index = (index == VPORT_SOCK_POOL_SIZE - 1) ? 0 : index + 1; handler->last_used_pool_idx = index; } return socksp; } static void vport_del_socksp_windows(struct dpif_netlink *dpif, struct nl_sock **socksp) { free(socksp); } #endif /* _WIN32 */ static struct nl_sock ** vport_create_socksp(struct dpif_netlink *dpif, int *error) { #ifdef _WIN32 return vport_create_socksp_windows(dpif, error); #else return vport_create_socksp__(dpif->n_handlers, error); #endif } static void vport_del_socksp(struct dpif_netlink *dpif, struct nl_sock **socksp) { #ifdef _WIN32 vport_del_socksp_windows(dpif, socksp); #else vport_del_socksp__(socksp, dpif->n_handlers); #endif } /* Given the array of pointers to netlink sockets 'socksp', returns * the array of corresponding pids. If the 'socksp' is NULL, returns * a single-element array of value 0. 
*/ static uint32_t * vport_socksp_to_pids(struct nl_sock **socksp, uint32_t n_socks) { uint32_t *pids; if (!socksp) { pids = xzalloc(sizeof *pids); } else { size_t i; pids = xzalloc(n_socks * sizeof *pids); for (i = 0; i < n_socks; i++) { pids[i] = nl_sock_pid(socksp[i]); } } return pids; } /* Given the port number 'port_idx', extracts the pids of netlink sockets * associated to the port and assigns it to 'upcall_pids'. */ static bool vport_get_pids(struct dpif_netlink *dpif, uint32_t port_idx, uint32_t **upcall_pids) { uint32_t *pids; size_t i; /* Since the nl_sock can only be assigned in either all * or none "dpif->handlers" channels, the following check * would suffice. */ if (!dpif->handlers[0].channels[port_idx].sock) { return false; } ovs_assert(!WINDOWS || dpif->n_handlers <= 1); pids = xzalloc(dpif->n_handlers * sizeof *pids); for (i = 0; i < dpif->n_handlers; i++) { pids[i] = nl_sock_pid(dpif->handlers[i].channels[port_idx].sock); } *upcall_pids = pids; return true; } static int vport_add_channels(struct dpif_netlink *dpif, odp_port_t port_no, struct nl_sock **socksp) { struct epoll_event event; uint32_t port_idx = odp_to_u32(port_no); size_t i, j; int error; if (dpif->handlers == NULL) { return 0; } /* We assume that the datapath densely chooses port numbers, which can * therefore be used as an index into 'channels' and 'epoll_events' of * 'dpif->handler'. */ if (port_idx >= dpif->uc_array_size) { uint32_t new_size = port_idx + 1; if (new_size > MAX_PORTS) { VLOG_WARN_RL(&error_rl, "%s: datapath port %"PRIu32" too big", dpif_name(&dpif->dpif), port_no); return EFBIG; } for (i = 0; i < dpif->n_handlers; i++) { struct dpif_handler *handler = &dpif->handlers[i]; handler->channels = xrealloc(handler->channels, new_size * sizeof *handler->channels); for (j = dpif->uc_array_size; j < new_size; j++) { handler->channels[j].sock = NULL; } handler->epoll_events = xrealloc(handler->epoll_events, new_size * sizeof *handler->epoll_events); } dpif->uc_array_size = new_size; } memset(&event, 0, sizeof event); event.events = EPOLLIN; event.data.u32 = port_idx; for (i = 0; i < dpif->n_handlers; i++) { struct dpif_handler *handler = &dpif->handlers[i]; #ifndef _WIN32 if (epoll_ctl(handler->epoll_fd, EPOLL_CTL_ADD, nl_sock_fd(socksp[i]), &event) < 0) { error = errno; goto error; } #endif dpif->handlers[i].channels[port_idx].sock = socksp[i]; dpif->handlers[i].channels[port_idx].last_poll = LLONG_MIN; } return 0; error: for (j = 0; j < i; j++) { #ifndef _WIN32 epoll_ctl(dpif->handlers[j].epoll_fd, EPOLL_CTL_DEL, nl_sock_fd(socksp[j]), NULL); #endif dpif->handlers[j].channels[port_idx].sock = NULL; } return error; } static void vport_del_channels(struct dpif_netlink *dpif, odp_port_t port_no) { uint32_t port_idx = odp_to_u32(port_no); size_t i; if (!dpif->handlers || port_idx >= dpif->uc_array_size) { return; } /* Since the sock can only be assigned in either all or none * of "dpif->handlers" channels, the following check would * suffice. 
*/ if (!dpif->handlers[0].channels[port_idx].sock) { return; } for (i = 0; i < dpif->n_handlers; i++) { struct dpif_handler *handler = &dpif->handlers[i]; #ifndef _WIN32 epoll_ctl(handler->epoll_fd, EPOLL_CTL_DEL, nl_sock_fd(handler->channels[port_idx].sock), NULL); nl_sock_destroy(handler->channels[port_idx].sock); #endif handler->channels[port_idx].sock = NULL; handler->event_offset = handler->n_events = 0; } } static void destroy_all_channels(struct dpif_netlink *dpif) OVS_REQ_WRLOCK(dpif->upcall_lock) { unsigned int i; if (!dpif->handlers) { return; } for (i = 0; i < dpif->uc_array_size; i++ ) { struct dpif_netlink_vport vport_request; uint32_t upcall_pids = 0; /* Since the sock can only be assigned in either all or none * of "dpif->handlers" channels, the following check would * suffice. */ if (!dpif->handlers[0].channels[i].sock) { continue; } /* Turn off upcalls. */ dpif_netlink_vport_init(&vport_request); vport_request.cmd = OVS_VPORT_CMD_SET; vport_request.dp_ifindex = dpif->dp_ifindex; vport_request.port_no = u32_to_odp(i); vport_request.upcall_pids = &upcall_pids; dpif_netlink_vport_transact(&vport_request, NULL, NULL); vport_del_channels(dpif, u32_to_odp(i)); } for (i = 0; i < dpif->n_handlers; i++) { struct dpif_handler *handler = &dpif->handlers[i]; dpif_netlink_handler_uninit(handler); free(handler->epoll_events); free(handler->channels); } free(dpif->handlers); dpif->handlers = NULL; dpif->n_handlers = 0; dpif->uc_array_size = 0; } static void dpif_netlink_close(struct dpif *dpif_) { struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); nl_sock_destroy(dpif->port_notifier); fat_rwlock_wrlock(&dpif->upcall_lock); destroy_all_channels(dpif); fat_rwlock_unlock(&dpif->upcall_lock); fat_rwlock_destroy(&dpif->upcall_lock); free(dpif); } static int dpif_netlink_destroy(struct dpif *dpif_) { struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); struct dpif_netlink_dp dp; dpif_netlink_dp_init(&dp); dp.cmd = OVS_DP_CMD_DEL; dp.dp_ifindex = dpif->dp_ifindex; return dpif_netlink_dp_transact(&dp, NULL, NULL); } static bool dpif_netlink_run(struct dpif *dpif_) { struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); if (dpif->refresh_channels) { dpif->refresh_channels = false; fat_rwlock_wrlock(&dpif->upcall_lock); dpif_netlink_refresh_channels(dpif, dpif->n_handlers); fat_rwlock_unlock(&dpif->upcall_lock); } return false; } static int dpif_netlink_get_stats(const struct dpif *dpif_, struct dpif_dp_stats *stats) { struct dpif_netlink_dp dp; struct ofpbuf *buf; int error; error = dpif_netlink_dp_get(dpif_, &dp, &buf); if (!error) { memset(stats, 0, sizeof *stats); if (dp.stats) { stats->n_hit = get_32aligned_u64(&dp.stats->n_hit); stats->n_missed = get_32aligned_u64(&dp.stats->n_missed); stats->n_lost = get_32aligned_u64(&dp.stats->n_lost); stats->n_flows = get_32aligned_u64(&dp.stats->n_flows); } if (dp.megaflow_stats) { stats->n_masks = dp.megaflow_stats->n_masks; stats->n_mask_hit = get_32aligned_u64( &dp.megaflow_stats->n_mask_hit); } else { stats->n_masks = UINT32_MAX; stats->n_mask_hit = UINT64_MAX; } ofpbuf_delete(buf); } return error; } static const char * get_vport_type(const struct dpif_netlink_vport *vport) { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20); switch (vport->type) { case OVS_VPORT_TYPE_NETDEV: { const char *type = netdev_get_type_from_name(vport->name); return type ? 
type : "system"; } case OVS_VPORT_TYPE_INTERNAL: return "internal"; case OVS_VPORT_TYPE_GENEVE: return "geneve"; case OVS_VPORT_TYPE_GRE: return "gre"; case OVS_VPORT_TYPE_GRE64: return "gre64"; case OVS_VPORT_TYPE_VXLAN: return "vxlan"; case OVS_VPORT_TYPE_LISP: return "lisp"; case OVS_VPORT_TYPE_UNSPEC: case __OVS_VPORT_TYPE_MAX: break; } VLOG_WARN_RL(&rl, "dp%d: port `%s' has unsupported type %u", vport->dp_ifindex, vport->name, (unsigned int) vport->type); return "unknown"; } static enum ovs_vport_type netdev_to_ovs_vport_type(const struct netdev *netdev) { const char *type = netdev_get_type(netdev); if (!strcmp(type, "tap") || !strcmp(type, "system")) { return OVS_VPORT_TYPE_NETDEV; } else if (!strcmp(type, "internal")) { return OVS_VPORT_TYPE_INTERNAL; } else if (!strcmp(type, "geneve")) { return OVS_VPORT_TYPE_GENEVE; } else if (strstr(type, "gre64")) { return OVS_VPORT_TYPE_GRE64; } else if (strstr(type, "gre")) { return OVS_VPORT_TYPE_GRE; } else if (!strcmp(type, "vxlan")) { return OVS_VPORT_TYPE_VXLAN; } else if (!strcmp(type, "lisp")) { return OVS_VPORT_TYPE_LISP; } else { return OVS_VPORT_TYPE_UNSPEC; } } static int dpif_netlink_port_add__(struct dpif_netlink *dpif, struct netdev *netdev, odp_port_t *port_nop) OVS_REQ_WRLOCK(dpif->upcall_lock) { const struct netdev_tunnel_config *tnl_cfg; char namebuf[NETDEV_VPORT_NAME_BUFSIZE]; const char *name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf); const char *type = netdev_get_type(netdev); struct dpif_netlink_vport request, reply; struct ofpbuf *buf; uint64_t options_stub[64 / 8]; struct ofpbuf options; struct nl_sock **socksp = NULL; uint32_t *upcall_pids; int error = 0; if (dpif->handlers) { socksp = vport_create_socksp(dpif, &error); if (!socksp) { return error; } } dpif_netlink_vport_init(&request); request.cmd = OVS_VPORT_CMD_NEW; request.dp_ifindex = dpif->dp_ifindex; request.type = netdev_to_ovs_vport_type(netdev); if (request.type == OVS_VPORT_TYPE_UNSPEC) { VLOG_WARN_RL(&error_rl, "%s: cannot create port `%s' because it has " "unsupported type `%s'", dpif_name(&dpif->dpif), name, type); vport_del_socksp(dpif, socksp); return EINVAL; } request.name = name; if (request.type == OVS_VPORT_TYPE_NETDEV) { #ifdef _WIN32 /* XXX : Map appropiate Windows handle */ #else netdev_linux_ethtool_set_flag(netdev, ETH_FLAG_LRO, "LRO", false); #endif } tnl_cfg = netdev_get_tunnel_config(netdev); if (tnl_cfg && tnl_cfg->dst_port != 0) { ofpbuf_use_stack(&options, options_stub, sizeof options_stub); nl_msg_put_u16(&options, OVS_TUNNEL_ATTR_DST_PORT, ntohs(tnl_cfg->dst_port)); request.options = ofpbuf_data(&options); request.options_len = ofpbuf_size(&options); } request.port_no = *port_nop; upcall_pids = vport_socksp_to_pids(socksp, dpif->n_handlers); request.n_upcall_pids = socksp ? dpif->n_handlers : 1; request.upcall_pids = upcall_pids; error = dpif_netlink_vport_transact(&request, &reply, &buf); if (!error) { *port_nop = reply.port_no; } else { if (error == EBUSY && *port_nop != ODPP_NONE) { VLOG_INFO("%s: requested port %"PRIu32" is in use", dpif_name(&dpif->dpif), *port_nop); } vport_del_socksp(dpif, socksp); goto exit; } if (socksp) { error = vport_add_channels(dpif, *port_nop, socksp); if (error) { VLOG_INFO("%s: could not add channel for port %s", dpif_name(&dpif->dpif), name); /* Delete the port. 
*/ dpif_netlink_vport_init(&request); request.cmd = OVS_VPORT_CMD_DEL; request.dp_ifindex = dpif->dp_ifindex; request.port_no = *port_nop; dpif_netlink_vport_transact(&request, NULL, NULL); vport_del_socksp(dpif, socksp); goto exit; } } free(socksp); exit: ofpbuf_delete(buf); free(upcall_pids); return error; } static int dpif_netlink_port_add(struct dpif *dpif_, struct netdev *netdev, odp_port_t *port_nop) { struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); int error; fat_rwlock_wrlock(&dpif->upcall_lock); error = dpif_netlink_port_add__(dpif, netdev, port_nop); fat_rwlock_unlock(&dpif->upcall_lock); return error; } static int dpif_netlink_port_del__(struct dpif_netlink *dpif, odp_port_t port_no) OVS_REQ_WRLOCK(dpif->upcall_lock) { struct dpif_netlink_vport vport; int error; dpif_netlink_vport_init(&vport); vport.cmd = OVS_VPORT_CMD_DEL; vport.dp_ifindex = dpif->dp_ifindex; vport.port_no = port_no; error = dpif_netlink_vport_transact(&vport, NULL, NULL); vport_del_channels(dpif, port_no); return error; } static int dpif_netlink_port_del(struct dpif *dpif_, odp_port_t port_no) { struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); int error; fat_rwlock_wrlock(&dpif->upcall_lock); error = dpif_netlink_port_del__(dpif, port_no); fat_rwlock_unlock(&dpif->upcall_lock); return error; } static int dpif_netlink_port_query__(const struct dpif_netlink *dpif, odp_port_t port_no, const char *port_name, struct dpif_port *dpif_port) { struct dpif_netlink_vport request; struct dpif_netlink_vport reply; struct ofpbuf *buf; int error; dpif_netlink_vport_init(&request); request.cmd = OVS_VPORT_CMD_GET; request.dp_ifindex = dpif->dp_ifindex; request.port_no = port_no; request.name = port_name; error = dpif_netlink_vport_transact(&request, &reply, &buf); if (!error) { if (reply.dp_ifindex != request.dp_ifindex) { /* A query by name reported that 'port_name' is in some datapath * other than 'dpif', but the caller wants to know about 'dpif'. */ error = ENODEV; } else if (dpif_port) { dpif_port->name = xstrdup(reply.name); dpif_port->type = xstrdup(get_vport_type(&reply)); dpif_port->port_no = reply.port_no; } ofpbuf_delete(buf); } return error; } static int dpif_netlink_port_query_by_number(const struct dpif *dpif_, odp_port_t port_no, struct dpif_port *dpif_port) { struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); return dpif_netlink_port_query__(dpif, port_no, NULL, dpif_port); } static int dpif_netlink_port_query_by_name(const struct dpif *dpif_, const char *devname, struct dpif_port *dpif_port) { struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); return dpif_netlink_port_query__(dpif, 0, devname, dpif_port); } static uint32_t dpif_netlink_port_get_pid__(const struct dpif_netlink *dpif, odp_port_t port_no, uint32_t hash) OVS_REQ_RDLOCK(dpif->upcall_lock) { uint32_t port_idx = odp_to_u32(port_no); uint32_t pid = 0; if (dpif->handlers && dpif->uc_array_size > 0) { /* The ODPP_NONE "reserved" port number uses the "ovs-system"'s * channel, since it is not heavily loaded. */ uint32_t idx = port_idx >= dpif->uc_array_size ? 0 : port_idx; struct dpif_handler *h = &dpif->handlers[hash % dpif->n_handlers]; /* Needs to check in case the socket pointer is changed in between * the holding of upcall_lock. A known case happens when the main * thread deletes the vport while the handler thread is handling * the upcall from that port. 
*/ if (h->channels[idx].sock) { pid = nl_sock_pid(h->channels[idx].sock); } } return pid; } static uint32_t dpif_netlink_port_get_pid(const struct dpif *dpif_, odp_port_t port_no, uint32_t hash) { const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); uint32_t ret; fat_rwlock_rdlock(&dpif->upcall_lock); ret = dpif_netlink_port_get_pid__(dpif, port_no, hash); fat_rwlock_unlock(&dpif->upcall_lock); return ret; } static int dpif_netlink_flow_flush(struct dpif *dpif_) { const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); struct dpif_netlink_flow flow; dpif_netlink_flow_init(&flow); flow.cmd = OVS_FLOW_CMD_DEL; flow.dp_ifindex = dpif->dp_ifindex; return dpif_netlink_flow_transact(&flow, NULL, NULL); } struct dpif_netlink_port_state { struct nl_dump dump; struct ofpbuf buf; }; static void dpif_netlink_port_dump_start__(const struct dpif_netlink *dpif, struct nl_dump *dump) { struct dpif_netlink_vport request; struct ofpbuf *buf; dpif_netlink_vport_init(&request); request.cmd = OVS_VPORT_CMD_GET; request.dp_ifindex = dpif->dp_ifindex; buf = ofpbuf_new(1024); dpif_netlink_vport_to_ofpbuf(&request, buf); nl_dump_start(dump, NETLINK_GENERIC, buf); ofpbuf_delete(buf); } static int dpif_netlink_port_dump_start(const struct dpif *dpif_, void **statep) { struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); struct dpif_netlink_port_state *state; *statep = state = xmalloc(sizeof *state); dpif_netlink_port_dump_start__(dpif, &state->dump); ofpbuf_init(&state->buf, NL_DUMP_BUFSIZE); return 0; } static int dpif_netlink_port_dump_next__(const struct dpif_netlink *dpif, struct nl_dump *dump, struct dpif_netlink_vport *vport, struct ofpbuf *buffer) { struct ofpbuf buf; int error; if (!nl_dump_next(dump, &buf, buffer)) { return EOF; } error = dpif_netlink_vport_from_ofpbuf(vport, &buf); if (error) { VLOG_WARN_RL(&error_rl, "%s: failed to parse vport record (%s)", dpif_name(&dpif->dpif), ovs_strerror(error)); } return error; } static int dpif_netlink_port_dump_next(const struct dpif *dpif_, void *state_, struct dpif_port *dpif_port) { struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); struct dpif_netlink_port_state *state = state_; struct dpif_netlink_vport vport; int error; error = dpif_netlink_port_dump_next__(dpif, &state->dump, &vport, &state->buf); if (error) { return error; } dpif_port->name = CONST_CAST(char *, vport.name); dpif_port->type = CONST_CAST(char *, get_vport_type(&vport)); dpif_port->port_no = vport.port_no; return 0; } static int dpif_netlink_port_dump_done(const struct dpif *dpif_ OVS_UNUSED, void *state_) { struct dpif_netlink_port_state *state = state_; int error = nl_dump_done(&state->dump); ofpbuf_uninit(&state->buf); free(state); return error; } static int dpif_netlink_port_poll(const struct dpif *dpif_, char **devnamep) { struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); /* Lazily create the Netlink socket to listen for notifications. */ if (!dpif->port_notifier) { struct nl_sock *sock; int error; error = nl_sock_create(NETLINK_GENERIC, &sock); if (error) { return error; } error = nl_sock_join_mcgroup(sock, ovs_vport_mcgroup); if (error) { nl_sock_destroy(sock); return error; } dpif->port_notifier = sock; /* We have no idea of the current state so report that everything * changed. 
*/ return ENOBUFS; } for (;;) { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); uint64_t buf_stub[4096 / 8]; struct ofpbuf buf; int error; ofpbuf_use_stub(&buf, buf_stub, sizeof buf_stub); error = nl_sock_recv(dpif->port_notifier, &buf, false); if (!error) { struct dpif_netlink_vport vport; error = dpif_netlink_vport_from_ofpbuf(&vport, &buf); if (!error) { if (vport.dp_ifindex == dpif->dp_ifindex && (vport.cmd == OVS_VPORT_CMD_NEW || vport.cmd == OVS_VPORT_CMD_DEL || vport.cmd == OVS_VPORT_CMD_SET)) { VLOG_DBG("port_changed: dpif:%s vport:%s cmd:%"PRIu8, dpif->dpif.full_name, vport.name, vport.cmd); if (vport.cmd == OVS_VPORT_CMD_DEL && dpif->handlers) { dpif->refresh_channels = true; } *devnamep = xstrdup(vport.name); ofpbuf_uninit(&buf); return 0; } } } else if (error != EAGAIN) { VLOG_WARN_RL(&rl, "error reading or parsing netlink (%s)", ovs_strerror(error)); nl_sock_drain(dpif->port_notifier); error = ENOBUFS; } ofpbuf_uninit(&buf); if (error) { return error; } } } static void dpif_netlink_port_poll_wait(const struct dpif *dpif_) { const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); if (dpif->port_notifier) { nl_sock_wait(dpif->port_notifier, POLLIN); } else { poll_immediate_wake(); } } static void dpif_netlink_init_flow_get(const struct dpif_netlink *dpif, const struct nlattr *key, size_t key_len, struct dpif_netlink_flow *request) { dpif_netlink_flow_init(request); request->cmd = OVS_FLOW_CMD_GET; request->dp_ifindex = dpif->dp_ifindex; request->key = key; request->key_len = key_len; } static int dpif_netlink_flow_get(const struct dpif_netlink *dpif, const struct nlattr *key, size_t key_len, struct dpif_netlink_flow *reply, struct ofpbuf **bufp) { struct dpif_netlink_flow request; dpif_netlink_init_flow_get(dpif, key, key_len, &request); return dpif_netlink_flow_transact(&request, reply, bufp); } static void dpif_netlink_init_flow_put(struct dpif_netlink *dpif, const struct dpif_flow_put *put, struct dpif_netlink_flow *request) { static const struct nlattr dummy_action; dpif_netlink_flow_init(request); request->cmd = (put->flags & DPIF_FP_CREATE ? OVS_FLOW_CMD_NEW : OVS_FLOW_CMD_SET); request->dp_ifindex = dpif->dp_ifindex; request->key = put->key; request->key_len = put->key_len; request->mask = put->mask; request->mask_len = put->mask_len; /* Ensure that OVS_FLOW_ATTR_ACTIONS will always be included. */ request->actions = (put->actions ? put->actions : CONST_CAST(struct nlattr *, &dummy_action)); request->actions_len = put->actions_len; if (put->flags & DPIF_FP_ZERO_STATS) { request->clear = true; } if (put->flags & DPIF_FP_PROBE) { request->probe = true; } request->nlmsg_flags = put->flags & DPIF_FP_MODIFY ? 
0 : NLM_F_CREATE; } static void dpif_netlink_init_flow_del(struct dpif_netlink *dpif, const struct dpif_flow_del *del, struct dpif_netlink_flow *request) { dpif_netlink_flow_init(request); request->cmd = OVS_FLOW_CMD_DEL; request->dp_ifindex = dpif->dp_ifindex; request->key = del->key; request->key_len = del->key_len; } struct dpif_netlink_flow_dump { struct dpif_flow_dump up; struct nl_dump nl_dump; atomic_int status; }; static struct dpif_netlink_flow_dump * dpif_netlink_flow_dump_cast(struct dpif_flow_dump *dump) { return CONTAINER_OF(dump, struct dpif_netlink_flow_dump, up); } static struct dpif_flow_dump * dpif_netlink_flow_dump_create(const struct dpif *dpif_) { const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); struct dpif_netlink_flow_dump *dump; struct dpif_netlink_flow request; struct ofpbuf *buf; dump = xmalloc(sizeof *dump); dpif_flow_dump_init(&dump->up, dpif_); dpif_netlink_flow_init(&request); request.cmd = OVS_FLOW_CMD_GET; request.dp_ifindex = dpif->dp_ifindex; buf = ofpbuf_new(1024); dpif_netlink_flow_to_ofpbuf(&request, buf); nl_dump_start(&dump->nl_dump, NETLINK_GENERIC, buf); ofpbuf_delete(buf); atomic_init(&dump->status, 0); return &dump->up; } static int dpif_netlink_flow_dump_destroy(struct dpif_flow_dump *dump_) { struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_); unsigned int nl_status = nl_dump_done(&dump->nl_dump); int dump_status; /* No other thread has access to 'dump' at this point. */ atomic_read_relaxed(&dump->status, &dump_status); free(dump); return dump_status ? dump_status : nl_status; } struct dpif_netlink_flow_dump_thread { struct dpif_flow_dump_thread up; struct dpif_netlink_flow_dump *dump; struct dpif_netlink_flow flow; struct dpif_flow_stats stats; struct ofpbuf nl_flows; /* Always used to store flows. */ struct ofpbuf *nl_actions; /* Used if kernel does not supply actions. 
*/ }; static struct dpif_netlink_flow_dump_thread * dpif_netlink_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread) { return CONTAINER_OF(thread, struct dpif_netlink_flow_dump_thread, up); } static struct dpif_flow_dump_thread * dpif_netlink_flow_dump_thread_create(struct dpif_flow_dump *dump_) { struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_); struct dpif_netlink_flow_dump_thread *thread; thread = xmalloc(sizeof *thread); dpif_flow_dump_thread_init(&thread->up, &dump->up); thread->dump = dump; ofpbuf_init(&thread->nl_flows, NL_DUMP_BUFSIZE); thread->nl_actions = NULL; return &thread->up; } static void dpif_netlink_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_) { struct dpif_netlink_flow_dump_thread *thread = dpif_netlink_flow_dump_thread_cast(thread_); ofpbuf_uninit(&thread->nl_flows); ofpbuf_delete(thread->nl_actions); free(thread); } static void dpif_netlink_flow_to_dpif_flow(struct dpif *dpif, struct dpif_flow *dpif_flow, const struct dpif_netlink_flow *datapath_flow) { dpif_flow->key = datapath_flow->key; dpif_flow->key_len = datapath_flow->key_len; dpif_flow->mask = datapath_flow->mask; dpif_flow->mask_len = datapath_flow->mask_len; dpif_flow->actions = datapath_flow->actions; dpif_flow->actions_len = datapath_flow->actions_len; dpif_flow_hash(dpif, datapath_flow->key, datapath_flow->key_len, &dpif_flow->ufid); dpif_netlink_flow_get_stats(datapath_flow, &dpif_flow->stats); } static int dpif_netlink_flow_dump_next(struct dpif_flow_dump_thread *thread_, struct dpif_flow *flows, int max_flows) { struct dpif_netlink_flow_dump_thread *thread = dpif_netlink_flow_dump_thread_cast(thread_); struct dpif_netlink_flow_dump *dump = thread->dump; struct dpif_netlink *dpif = dpif_netlink_cast(thread->up.dpif); int n_flows; ofpbuf_delete(thread->nl_actions); thread->nl_actions = NULL; n_flows = 0; while (!n_flows || (n_flows < max_flows && ofpbuf_size(&thread->nl_flows))) { struct dpif_netlink_flow datapath_flow; struct ofpbuf nl_flow; int error; /* Try to grab another flow. */ if (!nl_dump_next(&dump->nl_dump, &nl_flow, &thread->nl_flows)) { break; } /* Convert the flow to our output format. */ error = dpif_netlink_flow_from_ofpbuf(&datapath_flow, &nl_flow); if (error) { atomic_store_relaxed(&dump->status, error); break; } if (datapath_flow.actions) { /* Common case: the flow includes actions. */ dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++], &datapath_flow); } else { /* Rare case: the flow does not include actions. Retrieve this * individual flow again to get the actions. */ error = dpif_netlink_flow_get(dpif, datapath_flow.key, datapath_flow.key_len, &datapath_flow, &thread->nl_actions); if (error == ENOENT) { VLOG_DBG("dumped flow disappeared on get"); continue; } else if (error) { VLOG_WARN("error fetching dumped flow: %s", ovs_strerror(error)); atomic_store_relaxed(&dump->status, error); break; } /* Save this flow. Then exit, because we only have one buffer to * handle this case. 
*/ dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++], &datapath_flow); break; } } return n_flows; } static void dpif_netlink_encode_execute(int dp_ifindex, const struct dpif_execute *d_exec, struct ofpbuf *buf) { struct ovs_header *k_exec; size_t key_ofs; ofpbuf_prealloc_tailroom(buf, (64 + ofpbuf_size(d_exec->packet) + ODP_KEY_METADATA_SIZE + d_exec->actions_len)); nl_msg_put_genlmsghdr(buf, 0, ovs_packet_family, NLM_F_REQUEST, OVS_PACKET_CMD_EXECUTE, OVS_PACKET_VERSION); k_exec = ofpbuf_put_uninit(buf, sizeof *k_exec); k_exec->dp_ifindex = dp_ifindex; nl_msg_put_unspec(buf, OVS_PACKET_ATTR_PACKET, ofpbuf_data(d_exec->packet), ofpbuf_size(d_exec->packet)); key_ofs = nl_msg_start_nested(buf, OVS_PACKET_ATTR_KEY); odp_key_from_pkt_metadata(buf, &d_exec->md); nl_msg_end_nested(buf, key_ofs); nl_msg_put_unspec(buf, OVS_PACKET_ATTR_ACTIONS, d_exec->actions, d_exec->actions_len); if (d_exec->probe) { nl_msg_put_flag(buf, OVS_FLOW_ATTR_PROBE); } } #define MAX_OPS 50 static void dpif_netlink_operate__(struct dpif_netlink *dpif, struct dpif_op **ops, size_t n_ops) { struct op_auxdata { struct nl_transaction txn; struct ofpbuf request; uint64_t request_stub[1024 / 8]; struct ofpbuf reply; uint64_t reply_stub[1024 / 8]; } auxes[MAX_OPS]; struct nl_transaction *txnsp[MAX_OPS]; size_t i; ovs_assert(n_ops <= MAX_OPS); for (i = 0; i < n_ops; i++) { struct op_auxdata *aux = &auxes[i]; struct dpif_op *op = ops[i]; struct dpif_flow_put *put; struct dpif_flow_del *del; struct dpif_execute *execute; struct dpif_flow_get *get; struct dpif_netlink_flow flow; ofpbuf_use_stub(&aux->request, aux->request_stub, sizeof aux->request_stub); aux->txn.request = &aux->request; ofpbuf_use_stub(&aux->reply, aux->reply_stub, sizeof aux->reply_stub); aux->txn.reply = NULL; switch (op->type) { case DPIF_OP_FLOW_PUT: put = &op->u.flow_put; dpif_netlink_init_flow_put(dpif, put, &flow); if (put->stats) { flow.nlmsg_flags |= NLM_F_ECHO; aux->txn.reply = &aux->reply; } dpif_netlink_flow_to_ofpbuf(&flow, &aux->request); break; case DPIF_OP_FLOW_DEL: del = &op->u.flow_del; dpif_netlink_init_flow_del(dpif, del, &flow); if (del->stats) { flow.nlmsg_flags |= NLM_F_ECHO; aux->txn.reply = &aux->reply; } dpif_netlink_flow_to_ofpbuf(&flow, &aux->request); break; case DPIF_OP_EXECUTE: execute = &op->u.execute; dpif_netlink_encode_execute(dpif->dp_ifindex, execute, &aux->request); break; case DPIF_OP_FLOW_GET: get = &op->u.flow_get; dpif_netlink_init_flow_get(dpif, get->key, get->key_len, &flow); aux->txn.reply = get->buffer; dpif_netlink_flow_to_ofpbuf(&flow, &aux->request); break; default: OVS_NOT_REACHED(); } } for (i = 0; i < n_ops; i++) { txnsp[i] = &auxes[i].txn; } nl_transact_multiple(NETLINK_GENERIC, txnsp, n_ops); for (i = 0; i < n_ops; i++) { struct op_auxdata *aux = &auxes[i]; struct nl_transaction *txn = &auxes[i].txn; struct dpif_op *op = ops[i]; struct dpif_flow_put *put; struct dpif_flow_del *del; struct dpif_flow_get *get; op->error = txn->error; switch (op->type) { case DPIF_OP_FLOW_PUT: put = &op->u.flow_put; if (put->stats) { if (!op->error) { struct dpif_netlink_flow reply; op->error = dpif_netlink_flow_from_ofpbuf(&reply, txn->reply); if (!op->error) { dpif_netlink_flow_get_stats(&reply, put->stats); } } } break; case DPIF_OP_FLOW_DEL: del = &op->u.flow_del; if (del->stats) { if (!op->error) { struct dpif_netlink_flow reply; op->error = dpif_netlink_flow_from_ofpbuf(&reply, txn->reply); if (!op->error) { dpif_netlink_flow_get_stats(&reply, del->stats); } } } break; case DPIF_OP_EXECUTE: break; case 
DPIF_OP_FLOW_GET: get = &op->u.flow_get; if (!op->error) { struct dpif_netlink_flow reply; op->error = dpif_netlink_flow_from_ofpbuf(&reply, txn->reply); if (!op->error) { dpif_netlink_flow_to_dpif_flow(&dpif->dpif, get->flow, &reply); } } break; default: OVS_NOT_REACHED(); } ofpbuf_uninit(&aux->request); ofpbuf_uninit(&aux->reply); } } static void dpif_netlink_operate(struct dpif *dpif_, struct dpif_op **ops, size_t n_ops) { struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); while (n_ops > 0) { size_t chunk = MIN(n_ops, MAX_OPS); dpif_netlink_operate__(dpif, ops, chunk); ops += chunk; n_ops -= chunk; } } #if _WIN32 static void dpif_netlink_handler_uninit(struct dpif_handler *handler) { vport_delete_sock_pool(handler); } static int dpif_netlink_handler_init(struct dpif_handler *handler) { return vport_create_sock_pool(handler); } #else static int dpif_netlink_handler_init(struct dpif_handler *handler) { handler->epoll_fd = epoll_create(10); return handler->epoll_fd < 0 ? errno : 0; } static void dpif_netlink_handler_uninit(struct dpif_handler *handler) { close(handler->epoll_fd); } #endif /* Synchronizes 'channels' in 'dpif->handlers' with the set of vports * currently in 'dpif' in the kernel, by adding a new set of channels for * any kernel vport that lacks one and deleting any channels that have no * backing kernel vports. */ static int dpif_netlink_refresh_channels(struct dpif_netlink *dpif, uint32_t n_handlers) OVS_REQ_WRLOCK(dpif->upcall_lock) { unsigned long int *keep_channels; struct dpif_netlink_vport vport; size_t keep_channels_nbits; struct nl_dump dump; uint64_t reply_stub[NL_DUMP_BUFSIZE / 8]; struct ofpbuf buf; int retval = 0; size_t i; ovs_assert(!WINDOWS || n_handlers <= 1); ovs_assert(!WINDOWS || dpif->n_handlers <= 1); if (dpif->n_handlers != n_handlers) { destroy_all_channels(dpif); dpif->handlers = xzalloc(n_handlers * sizeof *dpif->handlers); for (i = 0; i < n_handlers; i++) { int error; struct dpif_handler *handler = &dpif->handlers[i]; error = dpif_netlink_handler_init(handler); if (error) { size_t j; struct dpif_handler *tmp = &dpif->handlers[i]; for (j = 0; j < i; j++) { dpif_netlink_handler_uninit(tmp); } free(dpif->handlers); dpif->handlers = NULL; return error; } } dpif->n_handlers = n_handlers; } for (i = 0; i < n_handlers; i++) { struct dpif_handler *handler = &dpif->handlers[i]; handler->event_offset = handler->n_events = 0; } keep_channels_nbits = dpif->uc_array_size; keep_channels = bitmap_allocate(keep_channels_nbits); ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub); dpif_netlink_port_dump_start__(dpif, &dump); while (!dpif_netlink_port_dump_next__(dpif, &dump, &vport, &buf)) { uint32_t port_no = odp_to_u32(vport.port_no); uint32_t *upcall_pids = NULL; int error; if (port_no >= dpif->uc_array_size || !vport_get_pids(dpif, port_no, &upcall_pids)) { struct nl_sock **socksp = vport_create_socksp(dpif, &error); if (!socksp) { goto error; } error = vport_add_channels(dpif, vport.port_no, socksp); if (error) { VLOG_INFO("%s: could not add channels for port %s", dpif_name(&dpif->dpif), vport.name); vport_del_socksp(dpif, socksp); retval = error; goto error; } upcall_pids = vport_socksp_to_pids(socksp, dpif->n_handlers); free(socksp); } /* Configure the vport to deliver misses to 'sock'. 
*/ if (vport.upcall_pids[0] == 0 || vport.n_upcall_pids != dpif->n_handlers || memcmp(upcall_pids, vport.upcall_pids, n_handlers * sizeof *upcall_pids)) { struct dpif_netlink_vport vport_request; dpif_netlink_vport_init(&vport_request); vport_request.cmd = OVS_VPORT_CMD_SET; vport_request.dp_ifindex = dpif->dp_ifindex; vport_request.port_no = vport.port_no; vport_request.n_upcall_pids = dpif->n_handlers; vport_request.upcall_pids = upcall_pids; error = dpif_netlink_vport_transact(&vport_request, NULL, NULL); if (error) { VLOG_WARN_RL(&error_rl, "%s: failed to set upcall pid on port: %s", dpif_name(&dpif->dpif), ovs_strerror(error)); if (error != ENODEV && error != ENOENT) { retval = error; } else { /* The vport isn't really there, even though the dump says * it is. Probably we just hit a race after a port * disappeared. */ } goto error; } } if (port_no < keep_channels_nbits) { bitmap_set1(keep_channels, port_no); } free(upcall_pids); continue; error: free(upcall_pids); vport_del_channels(dpif, vport.port_no); } nl_dump_done(&dump); ofpbuf_uninit(&buf); /* Discard any saved channels that we didn't reuse. */ for (i = 0; i < keep_channels_nbits; i++) { if (!bitmap_is_set(keep_channels, i)) { vport_del_channels(dpif, u32_to_odp(i)); } } free(keep_channels); return retval; } static int dpif_netlink_recv_set__(struct dpif_netlink *dpif, bool enable) OVS_REQ_WRLOCK(dpif->upcall_lock) { if ((dpif->handlers != NULL) == enable) { return 0; } else if (!enable) { destroy_all_channels(dpif); return 0; } else { return dpif_netlink_refresh_channels(dpif, 1); } } static int dpif_netlink_recv_set(struct dpif *dpif_, bool enable) { struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); int error; fat_rwlock_wrlock(&dpif->upcall_lock); error = dpif_netlink_recv_set__(dpif, enable); fat_rwlock_unlock(&dpif->upcall_lock); return error; } static int dpif_netlink_handlers_set(struct dpif *dpif_, uint32_t n_handlers) { struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); int error = 0; #ifdef _WIN32 /* Multiple upcall handlers will be supported once kernel datapath supports * it. */ if (n_handlers > 1) { return error; } #endif fat_rwlock_wrlock(&dpif->upcall_lock); if (dpif->handlers) { error = dpif_netlink_refresh_channels(dpif, n_handlers); } fat_rwlock_unlock(&dpif->upcall_lock); return error; } static int dpif_netlink_queue_to_priority(const struct dpif *dpif OVS_UNUSED, uint32_t queue_id, uint32_t *priority) { if (queue_id < 0xf000) { *priority = TC_H_MAKE(1 << 16, queue_id + 1); return 0; } else { return EINVAL; } } static int parse_odp_packet(const struct dpif_netlink *dpif, struct ofpbuf *buf, struct dpif_upcall *upcall, int *dp_ifindex) { static const struct nl_policy ovs_packet_policy[] = { /* Always present. */ [OVS_PACKET_ATTR_PACKET] = { .type = NL_A_UNSPEC, .min_len = ETH_HEADER_LEN }, [OVS_PACKET_ATTR_KEY] = { .type = NL_A_NESTED }, /* OVS_PACKET_CMD_ACTION only. 
*/ [OVS_PACKET_ATTR_USERDATA] = { .type = NL_A_UNSPEC, .optional = true }, [OVS_PACKET_ATTR_EGRESS_TUN_KEY] = { .type = NL_A_NESTED, .optional = true }, }; struct ovs_header *ovs_header; struct nlattr *a[ARRAY_SIZE(ovs_packet_policy)]; struct nlmsghdr *nlmsg; struct genlmsghdr *genl; struct ofpbuf b; int type; ofpbuf_use_const(&b, ofpbuf_data(buf), ofpbuf_size(buf)); nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg); genl = ofpbuf_try_pull(&b, sizeof *genl); ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header); if (!nlmsg || !genl || !ovs_header || nlmsg->nlmsg_type != ovs_packet_family || !nl_policy_parse(&b, 0, ovs_packet_policy, a, ARRAY_SIZE(ovs_packet_policy))) { return EINVAL; } type = (genl->cmd == OVS_PACKET_CMD_MISS ? DPIF_UC_MISS : genl->cmd == OVS_PACKET_CMD_ACTION ? DPIF_UC_ACTION : -1); if (type < 0) { return EINVAL; } /* (Re)set ALL fields of '*upcall' on successful return. */ upcall->type = type; upcall->key = CONST_CAST(struct nlattr *, nl_attr_get(a[OVS_PACKET_ATTR_KEY])); upcall->key_len = nl_attr_get_size(a[OVS_PACKET_ATTR_KEY]); dpif_flow_hash(&dpif->dpif, upcall->key, upcall->key_len, &upcall->ufid); upcall->userdata = a[OVS_PACKET_ATTR_USERDATA]; upcall->out_tun_key = a[OVS_PACKET_ATTR_EGRESS_TUN_KEY]; /* Allow overwriting the netlink attribute header without reallocating. */ ofpbuf_use_stub(&upcall->packet, CONST_CAST(struct nlattr *, nl_attr_get(a[OVS_PACKET_ATTR_PACKET])) - 1, nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]) + sizeof(struct nlattr)); ofpbuf_set_data(&upcall->packet, (char *)ofpbuf_data(&upcall->packet) + sizeof(struct nlattr)); ofpbuf_set_size(&upcall->packet, nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET])); *dp_ifindex = ovs_header->dp_ifindex; return 0; } #ifdef _WIN32 #define PACKET_RECV_BATCH_SIZE 50 static int dpif_netlink_recv_windows(struct dpif_netlink *dpif, uint32_t handler_id, struct dpif_upcall *upcall, struct ofpbuf *buf) OVS_REQ_RDLOCK(dpif->upcall_lock) { struct dpif_handler *handler; int read_tries = 0; struct dpif_windows_vport_sock *sock_pool; uint32_t i; if (!dpif->handlers) { return EAGAIN; } /* Only one handler is supported currently. */ if (handler_id >= 1) { return EAGAIN; } if (handler_id >= dpif->n_handlers) { return EAGAIN; } handler = &dpif->handlers[handler_id]; sock_pool = handler->vport_sock_pool; for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) { for (;;) { int dp_ifindex; int error; if (++read_tries > PACKET_RECV_BATCH_SIZE) { return EAGAIN; } error = nl_sock_recv(sock_pool[i].nl_sock, buf, false); if (error == ENOBUFS) { /* ENOBUFS typically means that we've received so many * packets that the buffer overflowed. Try again * immediately because there's almost certainly a packet * waiting for us. 
*/ /* XXX: report_loss(dpif, ch, idx, handler_id); */ continue; } /* XXX: ch->last_poll = time_msec(); */ if (error) { if (error == EAGAIN) { break; } return error; } error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex); if (!error && dp_ifindex == dpif->dp_ifindex) { return 0; } else if (error) { return error; } } } return EAGAIN; } #else static int dpif_netlink_recv__(struct dpif_netlink *dpif, uint32_t handler_id, struct dpif_upcall *upcall, struct ofpbuf *buf) OVS_REQ_RDLOCK(dpif->upcall_lock) { struct dpif_handler *handler; int read_tries = 0; if (!dpif->handlers || handler_id >= dpif->n_handlers) { return EAGAIN; } handler = &dpif->handlers[handler_id]; if (handler->event_offset >= handler->n_events) { int retval; handler->event_offset = handler->n_events = 0; do { retval = epoll_wait(handler->epoll_fd, handler->epoll_events, dpif->uc_array_size, 0); } while (retval < 0 && errno == EINTR); if (retval < 0) { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1); VLOG_WARN_RL(&rl, "epoll_wait failed (%s)", ovs_strerror(errno)); } else if (retval > 0) { handler->n_events = retval; } } while (handler->event_offset < handler->n_events) { int idx = handler->epoll_events[handler->event_offset].data.u32; struct dpif_channel *ch = &dpif->handlers[handler_id].channels[idx]; handler->event_offset++; for (;;) { int dp_ifindex; int error; if (++read_tries > 50) { return EAGAIN; } error = nl_sock_recv(ch->sock, buf, false); if (error == ENOBUFS) { /* ENOBUFS typically means that we've received so many * packets that the buffer overflowed. Try again * immediately because there's almost certainly a packet * waiting for us. */ report_loss(dpif, ch, idx, handler_id); continue; } ch->last_poll = time_msec(); if (error) { if (error == EAGAIN) { break; } return error; } error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex); if (!error && dp_ifindex == dpif->dp_ifindex) { return 0; } else if (error) { return error; } } } return EAGAIN; } #endif static int dpif_netlink_recv(struct dpif *dpif_, uint32_t handler_id, struct dpif_upcall *upcall, struct ofpbuf *buf) { struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); int error; fat_rwlock_rdlock(&dpif->upcall_lock); #ifdef _WIN32 error = dpif_netlink_recv_windows(dpif, handler_id, upcall, buf); #else error = dpif_netlink_recv__(dpif, handler_id, upcall, buf); #endif fat_rwlock_unlock(&dpif->upcall_lock); return error; } static void dpif_netlink_recv_wait__(struct dpif_netlink *dpif, uint32_t handler_id) OVS_REQ_RDLOCK(dpif->upcall_lock) { #ifdef _WIN32 uint32_t i; struct dpif_windows_vport_sock *sock_pool = dpif->handlers[handler_id].vport_sock_pool; /* Only one handler is supported currently. 
*/ if (handler_id >= 1) { return; } for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) { nl_sock_wait(sock_pool[i].nl_sock, POLLIN); } #else if (dpif->handlers && handler_id < dpif->n_handlers) { struct dpif_handler *handler = &dpif->handlers[handler_id]; poll_fd_wait(handler->epoll_fd, POLLIN); } #endif } static void dpif_netlink_recv_wait(struct dpif *dpif_, uint32_t handler_id) { struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); fat_rwlock_rdlock(&dpif->upcall_lock); dpif_netlink_recv_wait__(dpif, handler_id); fat_rwlock_unlock(&dpif->upcall_lock); } static void dpif_netlink_recv_purge__(struct dpif_netlink *dpif) OVS_REQ_WRLOCK(dpif->upcall_lock) { if (dpif->handlers) { size_t i, j; for (i = 0; i < dpif->uc_array_size; i++ ) { if (!dpif->handlers[0].channels[i].sock) { continue; } for (j = 0; j < dpif->n_handlers; j++) { nl_sock_drain(dpif->handlers[j].channels[i].sock); } } } } static void dpif_netlink_recv_purge(struct dpif *dpif_) { struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); fat_rwlock_wrlock(&dpif->upcall_lock); dpif_netlink_recv_purge__(dpif); fat_rwlock_unlock(&dpif->upcall_lock); } static char * dpif_netlink_get_datapath_version(void) { char *version_str = NULL; #ifdef __linux__ #define MAX_VERSION_STR_SIZE 80 #define LINUX_DATAPATH_VERSION_FILE "/sys/module/openvswitch/version" FILE *f; f = fopen(LINUX_DATAPATH_VERSION_FILE, "r"); if (f) { char *newline; char version[MAX_VERSION_STR_SIZE]; if (fgets(version, MAX_VERSION_STR_SIZE, f)) { newline = strchr(version, '\n'); if (newline) { *newline = '\0'; } version_str = xstrdup(version); } fclose(f); } #endif return version_str; } const struct dpif_class dpif_netlink_class = { "system", dpif_netlink_enumerate, NULL, dpif_netlink_open, dpif_netlink_close, dpif_netlink_destroy, dpif_netlink_run, NULL, /* wait */ dpif_netlink_get_stats, dpif_netlink_port_add, dpif_netlink_port_del, dpif_netlink_port_query_by_number, dpif_netlink_port_query_by_name, dpif_netlink_port_get_pid, dpif_netlink_port_dump_start, dpif_netlink_port_dump_next, dpif_netlink_port_dump_done, dpif_netlink_port_poll, dpif_netlink_port_poll_wait, dpif_netlink_flow_flush, dpif_netlink_flow_dump_create, dpif_netlink_flow_dump_destroy, dpif_netlink_flow_dump_thread_create, dpif_netlink_flow_dump_thread_destroy, dpif_netlink_flow_dump_next, dpif_netlink_operate, dpif_netlink_recv_set, dpif_netlink_handlers_set, NULL, /* poll_thread_set */ dpif_netlink_queue_to_priority, dpif_netlink_recv, dpif_netlink_recv_wait, dpif_netlink_recv_purge, NULL, /* register_upcall_cb */ NULL, /* enable_upcall */ NULL, /* disable_upcall */ dpif_netlink_get_datapath_version, /* get_datapath_version */ }; static int dpif_netlink_init(void) { static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER; static int error; if (ovsthread_once_start(&once)) { error = nl_lookup_genl_family(OVS_DATAPATH_FAMILY, &ovs_datapath_family); if (error) { VLOG_ERR("Generic Netlink family '%s' does not exist. 
" "The Open vSwitch kernel module is probably not loaded.", OVS_DATAPATH_FAMILY); } if (!error) { error = nl_lookup_genl_family(OVS_VPORT_FAMILY, &ovs_vport_family); } if (!error) { error = nl_lookup_genl_family(OVS_FLOW_FAMILY, &ovs_flow_family); } if (!error) { error = nl_lookup_genl_family(OVS_PACKET_FAMILY, &ovs_packet_family); } if (!error) { error = nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY, OVS_VPORT_MCGROUP, &ovs_vport_mcgroup); } ovsthread_once_done(&once); } return error; } bool dpif_netlink_is_internal_device(const char *name) { struct dpif_netlink_vport reply; struct ofpbuf *buf; int error; error = dpif_netlink_vport_get(name, &reply, &buf); if (!error) { ofpbuf_delete(buf); } else if (error != ENODEV && error != ENOENT) { VLOG_WARN_RL(&error_rl, "%s: vport query failed (%s)", name, ovs_strerror(error)); } return reply.type == OVS_VPORT_TYPE_INTERNAL; } /* Parses the contents of 'buf', which contains a "struct ovs_header" followed * by Netlink attributes, into 'vport'. Returns 0 if successful, otherwise a * positive errno value. * * 'vport' will contain pointers into 'buf', so the caller should not free * 'buf' while 'vport' is still in use. */ static int dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *vport, const struct ofpbuf *buf) { static const struct nl_policy ovs_vport_policy[] = { [OVS_VPORT_ATTR_PORT_NO] = { .type = NL_A_U32 }, [OVS_VPORT_ATTR_TYPE] = { .type = NL_A_U32 }, [OVS_VPORT_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ }, [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NL_A_UNSPEC }, [OVS_VPORT_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_vport_stats), .optional = true }, [OVS_VPORT_ATTR_OPTIONS] = { .type = NL_A_NESTED, .optional = true }, }; struct nlattr *a[ARRAY_SIZE(ovs_vport_policy)]; struct ovs_header *ovs_header; struct nlmsghdr *nlmsg; struct genlmsghdr *genl; struct ofpbuf b; dpif_netlink_vport_init(vport); ofpbuf_use_const(&b, ofpbuf_data(buf), ofpbuf_size(buf)); nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg); genl = ofpbuf_try_pull(&b, sizeof *genl); ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header); if (!nlmsg || !genl || !ovs_header || nlmsg->nlmsg_type != ovs_vport_family || !nl_policy_parse(&b, 0, ovs_vport_policy, a, ARRAY_SIZE(ovs_vport_policy))) { return EINVAL; } vport->cmd = genl->cmd; vport->dp_ifindex = ovs_header->dp_ifindex; vport->port_no = nl_attr_get_odp_port(a[OVS_VPORT_ATTR_PORT_NO]); vport->type = nl_attr_get_u32(a[OVS_VPORT_ATTR_TYPE]); vport->name = nl_attr_get_string(a[OVS_VPORT_ATTR_NAME]); if (a[OVS_VPORT_ATTR_UPCALL_PID]) { vport->n_upcall_pids = nl_attr_get_size(a[OVS_VPORT_ATTR_UPCALL_PID]) / (sizeof *vport->upcall_pids); vport->upcall_pids = nl_attr_get(a[OVS_VPORT_ATTR_UPCALL_PID]); } if (a[OVS_VPORT_ATTR_STATS]) { vport->stats = nl_attr_get(a[OVS_VPORT_ATTR_STATS]); } if (a[OVS_VPORT_ATTR_OPTIONS]) { vport->options = nl_attr_get(a[OVS_VPORT_ATTR_OPTIONS]); vport->options_len = nl_attr_get_size(a[OVS_VPORT_ATTR_OPTIONS]); } return 0; } /* Appends to 'buf' (which must initially be empty) a "struct ovs_header" * followed by Netlink attributes corresponding to 'vport'. 
*/ static void dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *vport, struct ofpbuf *buf) { struct ovs_header *ovs_header; nl_msg_put_genlmsghdr(buf, 0, ovs_vport_family, NLM_F_REQUEST | NLM_F_ECHO, vport->cmd, OVS_VPORT_VERSION); ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header); ovs_header->dp_ifindex = vport->dp_ifindex; if (vport->port_no != ODPP_NONE) { nl_msg_put_odp_port(buf, OVS_VPORT_ATTR_PORT_NO, vport->port_no); } if (vport->type != OVS_VPORT_TYPE_UNSPEC) { nl_msg_put_u32(buf, OVS_VPORT_ATTR_TYPE, vport->type); } if (vport->name) { nl_msg_put_string(buf, OVS_VPORT_ATTR_NAME, vport->name); } if (vport->upcall_pids) { nl_msg_put_unspec(buf, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pids, vport->n_upcall_pids * sizeof *vport->upcall_pids); } if (vport->stats) { nl_msg_put_unspec(buf, OVS_VPORT_ATTR_STATS, vport->stats, sizeof *vport->stats); } if (vport->options) { nl_msg_put_nested(buf, OVS_VPORT_ATTR_OPTIONS, vport->options, vport->options_len); } } /* Clears 'vport' to "empty" values. */ void dpif_netlink_vport_init(struct dpif_netlink_vport *vport) { memset(vport, 0, sizeof *vport); vport->port_no = ODPP_NONE; } /* Executes 'request' in the kernel datapath. If the command fails, returns a * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the * result of the command is expected to be an ovs_vport also, which is decoded * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the * reply is no longer needed ('reply' will contain pointers into '*bufp'). */ int dpif_netlink_vport_transact(const struct dpif_netlink_vport *request, struct dpif_netlink_vport *reply, struct ofpbuf **bufp) { struct ofpbuf *request_buf; int error; ovs_assert((reply != NULL) == (bufp != NULL)); error = dpif_netlink_init(); if (error) { if (reply) { *bufp = NULL; dpif_netlink_vport_init(reply); } return error; } request_buf = ofpbuf_new(1024); dpif_netlink_vport_to_ofpbuf(request, request_buf); error = nl_transact(NETLINK_GENERIC, request_buf, bufp); ofpbuf_delete(request_buf); if (reply) { if (!error) { error = dpif_netlink_vport_from_ofpbuf(reply, *bufp); } if (error) { dpif_netlink_vport_init(reply); ofpbuf_delete(*bufp); *bufp = NULL; } } return error; } /* Obtains information about the kernel vport named 'name' and stores it into * '*reply' and '*bufp'. The caller must free '*bufp' when the reply is no * longer needed ('reply' will contain pointers into '*bufp'). */ int dpif_netlink_vport_get(const char *name, struct dpif_netlink_vport *reply, struct ofpbuf **bufp) { struct dpif_netlink_vport request; dpif_netlink_vport_init(&request); request.cmd = OVS_VPORT_CMD_GET; request.name = name; return dpif_netlink_vport_transact(&request, reply, bufp); } /* Parses the contents of 'buf', which contains a "struct ovs_header" followed * by Netlink attributes, into 'dp'. Returns 0 if successful, otherwise a * positive errno value. * * 'dp' will contain pointers into 'buf', so the caller should not free 'buf' * while 'dp' is still in use. 
*/ static int dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *dp, const struct ofpbuf *buf) { static const struct nl_policy ovs_datapath_policy[] = { [OVS_DP_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ }, [OVS_DP_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_dp_stats), .optional = true }, [OVS_DP_ATTR_MEGAFLOW_STATS] = { NL_POLICY_FOR(struct ovs_dp_megaflow_stats), .optional = true }, }; struct nlattr *a[ARRAY_SIZE(ovs_datapath_policy)]; struct ovs_header *ovs_header; struct nlmsghdr *nlmsg; struct genlmsghdr *genl; struct ofpbuf b; dpif_netlink_dp_init(dp); ofpbuf_use_const(&b, ofpbuf_data(buf), ofpbuf_size(buf)); nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg); genl = ofpbuf_try_pull(&b, sizeof *genl); ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header); if (!nlmsg || !genl || !ovs_header || nlmsg->nlmsg_type != ovs_datapath_family || !nl_policy_parse(&b, 0, ovs_datapath_policy, a, ARRAY_SIZE(ovs_datapath_policy))) { return EINVAL; } dp->cmd = genl->cmd; dp->dp_ifindex = ovs_header->dp_ifindex; dp->name = nl_attr_get_string(a[OVS_DP_ATTR_NAME]); if (a[OVS_DP_ATTR_STATS]) { dp->stats = nl_attr_get(a[OVS_DP_ATTR_STATS]); } if (a[OVS_DP_ATTR_MEGAFLOW_STATS]) { dp->megaflow_stats = nl_attr_get(a[OVS_DP_ATTR_MEGAFLOW_STATS]); } return 0; } /* Appends to 'buf' the Generic Netlink message described by 'dp'. */ static void dpif_netlink_dp_to_ofpbuf(const struct dpif_netlink_dp *dp, struct ofpbuf *buf) { struct ovs_header *ovs_header; nl_msg_put_genlmsghdr(buf, 0, ovs_datapath_family, NLM_F_REQUEST | NLM_F_ECHO, dp->cmd, OVS_DATAPATH_VERSION); ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header); ovs_header->dp_ifindex = dp->dp_ifindex; if (dp->name) { nl_msg_put_string(buf, OVS_DP_ATTR_NAME, dp->name); } if (dp->upcall_pid) { nl_msg_put_u32(buf, OVS_DP_ATTR_UPCALL_PID, *dp->upcall_pid); } if (dp->user_features) { nl_msg_put_u32(buf, OVS_DP_ATTR_USER_FEATURES, dp->user_features); } /* Skip OVS_DP_ATTR_STATS since we never have a reason to serialize it. */ } /* Clears 'dp' to "empty" values. */ static void dpif_netlink_dp_init(struct dpif_netlink_dp *dp) { memset(dp, 0, sizeof *dp); } static void dpif_netlink_dp_dump_start(struct nl_dump *dump) { struct dpif_netlink_dp request; struct ofpbuf *buf; dpif_netlink_dp_init(&request); request.cmd = OVS_DP_CMD_GET; buf = ofpbuf_new(1024); dpif_netlink_dp_to_ofpbuf(&request, buf); nl_dump_start(dump, NETLINK_GENERIC, buf); ofpbuf_delete(buf); } /* Executes 'request' in the kernel datapath. If the command fails, returns a * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the * result of the command is expected to be of the same form, which is decoded * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the * reply is no longer needed ('reply' will contain pointers into '*bufp'). */ static int dpif_netlink_dp_transact(const struct dpif_netlink_dp *request, struct dpif_netlink_dp *reply, struct ofpbuf **bufp) { struct ofpbuf *request_buf; int error; ovs_assert((reply != NULL) == (bufp != NULL)); request_buf = ofpbuf_new(1024); dpif_netlink_dp_to_ofpbuf(request, request_buf); error = nl_transact(NETLINK_GENERIC, request_buf, bufp); ofpbuf_delete(request_buf); if (reply) { dpif_netlink_dp_init(reply); if (!error) { error = dpif_netlink_dp_from_ofpbuf(reply, *bufp); } if (error) { ofpbuf_delete(*bufp); *bufp = NULL; } } return error; } /* Obtains information about 'dpif_' and stores it into '*reply' and '*bufp'. 
* The caller must free '*bufp' when the reply is no longer needed ('reply' * will contain pointers into '*bufp'). */ static int dpif_netlink_dp_get(const struct dpif *dpif_, struct dpif_netlink_dp *reply, struct ofpbuf **bufp) { struct dpif_netlink *dpif = dpif_netlink_cast(dpif_); struct dpif_netlink_dp request; dpif_netlink_dp_init(&request); request.cmd = OVS_DP_CMD_GET; request.dp_ifindex = dpif->dp_ifindex; return dpif_netlink_dp_transact(&request, reply, bufp); } /* Parses the contents of 'buf', which contains a "struct ovs_header" followed * by Netlink attributes, into 'flow'. Returns 0 if successful, otherwise a * positive errno value. * * 'flow' will contain pointers into 'buf', so the caller should not free 'buf' * while 'flow' is still in use. */ static int dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *flow, const struct ofpbuf *buf) { static const struct nl_policy ovs_flow_policy[] = { [OVS_FLOW_ATTR_KEY] = { .type = NL_A_NESTED }, [OVS_FLOW_ATTR_MASK] = { .type = NL_A_NESTED, .optional = true }, [OVS_FLOW_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true }, [OVS_FLOW_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats), .optional = true }, [OVS_FLOW_ATTR_TCP_FLAGS] = { .type = NL_A_U8, .optional = true }, [OVS_FLOW_ATTR_USED] = { .type = NL_A_U64, .optional = true }, /* The kernel never uses OVS_FLOW_ATTR_CLEAR. */ /* The kernel never uses OVS_FLOW_ATTR_PROBE. */ }; struct nlattr *a[ARRAY_SIZE(ovs_flow_policy)]; struct ovs_header *ovs_header; struct nlmsghdr *nlmsg; struct genlmsghdr *genl; struct ofpbuf b; dpif_netlink_flow_init(flow); ofpbuf_use_const(&b, ofpbuf_data(buf), ofpbuf_size(buf)); nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg); genl = ofpbuf_try_pull(&b, sizeof *genl); ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header); if (!nlmsg || !genl || !ovs_header || nlmsg->nlmsg_type != ovs_flow_family || !nl_policy_parse(&b, 0, ovs_flow_policy, a, ARRAY_SIZE(ovs_flow_policy))) { return EINVAL; } flow->nlmsg_flags = nlmsg->nlmsg_flags; flow->dp_ifindex = ovs_header->dp_ifindex; flow->key = nl_attr_get(a[OVS_FLOW_ATTR_KEY]); flow->key_len = nl_attr_get_size(a[OVS_FLOW_ATTR_KEY]); if (a[OVS_FLOW_ATTR_MASK]) { flow->mask = nl_attr_get(a[OVS_FLOW_ATTR_MASK]); flow->mask_len = nl_attr_get_size(a[OVS_FLOW_ATTR_MASK]); } if (a[OVS_FLOW_ATTR_ACTIONS]) { flow->actions = nl_attr_get(a[OVS_FLOW_ATTR_ACTIONS]); flow->actions_len = nl_attr_get_size(a[OVS_FLOW_ATTR_ACTIONS]); } if (a[OVS_FLOW_ATTR_STATS]) { flow->stats = nl_attr_get(a[OVS_FLOW_ATTR_STATS]); } if (a[OVS_FLOW_ATTR_TCP_FLAGS]) { flow->tcp_flags = nl_attr_get(a[OVS_FLOW_ATTR_TCP_FLAGS]); } if (a[OVS_FLOW_ATTR_USED]) { flow->used = nl_attr_get(a[OVS_FLOW_ATTR_USED]); } return 0; } /* Appends to 'buf' (which must initially be empty) a "struct ovs_header" * followed by Netlink attributes corresponding to 'flow'. */ static void dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *flow, struct ofpbuf *buf) { struct ovs_header *ovs_header; nl_msg_put_genlmsghdr(buf, 0, ovs_flow_family, NLM_F_REQUEST | flow->nlmsg_flags, flow->cmd, OVS_FLOW_VERSION); ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header); ovs_header->dp_ifindex = flow->dp_ifindex; if (flow->key_len) { nl_msg_put_unspec(buf, OVS_FLOW_ATTR_KEY, flow->key, flow->key_len); } if (flow->mask_len) { nl_msg_put_unspec(buf, OVS_FLOW_ATTR_MASK, flow->mask, flow->mask_len); } if (flow->actions || flow->actions_len) { nl_msg_put_unspec(buf, OVS_FLOW_ATTR_ACTIONS, flow->actions, flow->actions_len); } /* We never need to send these to the kernel. 
*/ ovs_assert(!flow->stats); ovs_assert(!flow->tcp_flags); ovs_assert(!flow->used); if (flow->clear) { nl_msg_put_flag(buf, OVS_FLOW_ATTR_CLEAR); } if (flow->probe) { nl_msg_put_flag(buf, OVS_FLOW_ATTR_PROBE); } } /* Clears 'flow' to "empty" values. */ static void dpif_netlink_flow_init(struct dpif_netlink_flow *flow) { memset(flow, 0, sizeof *flow); } /* Executes 'request' in the kernel datapath. If the command fails, returns a * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the * result of the command is expected to be a flow also, which is decoded and * stored in '*reply' and '*bufp'. The caller must free '*bufp' when the reply * is no longer needed ('reply' will contain pointers into '*bufp'). */ static int dpif_netlink_flow_transact(struct dpif_netlink_flow *request, struct dpif_netlink_flow *reply, struct ofpbuf **bufp) { struct ofpbuf *request_buf; int error; ovs_assert((reply != NULL) == (bufp != NULL)); if (reply) { request->nlmsg_flags |= NLM_F_ECHO; } request_buf = ofpbuf_new(1024); dpif_netlink_flow_to_ofpbuf(request, request_buf); error = nl_transact(NETLINK_GENERIC, request_buf, bufp); ofpbuf_delete(request_buf); if (reply) { if (!error) { error = dpif_netlink_flow_from_ofpbuf(reply, *bufp); } if (error) { dpif_netlink_flow_init(reply); ofpbuf_delete(*bufp); *bufp = NULL; } } return error; } static void dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *flow, struct dpif_flow_stats *stats) { if (flow->stats) { stats->n_packets = get_32aligned_u64(&flow->stats->n_packets); stats->n_bytes = get_32aligned_u64(&flow->stats->n_bytes); } else { stats->n_packets = 0; stats->n_bytes = 0; } stats->used = flow->used ? get_32aligned_u64(flow->used) : 0; stats->tcp_flags = flow->tcp_flags ? *flow->tcp_flags : 0; } /* Logs information about a packet that was recently lost in 'ch' (in * 'dpif_'). */ static void report_loss(struct dpif_netlink *dpif, struct dpif_channel *ch, uint32_t ch_idx, uint32_t handler_id) { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5); struct ds s; if (VLOG_DROP_WARN(&rl)) { return; } ds_init(&s); if (ch->last_poll != LLONG_MIN) { ds_put_format(&s, " (last polled %lld ms ago)", time_msec() - ch->last_poll); } VLOG_WARN("%s: lost packet on port channel %u of handler %u", dpif_name(&dpif->dpif), ch_idx, handler_id); ds_destroy(&s); }
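/* A minimal standalone sketch (not part of dpif-netlink.c): dpif_netlink_operate()
 * above splits an arbitrary number of operations into batches of at most MAX_OPS
 * before handing each batch to dpif_netlink_operate__(), which issues it as a
 * single nl_transact_multiple() call.  The program below shows the same chunking
 * loop on a plain integer array; process_chunk(), CHUNK_MAX, and N_ITEMS are
 * hypothetical names used only for this illustration. */
#include <stdio.h>
#include <stddef.h>

#define CHUNK_MAX 50            /* plays the role of MAX_OPS */
#define N_ITEMS 123

static void
process_chunk(const int *items, size_t n)
{
    /* Stand-in for dpif_netlink_operate__(): handles at most CHUNK_MAX items. */
    printf("processing batch of %zu items starting at value %d\n", n, items[0]);
}

int
main(void)
{
    int items[N_ITEMS];
    size_t n = N_ITEMS;
    int *p = items;
    size_t i;

    for (i = 0; i < N_ITEMS; i++) {
        items[i] = (int) i;
    }

    /* Same loop shape as dpif_netlink_operate(). */
    while (n > 0) {
        size_t chunk = n < CHUNK_MAX ? n : CHUNK_MAX;
        process_chunk(p, chunk);
        p += chunk;
        n -= chunk;
    }
    return 0;
}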
854417.c
/* * Copyright 2021 Comcast Cable Communications Management, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ // // Created by tlea on 7/9/19. // #include <stdint.h> #include <pthread.h> #include <propsMgr/propsHelper.h> #include <propsMgr/commonProperties.h> #include <icLog/logging.h> #include <zhal/zhal.h> #include <ipc/deviceEventProducer.h> #include "zigbeeHealthCheck.h" #define LOG_TAG "zigbeeHealthCheck" // dont allow health checking faster than this #define MIN_NETWORK_HEALTH_CHECK_INTERVAL_MILLIS 1000 //default to off #define NETWORK_HEALTH_CHECK_INTERVAL_MILLIS_DEFAULT 0 // positive values dont make sense and are used to disable adjusting the CCA threshold #define NETWORK_HEALTH_CHECK_CCA_THRESHOLD_DEFAULT 1 #define NETWORK_HEALTH_CHECK_CCA_FAILURE_THRESHOLD_DEFAULT 10 #define NETWORK_HEALTH_CHECK_RESTORE_THRESHOLD_DEFAULT 600 #define NETWORK_HEALTH_CHECK_DELAY_BETWEEN_THRESHOLD_RETRIES_MILLIS_DEFAULT 1000 static pthread_mutex_t interferenceDetectedMtx = PTHREAD_MUTEX_INITIALIZER; static bool interferenceDetected = false; void zigbeeHealthCheckStart() { uint32_t intervalMillis = getPropertyAsUInt32(ZIGBEE_HEALTH_CHECK_INTERVAL_MILLIS, NETWORK_HEALTH_CHECK_INTERVAL_MILLIS_DEFAULT); if(intervalMillis == 0) { icLogDebug(LOG_TAG, "%s: not monitoring, feature disabled", __FUNCTION__); zigbeeHealthCheckStop(); //if there was interference before, we need to send a clear event since we are stopping monitoring pthread_mutex_lock(&interferenceDetectedMtx); if(interferenceDetected) { interferenceDetected = false; sendZigbeeNetworkInterferenceEvent(false); } pthread_mutex_unlock(&interferenceDetectedMtx); } else { if(intervalMillis < MIN_NETWORK_HEALTH_CHECK_INTERVAL_MILLIS) { icLogWarn(LOG_TAG, "%s: Attempt to set network health check intervalMillis to %"PRIu32" is below minimum, using %"PRIu32, __FUNCTION__, intervalMillis, MIN_NETWORK_HEALTH_CHECK_INTERVAL_MILLIS); intervalMillis = MIN_NETWORK_HEALTH_CHECK_INTERVAL_MILLIS; } int32_t ccaThreshold = getPropertyAsInt32(ZIGBEE_HEALTH_CHECK_CCA_THRESHOLD, NETWORK_HEALTH_CHECK_CCA_THRESHOLD_DEFAULT); uint32_t ccaFailureThreshold = getPropertyAsUInt32(ZIGBEE_HEALTH_CHECK_CCA_FAILURE_THRESHOLD, NETWORK_HEALTH_CHECK_CCA_FAILURE_THRESHOLD_DEFAULT); uint32_t restoreThreshold = getPropertyAsUInt32(ZIGBEE_HEALTH_CHECK_RESTORE_THRESHOLD, NETWORK_HEALTH_CHECK_RESTORE_THRESHOLD_DEFAULT); uint32_t delayBetweenRetriesMillis = getPropertyAsUInt32(ZIGBEE_HEALTH_CHECK_DELAY_BETWEEN_THRESHOLD_RETRIES_MILLIS, NETWORK_HEALTH_CHECK_DELAY_BETWEEN_THRESHOLD_RETRIES_MILLIS_DEFAULT); if(zhalConfigureNetworkHealthCheck(intervalMillis, ccaThreshold, ccaFailureThreshold, restoreThreshold, delayBetweenRetriesMillis) == false) { icLogError(LOG_TAG, "%s: failed to start network health checking", __FUNCTION__); } } } void zigbeeHealthCheckStop() { icLogDebug(LOG_TAG, "%s", __FUNCTION__); if(zhalConfigureNetworkHealthCheck(0, 0, 0, 0, 0) == false) { icLogError(LOG_TAG, "%s: failed to stop network health checking", __FUNCTION__); } } void 
zigbeeHealthCheckSetProblem(bool problemExists) { icLogDebug(LOG_TAG, "%s: problemExists = %s", __FUNCTION__, problemExists ? "true" : "false"); pthread_mutex_lock(&interferenceDetectedMtx); interferenceDetected = problemExists; pthread_mutex_unlock(&interferenceDetectedMtx); sendZigbeeNetworkInterferenceEvent(problemExists); }
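/* A minimal standalone sketch (not part of zigbeeHealthCheck.c): the code above
 * guards the interferenceDetected flag with a mutex and, when monitoring is
 * disabled, sends a "cleared" event only if interference had previously been
 * reported.  The program below isolates that edge-triggered pattern: report
 * state transitions, never repeats of the same state.  set_problem() and
 * emit_event() are hypothetical names for this illustration only. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t problem_mtx = PTHREAD_MUTEX_INITIALIZER;
static bool problem_active = false;

static void
emit_event(bool active)
{
    printf("interference event: %s\n", active ? "detected" : "cleared");
}

/* Report only transitions of the flag, under the mutex. */
static void
set_problem(bool active)
{
    pthread_mutex_lock(&problem_mtx);
    if (problem_active != active) {
        problem_active = active;
        emit_event(active);
    }
    pthread_mutex_unlock(&problem_mtx);
}

int
main(void)
{
    set_problem(true);   /* detected */
    set_problem(true);   /* no state change: suppressed */
    set_problem(false);  /* cleared */
    return 0;
}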
873351.c
#include <stdio.h>
#include <ctype.h>
#include <string.h>

//convert string to int
int atoi(char s[]);
int Atoi(char s[]);
int lower(int);
void shellsort(int [], int);
void reverse(char []);

int main()
{
    char str[] = "hello";
    printf("%d \n", Atoi(str));

    char k = 'K';
    printf("%c \n", lower(k));

    reverse(str);
    printf("%s \n", str);

    return 0;
}

//Supposed to convert string s to int. Doesn't work
int atoi(char s[])
{
    int i, n, sign;

    for (i = 0; isspace(s[i]); i++)
        ;
    sign = (s[i] == '-') ? -1 : 1;
    if (s[i] == '+' || s[i] == '-')
        i++;
    for (n = 0; isdigit(s[i]); i++)
        n = 10 * n + (s[i] - '0');
    return sign * n;
}

//From chapter 2
int Atoi(char s[])
{
    int i, n;

    n = 0;
    for (i = 0; s[i] >= '0' && s[i] <= '9'; ++i)
        n = 10 * n + (s[i] - '0');
    return n;
}

//convert c to lower case
int lower(int c)
{
    if (c >= 'A' && c <= 'Z')
        return c + 'a' - 'A';
    else
        return c;
}

//shellsort
void shellsort(int v[], int n)
{
    int gap, i, j, temp;

    for (gap = n / 2; gap > 0; gap /= 2)
        for (i = gap; i < n; i++)
            for (j = i - gap; j >= 0 && v[j] > v[j + gap]; j -= gap) {
                temp = v[j];
                v[j] = v[j + gap];
                v[j + gap] = temp;
            }
}

//supposed to reverse string. Doesn't really work
void reverse(char s[])
{
    int c, i, j;

    for (i = 0, j = strlen(s) - 1; i < j; i++, j--) {
        c = s[i];
        s[i] = s[j];
        s[j] = c;
    }
}
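/* A minimal standalone sketch (separate from the file above): shellsort() is
 * declared and defined there but never called from main().  This snippet shows
 * one way it could be exercised; it restates the same shellsort body so the
 * example compiles on its own. */
#include <stdio.h>

static void
shellsort_demo(int v[], int n)
{
    int gap, i, j, temp;

    for (gap = n / 2; gap > 0; gap /= 2)
        for (i = gap; i < n; i++)
            for (j = i - gap; j >= 0 && v[j] > v[j + gap]; j -= gap) {
                temp = v[j];
                v[j] = v[j + gap];
                v[j + gap] = temp;
            }
}

int
main(void)
{
    int v[] = { 9, 4, 7, 1, 8, 3 };
    int n = sizeof v / sizeof v[0];
    int i;

    shellsort_demo(v, n);
    for (i = 0; i < n; i++)
        printf("%d ", v[i]);    /* prints: 1 3 4 7 8 9 */
    printf("\n");
    return 0;
}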
91759.c
/* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright 2008 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ /* Copyright (c) 1988 AT&T */ /* All Rights Reserved */ #pragma ident "%Z%%M% %I% %E% SMI" /* * Check to see if a file descriptor is that of a stream. * Return 1 with errno set to 0 if it is. Otherwise, * return 0 with errno set to 0. * The only error returned is that case of a bad file desc. * */ #pragma weak _isastream = isastream #include "lint.h" #include <sys/types.h> #include <stdio.h> #include <errno.h> #include <stropts.h> int isastream(int fd) { int rval; rval = ioctl(fd, I_CANPUT, 0); if (rval == -1 && errno == EBADF) return (-1); errno = 0; if (rval == 0 || rval == 1) return (1); return (0); }
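/* A minimal standalone caller sketch (not part of the Solaris source above),
 * using isastream() exactly as documented there: 1 means the descriptor is a
 * STREAM, 0 means it is not, and -1 with errno == EBADF means the descriptor
 * itself is bad.  It assumes a platform that still provides STREAMS and
 * <stropts.h>. */
#include <stdio.h>
#include <errno.h>
#include <stropts.h>

int
main(void)
{
    int rc = isastream(0);      /* test standard input */

    if (rc == 1)
        printf("fd 0 is a STREAM\n");
    else if (rc == 0)
        printf("fd 0 is not a STREAM\n");
    else
        printf("isastream failed: errno=%d\n", errno);
    return 0;
}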
246860.c
/* https://stackoverflow.com/a/35695762 */
/*
 run as
 printf 'dob;policy_issue_date;policy_status_code;policy_status_date\n' > linhas.txt
 printf '1982-11-17;2010-01-01;1;\n' >> linhas.txt
 printf '1977-06-23;2012-03-04;3;2015-09-17\n' >> linhas.txt
 cat linhas.txt | ./getline
*/
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* pointer to the character sequence read for each line */
    char *line = NULL;
    size_t len = 0;
    ssize_t read = 0;

    while (1) {
        read = getline(&line, &len, stdin);
        if (read == -1)
            break;
        printf("line = %s", line);
        printf("line length = %zd\n", read);   /* %zd: 'read' is ssize_t */
        puts("");
    }

    /* free the memory getline() allocated for 'line' */
    free(line);

    return EXIT_SUCCESS;
}
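/* A companion sketch (not part of the example above): once a line such as
 * "1982-11-17;2010-01-01;1;" has been read, its fields can be split on ';'.
 * strsep() is used instead of strtok() because it preserves empty fields --
 * the trailing policy_status_date can be empty -- but note that strsep() is a
 * BSD/glibc extension rather than ISO C. */
#include <stdio.h>
#include <string.h>

int
main(void)
{
    char line[] = "1982-11-17;2010-01-01;1;";
    char *cursor = line;
    char *field;
    int i = 0;

    /* Yields four fields; the last one is the empty string. */
    while ((field = strsep(&cursor, ";")) != NULL) {
        printf("field %d = \"%s\"\n", i++, field);
    }
    return 0;
}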
70197.c
/* BFD library support routines for the Z800n architecture. Copyright 1992, 1993, 1994, 2000, 2001, 2002, 2003 Free Software Foundation, Inc. Hacked by Steve Chamberlain of Cygnus Support. This file is part of BFD, the Binary File Descriptor library. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ #include "bfd.h" #include "sysdep.h" #include "libbfd.h" /* This routine is provided two arch_infos and returns whether they'd be compatible */ static const bfd_arch_info_type * compatible (const bfd_arch_info_type *a, const bfd_arch_info_type *b) { if (a->arch != b->arch || a->mach != b->mach) return NULL; return a; } static const bfd_arch_info_type arch_info_struct[] = { { 32, 16, 8, bfd_arch_z8k, bfd_mach_z8002, "z8k", "z8002", 1, FALSE, compatible, bfd_default_scan, 0 } }; const bfd_arch_info_type bfd_z8k_arch = { 32, 32, 8, bfd_arch_z8k, bfd_mach_z8001, "z8k", "z8001", 1, TRUE, compatible, bfd_default_scan, &arch_info_struct[0] };
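/* A minimal standalone sketch (not part of the BFD source above): compatible()
 * simply returns its first argument when both the architecture and machine
 * fields match, and NULL otherwise.  The mock type below reproduces only the
 * two fields needed to show that rule; it is not the real bfd_arch_info_type. */
#include <stdio.h>
#include <stddef.h>

struct mock_arch_info {
    int arch;
    int mach;
};

static const struct mock_arch_info *
mock_compatible(const struct mock_arch_info *a, const struct mock_arch_info *b)
{
    if (a->arch != b->arch || a->mach != b->mach)
        return NULL;
    return a;
}

int
main(void)
{
    struct mock_arch_info z8001 = { 1, 8001 };
    struct mock_arch_info z8002 = { 1, 8002 };

    printf("z8001 vs z8001: %s\n",
           mock_compatible(&z8001, &z8001) ? "compatible" : "not compatible");
    printf("z8001 vs z8002: %s\n",
           mock_compatible(&z8001, &z8002) ? "compatible" : "not compatible");
    return 0;
}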
301699.c
/* $OpenBSD: tasn_enc.c,v 1.15 2015/02/14 15:21:49 miod Exp $ */ /* Written by Dr Stephen N Henson ([email protected]) for the OpenSSL * project 2000. */ /* ==================================================================== * Copyright (c) 2000-2004 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * [email protected]. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * ([email protected]). This product includes software written by Tim * Hudson ([email protected]). 
* */ #include <stddef.h> #include <string.h> #include <openssl/asn1.h> #include <openssl/asn1t.h> #include <openssl/objects.h> static int asn1_i2d_ex_primitive(ASN1_VALUE **pval, unsigned char **out, const ASN1_ITEM *it, int tag, int aclass); static int asn1_set_seq_out(STACK_OF(ASN1_VALUE) *sk, unsigned char **out, int skcontlen, const ASN1_ITEM *item, int do_sort, int iclass); static int asn1_template_ex_i2d(ASN1_VALUE **pval, unsigned char **out, const ASN1_TEMPLATE *tt, int tag, int aclass); static int asn1_item_flags_i2d(ASN1_VALUE *val, unsigned char **out, const ASN1_ITEM *it, int flags); static int asn1_ex_i2c(ASN1_VALUE **pval, unsigned char *cout, int *putype, const ASN1_ITEM *it); /* Top level i2d equivalents: the 'ndef' variant instructs the encoder * to use indefinite length constructed encoding, where appropriate */ int ASN1_item_ndef_i2d(ASN1_VALUE *val, unsigned char **out, const ASN1_ITEM *it) { return asn1_item_flags_i2d(val, out, it, ASN1_TFLG_NDEF); } int ASN1_item_i2d(ASN1_VALUE *val, unsigned char **out, const ASN1_ITEM *it) { return asn1_item_flags_i2d(val, out, it, 0); } /* Encode an ASN1 item, this is use by the * standard 'i2d' function. 'out' points to * a buffer to output the data to. * * The new i2d has one additional feature. If the output * buffer is NULL (i.e. *out == NULL) then a buffer is * allocated and populated with the encoding. */ static int asn1_item_flags_i2d(ASN1_VALUE *val, unsigned char **out, const ASN1_ITEM *it, int flags) { if (out && !*out) { unsigned char *p, *buf; int len; len = ASN1_item_ex_i2d(&val, NULL, it, -1, flags); if (len <= 0) return len; buf = malloc(len); if (!buf) return -1; p = buf; ASN1_item_ex_i2d(&val, &p, it, -1, flags); *out = buf; return len; } return ASN1_item_ex_i2d(&val, out, it, -1, flags); } /* Encode an item, taking care of IMPLICIT tagging (if any). * This function performs the normal item handling: it can be * used in external types. */ int ASN1_item_ex_i2d(ASN1_VALUE **pval, unsigned char **out, const ASN1_ITEM *it, int tag, int aclass) { const ASN1_TEMPLATE *tt = NULL; int i, seqcontlen, seqlen, ndef = 1; const ASN1_EXTERN_FUNCS *ef; const ASN1_AUX *aux = it->funcs; ASN1_aux_cb *asn1_cb = 0; if ((it->itype != ASN1_ITYPE_PRIMITIVE) && !*pval) return 0; if (aux && aux->asn1_cb) asn1_cb = aux->asn1_cb; switch (it->itype) { case ASN1_ITYPE_PRIMITIVE: if (it->templates) return asn1_template_ex_i2d(pval, out, it->templates, tag, aclass); return asn1_i2d_ex_primitive(pval, out, it, tag, aclass); break; case ASN1_ITYPE_MSTRING: return asn1_i2d_ex_primitive(pval, out, it, -1, aclass); case ASN1_ITYPE_CHOICE: if (asn1_cb && !asn1_cb(ASN1_OP_I2D_PRE, pval, it, NULL)) return 0; i = asn1_get_choice_selector(pval, it); if ((i >= 0) && (i < it->tcount)) { ASN1_VALUE **pchval; const ASN1_TEMPLATE *chtt; chtt = it->templates + i; pchval = asn1_get_field_ptr(pval, chtt); return asn1_template_ex_i2d(pchval, out, chtt, -1, aclass); } /* Fixme: error condition if selector out of range */ if (asn1_cb && !asn1_cb(ASN1_OP_I2D_POST, pval, it, NULL)) return 0; break; case ASN1_ITYPE_EXTERN: /* If new style i2d it does all the work */ ef = it->funcs; return ef->asn1_ex_i2d(pval, out, it, tag, aclass); case ASN1_ITYPE_NDEF_SEQUENCE: /* Use indefinite length constructed if requested */ if (aclass & ASN1_TFLG_NDEF) ndef = 2; /* fall through */ case ASN1_ITYPE_SEQUENCE: i = asn1_enc_restore(&seqcontlen, out, pval, it); /* An error occurred */ if (i < 0) return 0; /* We have a valid cached encoding... 
*/ if (i > 0) return seqcontlen; /* Otherwise carry on */ seqcontlen = 0; /* If no IMPLICIT tagging set to SEQUENCE, UNIVERSAL */ if (tag == -1) { tag = V_ASN1_SEQUENCE; /* Retain any other flags in aclass */ aclass = (aclass & ~ASN1_TFLG_TAG_CLASS) | V_ASN1_UNIVERSAL; } if (asn1_cb && !asn1_cb(ASN1_OP_I2D_PRE, pval, it, NULL)) return 0; /* First work out sequence content length */ for (i = 0, tt = it->templates; i < it->tcount; tt++, i++) { const ASN1_TEMPLATE *seqtt; ASN1_VALUE **pseqval; seqtt = asn1_do_adb(pval, tt, 1); if (!seqtt) return 0; pseqval = asn1_get_field_ptr(pval, seqtt); /* FIXME: check for errors in enhanced version */ seqcontlen += asn1_template_ex_i2d(pseqval, NULL, seqtt, -1, aclass); } seqlen = ASN1_object_size(ndef, seqcontlen, tag); if (!out) return seqlen; /* Output SEQUENCE header */ ASN1_put_object(out, ndef, seqcontlen, tag, aclass); for (i = 0, tt = it->templates; i < it->tcount; tt++, i++) { const ASN1_TEMPLATE *seqtt; ASN1_VALUE **pseqval; seqtt = asn1_do_adb(pval, tt, 1); if (!seqtt) return 0; pseqval = asn1_get_field_ptr(pval, seqtt); /* FIXME: check for errors in enhanced version */ asn1_template_ex_i2d(pseqval, out, seqtt, -1, aclass); } if (ndef == 2) ASN1_put_eoc(out); if (asn1_cb && !asn1_cb(ASN1_OP_I2D_POST, pval, it, NULL)) return 0; return seqlen; default: return 0; } return 0; } int ASN1_template_i2d(ASN1_VALUE **pval, unsigned char **out, const ASN1_TEMPLATE *tt) { return asn1_template_ex_i2d(pval, out, tt, -1, 0); } static int asn1_template_ex_i2d(ASN1_VALUE **pval, unsigned char **out, const ASN1_TEMPLATE *tt, int tag, int iclass) { int i, ret, flags, ttag, tclass, ndef; flags = tt->flags; /* Work out tag and class to use: tagging may come * either from the template or the arguments, not both * because this would create ambiguity. Additionally * the iclass argument may contain some additional flags * which should be noted and passed down to other levels. */ if (flags & ASN1_TFLG_TAG_MASK) { /* Error if argument and template tagging */ if (tag != -1) /* FIXME: error code here */ return -1; /* Get tagging from template */ ttag = tt->tag; tclass = flags & ASN1_TFLG_TAG_CLASS; } else if (tag != -1) { /* No template tagging, get from arguments */ ttag = tag; tclass = iclass & ASN1_TFLG_TAG_CLASS; } else { ttag = -1; tclass = 0; } /* * Remove any class mask from iflag. */ iclass &= ~ASN1_TFLG_TAG_CLASS; /* At this point 'ttag' contains the outer tag to use, * 'tclass' is the class and iclass is any flags passed * to this function. */ /* if template and arguments require ndef, use it */ if ((flags & ASN1_TFLG_NDEF) && (iclass & ASN1_TFLG_NDEF)) ndef = 2; else ndef = 1; if (flags & ASN1_TFLG_SK_MASK) { /* SET OF, SEQUENCE OF */ STACK_OF(ASN1_VALUE) *sk = (STACK_OF(ASN1_VALUE) *)*pval; int isset, sktag, skaclass; int skcontlen, sklen; ASN1_VALUE *skitem; if (!*pval) return 0; if (flags & ASN1_TFLG_SET_OF) { isset = 1; /* 2 means we reorder */ if (flags & ASN1_TFLG_SEQUENCE_OF) isset = 2; } else isset = 0; /* Work out inner tag value: if EXPLICIT * or no tagging use underlying type. 
*/ if ((ttag != -1) && !(flags & ASN1_TFLG_EXPTAG)) { sktag = ttag; skaclass = tclass; } else { skaclass = V_ASN1_UNIVERSAL; if (isset) sktag = V_ASN1_SET; else sktag = V_ASN1_SEQUENCE; } /* Determine total length of items */ skcontlen = 0; for (i = 0; i < sk_ASN1_VALUE_num(sk); i++) { skitem = sk_ASN1_VALUE_value(sk, i); skcontlen += ASN1_item_ex_i2d(&skitem, NULL, ASN1_ITEM_ptr(tt->item), -1, iclass); } sklen = ASN1_object_size(ndef, skcontlen, sktag); /* If EXPLICIT need length of surrounding tag */ if (flags & ASN1_TFLG_EXPTAG) ret = ASN1_object_size(ndef, sklen, ttag); else ret = sklen; if (!out) return ret; /* Now encode this lot... */ /* EXPLICIT tag */ if (flags & ASN1_TFLG_EXPTAG) ASN1_put_object(out, ndef, sklen, ttag, tclass); /* SET or SEQUENCE and IMPLICIT tag */ ASN1_put_object(out, ndef, skcontlen, sktag, skaclass); /* And the stuff itself */ asn1_set_seq_out(sk, out, skcontlen, ASN1_ITEM_ptr(tt->item), isset, iclass); if (ndef == 2) { ASN1_put_eoc(out); if (flags & ASN1_TFLG_EXPTAG) ASN1_put_eoc(out); } return ret; } if (flags & ASN1_TFLG_EXPTAG) { /* EXPLICIT tagging */ /* Find length of tagged item */ i = ASN1_item_ex_i2d(pval, NULL, ASN1_ITEM_ptr(tt->item), -1, iclass); if (!i) return 0; /* Find length of EXPLICIT tag */ ret = ASN1_object_size(ndef, i, ttag); if (out) { /* Output tag and item */ ASN1_put_object(out, ndef, i, ttag, tclass); ASN1_item_ex_i2d(pval, out, ASN1_ITEM_ptr(tt->item), -1, iclass); if (ndef == 2) ASN1_put_eoc(out); } return ret; } /* Either normal or IMPLICIT tagging: combine class and flags */ return ASN1_item_ex_i2d(pval, out, ASN1_ITEM_ptr(tt->item), ttag, tclass | iclass); } /* Temporary structure used to hold DER encoding of items for SET OF */ typedef struct { unsigned char *data; int length; ASN1_VALUE *field; } DER_ENC; static int der_cmp(const void *a, const void *b) { const DER_ENC *d1 = a, *d2 = b; int cmplen, i; cmplen = (d1->length < d2->length) ? 
d1->length : d2->length; i = memcmp(d1->data, d2->data, cmplen); if (i) return i; return d1->length - d2->length; } /* Output the content octets of SET OF or SEQUENCE OF */ static int asn1_set_seq_out(STACK_OF(ASN1_VALUE) *sk, unsigned char **out, int skcontlen, const ASN1_ITEM *item, int do_sort, int iclass) { int i; ASN1_VALUE *skitem; unsigned char *tmpdat = NULL, *p = NULL; DER_ENC *derlst = NULL, *tder; if (do_sort) { /* Don't need to sort less than 2 items */ if (sk_ASN1_VALUE_num(sk) < 2) do_sort = 0; else { derlst = reallocarray(NULL, sk_ASN1_VALUE_num(sk), sizeof(*derlst)); tmpdat = malloc(skcontlen); if (!derlst || !tmpdat) { free(derlst); free(tmpdat); return 0; } } } /* If not sorting just output each item */ if (!do_sort) { for (i = 0; i < sk_ASN1_VALUE_num(sk); i++) { skitem = sk_ASN1_VALUE_value(sk, i); ASN1_item_ex_i2d(&skitem, out, item, -1, iclass); } return 1; } p = tmpdat; /* Doing sort: build up a list of each member's DER encoding */ for (i = 0, tder = derlst; i < sk_ASN1_VALUE_num(sk); i++, tder++) { skitem = sk_ASN1_VALUE_value(sk, i); tder->data = p; tder->length = ASN1_item_ex_i2d(&skitem, &p, item, -1, iclass); tder->field = skitem; } /* Now sort them */ qsort(derlst, sk_ASN1_VALUE_num(sk), sizeof(*derlst), der_cmp); /* Output sorted DER encoding */ p = *out; for (i = 0, tder = derlst; i < sk_ASN1_VALUE_num(sk); i++, tder++) { memcpy(p, tder->data, tder->length); p += tder->length; } *out = p; /* If do_sort is 2 then reorder the STACK */ if (do_sort == 2) { for (i = 0, tder = derlst; i < sk_ASN1_VALUE_num(sk); i++, tder++) (void)sk_ASN1_VALUE_set(sk, i, tder->field); } free(derlst); free(tmpdat); return 1; } static int asn1_i2d_ex_primitive(ASN1_VALUE **pval, unsigned char **out, const ASN1_ITEM *it, int tag, int aclass) { int len; int utype; int usetag; int ndef = 0; utype = it->utype; /* Get length of content octets and maybe find * out the underlying type. */ len = asn1_ex_i2c(pval, NULL, &utype, it); /* If SEQUENCE, SET or OTHER then header is * included in pseudo content octets so don't * include tag+length. We need to check here * because the call to asn1_ex_i2c() could change * utype. */ if ((utype == V_ASN1_SEQUENCE) || (utype == V_ASN1_SET) || (utype == V_ASN1_OTHER)) usetag = 0; else usetag = 1; /* -1 means omit type */ if (len == -1) return 0; /* -2 return is special meaning use ndef */ if (len == -2) { ndef = 2; len = 0; } /* If not implicitly tagged get tag from underlying type */ if (tag == -1) tag = utype; /* Output tag+length followed by content octets */ if (out) { if (usetag) ASN1_put_object(out, ndef, len, tag, aclass); asn1_ex_i2c(pval, *out, &utype, it); if (ndef) ASN1_put_eoc(out); else *out += len; } if (usetag) return ASN1_object_size(ndef, len, tag); return len; } /* Produce content octets from a structure */ static int asn1_ex_i2c(ASN1_VALUE **pval, unsigned char *cout, int *putype, const ASN1_ITEM *it) { ASN1_BOOLEAN *tbool = NULL; ASN1_STRING *strtmp; ASN1_OBJECT *otmp; int utype; const unsigned char *cont; unsigned char c; int len; const ASN1_PRIMITIVE_FUNCS *pf; pf = it->funcs; if (pf && pf->prim_i2c) return pf->prim_i2c(pval, cout, putype, it); /* Should type be omitted? 
*/ if ((it->itype != ASN1_ITYPE_PRIMITIVE) || (it->utype != V_ASN1_BOOLEAN)) { if (!*pval) return -1; } if (it->itype == ASN1_ITYPE_MSTRING) { /* If MSTRING type set the underlying type */ strtmp = (ASN1_STRING *)*pval; utype = strtmp->type; *putype = utype; } else if (it->utype == V_ASN1_ANY) { /* If ANY set type and pointer to value */ ASN1_TYPE *typ; typ = (ASN1_TYPE *)*pval; utype = typ->type; *putype = utype; pval = &typ->value.asn1_value; } else utype = *putype; switch (utype) { case V_ASN1_OBJECT: otmp = (ASN1_OBJECT *)*pval; cont = otmp->data; len = otmp->length; break; case V_ASN1_NULL: cont = NULL; len = 0; break; case V_ASN1_BOOLEAN: tbool = (ASN1_BOOLEAN *)pval; if (*tbool == -1) return -1; if (it->utype != V_ASN1_ANY) { /* Default handling if value == size field then omit */ if (*tbool && (it->size > 0)) return -1; if (!*tbool && !it->size) return -1; } c = (unsigned char)*tbool; cont = &c; len = 1; break; case V_ASN1_BIT_STRING: return i2c_ASN1_BIT_STRING((ASN1_BIT_STRING *)*pval, cout ? &cout : NULL); break; case V_ASN1_INTEGER: case V_ASN1_ENUMERATED: /* These are all have the same content format * as ASN1_INTEGER */ return i2c_ASN1_INTEGER((ASN1_INTEGER *)*pval, cout ? &cout : NULL); break; case V_ASN1_OCTET_STRING: case V_ASN1_NUMERICSTRING: case V_ASN1_PRINTABLESTRING: case V_ASN1_T61STRING: case V_ASN1_VIDEOTEXSTRING: case V_ASN1_IA5STRING: case V_ASN1_UTCTIME: case V_ASN1_GENERALIZEDTIME: case V_ASN1_GRAPHICSTRING: case V_ASN1_VISIBLESTRING: case V_ASN1_GENERALSTRING: case V_ASN1_UNIVERSALSTRING: case V_ASN1_BMPSTRING: case V_ASN1_UTF8STRING: case V_ASN1_SEQUENCE: case V_ASN1_SET: default: /* All based on ASN1_STRING and handled the same */ strtmp = (ASN1_STRING *)*pval; /* Special handling for NDEF */ if ((it->size == ASN1_TFLG_NDEF) && (strtmp->flags & ASN1_STRING_FLAG_NDEF)) { if (cout) { strtmp->data = cout; strtmp->length = 0; } /* Special return code */ return -2; } cont = strtmp->data; len = strtmp->length; break; } if (cout && len) memcpy(cout, cont, len); return len; }
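/*
 * Hedged illustration (not part of the library code above): asn1_set_seq_out()
 * sorts SET OF members by their complete DER encodings using der_cmp(), i.e.
 * lexicographic byte order with a shorter encoding sorting before a longer one
 * that it prefixes.  The standalone sketch below reproduces only that
 * comparison rule on plain byte strings; the struct name and the sample
 * INTEGER encodings are invented for the example.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct der_blob {
    const unsigned char *data;
    int length;
};

static int der_blob_cmp(const void *a, const void *b)
{
    const struct der_blob *d1 = a, *d2 = b;
    int cmplen = d1->length < d2->length ? d1->length : d2->length;
    int i = memcmp(d1->data, d2->data, cmplen);

    if (i)
        return i;
    return d1->length - d2->length;   /* prefix sorts before the longer string */
}

int main(void)
{
    static const unsigned char a[] = { 0x02, 0x01, 0x05 };       /* INTEGER 5 */
    static const unsigned char b[] = { 0x02, 0x01, 0x03 };       /* INTEGER 3 */
    static const unsigned char c[] = { 0x02, 0x02, 0x01, 0x00 }; /* INTEGER 256 */
    struct der_blob v[] = {
        { a, sizeof(a) }, { b, sizeof(b) }, { c, sizeof(c) },
    };
    size_t i;

    /* Same ordering rule as the SET OF sort: expect b, then a, then c. */
    qsort(v, 3, sizeof(v[0]), der_blob_cmp);
    for (i = 0; i < 3; i++)
        printf("element %zu has length %d\n", i, v[i].length);
    return 0;
}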
187543.c
/* * Copyright (C) by Argonne National Laboratory * See COPYRIGHT in top-level directory */ #include <stdio.h> #include <stdlib.h> #include "mpi.h" #include "mpitest.h" #include "mpicolltest.h" /* Very simple test that MPI_Scatter handled mismatched lengths. Extended from bcastlength.c */ int verbose = 1; int main(int argc, char *argv[]) { int buf[10]; int *recvbuf; int ierr, errs = 0; int rank, num_ranks; char str[MPI_MAX_ERROR_STRING + 1]; int slen; MTEST_VG_MEM_INIT(buf, 10 * sizeof(int)); MTest_Init(&argc, &argv); MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &num_ranks); recvbuf = (int *) (malloc(sizeof(int) * 10 * num_ranks)); if (rank == 0) { ierr = MTest_Allgather(buf, 1, MPI_INT, recvbuf, 10, MPI_INT, MPI_COMM_WORLD); } else { ierr = MTest_Allgather(buf, 10, MPI_INT, recvbuf, 10, MPI_INT, MPI_COMM_WORLD); } if (ierr == MPI_SUCCESS) { if (rank != 0) { /* The root process may not detect that a too-short buffer * was provided by the non-root processes, but those processes * should detect this. */ errs++; printf("Did not detect mismatched length (short) on process %d\n", rank); } } else { if (verbose) { MPI_Error_string(ierr, str, &slen); printf("Found expected error; message is: %s\n", str); } } MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL); free(recvbuf); MTest_Finalize(errs); return MTestReturnValue(errs); }
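/*
 * Hedged contrast sketch (not part of the test above): with matched counts on
 * every rank, the same collective succeeds under MPI_ERRORS_RETURN.  Plain
 * MPI_Allgather is used instead of the MTest_* wrappers, which belong to the
 * test harness; buffer sizes are example values.
 */
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"

int main(int argc, char *argv[])
{
    int rank, size, ierr;
    int sendbuf[10] = { 0 };
    int *recvbuf;

    MPI_Init(&argc, &argv);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    recvbuf = (int *) malloc(sizeof(int) * 10 * size);
    /* Every rank contributes 10 ints and expects 10 ints per rank back. */
    ierr = MPI_Allgather(sendbuf, 10, MPI_INT, recvbuf, 10, MPI_INT,
                         MPI_COMM_WORLD);
    if (ierr != MPI_SUCCESS)
        printf("unexpected error on rank %d\n", rank);

    free(recvbuf);
    MPI_Finalize();
    return 0;
}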
688248.c
/****************************************************************************** * * Copyright (C) 2002 - 2015 Xilinx, Inc. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * XILINX BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Except as contained in this notice, the name of the Xilinx shall not be used * in advertising or otherwise to promote the sale, use or other dealings in * this Software without prior written authorization from Xilinx. * ******************************************************************************/ /******************************************************************************/ /** * * @file xuartns550_intr_example.c * * This file contains a design example using the UART 16450/16550 driver * (XUartNs550) and hardware device using interrupt mode. * * @note * * <pre> * MODIFICATION HISTORY: * * Ver Who Date Changes * ----- ---- -------- ---------------------------------------------------------- * 1.00b jhl 02/13/02 First release * 1.00b sv 06/08/05 Minor changes to comply to Doxygen and coding guidelines * 1.01a sv 05/08/06 Minor changes for supporting Test App Interrupt examples * 2.00a ktn 10/20/09 Updated to use HAL processor APIs and minor modifications * as per coding guidelines. * 2.01a ssb 01/11/01 Updated the example to be used with the SCUGIC in * Zynq. * 3.2 adk 15/10/14 Clear the global counters.If multiple instance of ip is * present in the h/w design without clearing these counters * will result undefined behaviour for the second ip * instance while running the peripheral tests. * 3.4 ms 01/23/17 Added xil_printf statement in main function to * ensure that "Successfully ran" and "Failed" strings * are available in all examples. This is a fix for * CR-965028. * </pre> ******************************************************************************/ /***************************** Include Files **********************************/ #include "xparameters.h" #include "xuartns550.h" #include "xil_exception.h" #ifdef XPAR_INTC_0_DEVICE_ID #include "xintc.h" #include <stdio.h> #else #include "xscugic.h" #include "xil_printf.h" #endif /************************** Constant Definitions ******************************/ /* * The following constants map to the XPAR parameters created in the * xparameters.h file. They are defined here such that a user can easily * change all the needed parameters in one place. 
*/ #ifndef TESTAPP_GEN #define UART_DEVICE_ID XPAR_UARTNS550_0_DEVICE_ID #define UART_IRPT_INTR XPAR_INTC_0_UARTNS550_0_VEC_ID #ifdef XPAR_INTC_0_DEVICE_ID #define INTC_DEVICE_ID XPAR_INTC_0_DEVICE_ID #else #define INTC_DEVICE_ID XPAR_SCUGIC_SINGLE_DEVICE_ID #endif /* XPAR_INTC_0_DEVICE_ID */ #endif /* TESTAPP_GEN */ /* * The following constant controls the length of the buffers to be sent * and received with the UART. */ #define TEST_BUFFER_SIZE 100 /**************************** Type Definitions ********************************/ #ifdef XPAR_INTC_0_DEVICE_ID #define INTC XIntc #define INTC_HANDLER XIntc_InterruptHandler #else #define INTC XScuGic #define INTC_HANDLER XScuGic_InterruptHandler #endif /* XPAR_INTC_0_DEVICE_ID */ /************************** Function Prototypes *******************************/ int UartNs550IntrExample(INTC *IntcInstancePtr, XUartNs550 *UartInstancePtr, u16 UartDeviceId, u16 UartIntrId); void UartNs550IntrHandler(void *CallBackRef, u32 Event, unsigned int EventData); static int UartNs550SetupIntrSystem(INTC *IntcInstancePtr, XUartNs550 *UartInstancePtr, u16 UartIntrId); static void UartNs550DisableIntrSystem(INTC *IntcInstancePtr, u16 UartIntrId); /************************** Variable Definitions ******************************/ #ifndef TESTAPP_GEN XUartNs550 UartNs550Instance; /* Instance of the UART Device */ INTC IntcInstance; /* Instance of the Interrupt Controller */ #endif /* * The following buffers are used in this example to send and receive data * with the UART. */ u8 SendBuffer[TEST_BUFFER_SIZE]; /* Buffer for Transmitting Data */ u8 RecvBuffer[TEST_BUFFER_SIZE]; /* Buffer for Receiving Data */ /* * The following counters are used to determine when the entire buffer has * been sent and received. */ static volatile int TotalReceivedCount; static volatile int TotalSentCount; static volatile int TotalErrorCount; /******************************************************************************/ /** * * Main function to call the UartNs550 interrupt example. * * @param None. * * @return XST_SUCCESS if successful, otherwise XST_FAILURE. * * @note None. * *******************************************************************************/ #ifndef TESTAPP_GEN int main(void) { int Status; /* * Run the UartNs550 Interrupt example. */ Status = UartNs550IntrExample(&IntcInstance, &UartNs550Instance, UART_DEVICE_ID, UART_IRPT_INTR); if (Status != XST_SUCCESS) { xil_printf("Uartns550 interrupt Example Failed\r\n"); return XST_FAILURE; } xil_printf("Successfully ran Uartns550 interrupt Example\r\n"); return XST_SUCCESS; } #endif /*****************************************************************************/ /** * * This function does a minimal test on the UartNs550 device and driver as a * design example. The purpose of this function is to illustrate how to use the * XUartNs550 component. * * This function transmits data and expects to receive the same data through the * UART using the local loopback of the hardware. * * This function uses interrupt driver mode of the UART. * * @param IntcInstancePtr is a pointer to the instance of the * Interrupt Controller. * @param UartInstancePtr is a pointer to the instance of the UART . * @param UartDeviceId is the device Id and is typically * XPAR_<UARTNS550_instance>_DEVICE_ID value from xparameters.h. * @param UartIntrId is the interrupt Id and is typically * XPAR_<INTC_instance>_<UARTNS550_instance>_IP2INTC_IRPT_INTR * value from xparameters.h. * * @return XST_SUCCESS if successful, otherwise XST_FAILURE. 
* * @note * * This function contains an infinite loop such that if interrupts are not * working it may never return. * *******************************************************************************/ int UartNs550IntrExample(INTC *IntcInstancePtr, XUartNs550 *UartInstancePtr, u16 UartDeviceId, u16 UartIntrId) { int Status; u32 Index; u16 Options; u32 BadByteCount = 0; /* * Initialize the UART driver so that it's ready to use. */ Status = XUartNs550_Initialize(UartInstancePtr, UartDeviceId); if (Status != XST_SUCCESS) { return XST_FAILURE; } /* * Perform a self-test to ensure that the hardware was built correctly. */ Status = XUartNs550_SelfTest(UartInstancePtr); if (Status != XST_SUCCESS) { return XST_FAILURE; } /* * Connect the UART to the interrupt subsystem such that interrupts can * occur. This function is application specific. */ Status = UartNs550SetupIntrSystem(IntcInstancePtr, UartInstancePtr, UartIntrId); if (Status != XST_SUCCESS) { return XST_FAILURE; } /* * Setup the handlers for the UART that will be called from the * interrupt context when data has been sent and received, specify a * pointer to the UART driver instance as the callback reference so * the handlers are able to access the instance data. */ XUartNs550_SetHandler(UartInstancePtr, UartNs550IntrHandler, UartInstancePtr); /* * Enable the interrupt of the UART so interrupts will occur, setup * a local loopback so data that is sent will be received, and keep the * FIFOs enabled. */ Options = XUN_OPTION_DATA_INTR | XUN_OPTION_LOOPBACK | XUN_OPTION_FIFOS_ENABLE; XUartNs550_SetOptions(UartInstancePtr, Options); /* * Initialize the send buffer bytes with a pattern to send and the * the receive buffer bytes to zero to allow the receive data to be * verified. */ for (Index = 0; Index < TEST_BUFFER_SIZE; Index++) { SendBuffer[Index] = Index + 'A'; RecvBuffer[Index] = 0; } /* * Start receiving data before sending it since there is a loopback, * ignoring the number of bytes received as the return value since we * know it will be zero and we are using interrupt mode. */ XUartNs550_Recv(UartInstancePtr, RecvBuffer, TEST_BUFFER_SIZE); /* * Send the buffer using the UART and ignore the number of bytes sent * as the return value since we are using it in interrupt mode. */ XUartNs550_Send(UartInstancePtr, SendBuffer, TEST_BUFFER_SIZE); /* * Wait for the entire buffer to be received, letting the interrupt * processing work in the background, this function may get locked * up in this loop if the interrupts are not working correctly. */ while ((TotalReceivedCount != TEST_BUFFER_SIZE) || (TotalSentCount != TEST_BUFFER_SIZE)) { } /* * Verify the entire receive buffer was successfully received. */ for (Index = 0; Index < TEST_BUFFER_SIZE; Index++) { if (RecvBuffer[Index] != SendBuffer[Index]) { BadByteCount++; } } /* * Disable the UartNs550 interrupt. */ UartNs550DisableIntrSystem(IntcInstancePtr, UartIntrId); /* * If any bytes were not correct, return an error. */ if (BadByteCount != 0) { return XST_FAILURE; } /* Clear the counters */ TotalErrorCount = 0; TotalReceivedCount = 0; TotalSentCount = 0; return XST_SUCCESS; } /*****************************************************************************/ /** * * This function is the handler which performs processing to handle data events * from the UartNs550. It is called from an interrupt context such that the * amount of processing performed should be minimized. * * This handler provides an example of how to handle data for the UART and * is application specific. 
* * @param CallBackRef contains a callback reference from the driver, * in thiscase it is the instance pointer for the UART driver. * @param Event contains the specific kind of event that has occurred. * @param EventData contains the number of bytes sent or received for sent * and receive events. * * @return None. * * @note None. * *******************************************************************************/ void UartNs550IntrHandler(void *CallBackRef, u32 Event, unsigned int EventData) { u8 Errors; XUartNs550 *UartNs550Ptr = (XUartNs550 *)CallBackRef; /* * All of the data has been sent. */ if (Event == XUN_EVENT_SENT_DATA) { TotalSentCount = EventData; } /* * All of the data has been received. */ if (Event == XUN_EVENT_RECV_DATA) { TotalReceivedCount = EventData; } /* * Data was received, but not the expected number of bytes, a * timeout just indicates the data stopped for 4 character times. */ if (Event == XUN_EVENT_RECV_TIMEOUT) { TotalReceivedCount = EventData; } /* * Data was received with an error, keep the data but determine * what kind of errors occurred. */ if (Event == XUN_EVENT_RECV_ERROR) { TotalReceivedCount = EventData; TotalErrorCount++; Errors = XUartNs550_GetLastErrors(UartNs550Ptr); } } /******************************************************************************/ /** * * This function setups the interrupt system such that interrupts can occur * for the UART. This function is application specific since the actual * system may or may not have an interrupt controller. The UART could be * directly connected to a processor without an interrupt controller. The * user should modify this function to fit the application. * * @param IntcInstancePtr is a pointer to the instance of the Interrupt * Controller. * @param UartInstancePtr is a pointer to the instance of the UART. * @param UartIntrId is the interrupt Id and is typically * XPAR_<INTC_instance>_<UARTNS550_instance>_VEC_ID value from * xparameters.h. * * @return XST_SUCCESS if successful, otherwise XST_FAILURE. * * @note None. * *******************************************************************************/ static int UartNs550SetupIntrSystem(INTC *IntcInstancePtr, XUartNs550 *UartInstancePtr, u16 UartIntrId) { int Status; #ifdef XPAR_INTC_0_DEVICE_ID #ifndef TESTAPP_GEN /* * Initialize the interrupt controller driver so that it is ready * to use. */ Status = XIntc_Initialize(IntcInstancePtr, INTC_DEVICE_ID); if (Status != XST_SUCCESS) { return XST_FAILURE; } #endif /* TESTAPP_GEN */ /* * Connect a device driver handler that will be called when an interrupt * for the device occurs, the device driver handler performs the * specific interrupt processing for the device. */ Status = XIntc_Connect(IntcInstancePtr, UartIntrId, (XInterruptHandler)XUartNs550_InterruptHandler, (void *)UartInstancePtr); if (Status != XST_SUCCESS) { return XST_FAILURE; } #ifndef TESTAPP_GEN /* * Start the interrupt controller such that interrupts are enabled for * all devices that cause interrupts, specific real mode so that * the UART can cause interrupts thru the interrupt controller. */ Status = XIntc_Start(IntcInstancePtr, XIN_REAL_MODE); if (Status != XST_SUCCESS) { return XST_FAILURE; } #endif /* TESTAPP_GEN */ /* * Enable the interrupt for the UartNs550. */ XIntc_Enable(IntcInstancePtr, UartIntrId); #else #ifndef TESTAPP_GEN XScuGic_Config *IntcConfig; /* * Initialize the interrupt controller driver so that it is ready to * use. 
*/ IntcConfig = XScuGic_LookupConfig(INTC_DEVICE_ID); if (NULL == IntcConfig) { return XST_FAILURE; } Status = XScuGic_CfgInitialize(IntcInstancePtr, IntcConfig, IntcConfig->CpuBaseAddress); if (Status != XST_SUCCESS) { return XST_FAILURE; } #endif /* TESTAPP_GEN */ XScuGic_SetPriorityTriggerType(IntcInstancePtr, UartIntrId, 0xA0, 0x3); /* * Connect the interrupt handler that will be called when an * interrupt occurs for the device. */ Status = XScuGic_Connect(IntcInstancePtr, UartIntrId, (Xil_ExceptionHandler)XUartNs550_InterruptHandler, UartInstancePtr); if (Status != XST_SUCCESS) { return Status; } /* * Enable the interrupt for the Timer device. */ XScuGic_Enable(IntcInstancePtr, UartIntrId); #endif /* XPAR_INTC_0_DEVICE_ID */ #ifndef TESTAPP_GEN /* * Initialize the exception table. */ Xil_ExceptionInit(); /* * Register the interrupt controller handler with the exception table. */ Xil_ExceptionRegisterHandler(XIL_EXCEPTION_ID_INT, (Xil_ExceptionHandler)INTC_HANDLER, IntcInstancePtr); /* * Enable exceptions. */ Xil_ExceptionEnable(); #endif /* TESTAPP_GEN */ return XST_SUCCESS; } /*****************************************************************************/ /** * * This function disables the interrupts that occur for the UartNs550 device. * * @param IntcInstancePtr is the pointer to the instance of the Interrupt * Controller. * @param UartIntrId is the interrupt Id and is typically * XPAR_<INTC_instance>_<UARTNS550_instance>_VEC_ID * value from xparameters.h. * * @return None. * * @note None. * ******************************************************************************/ static void UartNs550DisableIntrSystem(INTC *IntcInstancePtr, u16 UartIntrId) { /* * Disconnect and disable the interrupt for the UartNs550 device. */ #ifdef XPAR_INTC_0_DEVICE_ID XIntc_Disconnect(IntcInstancePtr, UartIntrId); #else XScuGic_Disable(IntcInstancePtr, UartIntrId); XScuGic_Disconnect(IntcInstancePtr, UartIntrId); #endif }
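/*
 * Hedged host-side mock (not Xilinx driver code): the example above completes
 * when the interrupt handler updates volatile counters that the foreground
 * loop polls.  This sketch reproduces only that synchronization pattern; the
 * "interrupt" is simulated by a direct function call and all names are
 * invented.
 */
#include <stdio.h>

#define BUFFER_SIZE 100

static volatile int TotalSent;
static volatile int TotalReceived;

/* Stand-in for the UART event handler: records how many bytes completed. */
static void mock_handler(int sent, int received)
{
    TotalSent = sent;
    TotalReceived = received;
}

int main(void)
{
    /* In the real example the hardware raises the interrupt; here we fake it. */
    mock_handler(BUFFER_SIZE, BUFFER_SIZE);

    /* Foreground wait loop with the same structure as the example's loop. */
    while (TotalSent != BUFFER_SIZE || TotalReceived != BUFFER_SIZE) {
        /* spin until the handler has updated both counters */
    }
    printf("transfer of %d bytes completed\n", BUFFER_SIZE);
    return 0;
}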
669486.c
/* Copyright (c) 2013-2018, Linaro Limited * Copyright (c) 2013, Nokia Solutions and Networks * All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ #include "config.h" #include <odp_posix_extensions.h> #include <odp_packet_io_internal.h> #include <sys/socket.h> #include <stdio.h> #include <sys/mman.h> #include <unistd.h> #include <bits/wordsize.h> #include <net/ethernet.h> #include <netinet/ip.h> #include <stdint.h> #include <net/if.h> #include <poll.h> #include <sys/ioctl.h> #include <errno.h> #include <time.h> #include <odp_api.h> #include <odp/api/plat/packet_inlines.h> #include <odp_packet_socket.h> #include <odp_socket_common.h> #include <odp_packet_internal.h> #include <odp_packet_io_internal.h> #include <odp_packet_io_stats.h> #include <odp_debug_internal.h> #include <odp_errno_define.h> #include <odp_classification_datamodel.h> #include <odp_classification_inlines.h> #include <odp_classification_internal.h> #include <odp/api/hints.h> #include <protocols/eth.h> #include <protocols/ip.h> /* Maximum number of retries per sock_mmap_send() call */ #define TX_RETRIES 10 /* Number of nanoseconds to wait between TX retries */ #define TX_RETRY_NSEC 1000 /* Maximum number of packets to store in each RX/TX block */ #define MAX_PKTS_PER_BLOCK 512 /** Packet socket using mmap rings for both Rx and Tx */ typedef struct { /** Packet mmap ring for Rx */ struct ring ODP_ALIGNED_CACHE rx_ring; /** Packet mmap ring for Tx */ struct ring ODP_ALIGNED_CACHE tx_ring; int ODP_ALIGNED_CACHE sockfd; odp_pool_t pool; int mtu; /**< maximum transmission unit */ size_t frame_offset; /**< frame start offset from start of pkt buf */ uint8_t *mmap_base; unsigned int mmap_len; unsigned char if_mac[ETH_ALEN]; struct sockaddr_ll ll; int fanout; } pkt_sock_mmap_t; ODP_STATIC_ASSERT(PKTIO_PRIVATE_SIZE >= sizeof(pkt_sock_mmap_t), "PKTIO_PRIVATE_SIZE too small"); static inline pkt_sock_mmap_t *pkt_priv(pktio_entry_t *pktio_entry) { return (pkt_sock_mmap_t *)(uintptr_t)(pktio_entry->s.pkt_priv); } static int disable_pktio; /** !0 this pktio disabled, 0 enabled */ static int set_pkt_sock_fanout_mmap(pkt_sock_mmap_t *const pkt_sock, int sock_group_idx) { int sockfd = pkt_sock->sockfd; int val; int err; uint16_t fanout_group; fanout_group = (uint16_t)(sock_group_idx & 0xffff); val = (PACKET_FANOUT_HASH << 16) | fanout_group; err = setsockopt(sockfd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val)); if (err != 0) { __odp_errno = errno; ODP_ERR("setsockopt(PACKET_FANOUT): %s\n", strerror(errno)); return -1; } return 0; } union frame_map { struct { struct tpacket2_hdr ODP_ALIGNED(TPACKET_ALIGNMENT) tp_h; struct sockaddr_ll ODP_ALIGNED(TPACKET_ALIGN(sizeof(struct tpacket2_hdr))) s_ll; } *v2; void *raw; }; static int mmap_pkt_socket(void) { int ver = TPACKET_V2; int ret, sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); if (sock == -1) { __odp_errno = errno; ODP_ERR("socket(SOCK_RAW): %s\n", strerror(errno)); return -1; } ret = setsockopt(sock, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)); if (ret == -1) { __odp_errno = errno; ODP_ERR("setsockopt(PACKET_VERSION): %s\n", strerror(errno)); close(sock); return -1; } return sock; } static inline int mmap_rx_kernel_ready(struct tpacket2_hdr *hdr) { return ((hdr->tp_status & TP_STATUS_USER) == TP_STATUS_USER); } static inline void mmap_rx_user_ready(struct tpacket2_hdr *hdr) { hdr->tp_status = TP_STATUS_KERNEL; __sync_synchronize(); } static inline int mmap_tx_kernel_ready(struct tpacket2_hdr *hdr) { return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | 
TP_STATUS_SENDING)); } static inline void mmap_tx_user_ready(struct tpacket2_hdr *hdr) { hdr->tp_status = TP_STATUS_SEND_REQUEST; __sync_synchronize(); } static uint8_t *pkt_mmap_vlan_insert(uint8_t *l2_hdr_ptr, uint16_t mac_offset, uint16_t vlan_tci, int *pkt_len_ptr) { _odp_ethhdr_t *eth_hdr; _odp_vlanhdr_t *vlan_hdr; uint8_t *new_l2_ptr; int orig_pkt_len; /* First try to see if the mac_offset is large enough to accommodate * shifting the Ethernet header down to open up space for the IEEE * 802.1Q vlan header. */ if (_ODP_VLANHDR_LEN < mac_offset) { orig_pkt_len = *pkt_len_ptr; new_l2_ptr = l2_hdr_ptr - _ODP_VLANHDR_LEN; memmove(new_l2_ptr, l2_hdr_ptr, _ODP_ETHHDR_LEN); eth_hdr = (_odp_ethhdr_t *)new_l2_ptr; vlan_hdr = (_odp_vlanhdr_t *)(new_l2_ptr + _ODP_ETHHDR_LEN); vlan_hdr->tci = odp_cpu_to_be_16(vlan_tci); vlan_hdr->type = eth_hdr->type; eth_hdr->type = odp_cpu_to_be_16(_ODP_ETHTYPE_VLAN); *pkt_len_ptr = orig_pkt_len + _ODP_VLANHDR_LEN; return new_l2_ptr; } return l2_hdr_ptr; } static inline unsigned next_frame(unsigned cur_frame, unsigned frame_count) { return odp_unlikely(cur_frame + 1 >= frame_count) ? 0 : cur_frame + 1; } static inline unsigned pkt_mmap_v2_rx(pktio_entry_t *pktio_entry, pkt_sock_mmap_t *pkt_sock, odp_packet_t pkt_table[], unsigned num, unsigned char if_mac[]) { union frame_map ppd; odp_time_t ts_val; odp_time_t *ts = NULL; unsigned frame_num, next_frame_num; uint8_t *pkt_buf; int pkt_len; struct ethhdr *eth_hdr; unsigned i; unsigned nb_rx; struct ring *ring; int ret; if (pktio_entry->s.config.pktin.bit.ts_all || pktio_entry->s.config.pktin.bit.ts_ptp) ts = &ts_val; ring = &pkt_sock->rx_ring; frame_num = ring->frame_num; for (i = 0, nb_rx = 0; i < num; i++) { odp_packet_hdr_t *hdr; odp_packet_hdr_t parsed_hdr; odp_pool_t pool = pkt_sock->pool; int pkts; if (!mmap_rx_kernel_ready(ring->rd[frame_num].iov_base)) break; if (ts != NULL) ts_val = odp_time_global(); ppd.raw = ring->rd[frame_num].iov_base; next_frame_num = next_frame(frame_num, ring->rd_num); pkt_buf = (uint8_t *)ppd.raw + ppd.v2->tp_h.tp_mac; pkt_len = ppd.v2->tp_h.tp_snaplen; if (odp_unlikely(pkt_len > pkt_sock->mtu)) { mmap_rx_user_ready(ppd.raw); frame_num = next_frame_num; ODP_DBG("dropped oversized packet\n"); continue; } /* Don't receive packets sent by ourselves */ eth_hdr = (struct ethhdr *)pkt_buf; if (odp_unlikely(ethaddrs_equal(if_mac, eth_hdr->h_source))) { mmap_rx_user_ready(ppd.raw); /* drop */ frame_num = next_frame_num; continue; } if (ppd.v2->tp_h.tp_status & TP_STATUS_VLAN_VALID) pkt_buf = pkt_mmap_vlan_insert(pkt_buf, ppd.v2->tp_h.tp_mac, ppd.v2->tp_h.tp_vlan_tci, &pkt_len); if (pktio_cls_enabled(pktio_entry)) { if (cls_classify_packet(pktio_entry, pkt_buf, pkt_len, pkt_len, &pool, &parsed_hdr, true)) { mmap_rx_user_ready(ppd.raw); /* drop */ frame_num = next_frame_num; continue; } } pkts = packet_alloc_multi(pool, pkt_len, &pkt_table[nb_rx], 1); if (odp_unlikely(pkts != 1)) { pkt_table[nb_rx] = ODP_PACKET_INVALID; mmap_rx_user_ready(ppd.raw); /* drop */ frame_num = next_frame_num; continue; } hdr = packet_hdr(pkt_table[nb_rx]); ret = odp_packet_copy_from_mem(pkt_table[nb_rx], 0, pkt_len, pkt_buf); if (ret != 0) { odp_packet_free(pkt_table[nb_rx]); mmap_rx_user_ready(ppd.raw); /* drop */ frame_num = next_frame_num; continue; } hdr->input = pktio_entry->s.handle; if (pktio_cls_enabled(pktio_entry)) copy_packet_cls_metadata(&parsed_hdr, hdr); else packet_parse_layer(hdr, pktio_entry->s.config.parser.layer, pktio_entry->s.in_chksums); packet_set_ts(hdr, ts); mmap_rx_user_ready(ppd.raw); 
frame_num = next_frame_num; nb_rx++; } ring->frame_num = frame_num; return nb_rx; } static unsigned handle_pending_frames(int sock, struct ring *ring, int frames) { int i; int retry = 0; unsigned nb_tx = 0; unsigned frame_num; unsigned frame_count = ring->rd_num; unsigned first_frame_num = ring->frame_num; for (frame_num = first_frame_num, i = 0; i < frames; i++) { struct tpacket2_hdr *hdr = ring->rd[frame_num].iov_base; if (odp_likely(hdr->tp_status == TP_STATUS_AVAILABLE || hdr->tp_status == TP_STATUS_SENDING)) { nb_tx++; } else if (hdr->tp_status == TP_STATUS_SEND_REQUEST) { if (retry++ < TX_RETRIES) { struct timespec ts = { .tv_nsec = TX_RETRY_NSEC, .tv_sec = 0 }; sendto(sock, NULL, 0, MSG_DONTWAIT, NULL, 0); nanosleep(&ts, NULL); i--; continue; } else { hdr->tp_status = TP_STATUS_AVAILABLE; } } else { /* TP_STATUS_WRONG_FORMAT */ /* Don't try re-sending frames after failure */ for (; i < frames; i++) { hdr = ring->rd[frame_num].iov_base; hdr->tp_status = TP_STATUS_AVAILABLE; frame_num = next_frame(frame_num, frame_count); } break; } frame_num = next_frame(frame_num, frame_count); } ring->frame_num = next_frame(first_frame_num + nb_tx - 1, frame_count); return nb_tx; } static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring, const odp_packet_t pkt_table[], unsigned num) { union frame_map ppd; uint32_t pkt_len; unsigned first_frame_num, frame_num, frame_count; int ret; uint8_t *buf; unsigned i = 0; unsigned nb_tx = 0; int send_errno; int total_len = 0; first_frame_num = ring->frame_num; frame_num = first_frame_num; frame_count = ring->rd_num; while (i < num) { ppd.raw = ring->rd[frame_num].iov_base; if (!odp_unlikely(mmap_tx_kernel_ready(ppd.raw))) break; pkt_len = odp_packet_len(pkt_table[i]); ppd.v2->tp_h.tp_snaplen = pkt_len; ppd.v2->tp_h.tp_len = pkt_len; total_len += pkt_len; buf = (uint8_t *)ppd.raw + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll); odp_packet_copy_to_mem(pkt_table[i], 0, pkt_len, buf); mmap_tx_user_ready(ppd.raw); frame_num = next_frame(frame_num, frame_count); i++; } ret = sendto(sock, NULL, 0, MSG_DONTWAIT, NULL, 0); send_errno = errno; /* On success, the return value indicates the number of bytes sent. On * failure a value of -1 is returned, even if the failure occurred * after some of the packets in the ring have already been sent, so we * need to inspect the packet status to determine which were sent. */ if (odp_likely(ret == total_len)) { nb_tx = i; ring->frame_num = frame_num; } else { nb_tx = handle_pending_frames(sock, ring, i); if (odp_unlikely(ret == -1 && nb_tx == 0 && SOCK_ERR_REPORT(send_errno))) { __odp_errno = send_errno; /* ENOBUFS indicates that the transmit queue is full, * which will happen regularly when overloaded so don't * print it */ if (errno != ENOBUFS) ODP_ERR("sendto(pkt mmap): %s\n", strerror(send_errno)); return -1; } } for (i = 0; i < nb_tx; ++i) odp_packet_free(pkt_table[i]); return nb_tx; } static void mmap_fill_ring(struct ring *ring, odp_pool_t pool_hdl, int fanout) { uint32_t num_frames; int pz = getpagesize(); pool_t *pool; if (pool_hdl == ODP_POOL_INVALID) ODP_ABORT("Invalid pool handle\n"); pool = pool_entry_from_hdl(pool_hdl); /* Frame has to capture full packet which can fit to the pool block.*/ ring->req.tp_frame_size = (pool->headroom + pool->seg_len + pool->tailroom + TPACKET_HDRLEN + TPACKET_ALIGNMENT + (pz - 1)) & (-pz); /* Calculate how many pages we need to hold at most MAX_PKTS_PER_BLOCK * packets and align size to page boundary. */ num_frames = pool->num < MAX_PKTS_PER_BLOCK ? 
pool->num : MAX_PKTS_PER_BLOCK; ring->req.tp_block_size = (ring->req.tp_frame_size * num_frames + (pz - 1)) & (-pz); if (!fanout) { /* Single socket is in use. Use 1 block with buf_num frames. */ ring->req.tp_block_nr = 1; } else { /* Fanout is in use, more likely taffic split accodring to * number of cpu threads. Use cpu blocks and buf_num frames. */ ring->req.tp_block_nr = odp_cpu_count(); } ring->req.tp_frame_nr = ring->req.tp_block_size / ring->req.tp_frame_size * ring->req.tp_block_nr; ring->mm_len = ring->req.tp_block_size * ring->req.tp_block_nr; ring->rd_num = ring->req.tp_frame_nr; ring->flen = ring->req.tp_frame_size; } static int mmap_setup_ring(int sock, struct ring *ring, int type, odp_pool_t pool_hdl, int fanout) { int ret = 0; ring->sock = sock; ring->type = type; ring->version = TPACKET_V2; mmap_fill_ring(ring, pool_hdl, fanout); ret = setsockopt(sock, SOL_PACKET, type, &ring->req, sizeof(ring->req)); if (ret == -1) { __odp_errno = errno; ODP_ERR("setsockopt(pkt mmap): %s\n", strerror(errno)); return -1; } ring->rd_len = ring->rd_num * sizeof(*ring->rd); ring->rd = malloc(ring->rd_len); if (!ring->rd) { __odp_errno = errno; ODP_ERR("malloc(): %s\n", strerror(errno)); return -1; } return 0; } static int mmap_sock(pkt_sock_mmap_t *pkt_sock) { int i; int sock = pkt_sock->sockfd; /* map rx + tx buffer to userspace : they are in this order */ pkt_sock->mmap_len = pkt_sock->rx_ring.req.tp_block_size * pkt_sock->rx_ring.req.tp_block_nr + pkt_sock->tx_ring.req.tp_block_size * pkt_sock->tx_ring.req.tp_block_nr; pkt_sock->mmap_base = mmap(NULL, pkt_sock->mmap_len, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED | MAP_POPULATE, sock, 0); if (pkt_sock->mmap_base == MAP_FAILED) { __odp_errno = errno; ODP_ERR("mmap rx&tx buffer failed: %s\n", strerror(errno)); return -1; } pkt_sock->rx_ring.mm_space = pkt_sock->mmap_base; memset(pkt_sock->rx_ring.rd, 0, pkt_sock->rx_ring.rd_len); for (i = 0; i < pkt_sock->rx_ring.rd_num; ++i) { pkt_sock->rx_ring.rd[i].iov_base = pkt_sock->rx_ring.mm_space + (i * pkt_sock->rx_ring.flen); pkt_sock->rx_ring.rd[i].iov_len = pkt_sock->rx_ring.flen; } pkt_sock->tx_ring.mm_space = pkt_sock->mmap_base + pkt_sock->rx_ring.mm_len; memset(pkt_sock->tx_ring.rd, 0, pkt_sock->tx_ring.rd_len); for (i = 0; i < pkt_sock->tx_ring.rd_num; ++i) { pkt_sock->tx_ring.rd[i].iov_base = pkt_sock->tx_ring.mm_space + (i * pkt_sock->tx_ring.flen); pkt_sock->tx_ring.rd[i].iov_len = pkt_sock->tx_ring.flen; } return 0; } static int mmap_unmap_sock(pkt_sock_mmap_t *pkt_sock) { free(pkt_sock->rx_ring.rd); free(pkt_sock->tx_ring.rd); return munmap(pkt_sock->mmap_base, pkt_sock->mmap_len); } static int mmap_bind_sock(pkt_sock_mmap_t *pkt_sock, const char *netdev) { int ret; pkt_sock->ll.sll_family = PF_PACKET; pkt_sock->ll.sll_protocol = htons(ETH_P_ALL); pkt_sock->ll.sll_ifindex = if_nametoindex(netdev); pkt_sock->ll.sll_hatype = 0; pkt_sock->ll.sll_pkttype = 0; pkt_sock->ll.sll_halen = 0; ret = bind(pkt_sock->sockfd, (struct sockaddr *)&pkt_sock->ll, sizeof(pkt_sock->ll)); if (ret == -1) { __odp_errno = errno; ODP_ERR("bind(to IF): %s\n", strerror(errno)); return -1; } return 0; } static int sock_mmap_close(pktio_entry_t *entry) { pkt_sock_mmap_t *const pkt_sock = pkt_priv(entry); int ret; ret = mmap_unmap_sock(pkt_sock); if (ret != 0) { ODP_ERR("mmap_unmap_sock() %s\n", strerror(errno)); return -1; } if (pkt_sock->sockfd != -1 && close(pkt_sock->sockfd) != 0) { __odp_errno = errno; ODP_ERR("close(sockfd): %s\n", strerror(errno)); return -1; } return 0; } static int 
sock_mmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry, const char *netdev, odp_pool_t pool) { int if_idx; int ret = 0; if (disable_pktio) return -1; pkt_sock_mmap_t *const pkt_sock = pkt_priv(pktio_entry); int fanout = 1; /* Init pktio entry */ memset(pkt_sock, 0, sizeof(*pkt_sock)); /* set sockfd to -1, because a valid socked might be initialized to 0 */ pkt_sock->sockfd = -1; if (pool == ODP_POOL_INVALID) return -1; /* Store eth buffer offset for pkt buffers from this pool */ pkt_sock->frame_offset = 0; pkt_sock->pool = pool; pkt_sock->sockfd = mmap_pkt_socket(); if (pkt_sock->sockfd == -1) goto error; ret = mmap_bind_sock(pkt_sock, netdev); if (ret != 0) goto error; ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->tx_ring, PACKET_TX_RING, pool, fanout); if (ret != 0) goto error; ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->rx_ring, PACKET_RX_RING, pool, fanout); if (ret != 0) goto error; ret = mmap_sock(pkt_sock); if (ret != 0) goto error; ret = mac_addr_get_fd(pkt_sock->sockfd, netdev, pkt_sock->if_mac); if (ret != 0) goto error; pkt_sock->mtu = mtu_get_fd(pkt_sock->sockfd, netdev); if (!pkt_sock->mtu) goto error; if_idx = if_nametoindex(netdev); if (if_idx == 0) { __odp_errno = errno; ODP_ERR("if_nametoindex(): %s\n", strerror(errno)); goto error; } pkt_sock->fanout = fanout; if (fanout) { ret = set_pkt_sock_fanout_mmap(pkt_sock, if_idx); if (ret != 0) goto error; } pktio_entry->s.stats_type = sock_stats_type_fd(pktio_entry, pkt_sock->sockfd); if (pktio_entry->s.stats_type == STATS_UNSUPPORTED) ODP_DBG("pktio: %s unsupported stats\n", pktio_entry->s.name); ret = sock_stats_reset_fd(pktio_entry, pkt_priv(pktio_entry)->sockfd); if (ret != 0) goto error; return 0; error: sock_mmap_close(pktio_entry); return -1; } static int sock_mmap_fd_set(pktio_entry_t *pktio_entry, int index ODP_UNUSED, fd_set *readfds) { pkt_sock_mmap_t *const pkt_sock = pkt_priv(pktio_entry); int fd; odp_ticketlock_lock(&pktio_entry->s.rxl); fd = pkt_sock->sockfd; FD_SET(fd, readfds); odp_ticketlock_unlock(&pktio_entry->s.rxl); return fd; } static int sock_mmap_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED, odp_packet_t pkt_table[], int num) { pkt_sock_mmap_t *const pkt_sock = pkt_priv(pktio_entry); int ret; odp_ticketlock_lock(&pktio_entry->s.rxl); ret = pkt_mmap_v2_rx(pktio_entry, pkt_sock, pkt_table, num, pkt_sock->if_mac); odp_ticketlock_unlock(&pktio_entry->s.rxl); return ret; } static int sock_mmap_recv_tmo(pktio_entry_t *pktio_entry, int index, odp_packet_t pkt_table[], int num, uint64_t usecs) { struct timeval timeout; int ret; int maxfd; fd_set readfds; ret = sock_mmap_recv(pktio_entry, index, pkt_table, num); if (ret != 0) return ret; timeout.tv_sec = usecs / (1000 * 1000); timeout.tv_usec = usecs - timeout.tv_sec * (1000ULL * 1000ULL); FD_ZERO(&readfds); maxfd = sock_mmap_fd_set(pktio_entry, index, &readfds); while (1) { ret = select(maxfd + 1, &readfds, NULL, NULL, &timeout); if (ret <= 0) return ret; ret = sock_mmap_recv(pktio_entry, index, pkt_table, num); if (ret) return ret; /* If no packets, continue wait until timeout expires */ } } static int sock_mmap_recv_mq_tmo(pktio_entry_t *pktio_entry[], int index[], int num_q, odp_packet_t pkt_table[], int num, unsigned *from, uint64_t usecs) { struct timeval timeout; int i; int ret; int maxfd = -1, maxfd2; fd_set readfds; for (i = 0; i < num_q; i++) { ret = sock_mmap_recv(pktio_entry[i], index[i], pkt_table, num); if (ret > 0 && from) *from = i; if (ret != 0) return ret; } FD_ZERO(&readfds); for (i = 0; i < num_q; i++) { 
maxfd2 = sock_mmap_fd_set(pktio_entry[i], index[i], &readfds); if (maxfd2 > maxfd) maxfd = maxfd2; } timeout.tv_sec = usecs / (1000 * 1000); timeout.tv_usec = usecs - timeout.tv_sec * (1000ULL * 1000ULL); while (1) { ret = select(maxfd + 1, &readfds, NULL, NULL, &timeout); if (ret <= 0) return ret; for (i = 0; i < num_q; i++) { ret = sock_mmap_recv(pktio_entry[i], index[i], pkt_table, num); if (ret > 0 && from) *from = i; if (ret) return ret; } /* If no packets, continue wait until timeout expires */ } } static int sock_mmap_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED, const odp_packet_t pkt_table[], int num) { int ret; pkt_sock_mmap_t *const pkt_sock = pkt_priv(pktio_entry); odp_ticketlock_lock(&pktio_entry->s.txl); ret = pkt_mmap_v2_tx(pkt_sock->tx_ring.sock, &pkt_sock->tx_ring, pkt_table, num); odp_ticketlock_unlock(&pktio_entry->s.txl); return ret; } static uint32_t sock_mmap_mtu_get(pktio_entry_t *pktio_entry) { return mtu_get_fd(pkt_priv(pktio_entry)->sockfd, pktio_entry->s.name); } static int sock_mmap_mac_addr_get(pktio_entry_t *pktio_entry, void *mac_addr) { memcpy(mac_addr, pkt_priv(pktio_entry)->if_mac, ETH_ALEN); return ETH_ALEN; } static int sock_mmap_promisc_mode_set(pktio_entry_t *pktio_entry, odp_bool_t enable) { return promisc_mode_set_fd(pkt_priv(pktio_entry)->sockfd, pktio_entry->s.name, enable); } static int sock_mmap_promisc_mode_get(pktio_entry_t *pktio_entry) { return promisc_mode_get_fd(pkt_priv(pktio_entry)->sockfd, pktio_entry->s.name); } static int sock_mmap_link_status(pktio_entry_t *pktio_entry) { return link_status_fd(pkt_priv(pktio_entry)->sockfd, pktio_entry->s.name); } static int sock_mmap_capability(pktio_entry_t *pktio_entry ODP_UNUSED, odp_pktio_capability_t *capa) { memset(capa, 0, sizeof(odp_pktio_capability_t)); capa->max_input_queues = 1; capa->max_output_queues = 1; capa->set_op.op.promisc_mode = 1; odp_pktio_config_init(&capa->config); capa->config.pktin.bit.ts_all = 1; capa->config.pktin.bit.ts_ptp = 1; return 0; } static int sock_mmap_stats(pktio_entry_t *pktio_entry, odp_pktio_stats_t *stats) { if (pktio_entry->s.stats_type == STATS_UNSUPPORTED) { memset(stats, 0, sizeof(*stats)); return 0; } return sock_stats_fd(pktio_entry, stats, pkt_priv(pktio_entry)->sockfd); } static int sock_mmap_stats_reset(pktio_entry_t *pktio_entry) { if (pktio_entry->s.stats_type == STATS_UNSUPPORTED) { memset(&pktio_entry->s.stats, 0, sizeof(odp_pktio_stats_t)); return 0; } return sock_stats_reset_fd(pktio_entry, pkt_priv(pktio_entry)->sockfd); } static int sock_mmap_init_global(void) { if (getenv("ODP_PKTIO_DISABLE_SOCKET_MMAP")) { ODP_PRINT("PKTIO: socket mmap skipped," " enabled export ODP_PKTIO_DISABLE_SOCKET_MMAP=1.\n"); disable_pktio = 1; } else { ODP_PRINT("PKTIO: initialized socket mmap," " use export ODP_PKTIO_DISABLE_SOCKET_MMAP=1 to disable.\n"); } return 0; } const pktio_if_ops_t sock_mmap_pktio_ops = { .name = "socket_mmap", .print = NULL, .init_global = sock_mmap_init_global, .init_local = NULL, .term = NULL, .open = sock_mmap_open, .close = sock_mmap_close, .start = NULL, .stop = NULL, .stats = sock_mmap_stats, .stats_reset = sock_mmap_stats_reset, .recv = sock_mmap_recv, .recv_tmo = sock_mmap_recv_tmo, .recv_mq_tmo = sock_mmap_recv_mq_tmo, .send = sock_mmap_send, .fd_set = sock_mmap_fd_set, .mtu_get = sock_mmap_mtu_get, .promisc_mode_set = sock_mmap_promisc_mode_set, .promisc_mode_get = sock_mmap_promisc_mode_get, .mac_get = sock_mmap_mac_addr_get, .mac_set = NULL, .link_status = sock_mmap_link_status, .capability = sock_mmap_capability, 
.pktin_ts_res = NULL, .pktin_ts_from_ns = NULL, .config = NULL, .input_queues_config = NULL, .output_queues_config = NULL, };
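/*
 * Hedged standalone sketch (not ODP code): the pktio above wraps the kernel's
 * TPACKET_V2 packet-mmap interface.  This minimal program shows only the raw
 * sequence the driver builds on -- open an AF_PACKET socket, select
 * TPACKET_V2, request an RX ring with setsockopt(PACKET_RX_RING) and map it.
 * The block/frame sizes are arbitrary example values and the program needs
 * CAP_NET_RAW (typically root) to succeed.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

int main(void)
{
    int ver = TPACKET_V2;
    struct tpacket_req req;
    void *ring;
    int fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)) < 0) {
        perror("PACKET_VERSION");
        return 1;
    }

    memset(&req, 0, sizeof(req));
    req.tp_block_size = 4096;   /* one page per block (example value) */
    req.tp_frame_size = 2048;   /* two frames per block */
    req.tp_block_nr   = 4;
    req.tp_frame_nr   = req.tp_block_nr *
                        (req.tp_block_size / req.tp_frame_size);
    if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)) < 0) {
        perror("PACKET_RX_RING");
        return 1;
    }

    ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
                PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (ring == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    printf("mapped %u bytes of RX ring\n",
           req.tp_block_size * req.tp_block_nr);
    munmap(ring, (size_t)req.tp_block_size * req.tp_block_nr);
    close(fd);
    return 0;
}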
467858.c
/* * FreeSWITCH Modular Media Switching Software Library / Soft-Switch Application * Copyright (C) 2005-2014, Anthony Minessale II <[email protected]> * * Version: MPL 1.1 * * The contents of this file are subject to the Mozilla Public License Version * 1.1 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is FreeSWITCH Modular Media Switching Software Library / Soft-Switch Application * * The Initial Developer of the Original Code is * Anthony Minessale II <[email protected]> * Portions created by the Initial Developer are Copyright (C) * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Anthony Minessale II <[email protected]> * Neal Horman <neal at wanlink dot com> * * * mod_random.c -- entropy source module * */ #include <switch.h> /* Prototypes */ SWITCH_MODULE_SHUTDOWN_FUNCTION(mod_random_shutdown); SWITCH_MODULE_RUNTIME_FUNCTION(mod_random_runtime); SWITCH_MODULE_LOAD_FUNCTION(mod_random_load); SWITCH_MODULE_DEFINITION(mod_random, mod_random_load, mod_random_shutdown, mod_random_runtime); static int RUNNING = 0; static const char *random_device_files[] = { "/dev/hwrandom", "/dev/random", NULL }; const char *random_device_file = NULL; static void event_handler(switch_event_t *event); SWITCH_MODULE_LOAD_FUNCTION(mod_random_load) { #ifdef WIN32 switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "%s missing window support\n", modname); return SWITCH_STATUS_NOTIMPL; #endif int i = 0; for(i = 0 ;random_device_files[i]; i++) { if (switch_file_exists(random_device_files[i], pool) == SWITCH_STATUS_SUCCESS) { random_device_file = random_device_files[i]; break; } } if (!random_device_file) { switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "%s can't locate a random device file\n", modname); return SWITCH_STATUS_FALSE; } if ((switch_event_bind(modname, SWITCH_EVENT_ALL, NULL, event_handler, NULL) != SWITCH_STATUS_SUCCESS)) { switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "Couldn't bind!\n"); return SWITCH_STATUS_TERM; } /* connect my internal structure to the blank pointer passed to me */ *module_interface = switch_loadable_module_create_module_interface(pool, modname); RUNNING = 1; /* indicate that the module should continue to be loaded */ return SWITCH_STATUS_SUCCESS; } SWITCH_MODULE_SHUTDOWN_FUNCTION(mod_random_shutdown) { switch_event_unbind_callback(event_handler); RUNNING = 0; return SWITCH_STATUS_SUCCESS; } #if WIN32 SWITCH_MODULE_RUNTIME_FUNCTION(mod_random_runtime) { RUNNING = 0; return SWITCH_STATUS_TERM; } #else #include <unistd.h> #include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <errno.h> #include <syslog.h> #include <sys/ioctl.h> #include <sys/poll.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/time.h> #include <time.h> #include <linux/types.h> #include <linux/random.h> #include <string.h> typedef struct { int count; int size; unsigned char *data; } entropy_t; static int random_add_entropy(int fd, void *buf, size_t size) { entropy_t e = { 0 }; int r = 0; e.count = size * 8; e.size = size; e.data = (unsigned char *) buf; if (ioctl(fd, RNDADDENTROPY, &e) != 0) { r = 1; } return r; } static int rng_read(int fd, void *buf, size_t size) { 
size_t off = 0; ssize_t r; unsigned char *bp = (unsigned char *) buf; while (size > 0) { do { r = read(fd, bp + off, size); } while ((r == -1) && (errno == EINTR)); if (r <= 0) { break; } off += r; size -= r; } return size; } static int rfd = 0; static void event_handler(switch_event_t *event) { char *buf; if (switch_event_serialize(event, &buf, SWITCH_TRUE) == SWITCH_STATUS_SUCCESS) { random_add_entropy(rfd, buf, strlen(buf)); free(buf); } } SWITCH_MODULE_RUNTIME_FUNCTION(mod_random_runtime) { unsigned char data[1024] = {0}; switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "%s Thread starting using random_device_file %s\n", modname, random_device_file); if ((rfd = open(random_device_file, O_RDWR)) < 0) { switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "%s Error opening random_device_file %s\n", modname, random_device_file); RUNNING = 0; } rng_read(rfd, data, 4); while(RUNNING) { int16_t data[64]; int i = 0; int len = sizeof(data) / 2; switch_generate_sln_silence(data, len, 1, 1); random_add_entropy(rfd, data, len); while(i < len && !data[i]) i++; if (i < len) { switch_yield(abs(data[i]) * 1000); } } if (rfd > -1) { close(rfd); } switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "%s Thread ending\n", modname); return SWITCH_STATUS_TERM; } #endif /* For Emacs: * Local Variables: * mode:c * indent-tabs-mode:t * tab-width:4 * c-basic-offset:4 * End: * For VIM: * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet */
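/*
 * Hedged standalone sketch (not FreeSWITCH code): RNDADDENTROPY credits bytes
 * to the kernel entropy pool.  The kernel's struct rand_pool_info carries the
 * payload inline after its two integer fields, so the request is allocated
 * with room for both.  The sample bytes are placeholders, and the program
 * needs CAP_SYS_ADMIN (typically root) to succeed.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/random.h>

int main(void)
{
    unsigned char sample[16] = { 0xde, 0xad, 0xbe, 0xef };  /* placeholder data */
    struct rand_pool_info *info;
    int fd = open("/dev/random", O_WRONLY);

    if (fd < 0) {
        perror("open(/dev/random)");
        return 1;
    }
    info = malloc(sizeof(*info) + sizeof(sample));
    if (!info) {
        close(fd);
        return 1;
    }
    info->entropy_count = sizeof(sample) * 8;   /* claimed entropy, in bits */
    info->buf_size = sizeof(sample);
    memcpy(info->buf, sample, sizeof(sample));  /* payload follows the header */
    if (ioctl(fd, RNDADDENTROPY, info) != 0)
        perror("RNDADDENTROPY");
    free(info);
    close(fd);
    return 0;
}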
521529.c
// Auto-generated file. Do not edit! // Template: src/f32-dwconv/up-neon.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> void xnn_f32_dwconv_minmax_ukernel_up8x4__neon( size_t channels, size_t output_width, const float** input, const float* weights, float* output, size_t input_stride, size_t output_increment, size_t input_offset, const float* zero, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max); const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min); do { const float* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const float*) ((uintptr_t) i0 + input_offset); } const float* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const float*) ((uintptr_t) i1 + input_offset); } const float* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const float*) ((uintptr_t) i2 + input_offset); } const float* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const float*) ((uintptr_t) i3 + input_offset); } input = (const float**) ((uintptr_t) input + input_stride); size_t c = channels; const float* w = weights; for (; c >= 8; c -= 8) { float32x4_t vacc0123p0 = vld1q_f32(w); w += 4; float32x4_t vacc4567p0 = vld1q_f32(w); w += 4; const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4; const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4; const float32x4_t vk0x0123 = vld1q_f32(w); w += 4; const float32x4_t vk0x4567 = vld1q_f32(w); w += 4; vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123); vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567); const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4; const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4; const float32x4_t vk1x0123 = vld1q_f32(w); w += 4; const float32x4_t vk1x4567 = vld1q_f32(w); w += 4; vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123); vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567); const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4; const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4; const float32x4_t vk2x0123 = vld1q_f32(w); w += 4; const float32x4_t vk2x4567 = vld1q_f32(w); w += 4; vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123); vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567); const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4; const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4; const float32x4_t vk3x0123 = vld1q_f32(w); w += 4; const float32x4_t vk3x4567 = vld1q_f32(w); w += 4; vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123); vacc4567p0 = vmlaq_f32(vacc4567p0, vi3x4567, vk3x4567); float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin); float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin); vacc0123 = vminq_f32(vacc0123, vmax); vacc4567 = vminq_f32(vacc4567, vmax); vst1q_f32(output, vacc0123); output += 4; vst1q_f32(output, vacc4567); output += 4; } for (; c >= 4; c -= 4) { float32x4_t vacc0123p0 = vld1q_f32(w); w += 4; const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4; const float32x4_t vk0x0123 = vld1q_f32(w + 4); vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123); const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4; const float32x4_t vk1x0123 = vld1q_f32(w + 12); vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123); const 
float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4; const float32x4_t vk2x0123 = vld1q_f32(w + 20); vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123); const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4; const float32x4_t vk3x0123 = vld1q_f32(w + 28); vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123); float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin); vacc0123 = vminq_f32(vacc0123, vmax); vst1q_f32(output, vacc0123); output += 4; } if XNN_UNLIKELY(c != 0) { float32x4_t vacc0123p0 = vld1q_f32(w); const float32x4_t vi0x0123 = vld1q_f32(i0); const float32x4_t vk0x0123 = vld1q_f32(w + 8); vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123); const float32x4_t vi1x0123 = vld1q_f32(i1); const float32x4_t vk1x0123 = vld1q_f32(w + 16); vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123); const float32x4_t vi2x0123 = vld1q_f32(i2); const float32x4_t vk2x0123 = vld1q_f32(w + 24); vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123); const float32x4_t vi3x0123 = vld1q_f32(i3); const float32x4_t vk3x0123 = vld1q_f32(w + 32); vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123); float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin); vacc0123 = vminq_f32(vacc0123, vmax); float32x2_t vacc01 = vget_low_f32(vacc0123); if (c & 2) { vst1_f32(output, vacc01); output += 2; vacc01 = vget_high_f32(vacc0123); } if (c & 1) { vst1_lane_f32(output, vacc01, 0); output += 1; } } output = (float*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
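/*
 * Hedged scalar reference (not an XNNPACK kernel): for each output pixel the
 * micro-kernel above computes, per channel, a bias plus a 4-tap dot product
 * over the gathered input rows, then clamps to [min, max].  This plain-C loop
 * states only that arithmetic; the function name and the flat weight layout
 * (all biases first, then one row of weights per tap) are invented here and
 * differ from XNNPACK's interleaved packed-weight layout.
 */
#include <stddef.h>
#include <stdio.h>

static float clampf(float v, float lo, float hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

static void dwconv_4tap_reference(size_t channels,
                                  const float *input[4],  /* 4 gathered rows */
                                  const float *weights,   /* bias then taps */
                                  float *output,
                                  float min, float max)
{
    const float *bias = weights;
    const float *k = weights + channels;   /* k[t * channels + c] */
    size_t c, t;

    for (c = 0; c < channels; c++) {
        float acc = bias[c];
        for (t = 0; t < 4; t++)
            acc += input[t][c] * k[t * channels + c];
        output[c] = clampf(acc, min, max);
    }
}

int main(void)
{
    const float row0[2] = { 1.0f, 2.0f };
    const float row1[2] = { 3.0f, 4.0f };
    const float row2[2] = { 5.0f, 6.0f };
    const float row3[2] = { 7.0f, 8.0f };
    const float *input[4] = { row0, row1, row2, row3 };
    /* bias for 2 channels, then one weight per tap per channel */
    const float weights[10] = { 0.5f, -0.5f,
                                0.1f, 0.1f, 0.2f, 0.2f,
                                0.3f, 0.3f, 0.4f, 0.4f };
    float output[2];

    dwconv_4tap_reference(2, input, weights, output, 0.0f, 10.0f);
    printf("%f %f\n", output[0], output[1]);
    return 0;
}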
56559.c
/* ---------------------------------------------------------------------- * Copyright (C) 2010-2013 ARM Limited. All rights reserved. * * $Date: 17. January 2013 * $Revision: V1.4.1 * * Project: CMSIS DSP Library * Title: arm_cmplx_mult_real_q15.c * * Description: Q15 complex by real multiplication * * Target Processor: Cortex-M4/Cortex-M3/Cortex-M0 * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * - Neither the name of ARM LIMITED nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * -------------------------------------------------------------------- */ #include "arm_math.h" /** * @ingroup groupCmplxMath */ /** * @addtogroup CmplxByRealMult * @{ */ /** * @brief Q15 complex-by-real multiplication * @param[in] *pSrcCmplx points to the complex input vector * @param[in] *pSrcReal points to the real input vector * @param[out] *pCmplxDst points to the complex output vector * @param[in] numSamples number of samples in each vector * @return none. * * <b>Scaling and Overflow Behavior:</b> * \par * The function uses saturating arithmetic. * Results outside of the allowable Q15 range [0x8000 0x7FFF] will be saturated. */ void arm_cmplx_mult_real_q15( q15_t * pSrcCmplx, q15_t * pSrcReal, q15_t * pCmplxDst, uint32_t numSamples) { q15_t in; /* Temporary variable to store input value */ #ifndef ARM_MATH_CM0_FAMILY /* Run the below code for Cortex-M4 and Cortex-M3 */ uint32_t blkCnt; /* loop counters */ q31_t inA1, inA2; /* Temporary variables to hold input data */ q31_t inB1; /* Temporary variables to hold input data */ q15_t out1, out2, out3, out4; /* Temporary variables to hold output data */ q31_t mul1, mul2, mul3, mul4; /* Temporary variables to hold intermediate data */ /* loop Unrolling */ blkCnt = numSamples >> 2u; /* First part of the processing with loop unrolling. Compute 4 outputs at a time. ** a second loop below computes the remaining 1 to 3 samples. */ while(blkCnt > 0u) { /* C[2 * i] = A[2 * i] * B[i]. */ /* C[2 * i + 1] = A[2 * i + 1] * B[i]. 
*/ /* read complex number both real and imaginary from complex input buffer */ inA1 = *__SIMD32(pSrcCmplx)++; /* read two real values at a time from real input buffer */ inB1 = *__SIMD32(pSrcReal)++; /* read complex number both real and imaginary from complex input buffer */ inA2 = *__SIMD32(pSrcCmplx)++; /* multiply complex number with real numbers */ #ifndef ARM_MATH_BIG_ENDIAN mul1 = (q31_t) ((q15_t) (inA1) * (q15_t) (inB1)); mul2 = (q31_t) ((q15_t) (inA1 >> 16) * (q15_t) (inB1)); mul3 = (q31_t) ((q15_t) (inA2) * (q15_t) (inB1 >> 16)); mul4 = (q31_t) ((q15_t) (inA2 >> 16) * (q15_t) (inB1 >> 16)); #else mul2 = (q31_t) ((q15_t) (inA1 >> 16) * (q15_t) (inB1 >> 16)); mul1 = (q31_t) ((q15_t) inA1 * (q15_t) (inB1 >> 16)); mul4 = (q31_t) ((q15_t) (inA2 >> 16) * (q15_t) inB1); mul3 = (q31_t) ((q15_t) inA2 * (q15_t) inB1); #endif // #ifndef ARM_MATH_BIG_ENDIAN /* saturate the result */ out1 = (q15_t) __SSAT(mul1 >> 15u, 16); out2 = (q15_t) __SSAT(mul2 >> 15u, 16); out3 = (q15_t) __SSAT(mul3 >> 15u, 16); out4 = (q15_t) __SSAT(mul4 >> 15u, 16); /* pack real and imaginary outputs and store them to destination */ *__SIMD32(pCmplxDst)++ = __PKHBT(out1, out2, 16); *__SIMD32(pCmplxDst)++ = __PKHBT(out3, out4, 16); inA1 = *__SIMD32(pSrcCmplx)++; inB1 = *__SIMD32(pSrcReal)++; inA2 = *__SIMD32(pSrcCmplx)++; #ifndef ARM_MATH_BIG_ENDIAN mul1 = (q31_t) ((q15_t) (inA1) * (q15_t) (inB1)); mul2 = (q31_t) ((q15_t) (inA1 >> 16) * (q15_t) (inB1)); mul3 = (q31_t) ((q15_t) (inA2) * (q15_t) (inB1 >> 16)); mul4 = (q31_t) ((q15_t) (inA2 >> 16) * (q15_t) (inB1 >> 16)); #else mul2 = (q31_t) ((q15_t) (inA1 >> 16) * (q15_t) (inB1 >> 16)); mul1 = (q31_t) ((q15_t) inA1 * (q15_t) (inB1 >> 16)); mul4 = (q31_t) ((q15_t) (inA2 >> 16) * (q15_t) inB1); mul3 = (q31_t) ((q15_t) inA2 * (q15_t) inB1); #endif // #ifndef ARM_MATH_BIG_ENDIAN out1 = (q15_t) __SSAT(mul1 >> 15u, 16); out2 = (q15_t) __SSAT(mul2 >> 15u, 16); out3 = (q15_t) __SSAT(mul3 >> 15u, 16); out4 = (q15_t) __SSAT(mul4 >> 15u, 16); *__SIMD32(pCmplxDst)++ = __PKHBT(out1, out2, 16); *__SIMD32(pCmplxDst)++ = __PKHBT(out3, out4, 16); /* Decrement the numSamples loop counter */ blkCnt--; } /* If the numSamples is not a multiple of 4, compute any remaining output samples here. ** No loop unrolling is used. */ blkCnt = numSamples % 0x4u; while(blkCnt > 0u) { /* C[2 * i] = A[2 * i] * B[i]. */ /* C[2 * i + 1] = A[2 * i + 1] * B[i]. */ in = *pSrcReal++; /* store the result in the destination buffer. */ *pCmplxDst++ = (q15_t) __SSAT((((q31_t) (*pSrcCmplx++) * (in)) >> 15), 16); *pCmplxDst++ = (q15_t) __SSAT((((q31_t) (*pSrcCmplx++) * (in)) >> 15), 16); /* Decrement the numSamples loop counter */ blkCnt--; } #else /* Run the below code for Cortex-M0 */ while(numSamples > 0u) { /* realOut = realA * realB. */ /* imagOut = imagA * realB. */ in = *pSrcReal++; /* store the result in the destination buffer. */ *pCmplxDst++ = (q15_t) __SSAT((((q31_t) (*pSrcCmplx++) * (in)) >> 15), 16); *pCmplxDst++ = (q15_t) __SSAT((((q31_t) (*pSrcCmplx++) * (in)) >> 15), 16); /* Decrement the numSamples loop counter */ numSamples--; } #endif /* #ifndef ARM_MATH_CM0_FAMILY */ } /** * @} end of CmplxByRealMult group */
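/*
 * Hedged portable sketch (not CMSIS code): the per-sample arithmetic above is
 * a Q15 fixed-point multiply, i.e. (a * b) >> 15 saturated to 16 bits.  This
 * plain-C helper reproduces that rule so a SIMD path could be checked against
 * it; the function name is invented.
 */
#include <stdint.h>
#include <stdio.h>

static int16_t q15_mul_sat(int16_t a, int16_t b)
{
    int32_t p = ((int32_t) a * (int32_t) b) >> 15;

    if (p > INT16_MAX)
        p = INT16_MAX;
    if (p < INT16_MIN)
        p = INT16_MIN;
    return (int16_t) p;
}

int main(void)
{
    /* 0.5 * 0.5 = 0.25 in Q15: (16384 * 16384) >> 15 = 8192 */
    printf("%d\n", q15_mul_sat(16384, 16384));
    /* -1.0 * -1.0 overflows the Q15 range and saturates to 0x7FFF */
    printf("%d\n", q15_mul_sat(-32768, -32768));
    return 0;
}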
893617.c
/* * This patch Copyright (C) 2010 by James Nobis - quel * - quel NOSPAM quelrod NOSPAM net, and it is herby released to the general * public under the follow terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * format specification * http://www.hmailserver.com/forum/viewtopic.php?p=97515&sid=b2c1c6ba1e10c2f0654ca9421b2059e8#p97515 * inspiration from the generic sha-1 and md5 * Copyright (c) 2010 by Solar Designer * * JimF Feb, 2015: converted into a 'thin' format, hooked to dynamic_61 */ #if AC_BUILT #include "autoconfig.h" #endif #ifndef DYNAMIC_DISABLED #if FMT_EXTERNS_H extern struct fmt_main fmt_hmailserver; #elif FMT_REGISTERS_H john_register_one(&fmt_hmailserver); #else #include "sha2.h" #include "params.h" #include "common.h" #include "formats.h" #include "dynamic.h" #include "memdbg.h" #define FORMAT_LABEL "hMailServer" #define FORMAT_NAME "" #define ALGORITHM_NAME "?" /* filled in by dynamic */ #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 // set PLAINTEXT_LENGTH to 0, so dyna will set this #define PLAINTEXT_LENGTH 0 #define CIPHERTEXT_LENGTH 64 #define BINARY_SIZE 32 #define DYNA_BINARY_SIZE 16 #define BINARY_ALIGN 4 #define SALT_SIZE 6 #define DYNA_SALT_SIZE (sizeof(char*)) #define SALT_ALIGN 4 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests hmailserver_tests[] = { {"cc06fa688a64cdeea43d3c0fb761fede7e3ccf00a9daea9c79f7d458e06f88327f16dd", "password"}, {"fee4fd4446aebcb3332aa5c61845b7bcbe5a3126fedf51a6359663d61b87d4f6ee87df", "12345678"}, {"2d7b784370c488b6548394ba11513e159220c83e2458ed01d8c7cdadd6bf486b433703", "1234"}, {"0926aadc8d49682c3f091af2dbf7f16f1cc7130b8e6dc86978d3f1bef914ce0096d4b3", "0123456789ABCDE"}, {NULL} }; static char Conv_Buf[120]; static struct fmt_main *pDynamic; static void hmailserver_init(struct fmt_main *self); static void get_ptr(); /* this function converts a 'native' phps signature string into a $dynamic_6$ syntax string */ static char *Convert(char *Buf, char *ciphertext) { if (text_in_dynamic_format_already(pDynamic, ciphertext)) return ciphertext; snprintf(Buf, sizeof(Conv_Buf), "$dynamic_61$%s$%6.6s", &ciphertext[6], ciphertext); return Buf; } static char *our_split(char *ciphertext, int index, struct fmt_main *self) { get_ptr(); return pDynamic->methods.split(Convert(Conv_Buf, ciphertext), index, self); } static char *our_prepare(char *split_fields[10], struct fmt_main *self) { get_ptr(); return pDynamic->methods.prepare(split_fields, self); } static int hmailserver_valid(char *ciphertext, struct fmt_main *self) { int i; if ( ciphertext == NULL ) return 0; get_ptr(); i = strlen( ciphertext ); if (i != CIPHERTEXT_LENGTH+SALT_SIZE) return pDynamic->methods.valid(ciphertext, pDynamic); return 
pDynamic->methods.valid(Convert(Conv_Buf, ciphertext), pDynamic); } static void * our_salt(char *ciphertext) { get_ptr(); return pDynamic->methods.salt(Convert(Conv_Buf, ciphertext)); } static void * our_binary(char *ciphertext) { get_ptr(); return pDynamic->methods.binary(Convert(Conv_Buf, ciphertext)); } struct fmt_main fmt_hmailserver = { { // setup the labeling and stuff. NOTE the max and min crypts are set to 1 // here, but will be reset within our init() function. FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, DYNA_BINARY_SIZE, BINARY_ALIGN, DYNA_SALT_SIZE, SALT_ALIGN, 1, 1, FMT_CASE | FMT_8_BIT | FMT_DYNAMIC, { NULL }, { NULL }, hmailserver_tests }, { /* All we setup here, is the pointer to valid, and the pointer to init */ /* within the call to init, we will properly set this full object */ hmailserver_init, fmt_default_done, fmt_default_reset, our_prepare, hmailserver_valid, our_split } }; static void link_funcs() { fmt_hmailserver.methods.salt = our_salt; fmt_hmailserver.methods.binary = our_binary; fmt_hmailserver.methods.split = our_split; fmt_hmailserver.methods.prepare = our_prepare; } static void hmailserver_init(struct fmt_main *self) { if (self->private.initialized == 0) { get_ptr(); pDynamic->methods.init(pDynamic); self->private.initialized = 1; } } static void get_ptr() { if (!pDynamic) { pDynamic = dynamic_THIN_FORMAT_LINK(&fmt_hmailserver, Convert(Conv_Buf, hmailserver_tests[0].ciphertext), "hmailserver", 0); link_funcs(); } } #endif /* plugin stanza */ #endif /* DYNAMIC_DISABLED */
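/*
 * Editor's note: the block below is not part of the John the Ripper source. It
 * is a standalone sketch of the string rewrite performed by Convert() above:
 * the native hMailServer hash is a 6-character salt followed by 64 hex
 * characters of SHA-256, and it is re-tagged as "$dynamic_61$<hash>$<salt>".
 * The sample hash is the first entry of hmailserver_tests[] (plaintext
 * "password").
 */
#include <stdio.h>

int main(void)
{
    const char *native =
        "cc06fa688a64cdeea43d3c0fb761fede7e3ccf00a9daea9c79f7d458e06f88327f16dd";
    char converted[120];

    /* Same layout as Convert(): the hash starts at offset 6, the salt is the
     * first 6 characters. */
    snprintf(converted, sizeof(converted), "$dynamic_61$%s$%6.6s",
             native + 6, native);
    printf("%s\n", converted);
    return 0;
}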
209564.c
/* * The MIT License (MIT) * * Copyright (c) 2017 Nels D. "Chip" Pearson (aka CmdrZin) * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * ProgTimer.c * * Created: 1/30/2017 0.01 ndp * Author : Chip */ #include <avr/io.h> #include <avr/interrupt.h> #include <stdbool.h> #include <avr/sleep.h> #include <avr/wdt.h> #include <avr/pgmspace.h> #include "sysTimer.h" #include "io_led_button.h" #include "serial.h" #include "mod_comms.h" #include "mod_audio.h" #include "mod_eventList.h" #include "mod_epLog.h" void triggerBeepTone(uint8_t n); #define B_ACK IO_GREEN_BUTTON // (0x01) #define B_SNOOZE IO_RED_BUTTON // (0x02) #define M_TWOSECTIME 200 #define M_200MSTIME 20 typedef enum pStates {S_NORMAL, S_PRECHECK, S_EVENT_AWAKE, S_EVENT_ASLEEP, S_SNOOZE_AWAKE, S_SNOOZE_ASLEEP} E_STATE; //#define S_ACK 10 //#define S_SNOOZE 20 const char version_date[] PROGMEM = {"0.51 2017-MAY-08 17:00:00"}; // svn 6 bool goToSleep; uint16_t awakeCount; char mBuff[9]; char mCycle = 0; E_STATE eState; bool preCheck; int main(void) { char eTone = 0; uint8_t buttons; uint8_t oneMinTime = 1; uint16_t tenSecTime = 1; uint8_t tenMinTime = 1; uint8_t tenMinCount = 1; bool ledToggle = false; bool skipSleep; // turn OFF watchdog timer to reduce power. wdt_disable(); st_init_tmr0(); // set up timers st_init_tmr2(); mod_io_init(); // set up Button and LED pins. initUsart(); // set up USART mode 9600 8N1 ma_init(); // set up audio me_init(); // set up EEPROM logging sei(); preCheck = true; // Self Check mode. // set up SLEEP set_sleep_mode(SLEEP_MODE_PWR_SAVE); goToSleep = true; awakeCount = M_TWOSECTIME; // N * 10ms eState = S_PRECHECK; // Start in PreCheck mode while (1) { mod_io_service(); // Scan buttons mod_comms_service(); ma_service(); #if 1 if(ma_isPlaying()) { skipSleep = true; } else { skipSleep = false; } if(goToSleep && !skipSleep) { // Don't go to sleep until Tune/Beep finishes. cli(); sleep_enable(); sei(); sleep_cpu(); // sleep for 8s based on Timer2 setup. // wait for Timer2 interrupt to wake up. // Decide which LED(s) to blink on wake up. switch(eState) { case S_NORMAL: mod_io_setBlink(5, IO_GREEN_LED); break; case S_PRECHECK: if(ledToggle) { mod_io_setBlink(5, IO_GREEN_LED); } else { mod_io_setBlink(5, IO_RED_LED); } ledToggle = !ledToggle; break; case S_SNOOZE_AWAKE: case S_SNOOZE_ASLEEP: mod_io_setBlink(5, IO_RED_LED); break; case S_EVENT_AWAKE: case S_EVENT_ASLEEP: mod_io_setBlink(5, IO_BOTH_LED); break; } } // stay awake if comm activity if(mod_commGetMode() != MC_IDLE) awakeCount = 1000; // reset timeout to longer. 
// Main process loop if(!skipSleep) { // Only run this block if coming out of Sleep. Simplifies logic. switch(eState) { case S_NORMAL: // Allow for LED blink after waking. AND 2 sec comms window. goToSleep = false; if( GPIOR0 & (1<<DEV_10MS_TIC) ) { GPIOR0 &= ~(1<<DEV_10MS_TIC); if(--awakeCount == 0) { awakeCount = M_TWOSECTIME; goToSleep = true; // sleep the 6 seconds left. } } eTone = mod_eventList_check(); // returns seconds ones value on hit, else returns 10 if( (eTone < 0) || (eTone > 9) ) { // No Event } else { // Valid Tone/Tune select is 0->9 triggerBeepTone(eTone); // Init timers and counters oneMinTime = 10; // based on ten second timer awakeCount = M_TWOSECTIME; // based on 10ms TIC tenMinTime = 75; // based on 8 second sleep. tenMinCount = 3; // maximum number of 10 min waits tenSecTime = 1000; // uses 10ms TIC goToSleep = false; // use manual time control eState = S_EVENT_AWAKE; } break; case S_PRECHECK: // Waiting for PreCheck ACK. Check for seconds after waking, then go back to sleep. goToSleep = false; if( GPIOR0 & (1<<DEV_10MS_TIC) ) { GPIOR0 &= ~(1<<DEV_10MS_TIC); if(--awakeCount == 0) { awakeCount = M_TWOSECTIME; goToSleep = true; // sleep the 6 seconds left. } } // ACK is checked for 2 seconds after wake up. buttons = mod_io_getButtons(); if(buttons & B_ACK) { ma_clearAudio(); triggerBeepTone(0); // triggerBeepTone(4); eState = S_NORMAL; goToSleep = true; } break; case S_EVENT_AWAKE: // under manual control, not sleeping goToSleep = false; // Processing EVENT Trigger // ACK or Snooze button can be pressed an ANY time. buttons = mod_io_getButtons(); if(buttons & B_ACK) { ma_clearAudio(); triggerBeepTone(0); me_store1(EPT_ACK, me_getLastEvent()); // Store Event as ACK'd. eState = S_NORMAL; goToSleep = true; break; } if(buttons & B_SNOOZE) { ma_clearAudio(); triggerBeepTone(2); eState = S_SNOOZE_ASLEEP; // oneMin and twoSec already set up. tenMinTime = 75; // based on 8 second sleep. goToSleep = true; break; } // Play Tune every 10 seconds // After 1 minute. go to sleep for 10 minutes. repeat 3 times. if( GPIOR0 & (1<<DEV_10MS_TIC) ) { GPIOR0 &= ~(1<<DEV_10MS_TIC); if(--tenSecTime == 0) { // Ten second timeout. Play tune/tone every 10 seconds. tenSecTime = 1000; triggerBeepTone(eTone); // replay tune. if(--oneMinTime == 0) { // One minute timeout oneMinTime = 10; // No response. Go to sleep for 10 minutes. awakeCount = 200; tenMinTime = 75; // uses 8 sec sleep timer eState = S_EVENT_ASLEEP; goToSleep = true; } } } break; case S_EVENT_ASLEEP: // allow 2 seconds to detect ACK or Snooze button, then go to sleep for the remaining 6 seconds. if(awakeCount != 0) { goToSleep = false; // ACK or Snooze button can be pressed within 2 seconds after awake. buttons = mod_io_getButtons(); if(buttons & B_ACK) { ma_clearAudio(); triggerBeepTone(0); me_store1(EPT_ACK, me_getLastEvent()); // Store Event as ACK'd. eState = S_NORMAL; goToSleep = true; break; } if(buttons & B_SNOOZE) { ma_clearAudio(); triggerBeepTone(2); eState = S_SNOOZE_ASLEEP; // Wait until Snooze ends or is ACK'd to store to Log. goToSleep = true; // Go back to sleep while in Snooze break; } // Manual timer control for 2 seconds if( GPIOR0 & (1<<DEV_10MS_TIC) ) { GPIOR0 &= ~(1<<DEV_10MS_TIC); if(--awakeCount == 0) { goToSleep = true; // leave manual control. Sleep for last 6 seconds. 
} } } else { // awakeCount == 0 to get here awakeCount = M_TWOSECTIME; // to trigger above section // decrement time every 8 seconds and counts on wake up if(--tenMinTime == 0) { tenMinTime = 75; // ten minute time out if(--tenMinCount == 0) { tenMinCount = 3; // gone through 3 ten minute periods. Log it. me_store1(EPT_NOACTION, me_getLastEvent()); eState = S_NORMAL; goToSleep = true; break; } else { // Wake up again after 10 minutes. tenSecTime = 1; // immediate trigger of tone. eState = S_EVENT_AWAKE; goToSleep = true; break; } } goToSleep = false; // to get to above section on next loop } break; case S_SNOOZE_AWAKE: // Only Snooze for 10 min. No button checks. // Check ACK for 2 seconds after waking. goToSleep = false; // use manual control // ACK is checked for 1 minute after 8s wakeup while playing tune. buttons = mod_io_getButtons(); if(buttons & B_ACK) { ma_clearAudio(); triggerBeepTone(0); me_store2(EPT_SNOOZE, me_getLastEvent(), st_getLocalTime()); // Store Event as Snooze then ACK'd. eState = S_NORMAL; goToSleep = true; break; } // Play tune every 10 seconds // After 1 minute. go to sleep for 10 minutes. if( GPIOR0 & (1<<DEV_10MS_TIC) ) { GPIOR0 &= ~(1<<DEV_10MS_TIC); if(--tenSecTime == 0) { // Ten second timeout. Play tune/tone every 10 seconds. tenSecTime = 1000; triggerBeepTone(eTone); // replay tune. if(--oneMinTime == 0) { // One minute timeout oneMinTime = 10; // No response. Go to sleep for 10 minutes. awakeCount = M_TWOSECTIME; tenMinTime = 75; // uses 8 sec sleep timer eState = S_SNOOZE_ASLEEP; goToSleep = true; } } } break; case S_SNOOZE_ASLEEP: // ACK is checked for 2 seconds after 8s wakeup. buttons = mod_io_getButtons(); if(buttons & B_ACK) { ma_clearAudio(); triggerBeepTone(0); me_store2(EPT_SNOOZE, me_getLastEvent(), st_getLocalTime()); // Store Event as Snooze then ACK'd. eState = S_NORMAL; goToSleep = true; break; } // Allow for ACK check and LED blink for 2s after waking, then go back to sleep. goToSleep = false; if( GPIOR0 & (1<<DEV_10MS_TIC) ) { GPIOR0 &= ~(1<<DEV_10MS_TIC); if(--awakeCount == 0) { awakeCount = M_TWOSECTIME; // Stay asleep for 10 seconds more // decrement time and counts on wake up if(--tenMinTime == 0) { // ten minute time out tenMinTime = 75; triggerBeepTone(eTone); // beep/tune again after 10 min eState = S_SNOOZE_AWAKE; } goToSleep = true; // sleep the 7.8 seconds left. } } break; default: eState = S_NORMAL; break; } // switch(eState) } // if(!skipSleep) #else if(goToSleep) { if(!ma_isPlaying()) { // Don't go to sleep until Tune/Beep finishes. cli(); sleep_enable(); sei(); sleep_cpu(); // sleep for 8s based on Timer2 setup. // wait for intr to wake up. if(!eTempEvent) { eTone = mod_eventList_check(); // returns seconds ones value on hit, else returns 10 if(eTone != 10) { eState = 1; // Event time hit. } else { eState = 0; } } // Use eState to further process. 
goToSleep = false; if(eState == S_SNOOZE) { mod_io_setBlink(5, IO_RED_LED); // N * 1ms } else { if(ledToggle || !preCheck) { ledToggle = false; mod_io_setBlink(5, IO_GREEN_LED); // N * 1ms } else { ledToggle = true; mod_io_setBlink(5, IO_RED_LED); // N * 1ms } } } } else { if(preCheck) { if( GPIOR0 & (1<<DEV_10MS_TIC) ) { GPIOR0 &= ~(1<<DEV_10MS_TIC); if( --awakeCount == 0) { awakeCount = 200; goToSleep = true; } } buttons = mod_io_getButtons(); if(buttons & B_ACK) { triggerBeepTone(0); // triggerBeepTone(4); eTempEvent = false; goToSleep = true; eState = 0; preCheck = false; } } else if( (eState == 0) && !goToSleep ) { // Normal 2 second awake checks // Stay awake for at least 2s if( GPIOR0 & (1<<DEV_10MS_TIC) ) { GPIOR0 &= ~(1<<DEV_10MS_TIC); if( --awakeCount == 0) { awakeCount = 200; goToSleep = true; } } // stay awake if comm activity if(mod_commGetMode() != MC_IDLE) awakeCount = 1000; // reset timeout to longer. } else { // Process Event UI buttons = mod_io_getButtons(); switch(eState) { // Event Triggered case 1: oneMinTime = 30; // based on two second timer twoSecTime = 200; // based on 10ms TIC tenMinTime = 75; // based on 8 second sleep. tenMinCount = 3; triggerBeepTone(eTone); eState = 2; break; // Waiting for any button - 1 minute case 2: if(buttons & B_ACK) { ma_clearAudio(); eState = S_ACK; break; } if(buttons & B_SNOOZE) { ma_clearAudio(); eState = S_SNOOZE; // oneMin and twoSec already set up. tenMinTime = 75; // based on 8 second sleep. eTempEvent = true; // Use sleep timer for long No Action wait. goToSleep = true; break; } // Check 1 minute timeout if( GPIOR0 & (1<<DEV_10MS_TIC) ) { GPIOR0 &= ~(1<<DEV_10MS_TIC); if( --twoSecTime == 0) { twoSecTime = 200; triggerBeepTone(eTone); if( --oneMinTime == 0) { oneMinTime = 30; tenMinTime = 75; // based on 8 second sleep. eTempEvent = true; // Use sleep timer for long No Action wait. goToSleep = true; eState = 3; } } } break; // Waiting 10 min - Uses 8 second sleep time case 3: if( --tenMinTime == 0 ) { tenMinTime = 75; if( --tenMinCount == 0 ) { tenMinCount = 3; // No response in 3x ten minute waits. me_store1(EPT_NOACTION, me_getLastEvent()); eTempEvent = false; goToSleep = true; } else { eState = 2; } } break; // Waiting 10 min in Snooze mode case S_SNOOZE: if(buttons & B_ACK) { triggerBeepTone(0); me_store2(EPT_SNOOZE, me_getLastEvent(), st_getLocalTime()); // Store Event as Snooze then ACK'd. eTempEvent = false; goToSleep = true; eState = 0; break; } // Stay awake for at least 2s if(!goToSleep) { // just woke up if( GPIOR0 & (1<<DEV_10MS_TIC) ) { GPIOR0 &= ~(1<<DEV_10MS_TIC); if( --awakeCount == 0) { awakeCount = 200; goToSleep = true; } } } if(goToSleep) { if( --tenMinTime == 0 ) { tenMinTime = 75; if( --tenMinCount == 0 ) { tenMinCount = 3; // No response in 3x ten minute waits. eTempEvent = false; } else { // DON'T go back to Sleep. Going to do one min beeps. goToSleep = false; oneMinTime = 30; // based on two second timer twoSecTime = 200; // based on 10ms TIC eState = 2; } } } break; case S_ACK: triggerBeepTone(0); me_store1(EPT_ACK, me_getLastEvent()); // Store Event as ACK'd. eTempEvent = false; goToSleep = true; eState = 0; break; default: break; } } } #endif } // while() } void setSelfCheck() { preCheck = true; } /* * Configure Beep or Tune to play. 
*/ void triggerBeepTone(uint8_t tone) { switch(tone) { case 0: ma_setService(MA_SERVICE_BEEP); ma_setBeepParam(MA_BEEP_4k, 250, 2); break; case 1: ma_setService(MA_SERVICE_BEEP); ma_setBeepParam(MA_BEEP_5k, 250, 3); break; case 2: ma_setService(MA_SERVICE_BEEP); ma_setBeepParam(MA_BEEP_3k, 250, 3); break; case 3: ma_setService(MA_SERVICE_SCORE6); break; case 4: ma_setService(MA_SERVICE_SCORE7); break; case 5: ma_setService(MA_SERVICE_SCORE2); break; case 6: ma_setService(MA_SERVICE_SCORE3); break; case 7: ma_setService(MA_SERVICE_SCORE4); break; case 8: ma_setService(MA_SERVICE_SCORE5); break; case 9: ma_setService(MA_SERVICE_SCORE1); break; default: ma_setService(MA_SERVICE_BEEP); ma_setBeepParam(MA_BEEP_4k, 250, 1); break; } }
140743.c
/*- * Copyright (c) 2011, Oleksandr Tymoshenko <[email protected]> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <sys/types.h> #include <sys/systm.h> #include <sys/param.h> #include <sys/kernel.h> #include <vm/uma.h> #include <machine/octeon_cop2.h> static uma_zone_t ctxzone; static void octeon_cop2_init(void* dummy) { printf("Create COP2 context zone\n"); ctxzone = uma_zcreate("COP2 context", sizeof(struct octeon_cop2_state), NULL, NULL, NULL, NULL, 8, 0); } struct octeon_cop2_state * octeon_cop2_alloc_ctx() { return uma_zalloc(ctxzone, M_NOWAIT); } void octeon_cop2_free_ctx(struct octeon_cop2_state *ctx) { uma_zfree(ctxzone, ctx); } SYSINIT(octeon_cop2, SI_SUB_CPU, SI_ORDER_FIRST, octeon_cop2_init, NULL);
167005.c
/*
 * clang -Wall -Wextra -pedantic -std=c11 -lgc boehmgc.c -o gctest
 */
#include <stdio.h>
#include <stdlib.h>
#include <gc.h>

struct foo {
    int x;
    int y;
    double z;
};

struct foo *extra(void)
{
    struct foo *g = GC_malloc(sizeof(struct foo));
    g->x = 0;
    g->y = 1;
    g->z = 2.4;
    return g;
}

void extra2(void)
{
    struct foo *g = GC_malloc(sizeof(struct foo));
    g->x = 0;
    g->y = 1;
    g->z = 2.4;
}

int main(void)
{
    GC_init();

    struct foo *f = GC_malloc(sizeof(struct foo));
    f->x = 0;
    f->y = 1;
    f->z = 1.5;

    struct foo *g = extra();
    extra2();

    printf("%d %d %f\n", f->x, f->y, f->z);
    printf("%d %d %f\n", g->x, g->y, g->z);
    return 0;
}
327257.c
/* * Copyright (C) 2015-2017 Alibaba Group Holding Limited */ #include <k_api.h> #include <assert.h> #include <stdio.h> void soc_hw_timer_init() { } #if (RHINO_CONFIG_USER_HOOK > 0) void krhino_idle_pre_hook(void) { } void krhino_idle_hook(void) { } void krhino_init_hook(void) { #if (RHINO_CONFIG_HW_COUNT > 0) soc_hw_timer_init(); #endif } #endif void krhino_start_hook(void) { #if (RHINO_CONFIG_SYS_STATS > 0) krhino_task_sched_stats_reset(); #endif } void krhino_task_create_hook(ktask_t *task) { #if XCHAL_CP_NUM > 0 krhino_task_info_set(task, 0, (void *)((uint32_t)task->task_stack + XT_STK_FRMSZ)); #endif } void krhino_task_del_hook(ktask_t *task, res_free_t *arg) { printf("%s del success!\n\r", task->task_name); } void krhino_task_switch_hook(ktask_t *orgin, ktask_t *dest) { (void)orgin; (void)dest; } void krhino_tick_hook(void) { } void krhino_task_abort_hook(ktask_t *task) { (void)task; } void krhino_mm_alloc_hook(void *mem, size_t size) { (void)mem; (void)size; } #if (RHINO_CONFIG_MM_TLF > 0) #if defined ( __GNUC__ ) extern void *heap_start; extern void *heap_end; extern void *heap_len; /* heap_start and heap_len is set by linkscript(*.ld) */ k_mm_region_t g_mm_region[] = {{(uint8_t*)&heap_start,(size_t)&heap_len}}; #else #define HEAP_BUFFER_SIZE 1024*120 uint8_t g_heap_buf[HEAP_BUFFER_SIZE]; k_mm_region_t g_mm_region[] = {g_heap_buf, HEAP_BUFFER_SIZE}; #endif /* __GNUC__ */ int g_region_num = sizeof(g_mm_region)/sizeof(k_mm_region_t); #endif /* RHINO_CONFIG_MM_TLF */ void soc_err_proc(kstat_t err) { (void)err; assert(0); } krhino_err_proc_t g_err_proc = soc_err_proc;
912966.c
/*
** tests-my_revstr.c for tests-my_revstr in /home/RODRIG_1/rendu/Piscine_C_J06/tests
**
** Made by rodriguez gwendoline
** Login   <[email protected]>
**
** Started on  Tue Oct 7 11:23:38 2014 rodriguez gwendoline
** Last update Tue Oct 7 11:24:48 2014 rodriguez gwendoline
*/

/* Declared here so the test builds cleanly; the functions themselves live in
** the exercise's own source files. */
void my_putstr(char *str);
char *my_revstr(char *str);

int main(void)
{
    char str[] = "hello\n";

    my_putstr(str);
    my_revstr(str);
    my_putstr(str);
    return (0);
}
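/*
 * Editor's note: the definitions below are not part of the original test
 * file. The test links against my_putstr() and my_revstr() from the
 * exercise's own sources, which are not included here; this is a minimal
 * sketch of both, assuming the usual piscine prototypes (write-based output,
 * in-place reversal that returns the string).
 */
#include <unistd.h>

void my_putstr(char *str)
{
    int len;

    len = 0;
    while (str[len] != '\0')
        len = len + 1;
    write(1, str, len);
}

char *my_revstr(char *str)
{
    int i;
    int j;
    char tmp;

    j = 0;
    while (str[j] != '\0')
        j = j + 1;
    i = 0;
    while (i < --j)
    {
        tmp = str[i];
        str[i] = str[j];
        str[j] = tmp;
        i = i + 1;
    }
    return (str);
}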
626982.c
#include QMK_KEYBOARD_H // Each layer gets a name for readability, which is then used in the keymap matrix below. // The underscores don't mean anything - you can have a layer called STUFF or any other name. // Layer names don't all need to be of the same length, obviously, and you can also skip them // entirely and just use numbers. #define _BL 0 #define _FL 1 const uint16_t PROGMEM keymaps[][MATRIX_ROWS][MATRIX_COLS] = { /* Keymap _BL: (Base Layer) Default Layer * ,----------------------------------------------------------------. * |Esc | 1| 2| 3| 4| 5| 6| 7| 8| 9| 0| -| =|Backsp |~ ` | * |----------------------------------------------------------------| * |Tab | Q| W| E| R| T| Y| U| I| O| P| [| ]| \ |Del | * |----------------------------------------------------------------| * |CAPS | A| S| D| F| G| H| J| K| L| ;| '|Return |PgUp| * |----------------------------------------------------------------| * |Shift | Z| X| C| V| B| N| M| ,| .| /|Shift | Up|PgDn| * |----------------------------------------------------------------| * |Ctrl|Alt |Cmd | Space | FN|Ctrl|Alt|Lef|Dow|Rig | * `----------------------------------------------------------------' */ [_BL] = LAYOUT_ansi( KC_ESC, KC_1, KC_2, KC_3, KC_4, KC_5, KC_6, KC_7, KC_8, KC_9, KC_0, KC_MINS, KC_EQL, KC_BSPC,KC_GRV, \ KC_TAB, KC_Q, KC_W, KC_E, KC_R, KC_T, KC_Y, KC_U, KC_I, KC_O, KC_P, KC_LBRC, KC_RBRC,KC_BSLS,KC_DEL, \ KC_CAPS, KC_A, KC_S, KC_D, KC_F, KC_G, KC_H, KC_J, KC_K, KC_L, KC_SCLN,KC_QUOT, KC_ENT,KC_PGUP, \ KC_LSFT, KC_Z, KC_X, KC_C, KC_V, KC_B, KC_N, KC_M, KC_COMM,KC_DOT, KC_SLSH, KC_RSFT,KC_UP,KC_PGDN, \ KC_LCTL, KC_LALT,KC_LGUI, KC_SPC, MO(_FL),KC_RCTRL,KC_RALT, KC_LEFT,KC_DOWN,KC_RGHT), /* Keymap _FL: Function Layer * ,----------------------------------------------------------------. * | ~ | F1|F2 |F3 |F4 |F5 |F6 |F7 |F8 |F9 |F10|F11|F12|Del |Ins | * |----------------------------------------------------------------| * | | |Up | | | | | |Bl-|BL |BL+| | | |Hme | * |----------------------------------------------------------------| * | |<- |Dn | ->| | | | | | | | | |End | * |----------------------------------------------------------------| * | |<< |>|||>> | | | | |VU-|VU+|MUT| McL|MsU|McR | * |----------------------------------------------------------------| * | | | | | | | |MsL|MsD|MsR | * `----------------------------------------------------------------' */ [_FL] = LAYOUT_ansi( KC_TILDE,KC_F1 ,KC_F2, KC_F3, KC_F4, KC_F5, KC_F6, KC_F7, KC_F8, KC_F9, KC_F10, KC_F11, KC_F12, KC_DEL, KC_INS , \ _______,_______, KC_UP,_______,_______, _______,_______,_______, BL_DEC, BL_TOGG,BL_INC, _______,_______, _______,KC_HOME, \ _______,KC_LEFT, KC_DOWN, KC_RIGHT, _______,_______,_______,_______,_______,_______,_______,_______, _______,KC_END, \ _______,KC_MRWD, KC_MPLY, KC_MFFD, _______,_______,_______,_______,KC_VOLD,KC_VOLU,KC_MUTE,KC_BTN1, KC_MS_U, KC_BTN2, \ _______,_______,_______, _______, _______,_______,_______,KC_MS_L,KC_MS_D, KC_MS_R), };
300511.c
/* ************************************************************************** */ /* */ /* ::: :::::::: */ /* ft_foreach.c :+: :+: :+: */ /* +:+ +:+ +:+ */ /* By: soumanso <[email protected]> +#+ +:+ +#+ */ /* +#+#+#+#+#+ +#+ */ /* Created: 2021/09/10 04:43:54 by soumanso #+# #+# */ /* Updated: 2021/09/13 18:30:24 by soumanso ### ########lyon.fr */ /* */ /* ************************************************************************** */ void ft_foreach(int *tab, int length, void (*f) (int)) { int i; i = 0; while (i < length) { f (tab[i]); i += 1; } }
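/*
 * Editor's note: the block below is not part of the original project file. It
 * is a small usage sketch for ft_foreach(), intended to be compiled and
 * linked together with ft_foreach.c above; the prototype matches the
 * definition in that file, while the print_int() helper and the use of
 * printf() are illustrative additions only.
 */
#include <stdio.h>

void ft_foreach(int *tab, int length, void (*f)(int));

static void print_int(int n)
{
    printf("%d\n", n);
}

int main(void)
{
    int tab[] = {1, 2, 3, 4, 5};

    /* Calls print_int() once per element, in order: prints 1 through 5. */
    ft_foreach(tab, 5, print_int);
    return (0);
}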
143998.c
/*
 * Exercise 4-13
 *
 * Write a recursive version of the function reverse(s), which reverses the
 * string s in place.
 */
#include <stdio.h>
#include <string.h>

#define MAXLEN 128

void reverse(char s[]);

int main(void)
{
    char s1[MAXLEN] = "";
    char s2[MAXLEN] = "Test string.";
    char s3[MAXLEN] = "Another test string.";
    char s4[MAXLEN] = "The last test string.";

    printf("%s -> ", s1);
    reverse(s1);
    printf("%s -> ", s1);
    reverse(s1);
    printf("%s\n", s1);

    printf("%s -> ", s2);
    reverse(s2);
    printf("%s -> ", s2);
    reverse(s2);
    printf("%s\n", s2);

    printf("%s -> ", s3);
    reverse(s3);
    printf("%s -> ", s3);
    reverse(s3);
    printf("%s\n", s3);

    printf("%s -> ", s4);
    reverse(s4);
    printf("%s -> ", s4);
    reverse(s4);
    printf("%s\n", s4);

    return 0;
}

void reverse(char s[])
{
    /* reverse: recursively reverses string s in place */
    static int start;
    int end;
    int tmp;

    end = strlen(s) - start - 1;
    if (start >= end) {
        start = 0;
        return;
    }
    tmp = s[start];
    s[start++] = s[end];
    s[end--] = tmp;
    reverse(s);
}
625759.c
/* * Probe for F81216A LPC to 4 UART * * Based on drivers/tty/serial/8250_pnp.c, by Russell King, et al * * Copyright (C) 2014 Ricardo Ribalda, Qtechnology A/S * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/pnp.h> #include <linux/kernel.h> #include <linux/serial_core.h> #include "8250.h" #define ADDR_PORT 0x4E #define DATA_PORT 0x4F #define ENTRY_KEY 0x77 #define EXIT_KEY 0xAA #define CHIP_ID1 0x20 #define CHIP_ID1_VAL 0x02 #define CHIP_ID2 0x21 #define CHIP_ID2_VAL 0x16 #define VENDOR_ID1 0x23 #define VENDOR_ID1_VAL 0x19 #define VENDOR_ID2 0x24 #define VENDOR_ID2_VAL 0x34 #define LDN 0x7 #define RS485 0xF0 #define RTS_INVERT BIT(5) #define RS485_URA BIT(4) #define RXW4C_IRA BIT(3) #define TXW4C_IRA BIT(2) #define DRIVER_NAME "8250_fintek" static int fintek_8250_enter_key(void){ if (!request_muxed_region(ADDR_PORT, 2, DRIVER_NAME)) return -EBUSY; outb(ENTRY_KEY, ADDR_PORT); outb(ENTRY_KEY, ADDR_PORT); return 0; } static void fintek_8250_exit_key(void){ outb(EXIT_KEY, ADDR_PORT); release_region(ADDR_PORT, 2); } static int fintek_8250_get_index(resource_size_t base_addr) { resource_size_t base[] = {0x3f8, 0x2f8, 0x3e8, 0x2e8}; int i; for (i = 0; i < ARRAY_SIZE(base); i++) if (base_addr == base[i]) return i; return -ENODEV; } static int fintek_8250_check_id(void) { outb(CHIP_ID1, ADDR_PORT); if (inb(DATA_PORT) != CHIP_ID1_VAL) return -ENODEV; outb(CHIP_ID2, ADDR_PORT); if (inb(DATA_PORT) != CHIP_ID2_VAL) return -ENODEV; outb(VENDOR_ID1, ADDR_PORT); if (inb(DATA_PORT) != VENDOR_ID1_VAL) return -ENODEV; outb(VENDOR_ID2, ADDR_PORT); if (inb(DATA_PORT) != VENDOR_ID2_VAL) return -ENODEV; return 0; } static int fintek_8250_rs485_config(struct uart_port *port, struct serial_rs485 *rs485) { uint8_t config = 0; int index = fintek_8250_get_index(port->iobase); if (index < 0) return -EINVAL; if (rs485->flags & SER_RS485_ENABLED) memset(rs485->padding, 0, sizeof(rs485->padding)); else memset(rs485, 0, sizeof(*rs485)); rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND; if (rs485->delay_rts_before_send) { rs485->delay_rts_before_send = 1; config |= TXW4C_IRA; } if (rs485->delay_rts_after_send) { rs485->delay_rts_after_send = 1; config |= RXW4C_IRA; } if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) == (!!(rs485->flags & SER_RS485_RTS_AFTER_SEND))) rs485->flags &= SER_RS485_ENABLED; else config |= RS485_URA; if (rs485->flags & SER_RS485_RTS_ON_SEND) config |= RTS_INVERT; if (fintek_8250_enter_key()) return -EBUSY; outb(LDN, ADDR_PORT); outb(index, DATA_PORT); outb(RS485, ADDR_PORT); outb(config, DATA_PORT); fintek_8250_exit_key(); port->rs485 = *rs485; return 0; } static int fintek_8250_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) { int line; struct uart_8250_port uart; int ret; if (!pnp_port_valid(dev, 0)) return -ENODEV; if (fintek_8250_get_index(pnp_port_start(dev, 0)) < 0) return -ENODEV; /* Enable configuration registers*/ if (fintek_8250_enter_key()) return -EBUSY; /*Check ID*/ ret = fintek_8250_check_id(); fintek_8250_exit_key(); if (ret) return ret; memset(&uart, 0, sizeof(uart)); if (!pnp_irq_valid(dev, 0)) return -ENODEV; uart.port.irq = pnp_irq(dev, 0); uart.port.iobase = pnp_port_start(dev, 0); uart.port.iotype = UPIO_PORT; uart.port.rs485_config = fintek_8250_rs485_config; uart.port.flags |= UPF_SKIP_TEST | 
UPF_BOOT_AUTOCONF; if (pnp_irq_flags(dev, 0) & IORESOURCE_IRQ_SHAREABLE) uart.port.flags |= UPF_SHARE_IRQ; uart.port.uartclk = 1843200; uart.port.dev = &dev->dev; line = serial8250_register_8250_port(&uart); if (line < 0) return -ENODEV; pnp_set_drvdata(dev, (void *)((long)line + 1)); return 0; } static void fintek_8250_remove(struct pnp_dev *dev) { long line = (long)pnp_get_drvdata(dev); if (line) serial8250_unregister_port(line - 1); } #ifdef CONFIG_PM static int fintek_8250_suspend(struct pnp_dev *dev, pm_message_t state) { long line = (long)pnp_get_drvdata(dev); if (!line) return -ENODEV; serial8250_suspend_port(line - 1); return 0; } static int fintek_8250_resume(struct pnp_dev *dev) { long line = (long)pnp_get_drvdata(dev); if (!line) return -ENODEV; serial8250_resume_port(line - 1); return 0; } #else #define fintek_8250_suspend NULL #define fintek_8250_resume NULL #endif /* CONFIG_PM */ static const struct pnp_device_id fintek_dev_table[] = { /* Qtechnology Panel PC / IO1000 */ { "PNP0501"}, {} }; MODULE_DEVICE_TABLE(pnp, fintek_dev_table); static struct pnp_driver fintek_8250_driver = { .name = DRIVER_NAME, .probe = fintek_8250_probe, .remove = fintek_8250_remove, .suspend = fintek_8250_suspend, .resume = fintek_8250_resume, .id_table = fintek_dev_table, }; static int fintek_8250_init(void) { return pnp_register_driver(&fintek_8250_driver); } module_init(fintek_8250_init); static void fintek_8250_exit(void) { pnp_unregister_driver(&fintek_8250_driver); } module_exit(fintek_8250_exit); MODULE_DESCRIPTION("Fintek F812164 module"); MODULE_AUTHOR("Ricardo Ribalda <[email protected]>"); MODULE_LICENSE("GPL");
185810.c
/* * Copyright (c) 2014 Brian Swetland * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files * (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <fcntl.h> #include "bootimage.h" static const char *outname = "boot.img"; static struct { const char *cmd; unsigned kind; unsigned type; } types[] = { { "lk", KIND_FILE, TYPE_LK }, { "fpga", KIND_FILE, TYPE_FPGA_IMAGE }, { "linux", KIND_FILE, TYPE_LINUX_KERNEL }, { "initrd", KIND_FILE, TYPE_LINUX_INITRD }, { "devicetree", KIND_FILE, TYPE_DEVICE_TREE }, { "sysparams", KIND_FILE, TYPE_SYSPARAMS }, { "board", KIND_BOARD, 0 }, { "build", KIND_BUILD, 0 }, { NULL, 0 }, }; void usage(const char *binary) { fprintf(stderr, "usage:\n"); fprintf(stderr, "%s [-h] [-o <output file] section:file ...\n", binary); } int process(bootimage *img, char *cmd, char *arg) { unsigned n; for (n = 0; types[n].cmd != NULL; n++) { if (strcmp(cmd, types[n].cmd)) { continue; } if (types[n].kind == KIND_FILE) { if (bootimage_add_file(img, types[n].type, arg) == NULL) { return -1; } } else { if (bootimage_add_string(img, types[n].kind, arg) == NULL) { return -1; } } return 0; } fprintf(stderr, "unknown command '%s'\n", cmd); return -1; } int main(int argc, char **argv) { const char *binary = argv[0]; bootimage *img; int fd; int count = 0; img = bootimage_init(); while (argc > 1) { char *cmd = argv[1]; char *arg = strchr(cmd, ':'); argc--; argv++; if (!strcmp(cmd, "-h") || !strcmp(cmd, "--help")) { usage(binary); return 1; } else if (!strcmp(cmd, "-o")) { outname = argv[1]; argc--; argv++; } else { if (arg == NULL) { fprintf(stderr, "error: invalid argument '%s'\n", cmd); return 1; } *arg++ = 0; if (process(img, cmd, arg)) { return 1; } count++; } } if (count == 0) { fprintf(stderr, "no sections to process\n"); return 1; } bootimage_done(img); if ((fd = open(outname, O_CREAT|O_TRUNC|O_WRONLY, 0644)) < 0) { fprintf(stderr, "error: cannot open '%s' for writing\n", outname); return 1; } if (bootimage_write(img, fd)) { fprintf(stderr, "error: failed to write '%s'\n", outname); unlink(outname); return 1; } close(fd); return 0; } // vim: set noexpandtab:
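/*
 * Editor's note: an invocation sketch, not part of the original source. The
 * section names come from the types[] table above; the input and output file
 * names are hypothetical, and the binary name simply stands in for however
 * this tool is built and installed (the code only inspects argv[0] for usage
 * output):
 *
 *   ./mkbootimage -o boot.img fpga:system.bit lk:lk.bin \
 *       linux:zImage devicetree:board.dtb initrd:ramdisk.img \
 *       board:myboard build:nightly-001
 *
 * Each "section:file" argument is split at the first ':' and dispatched
 * through process(), which either adds the named file to the image or records
 * the board/build strings in the image.
 */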
263275.c
/* * Mach Operating System * Copyright (c) 1993-1989 Carnegie Mellon University * All Rights Reserved. * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or [email protected] * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* * HISTORY * $Log: ms_task_terminate.c,v $ * Revision 2.3 93/01/24 14:25:06 danner * Corrected include of mach/mach.h to mach.h. * [93/01/16 mrt] * * Revision 2.2 92/01/03 20:36:31 dbg * Split into individual files so that user can substitute for any * one routine (e.g. vm_allocate). * [91/09/05 dbg] * * Revision 2.8 91/06/25 10:34:48 rpd * Check against MACH_SEND_INTERRUPTED instead of KERN_SUCCESS. * [91/05/20 rpd] * * Revision 2.5 90/09/09 14:34:31 rpd * Added mach_port_allocate_name since it didn't exist because of * the makefile sed. * [90/09/03 rwd] * * Revision 2.4 90/06/19 23:03:50 rpd * Added mach_port_allocate, mach_port_deallocate, mach_port_insert_right. * [90/06/02 rpd] * * Revision 2.3 90/06/02 15:12:36 rpd * Try kernel call whenever syscall fails. * [90/05/31 rpd] * * Updated for new IPC. * Removed vm_allocate_with_pager. * [90/05/31 rpd] * * Revision 2.2 90/05/29 18:40:03 rwd * New file from rfr to try traps then mig. * [90/04/20 rwd] * */ #include <mach.h> #include <mach/message.h> kern_return_t task_terminate(target_task) task_t target_task; { kern_return_t result; result = syscall_task_terminate(target_task); if (result == MACH_SEND_INTERRUPTED) result = mig_task_terminate(target_task); return(result); }
856944.c
/**
 * Debug Module for Beryllium Kernel - Reference & Actual implementation
 **/
#include <beryllium/log.h>

int main()
{
    klog(LOG_INFO, "DBG", "Debug module loaded at kernel_ns->debug\n");
}
244533.c
/* * Copyright 2006-2008, Sine Nomine Associates and others. * All Rights Reserved. * * This software has been released under the terms of the IBM Public * License. For details, see the LICENSE file in the top-level source * directory or online at http://www.openafs.org/dl/license10.html */ /* * localhost interprocess communication for servers * * currently handled by a localhost socket * (yes, this needs to be replaced someday) */ #ifndef _WIN32 #define FD_SETSIZE 65536 #endif #include <afsconfig.h> #include <afs/param.h> #include <roken.h> #include <afs/opr.h> #include <rx/xdr.h> #include <afs/afsint.h> #include <afs/errors.h> #include <rx/rx_queue.h> #include "nfs.h" #include "daemon_com.h" #include "lwp.h" #include "lock.h" #include <afs/afssyscalls.h> #include "ihandle.h" #include "vnode.h" #include "volume.h" #include "partition.h" #include "common.h" #include <rx/rx_queue.h> #ifdef USE_UNIX_SOCKETS #include <afs/afsutil.h> #include <sys/un.h> #endif int (*V_BreakVolumeCallbacks) (VolumeId); #define MAXHANDLERS 4 /* Up to 4 clients; must be at least 2, so that * move = dump+restore can run on single server */ #define MAX_BIND_TRIES 5 /* Number of times to retry socket bind */ static int SYNC_ask_internal(SYNC_client_state * state, SYNC_command * com, SYNC_response * res); /* * On AIX, connect() and bind() require use of SUN_LEN() macro; * sizeof(struct sockaddr_un) will not suffice. */ #if defined(AFS_AIX_ENV) && defined(USE_UNIX_SOCKETS) #define AFS_SOCKADDR_LEN(sa) SUN_LEN(sa) #else #define AFS_SOCKADDR_LEN(sa) sizeof(*sa) #endif /* daemon com SYNC general interfaces */ /** * fill in sockaddr structure. * * @param[in] endpoint pointer to sync endpoint object * @param[out] addr pointer to sockaddr structure * * @post sockaddr structure populated using information from * endpoint structure. */ void SYNC_getAddr(SYNC_endpoint_t * endpoint, SYNC_sockaddr_t * addr) { memset(addr, 0, sizeof(*addr)); #ifdef USE_UNIX_SOCKETS addr->sun_family = AF_UNIX; snprintf(addr->sun_path, sizeof(addr->sun_path), "%s/%s", AFSDIR_SERVER_LOCAL_DIRPATH, endpoint->un); addr->sun_path[sizeof(addr->sun_path) - 1] = '\0'; #else /* !USE_UNIX_SOCKETS */ #ifdef STRUCT_SOCKADDR_HAS_SA_LEN addr->sin_len = sizeof(struct sockaddr_in); #endif addr->sin_addr.s_addr = htonl(0x7f000001); addr->sin_family = AF_INET; /* was localhost->h_addrtype */ addr->sin_port = htons(endpoint->in); /* XXXX htons not _really_ neccessary */ #endif /* !USE_UNIX_SOCKETS */ } /** * get a socket descriptor of the appropriate domain. * * @param[in] endpoint pointer to sync endpoint object * * @return socket descriptor * * @post socket of domain specified in endpoint structure is created and * returned to caller. */ osi_socket SYNC_getSock(SYNC_endpoint_t * endpoint) { osi_socket sd; opr_Verify((sd = socket(endpoint->domain, SOCK_STREAM, 0)) >= 0); return sd; } /* daemon com SYNC client interface */ /** * open a client connection to a sync server * * @param[in] state pointer to sync client handle * * @return operation status * @retval 1 success * * @note at present, this routine aborts rather than returning an error code */ int SYNC_connect(SYNC_client_state * state) { SYNC_sockaddr_t addr; /* I can't believe the following is needed for localhost connections!! 
*/ static time_t backoff[] = { 3, 3, 3, 5, 5, 5, 7, 15, 16, 24, 32, 40, 48, 0 }; time_t *timeout = &backoff[0]; if (state->fd != OSI_NULLSOCKET) { return 1; } SYNC_getAddr(&state->endpoint, &addr); for (;;) { state->fd = SYNC_getSock(&state->endpoint); if (connect(state->fd, (struct sockaddr *)&addr, AFS_SOCKADDR_LEN(&addr)) >= 0) return 1; if (!*timeout) break; if (!(*timeout & 1)) Log("SYNC_connect: temporary failure on circuit '%s' (will retry)\n", state->proto_name); SYNC_disconnect(state); sleep(*timeout++); } perror("SYNC_connect failed (giving up!)"); return 0; } /** * forcibly disconnect a sync client handle. * * @param[in] state pointer to sync client handle * * @retval operation status * @retval 0 success */ int SYNC_disconnect(SYNC_client_state * state) { rk_closesocket(state->fd); state->fd = OSI_NULLSOCKET; return 0; } /** * gracefully disconnect a sync client handle. * * @param[in] state pointer to sync client handle * * @return operation status * @retval SYNC_OK success */ afs_int32 SYNC_closeChannel(SYNC_client_state * state) { SYNC_command com; SYNC_response res; SYNC_PROTO_BUF_DECL(ores); if (state->fd == OSI_NULLSOCKET) return SYNC_OK; memset(&com, 0, sizeof(com)); memset(&res, 0, sizeof(res)); res.payload.len = SYNC_PROTO_MAX_LEN; res.payload.buf = ores; com.hdr.command = SYNC_COM_CHANNEL_CLOSE; com.hdr.command_len = sizeof(SYNC_command_hdr); com.hdr.flags |= SYNC_FLAG_CHANNEL_SHUTDOWN; /* in case the other end dropped, don't do any retries */ state->retry_limit = 0; state->hard_timeout = 0; SYNC_ask(state, &com, &res); SYNC_disconnect(state); return SYNC_OK; } /** * forcibly break a client connection, and then create a new connection. * * @param[in] state pointer to sync client handle * * @post old connection dropped; new connection established * * @return @see SYNC_connect() */ int SYNC_reconnect(SYNC_client_state * state) { SYNC_disconnect(state); return SYNC_connect(state); } /** * send a command to a sync server and wait for a response. * * @param[in] state pointer to sync client handle * @param[in] com command object * @param[out] res response object * * @return operation status * @retval SYNC_OK success * @retval SYNC_COM_ERROR communications error * @retval SYNC_BAD_COMMAND server did not recognize command code * * @note this routine merely handles error processing; SYNC_ask_internal() * handles the low-level details of communicating with the SYNC server. 
* * @see SYNC_ask_internal */ afs_int32 SYNC_ask(SYNC_client_state * state, SYNC_command * com, SYNC_response * res) { int tries; afs_uint32 now, timeout, code=SYNC_OK; if (state->fd == OSI_NULLSOCKET) { SYNC_connect(state); } if (state->fd == OSI_NULLSOCKET) { return SYNC_COM_ERROR; } #ifdef AFS_DEMAND_ATTACH_FS com->hdr.flags |= SYNC_FLAG_DAFS_EXTENSIONS; #endif now = FT_ApproxTime(); timeout = now + state->hard_timeout; for (tries = 0; (tries <= state->retry_limit) && (now <= timeout); tries++, now = FT_ApproxTime()) { code = SYNC_ask_internal(state, com, res); if (code == SYNC_OK) { break; } else if (code == SYNC_BAD_COMMAND) { Log("SYNC_ask: protocol mismatch on circuit '%s'; make sure " "fileserver, volserver, salvageserver and salvager are same " "version\n", state->proto_name); break; } else if ((code == SYNC_COM_ERROR) && (tries < state->retry_limit)) { Log("SYNC_ask: protocol communications failure on circuit '%s'; " "attempting reconnect to server\n", state->proto_name); SYNC_reconnect(state); /* try again */ } else { /* * unknown (probably protocol-specific) response code, pass it up to * the caller, and let them deal with it */ break; } } if (code == SYNC_COM_ERROR) { Log("SYNC_ask: too many / too latent fatal protocol errors on circuit " "'%s'; giving up (tries %d timeout %d)\n", state->proto_name, tries, timeout); } return code; } /** * send a command to a sync server and wait for a response. * * @param[in] state pointer to sync client handle * @param[in] com command object * @param[out] res response object * * @return operation status * @retval SYNC_OK success * @retval SYNC_COM_ERROR communications error * * @internal */ static afs_int32 SYNC_ask_internal(SYNC_client_state * state, SYNC_command * com, SYNC_response * res) { int n; SYNC_PROTO_BUF_DECL(buf); #ifndef AFS_NT40_ENV int iovcnt; struct iovec iov[2]; #endif if (state->fd == OSI_NULLSOCKET) { Log("SYNC_ask: invalid sync file descriptor on circuit '%s'\n", state->proto_name); res->hdr.response = SYNC_COM_ERROR; goto done; } if (com->hdr.command_len > SYNC_PROTO_MAX_LEN) { Log("SYNC_ask: internal SYNC buffer too small on circuit '%s'; " "please file a bug\n", state->proto_name); res->hdr.response = SYNC_COM_ERROR; goto done; } /* * fill in some common header fields */ com->hdr.proto_version = state->proto_version; com->hdr.pkt_seq = ++state->pkt_seq; com->hdr.com_seq = ++state->com_seq; #ifdef AFS_NT40_ENV com->hdr.pid = 0; com->hdr.tid = 0; #else com->hdr.pid = getpid(); #ifdef AFS_PTHREAD_ENV com->hdr.tid = afs_pointer_to_int(pthread_self()); #else { PROCESS handle = LWP_ThreadId(); com->hdr.tid = (handle) ? 
handle->index : 0; } #endif /* !AFS_PTHREAD_ENV */ #endif /* !AFS_NT40_ENV */ memcpy(buf, &com->hdr, sizeof(com->hdr)); if (com->payload.len) { memcpy(buf + sizeof(com->hdr), com->payload.buf, com->hdr.command_len - sizeof(com->hdr)); } #ifdef AFS_NT40_ENV n = send(state->fd, buf, com->hdr.command_len, 0); if (n != com->hdr.command_len) { Log("SYNC_ask: write failed on circuit '%s'\n", state->proto_name); res->hdr.response = SYNC_COM_ERROR; goto done; } if (com->hdr.command == SYNC_COM_CHANNEL_CLOSE) { /* short circuit close channel requests */ res->hdr.response = SYNC_OK; goto done; } n = recv(state->fd, buf, SYNC_PROTO_MAX_LEN, 0); if (n == 0 || (n < 0 && WSAEINTR != WSAGetLastError())) { Log("SYNC_ask: No response on circuit '%s'\n", state->proto_name); res->hdr.response = SYNC_COM_ERROR; goto done; } #else /* !AFS_NT40_ENV */ n = write(state->fd, buf, com->hdr.command_len); if (com->hdr.command_len != n) { Log("SYNC_ask: write failed on circuit '%s'\n", state->proto_name); res->hdr.response = SYNC_COM_ERROR; goto done; } if (com->hdr.command == SYNC_COM_CHANNEL_CLOSE) { /* short circuit close channel requests */ res->hdr.response = SYNC_OK; goto done; } /* receive the response */ iov[0].iov_base = (char *)&res->hdr; iov[0].iov_len = sizeof(res->hdr); if (res->payload.len) { iov[1].iov_base = (char *)res->payload.buf; iov[1].iov_len = res->payload.len; iovcnt = 2; } else { iovcnt = 1; } n = readv(state->fd, iov, iovcnt); if (n == 0 || (n < 0 && errno != EINTR)) { Log("SYNC_ask: No response on circuit '%s'\n", state->proto_name); res->hdr.response = SYNC_COM_ERROR; goto done; } #endif /* !AFS_NT40_ENV */ res->recv_len = n; if (n < sizeof(res->hdr)) { Log("SYNC_ask: response too short on circuit '%s'\n", state->proto_name); res->hdr.response = SYNC_COM_ERROR; goto done; } #ifdef AFS_NT40_ENV memcpy(&res->hdr, buf, sizeof(res->hdr)); #endif if ((n - sizeof(res->hdr)) > res->payload.len) { Log("SYNC_ask: response too long on circuit '%s'\n", state->proto_name); res->hdr.response = SYNC_COM_ERROR; goto done; } #ifdef AFS_NT40_ENV memcpy(res->payload.buf, buf + sizeof(res->hdr), n - sizeof(res->hdr)); #endif if (res->hdr.response_len != n) { Log("SYNC_ask: length field in response inconsistent " "on circuit '%s' command %ld, %d != %lu\n", state->proto_name, afs_printable_int32_ld(com->hdr.command), n, afs_printable_uint32_lu(res->hdr.response_len)); res->hdr.response = SYNC_COM_ERROR; goto done; } if (res->hdr.response == SYNC_DENIED) { Log("SYNC_ask: negative response on circuit '%s'\n", state->proto_name); } done: return res->hdr.response; } /* * daemon com SYNC server-side interfaces */ /** * receive a command structure off a sync socket. 
* * @param[in] state pointer to server-side state object * @param[in] fd file descriptor on which to perform i/o * @param[out] com sync command object to be populated * * @return operation status * @retval SYNC_OK command received * @retval SYNC_COM_ERROR there was a socket communications error */ afs_int32 SYNC_getCom(SYNC_server_state_t * state, osi_socket fd, SYNC_command * com) { int n; afs_int32 code = SYNC_OK; #ifdef AFS_NT40_ENV SYNC_PROTO_BUF_DECL(buf); #else struct iovec iov[2]; int iovcnt; #endif #ifdef AFS_NT40_ENV n = recv(fd, buf, SYNC_PROTO_MAX_LEN, 0); if (n == 0 || (n < 0 && WSAEINTR != WSAGetLastError())) { Log("SYNC_getCom: error receiving command\n"); code = SYNC_COM_ERROR; goto done; } #else /* !AFS_NT40_ENV */ iov[0].iov_base = (char *)&com->hdr; iov[0].iov_len = sizeof(com->hdr); if (com->payload.len) { iov[1].iov_base = (char *)com->payload.buf; iov[1].iov_len = com->payload.len; iovcnt = 2; } else { iovcnt = 1; } n = readv(fd, iov, iovcnt); if (n == 0 || (n < 0 && errno != EINTR)) { Log("SYNC_getCom: error receiving command\n"); code = SYNC_COM_ERROR; goto done; } #endif /* !AFS_NT40_ENV */ com->recv_len = n; if (n < sizeof(com->hdr)) { Log("SYNC_getCom: command too short\n"); code = SYNC_COM_ERROR; goto done; } #ifdef AFS_NT40_ENV memcpy(&com->hdr, buf, sizeof(com->hdr)); #endif if ((n - sizeof(com->hdr)) > com->payload.len) { Log("SYNC_getCom: command too long\n"); code = SYNC_COM_ERROR; goto done; } #ifdef AFS_NT40_ENV memcpy(com->payload.buf, buf + sizeof(com->hdr), n - sizeof(com->hdr)); #endif done: return code; } /** * write a response structure to a sync socket. * * @param[in] state handle to server-side state object * @param[in] fd file descriptor on which to perform i/o * @param[in] res handle to response packet * * @return operation status * @retval SYNC_OK * @retval SYNC_COM_ERROR */ afs_int32 SYNC_putRes(SYNC_server_state_t * state, osi_socket fd, SYNC_response * res) { int n; afs_int32 code = SYNC_OK; SYNC_PROTO_BUF_DECL(buf); if (res->hdr.response_len > (sizeof(res->hdr) + res->payload.len)) { Log("SYNC_putRes: response_len field in response header inconsistent\n"); code = SYNC_COM_ERROR; goto done; } if (res->hdr.response_len > SYNC_PROTO_MAX_LEN) { Log("SYNC_putRes: internal SYNC buffer too small; please file a bug\n"); code = SYNC_COM_ERROR; goto done; } #ifdef AFS_DEMAND_ATTACH_FS res->hdr.flags |= SYNC_FLAG_DAFS_EXTENSIONS; #endif res->hdr.proto_version = state->proto_version; res->hdr.pkt_seq = ++state->pkt_seq; res->hdr.res_seq = ++state->res_seq; memcpy(buf, &res->hdr, sizeof(res->hdr)); if (res->payload.len) { memcpy(buf + sizeof(res->hdr), res->payload.buf, res->hdr.response_len - sizeof(res->hdr)); } #ifdef AFS_NT40_ENV n = send(fd, buf, res->hdr.response_len, 0); #else /* !AFS_NT40_ENV */ n = write(fd, buf, res->hdr.response_len); #endif /* !AFS_NT40_ENV */ if (res->hdr.response_len != n) { Log("SYNC_putRes: write failed\n"); res->hdr.response = SYNC_COM_ERROR; goto done; } done: return code; } /* return 0 for legal (null-terminated) string, * 1 for illegal (unterminated) string */ int SYNC_verifyProtocolString(char * buf, size_t len) { size_t s_len; s_len = strnlen(buf, len); return (s_len == len) ? 1 : 0; } /** * clean up old sockets. * * @param[in] state server state object * * @post unix domain sockets are cleaned up */ void SYNC_cleanupSock(SYNC_server_state_t * state) { #ifdef USE_UNIX_SOCKETS remove(state->addr.sun_path); #endif } /** * bind socket and set it to listen state. 
* * @param[in] state server state object * * @return operation status * @retval 0 success * @retval nonzero failure * * @post socket bound and set to listen state */ int SYNC_bindSock(SYNC_server_state_t * state) { int code; int on = 1; int numTries; /* Reuseaddr needed because system inexplicably leaves crud lying around */ code = setsockopt(state->fd, SOL_SOCKET, SO_REUSEADDR, (char *)&on, sizeof(on)); if (code) Log("SYNC_bindSock: setsockopt failed with (%d)\n", errno); for (numTries = 0; numTries < state->bind_retry_limit; numTries++) { code = bind(state->fd, (struct sockaddr *)&state->addr, AFS_SOCKADDR_LEN(&state->addr)); if (code == 0) break; Log("SYNC_bindSock: bind failed with (%d), will sleep and retry\n", errno); sleep(5); } listen(state->fd, state->listen_depth); return code; }
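/*
 * Editor's note: not part of the OpenAFS source. A hedged sketch of the
 * client-side call sequence implemented above, using only names this file
 * itself references (SYNC_connect, SYNC_ask, SYNC_closeChannel,
 * SYNC_PROTO_BUF_DECL, the payload fields). The full SYNC_client_state and
 * SYNC_command definitions live in daemon_com.h, so the caller is assumed to
 * have filled in the endpoint, protocol name, and retry/timeout fields, and
 * the command code shown is purely illustrative.
 */
#include <string.h>
#include "daemon_com.h"

afs_int32
example_sync_round_trip(SYNC_client_state * state, SYNC_command * com)
{
    afs_int32 code;
    SYNC_response res;
    SYNC_PROTO_BUF_DECL(res_buf);

    memset(&res, 0, sizeof(res));
    res.payload.buf = res_buf;
    res.payload.len = SYNC_PROTO_MAX_LEN;

    if (!SYNC_connect(state))            /* returns 1 on success */
        return SYNC_COM_ERROR;

    code = SYNC_ask(state, com, &res);   /* retries/reconnects internally */

    SYNC_closeChannel(state);            /* graceful shutdown of the circuit */
    return code;
}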
910435.c
/* * This file is part of the MicroPython project, http://micropython.org/ * * The MIT License (MIT) * * Copyright (c) 2014 Damien P. George * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include <stdio.h> #include <stdint.h> #include <string.h> #include "py/objlist.h" #include "py/runtime.h" #include "py/mphal.h" #include "lib/netutils/netutils.h" #include "systick.h" #include "pendsv.h" #include "modnetwork.h" #if MICROPY_PY_NETWORK #if MICROPY_PY_LWIP #include "lwip/netif.h" #include "lwip/timeouts.h" #include "lwip/dns.h" #include "lwip/dhcp.h" #include "extmod/network_cyw43.h" #include "drivers/cyw43/cyw43.h" // Poll lwIP every 128ms #define LWIP_TICK(tick) (((tick) & ~(SYSTICK_DISPATCH_NUM_SLOTS - 1) & 0x7f) == 0) u32_t sys_now(void) { return mp_hal_ticks_ms(); } STATIC void pyb_lwip_poll(void) { #if MICROPY_PY_WIZNET5K // Poll the NIC for incoming data wiznet5k_poll(); #endif // Run the lwIP internal updates sys_check_timeouts(); } void mod_network_lwip_poll_wrapper(uint32_t ticks_ms) { if (LWIP_TICK(ticks_ms)) { pendsv_schedule_dispatch(PENDSV_DISPATCH_LWIP, pyb_lwip_poll); } #if MICROPY_PY_NETWORK_CYW43 if (cyw43_poll) { if (cyw43_sleep != 0) { if (--cyw43_sleep == 0) { pendsv_schedule_dispatch(PENDSV_DISPATCH_CYW43, cyw43_poll); } } } #endif } #endif /// \module network - network configuration /// /// This module provides network drivers and routing configuration. 
void mod_network_init(void) { mp_obj_list_init(&MP_STATE_PORT(mod_network_nic_list), 0); } void mod_network_deinit(void) { } void mod_network_register_nic(mp_obj_t nic) { for (mp_uint_t i = 0; i < MP_STATE_PORT(mod_network_nic_list).len; i++) { if (MP_STATE_PORT(mod_network_nic_list).items[i] == nic) { // nic already registered return; } } // nic not registered so add to list mp_obj_list_append(MP_OBJ_FROM_PTR(&MP_STATE_PORT(mod_network_nic_list)), nic); } mp_obj_t mod_network_find_nic(const uint8_t *ip) { // find a NIC that is suited to given IP address for (mp_uint_t i = 0; i < MP_STATE_PORT(mod_network_nic_list).len; i++) { mp_obj_t nic = MP_STATE_PORT(mod_network_nic_list).items[i]; // TODO check IP suitability here //mod_network_nic_type_t *nic_type = (mod_network_nic_type_t*)mp_obj_get_type(nic); return nic; } nlr_raise(mp_obj_new_exception_msg(&mp_type_OSError, "no available NIC")); } STATIC mp_obj_t network_route(void) { return MP_OBJ_FROM_PTR(&MP_STATE_PORT(mod_network_nic_list)); } STATIC MP_DEFINE_CONST_FUN_OBJ_0(network_route_obj, network_route); STATIC const mp_rom_map_elem_t mp_module_network_globals_table[] = { { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_network) }, #if defined(MICROPY_HW_ETH_MDC) { MP_ROM_QSTR(MP_QSTR_LAN), MP_ROM_PTR(&network_lan_type) }, #endif #if MICROPY_PY_NETWORK_CYW43 { MP_ROM_QSTR(MP_QSTR_WLAN), MP_ROM_PTR(&mp_network_cyw43_type) }, #endif #if MICROPY_PY_WIZNET5K { MP_ROM_QSTR(MP_QSTR_WIZNET5K), MP_ROM_PTR(&mod_network_nic_type_wiznet5k) }, #endif #if MICROPY_PY_CC3K { MP_ROM_QSTR(MP_QSTR_CC3K), MP_ROM_PTR(&mod_network_nic_type_cc3k) }, #endif { MP_ROM_QSTR(MP_QSTR_route), MP_ROM_PTR(&network_route_obj) }, // Constants #if MICROPY_PY_NETWORK_CYW43 { MP_ROM_QSTR(MP_QSTR_STA_IF), MP_ROM_INT(CYW43_ITF_STA)}, { MP_ROM_QSTR(MP_QSTR_AP_IF), MP_ROM_INT(CYW43_ITF_AP)}, #endif }; STATIC MP_DEFINE_CONST_DICT(mp_module_network_globals, mp_module_network_globals_table); const mp_obj_module_t mp_module_network = { .base = { &mp_type_module }, .globals = (mp_obj_dict_t*)&mp_module_network_globals, }; /*******************************************************************************/ // Implementations of network methods that can be used by any interface #if MICROPY_PY_LWIP mp_obj_t mod_network_nic_ifconfig(struct netif *netif, size_t n_args, const mp_obj_t *args) { if (n_args == 0) { // Get IP addresses const ip_addr_t *dns = dns_getserver(0); mp_obj_t tuple[4] = { netutils_format_ipv4_addr((uint8_t*)&netif->ip_addr, NETUTILS_BIG), netutils_format_ipv4_addr((uint8_t*)&netif->netmask, NETUTILS_BIG), netutils_format_ipv4_addr((uint8_t*)&netif->gw, NETUTILS_BIG), netutils_format_ipv4_addr((uint8_t*)dns, NETUTILS_BIG), }; return mp_obj_new_tuple(4, tuple); } else if (args[0] == MP_OBJ_NEW_QSTR(MP_QSTR_dhcp)) { // Start the DHCP client if (dhcp_supplied_address(netif)) { dhcp_renew(netif); } else { dhcp_stop(netif); dhcp_start(netif); } // Wait for DHCP to get IP address uint32_t start = mp_hal_ticks_ms(); while (!dhcp_supplied_address(netif)) { if (mp_hal_ticks_ms() - start > 10000) { mp_raise_msg(&mp_type_OSError, "timeout waiting for DHCP to get IP address"); } mp_hal_delay_ms(100); } return mp_const_none; } else { // Release and stop any existing DHCP dhcp_release(netif); dhcp_stop(netif); // Set static IP addresses mp_obj_t *items; mp_obj_get_array_fixed_n(args[0], 4, &items); netutils_parse_ipv4_addr(items[0], (uint8_t*)&netif->ip_addr, NETUTILS_BIG); netutils_parse_ipv4_addr(items[1], (uint8_t*)&netif->netmask, NETUTILS_BIG); 
netutils_parse_ipv4_addr(items[2], (uint8_t*)&netif->gw, NETUTILS_BIG); ip_addr_t dns; netutils_parse_ipv4_addr(items[3], (uint8_t*)&dns, NETUTILS_BIG); dns_setserver(0, &dns); return mp_const_none; } } #endif #endif // MICROPY_PY_NETWORK
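#if 0
/*
 * Editor's note: illustrative sketch only, not part of the original module.
 * It shows how a port-specific NIC type could expose the shared
 * mod_network_nic_ifconfig() helper above as its bound ifconfig() method.
 * "example_nic_obj_t" and "example_nic_ifconfig" are hypothetical names, and
 * the sketch assumes MICROPY_PY_LWIP is enabled so struct netif is available.
 */
typedef struct _example_nic_obj_t {
    mp_obj_base_t base;
    struct netif netif; // lwIP interface owned by this NIC
} example_nic_obj_t;

// nic.ifconfig([(ip, netmask, gateway, dns)] or 'dhcp')
STATIC mp_obj_t example_nic_ifconfig(size_t n_args, const mp_obj_t *args) {
    example_nic_obj_t *self = MP_OBJ_TO_PTR(args[0]);
    // Skip the self argument and delegate to the generic lwIP helper.
    return mod_network_nic_ifconfig(&self->netif, n_args - 1, args + 1);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(example_nic_ifconfig_obj, 1, 2, example_nic_ifconfig);
#endif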
603397.c
/*- * Copyright (c) 2014-present MongoDB, Inc. * Copyright (c) 2008-2014 WiredTiger, Inc. * All rights reserved. * * See the file LICENSE for redistribution information. */ #include "wt_internal.h" /* * __metadata_turtle -- * Return if a key's value should be taken from the turtle file. */ static bool __metadata_turtle(const char *key) { switch (key[0]) { case 'C': if (strcmp(key, WT_METADATA_COMPAT) == 0) return (true); break; case 'f': if (strcmp(key, WT_METAFILE_URI) == 0) return (true); break; case 'W': if (strcmp(key, WT_METADATA_VERSION) == 0) return (true); if (strcmp(key, WT_METADATA_VERSION_STR) == 0) return (true); break; } return (false); } /* * __wt_metadata_turtle_rewrite -- * Rewrite the turtle file. We wrap this because the lower functions expect a URI key and config * value pair for the metadata. This function exists to push out the other contents to the * turtle file such as a change in compatibility information. */ int __wt_metadata_turtle_rewrite(WT_SESSION_IMPL *session) { WT_DECL_RET; char *value; WT_RET(__wt_metadata_search(session, WT_METAFILE_URI, &value)); ret = __wt_metadata_update(session, WT_METAFILE_URI, value); __wt_free(session, value); return (ret); } /* * __wt_metadata_cursor_open -- * Opens a cursor on the metadata. */ int __wt_metadata_cursor_open(WT_SESSION_IMPL *session, const char *config, WT_CURSOR **cursorp) { WT_BTREE *btree; WT_DECL_RET; const char *open_cursor_cfg[] = {WT_CONFIG_BASE(session, WT_SESSION_open_cursor), config, NULL}; WT_WITHOUT_DHANDLE( session, ret = __wt_open_cursor(session, WT_METAFILE_URI, NULL, open_cursor_cfg, cursorp)); WT_RET(ret); /* * Retrieve the btree from the cursor, rather than the session because we don't always switch * the metadata handle in to the session before entering this function. */ btree = CUR2BT(*cursorp); #define WT_EVICT_META_SKEW 10000 /* * Skew eviction so metadata almost always stays in cache. * * Test before setting so updates can't race in subsequent opens (the first update is safe * because it's single-threaded from wiredtiger_open). */ if (btree->evict_priority == 0) WT_WITH_BTREE(session, btree, __wt_evict_priority_set(session, WT_EVICT_META_SKEW)); return (0); } /* * __wt_metadata_cursor -- * Returns the session's cached metadata cursor, unless it's in use, in which case it opens and * returns another metadata cursor. */ int __wt_metadata_cursor(WT_SESSION_IMPL *session, WT_CURSOR **cursorp) { WT_CURSOR *cursor; /* * If we don't have a cached metadata cursor, or it's already in use, we'll need to open a new * one. */ cursor = NULL; if (session->meta_cursor == NULL || F_ISSET(session->meta_cursor, WT_CURSTD_META_INUSE)) { WT_RET(__wt_metadata_cursor_open(session, NULL, &cursor)); if (session->meta_cursor == NULL) { session->meta_cursor = cursor; cursor = NULL; } } /* * If there's no cursor return, we're done, our caller should have just been triggering the * creation of the session's cached cursor. There should not be an open local cursor in that * case, but caution doesn't cost anything. */ if (cursorp == NULL) return (cursor == NULL ? 0 : cursor->close(cursor)); /* * If the cached cursor is in use, return the newly opened cursor, else mark the cached cursor * in use and return it. */ if (F_ISSET(session->meta_cursor, WT_CURSTD_META_INUSE)) *cursorp = cursor; else { *cursorp = session->meta_cursor; F_SET(session->meta_cursor, WT_CURSTD_META_INUSE); } return (0); } /* * __wt_metadata_cursor_close -- * Close a metadata cursor. 
*/ int __wt_metadata_cursor_close(WT_SESSION_IMPL *session) { WT_DECL_RET; if (session->meta_cursor != NULL) ret = session->meta_cursor->close(session->meta_cursor); session->meta_cursor = NULL; return (ret); } /* * __wt_metadata_cursor_release -- * Release a metadata cursor. */ int __wt_metadata_cursor_release(WT_SESSION_IMPL *session, WT_CURSOR **cursorp) { WT_CURSOR *cursor; WT_UNUSED(session); if ((cursor = *cursorp) == NULL) return (0); *cursorp = NULL; /* * If using the session's cached metadata cursor, clear the in-use flag and reset it, otherwise, * discard the cursor. */ if (F_ISSET(cursor, WT_CURSTD_META_INUSE)) { WT_ASSERT(session, cursor == session->meta_cursor); F_CLR(cursor, WT_CURSTD_META_INUSE); return (cursor->reset(cursor)); } return (cursor->close(cursor)); } /* * __wt_metadata_insert -- * Insert a row into the metadata. */ int __wt_metadata_insert(WT_SESSION_IMPL *session, const char *key, const char *value) { WT_CURSOR *cursor; WT_DECL_RET; __wt_verbose(session, WT_VERB_METADATA, "Insert: key: %s, value: %s, tracking: %s, %s" "turtle", key, value, WT_META_TRACKING(session) ? "true" : "false", __metadata_turtle(key) ? "" : "not "); if (__metadata_turtle(key)) WT_RET_MSG(session, EINVAL, "%s: insert not supported on the turtle file", key); WT_RET(__wt_metadata_cursor(session, &cursor)); cursor->set_key(cursor, key); cursor->set_value(cursor, value); WT_ERR(cursor->insert(cursor)); if (WT_META_TRACKING(session)) WT_ERR(__wt_meta_track_insert(session, key)); err: WT_TRET(__wt_metadata_cursor_release(session, &cursor)); return (ret); } /* * __wt_metadata_update -- * Update a row in the metadata. */ int __wt_metadata_update(WT_SESSION_IMPL *session, const char *key, const char *value) { WT_CURSOR *cursor; WT_DECL_RET; __wt_verbose(session, WT_VERB_METADATA, "Update: key: %s, value: %s, tracking: %s, %s" "turtle", key, value, WT_META_TRACKING(session) ? "true" : "false", __metadata_turtle(key) ? "" : "not "); if (__metadata_turtle(key)) { WT_WITH_TURTLE_LOCK(session, ret = __wt_turtle_update(session, key, value)); return (ret); } if (WT_META_TRACKING(session)) WT_RET(__wt_meta_track_update(session, key)); WT_RET(__wt_metadata_cursor(session, &cursor)); /* This cursor needs to have overwrite semantics. */ WT_ASSERT(session, F_ISSET(cursor, WT_CURSTD_OVERWRITE)); cursor->set_key(cursor, key); cursor->set_value(cursor, value); WT_ERR(cursor->insert(cursor)); err: WT_TRET(__wt_metadata_cursor_release(session, &cursor)); return (ret); } /* * __wt_metadata_remove -- * Remove a row from the metadata. */ int __wt_metadata_remove(WT_SESSION_IMPL *session, const char *key) { WT_CURSOR *cursor; WT_DECL_RET; __wt_verbose(session, WT_VERB_METADATA, "Remove: key: %s, tracking: %s, %s" "turtle", key, WT_META_TRACKING(session) ? "true" : "false", __metadata_turtle(key) ? "" : "not "); if (__metadata_turtle(key)) WT_RET_MSG(session, EINVAL, "%s: remove not supported on the turtle file", key); /* * Take, release, and reacquire the metadata cursor. It's complicated, but that way the * underlying meta-tracking function doesn't have to open a second metadata cursor, it can use * the session's cached one. 
*/ WT_RET(__wt_metadata_cursor(session, &cursor)); cursor->set_key(cursor, key); WT_ERR(cursor->search(cursor)); WT_ERR(__wt_metadata_cursor_release(session, &cursor)); if (WT_META_TRACKING(session)) WT_ERR(__wt_meta_track_update(session, key)); WT_ERR(__wt_metadata_cursor(session, &cursor)); cursor->set_key(cursor, key); ret = cursor->remove(cursor); err: WT_TRET(__wt_metadata_cursor_release(session, &cursor)); return (ret); } /* * __wt_metadata_search -- * Return a copied row from the metadata. The caller is responsible for freeing the allocated * memory. */ int __wt_metadata_search(WT_SESSION_IMPL *session, const char *key, char **valuep) { WT_CURSOR *cursor; WT_DECL_RET; const char *value; *valuep = NULL; __wt_verbose(session, WT_VERB_METADATA, "Search: key: %s, tracking: %s, %s" "turtle", key, WT_META_TRACKING(session) ? "true" : "false", __metadata_turtle(key) ? "" : "not "); if (__metadata_turtle(key)) { /* * The returned value should only be set if ret is non-zero, but Coverity is convinced * otherwise. The code path is used enough that Coverity complains a lot, add an error check * to get some peace and quiet. */ WT_WITH_TURTLE_LOCK(session, ret = __wt_turtle_read(session, key, valuep)); if (ret != 0) __wt_free(session, *valuep); return (ret); } /* * All metadata reads are at read-uncommitted isolation. That's because once a schema-level * operation completes, subsequent operations must see the current version of checkpoint * metadata, or they may try to read blocks that may have been freed from a file. Metadata * updates use non-transactional techniques (such as the schema and metadata locks) to protect * access to in-flight updates. */ WT_RET(__wt_metadata_cursor(session, &cursor)); cursor->set_key(cursor, key); WT_WITH_TXN_ISOLATION(session, WT_ISO_READ_UNCOMMITTED, ret = cursor->search(cursor)); WT_ERR(ret); WT_ERR(cursor->get_value(cursor, &value)); WT_ERR(__wt_strdup(session, value, valuep)); err: WT_TRET(__wt_metadata_cursor_release(session, &cursor)); if (ret != 0) __wt_free(session, *valuep); return (ret); }
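#if 0
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * It shows the intended call pattern for the helpers above: the cached
 * metadata cursor is acquired and released around each operation, and
 * __wt_metadata_search() hands back allocated memory that the caller must
 * free. "example_log_file_config" and the "file:example.wt" URI are
 * hypothetical.
 */
static int
example_log_file_config(WT_SESSION_IMPL *session)
{
    WT_DECL_RET;
    char *config;

    config = NULL;
    /* Search allocates *valuep on success; free it when done. */
    WT_RET(__wt_metadata_search(session, "file:example.wt", &config));
    __wt_verbose(session, WT_VERB_METADATA, "example config: %s", config);
    __wt_free(session, config);
    return (ret);
}
#endif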
937373.c
/**
 * \defgroup Constants
 * \desc static script constants
 * @{
 */

/**
 * \defgroup InputDevice
 * \desc constants for input device - inputInterfaceDef.h
 * @{
 */
const int INPUT_MODULE_TYPE_MASK = 0x00700000;
const int INPUT_KEY_MASK = 0x000000ff;
const int INPUT_ACTION_TYPE_MASK = 0x00000f00;
const int INPUT_AXIS = 0x00010000;
const int INPUT_POV = 0x00020000;
const int INPUT_COMBO_MASK = 0xff000000;
const int INPUT_COMBO_AXIS = 0x00800000;
const int INPUT_COMBO_AXIS_OFFSET = 0x00000080;
const int INPUT_COMBO_KEY_OFFSET = 0x01000000;

const int INPUT_DEVICE_KEYBOARD = 0x00000000;
const int INPUT_DEVICE_MOUSE = 0x00100000; // mouse button
const int INPUT_DEVICE_STICK = 0x00200000;
const int INPUT_DEVICE_XINPUT = 0x00300000; // XInput device
const int INPUT_DEVICE_TRACKIR = 0x00400000;
const int INPUT_DEVICE_GAMEPAD = 0x00500000;
const int INPUT_DEVICE_CHEAT = 0x00600000;

const int INPUT_ACTION_TYPE_NONE = 0x00000000;
const int INPUT_ACTION_TYPE_STATE = 0x00000100;
const int INPUT_ACTION_TYPE_DOWN_EVENT = 0x00000200;
const int INPUT_ACTION_TYPE_UP_EVENT = 0x00000300;
const int INPUT_ACTION_TYPE_SHORTCLICK_EVENT = 0x00000400;
const int INPUT_ACTION_TYPE_HOLD_EVENT = 0x00000500;
const int INPUT_ACTION_TYPE_COMBO = 0x00002000;
const int INPUT_ACTION_TYPE_SPECIALCOMBO = 0x00004000;
const int INPUT_ACTION_TYPE_DOUBLETAP = 0x00008000;

const int INPUT_DEVICE_MOUSE_AXIS = (INPUT_DEVICE_MOUSE | INPUT_AXIS);
const int INPUT_DEVICE_STICK_AXIS = (INPUT_DEVICE_STICK | INPUT_AXIS);
const int INPUT_DEVICE_STICK_POV = (INPUT_DEVICE_STICK | INPUT_POV);
const int INPUT_DEVICE_GAMEPAD_AXIS = (INPUT_DEVICE_GAMEPAD | INPUT_AXIS);
/** @}*/

/**
 * \defgroup String constants
 * \desc String constants
 * @{
 */
const string STRING_EMPTY = "";
/** @}*/

/**
 * \defgroup Colors
 * @{
 */
const int COLOR_RED = 0xFFF22613;
const int COLOR_GREEN = 0xFF2ECC71;
const int COLOR_BLUE = 0xFF4B77BE;
const int COLOR_YELLOW = 0xFFF7CA18;
const int COLOR_RED_A = 0x1fF22613;
const int COLOR_GREEN_A = 0x1f2ECC71;
const int COLOR_BLUE_A = 0x1f4B77BE;
const int COLOR_YELLOW_A = 0x1fF7CA18;
/** @}*/

/**
 * \defgroup Materials
 * @{
 */
/****************************************************************************
 * MATERIALS LIST
 *
 * Note: If you add new materials here, don't forget to add physics
 * parameters to them in physics/materials.xml
 ***************************************************************************/
const int MATERIAL_DEFAULT = 0;
const int MATERIAL_METAL = 1; //full steel
const int MATERIAL_IRON = 2; //iron
const int MATERIAL_GLASS = 3; //glass pane
const int MATERIAL_PLASTIC = 4; //plastic object
const int MATERIAL_LIQUID = 5; //liquids, water
const int MATERIAL_SLIME = 6; //slime, oil etc
const int MATERIAL_BETON = 7; //concrete
const int MATERIAL_RUBBER = 8; //rubber, linoleum
const int MATERIAL_FLESH = 9; //flesh, humanoids
const int MATERIAL_GRASS = 10; //grass
const int MATERIAL_WOOD = 11; //wood
const int MATERIAL_SNOW = 12; //snow
const int MATERIAL_SAND = 13; //soft sand
const int MATERIAL_DIRT = 14; //super-soft dirt
const int MATERIAL_GRAVEL = 15; //gravel
const int MATERIAL_STONE = 16; //rocks, cliffs
/** @}*/
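/**
 * Editor's note (added comment): a small worked example of how the bit-packed
 * input constants above combine. The key code 0x1C is an arbitrary value used
 * only for illustration.
 *
 *   // "key down" binding for key 0x1C on the keyboard
 *   int binding = INPUT_DEVICE_KEYBOARD | INPUT_ACTION_TYPE_DOWN_EVENT | (0x1C & INPUT_KEY_MASK);
 *
 *   // decompose it again
 *   int device = binding & INPUT_MODULE_TYPE_MASK;  // INPUT_DEVICE_KEYBOARD (bits 20-22)
 *   int action = binding & INPUT_ACTION_TYPE_MASK;  // INPUT_ACTION_TYPE_DOWN_EVENT (bits 8-11)
 *   int key    = binding & INPUT_KEY_MASK;          // 0x1C (low byte)
 */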
302842.c
/*
 * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <common/interrupt_props.h>
#include <drivers/arm/gic_common.h>
#include <drivers/arm/gicv2.h>
#include <lib/spinlock.h>

#include "../common/gic_common_private.h"
#include "gicv2_private.h"

static const gicv2_driver_data_t *driver_data;

/*
 * Spinlock to guard registers needing read-modify-write. APIs protected by this
 * spinlock are used either at boot time (when only a single CPU is active), or
 * when the system is fully coherent.
 */
static spinlock_t gic_lock;

/*******************************************************************************
 * Enable secure interrupts and use FIQs to route them. Disable legacy bypass
 * and set the priority mask register to allow all interrupts to trickle in.
 ******************************************************************************/
void gicv2_cpuif_enable(void)
{
	unsigned int val;

	assert(driver_data != NULL);
	assert(driver_data->gicc_base != 0U);

	/*
	 * Enable the Group 0 interrupts, FIQEn and disable Group 0/1
	 * bypass.
	 */
	val = CTLR_ENABLE_G0_BIT | FIQ_EN_BIT | FIQ_BYP_DIS_GRP0;
	val |= IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP1 | IRQ_BYP_DIS_GRP1;

	/* Program the idle priority in the PMR */
	gicc_write_pmr(driver_data->gicc_base, GIC_PRI_MASK);
	gicc_write_ctlr(driver_data->gicc_base, val);
}

/*******************************************************************************
 * Place the cpu interface in a state where it can never make a cpu exit wfi as
 * a result of an asserted interrupt. This is critical for powering down a cpu.
 ******************************************************************************/
void gicv2_cpuif_disable(void)
{
	unsigned int val;

	assert(driver_data != NULL);
	assert(driver_data->gicc_base != 0U);

	/* Disable secure, non-secure interrupts and disable their bypass */
	val = gicc_read_ctlr(driver_data->gicc_base);
	val &= ~(CTLR_ENABLE_G0_BIT | CTLR_ENABLE_G1_BIT);
	val |= FIQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP0;
	val |= IRQ_BYP_DIS_GRP0 | IRQ_BYP_DIS_GRP1;
	gicc_write_ctlr(driver_data->gicc_base, val);
}

/*******************************************************************************
 * Per cpu gic distributor setup which will be done by all cpus after a cold
 * boot/hotplug. This marks out the secure SPIs and PPIs & enables them.
 ******************************************************************************/
void gicv2_pcpu_distif_init(void)
{
	unsigned int ctlr;

	assert(driver_data != NULL);
	assert(driver_data->gicd_base != 0U);

	gicv2_secure_ppi_sgi_setup_props(driver_data->gicd_base,
			driver_data->interrupt_props,
			driver_data->interrupt_props_num);

	/* Enable G0 interrupts if not already */
	ctlr = gicd_read_ctlr(driver_data->gicd_base);
	if ((ctlr & CTLR_ENABLE_G0_BIT) == 0U) {
		gicd_write_ctlr(driver_data->gicd_base,
				ctlr | CTLR_ENABLE_G0_BIT);
	}
}

/*******************************************************************************
 * Global gic distributor init which will be done by the primary cpu after a
 * cold boot. It marks out the secure SPIs, PPIs & SGIs and enables them. It
 * then enables the secure GIC distributor interface.
******************************************************************************/ void gicv2_distif_init(void) { unsigned int ctlr; assert(driver_data != NULL); assert(driver_data->gicd_base != 0U); /* Disable the distributor before going further */ ctlr = gicd_read_ctlr(driver_data->gicd_base); gicd_write_ctlr(driver_data->gicd_base, ctlr & ~(CTLR_ENABLE_G0_BIT | CTLR_ENABLE_G1_BIT)); /* Set the default attribute of all SPIs */ gicv2_spis_configure_defaults(driver_data->gicd_base); gicv2_secure_spis_configure_props(driver_data->gicd_base, driver_data->interrupt_props, driver_data->interrupt_props_num); /* Re-enable the secure SPIs now that they have been configured */ gicd_write_ctlr(driver_data->gicd_base, ctlr | CTLR_ENABLE_G0_BIT); } /******************************************************************************* * Initialize the ARM GICv2 driver with the provided platform inputs ******************************************************************************/ void gicv2_driver_init(const gicv2_driver_data_t *plat_driver_data) { unsigned int gic_version; assert(plat_driver_data != NULL); assert(plat_driver_data->gicd_base != 0U); assert(plat_driver_data->gicc_base != 0U); assert(plat_driver_data->interrupt_props_num > 0 ? plat_driver_data->interrupt_props != NULL : 1); /* Ensure that this is a GICv2 system */ gic_version = gicd_read_pidr2(plat_driver_data->gicd_base); gic_version = (gic_version >> PIDR2_ARCH_REV_SHIFT) & PIDR2_ARCH_REV_MASK; /* * GICv1 with security extension complies with trusted firmware * GICv2 driver as far as virtualization and few tricky power * features are not used. GICv2 features that are not supported * by GICv1 with Security Extensions are: * - virtual interrupt support. * - wake up events. * - writeable GIC state register (for power sequences) * - interrupt priority drop. * - interrupt signal bypass. */ assert((gic_version == ARCH_REV_GICV2) || (gic_version == ARCH_REV_GICV1)); driver_data = plat_driver_data; /* * The GIC driver data is initialized by the primary CPU with caches * enabled. When the secondary CPU boots up, it initializes the * GICC/GICR interface with the caches disabled. Hence flush the * driver_data to ensure coherency. This is not required if the * platform has HW_ASSISTED_COHERENCY or WARMBOOT_ENABLE_DCACHE_EARLY * enabled. */ #if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) flush_dcache_range((uintptr_t) &driver_data, sizeof(driver_data)); flush_dcache_range((uintptr_t) driver_data, sizeof(*driver_data)); #endif INFO("ARM GICv2 driver initialized\n"); } /****************************************************************************** * This function returns whether FIQ is enabled in the GIC CPU interface. *****************************************************************************/ unsigned int gicv2_is_fiq_enabled(void) { unsigned int gicc_ctlr; assert(driver_data != NULL); assert(driver_data->gicc_base != 0U); gicc_ctlr = gicc_read_ctlr(driver_data->gicc_base); return (gicc_ctlr >> FIQ_EN_SHIFT) & 0x1U; } /******************************************************************************* * This function returns the type of the highest priority pending interrupt at * the GIC cpu interface. The return values can be one of the following : * PENDING_G1_INTID : The interrupt type is non secure Group 1. * 0 - 1019 : The interrupt type is secure Group 0. 
* GIC_SPURIOUS_INTERRUPT : there is no pending interrupt with * sufficient priority to be signaled ******************************************************************************/ unsigned int gicv2_get_pending_interrupt_type(void) { assert(driver_data != NULL); assert(driver_data->gicc_base != 0U); return gicc_read_hppir(driver_data->gicc_base) & INT_ID_MASK; } /******************************************************************************* * This function returns the id of the highest priority pending interrupt at * the GIC cpu interface. GIC_SPURIOUS_INTERRUPT is returned when there is no * interrupt pending. ******************************************************************************/ unsigned int gicv2_get_pending_interrupt_id(void) { unsigned int id; assert(driver_data != NULL); assert(driver_data->gicc_base != 0U); id = gicc_read_hppir(driver_data->gicc_base) & INT_ID_MASK; /* * Find out which non-secure interrupt it is under the assumption that * the GICC_CTLR.AckCtl bit is 0. */ if (id == PENDING_G1_INTID) id = gicc_read_ahppir(driver_data->gicc_base) & INT_ID_MASK; return id; } /******************************************************************************* * This functions reads the GIC cpu interface Interrupt Acknowledge register * to start handling the pending secure 0 interrupt. It returns the * contents of the IAR. ******************************************************************************/ unsigned int gicv2_acknowledge_interrupt(void) { assert(driver_data != NULL); assert(driver_data->gicc_base != 0U); return gicc_read_IAR(driver_data->gicc_base); } /******************************************************************************* * This functions writes the GIC cpu interface End Of Interrupt register with * the passed value to finish handling the active secure group 0 interrupt. ******************************************************************************/ void gicv2_end_of_interrupt(unsigned int id) { assert(driver_data != NULL); assert(driver_data->gicc_base != 0U); gicc_write_EOIR(driver_data->gicc_base, id); } /******************************************************************************* * This function returns the type of the interrupt id depending upon the group * this interrupt has been configured under by the interrupt controller i.e. * group0 secure or group1 non secure. It returns zero for Group 0 secure and * one for Group 1 non secure interrupt. ******************************************************************************/ unsigned int gicv2_get_interrupt_group(unsigned int id) { assert(driver_data != NULL); assert(driver_data->gicd_base != 0U); return gicd_get_igroupr(driver_data->gicd_base, id); } /******************************************************************************* * This function returns the priority of the interrupt the processor is * currently servicing. ******************************************************************************/ unsigned int gicv2_get_running_priority(void) { assert(driver_data != NULL); assert(driver_data->gicc_base != 0U); return gicc_read_rpr(driver_data->gicc_base); } /******************************************************************************* * This function sets the GICv2 target mask pattern for the current PE. The PE * target mask is used to translate linear PE index (returned by platform core * position) to a bit mask used when targeting interrupts to a PE (for example * when raising SGIs and routing SPIs). 
******************************************************************************/ void gicv2_set_pe_target_mask(unsigned int proc_num) { assert(driver_data != NULL); assert(driver_data->gicd_base != 0U); assert(driver_data->target_masks != NULL); assert((unsigned int)proc_num < GICV2_MAX_TARGET_PE); assert((unsigned int)proc_num < driver_data->target_masks_num); /* Return if the target mask is already populated */ if (driver_data->target_masks[proc_num] != 0U) return; /* * Update target register corresponding to this CPU and flush for it to * be visible to other CPUs. */ if (driver_data->target_masks[proc_num] == 0U) { driver_data->target_masks[proc_num] = gicv2_get_cpuif_id(driver_data->gicd_base); #if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) /* * PEs only update their own masks. Primary updates it with * caches on. But because secondaries does it with caches off, * all updates go to memory directly, and there's no danger of * secondaries overwriting each others' mask, despite * target_masks[] not being cache line aligned. */ flush_dcache_range((uintptr_t) &driver_data->target_masks[proc_num], sizeof(driver_data->target_masks[proc_num])); #endif } } /******************************************************************************* * This function returns the active status of the interrupt (either because the * state is active, or active and pending). ******************************************************************************/ unsigned int gicv2_get_interrupt_active(unsigned int id) { assert(driver_data != NULL); assert(driver_data->gicd_base != 0U); assert(id <= MAX_SPI_ID); return gicd_get_isactiver(driver_data->gicd_base, id); } /******************************************************************************* * This function enables the interrupt identified by id. ******************************************************************************/ void gicv2_enable_interrupt(unsigned int id) { assert(driver_data != NULL); assert(driver_data->gicd_base != 0U); assert(id <= MAX_SPI_ID); /* * Ensure that any shared variable updates depending on out of band * interrupt trigger are observed before enabling interrupt. */ dsbishst(); gicd_set_isenabler(driver_data->gicd_base, id); } /******************************************************************************* * This function disables the interrupt identified by id. ******************************************************************************/ void gicv2_disable_interrupt(unsigned int id) { assert(driver_data != NULL); assert(driver_data->gicd_base != 0U); assert(id <= MAX_SPI_ID); /* * Disable interrupt, and ensure that any shared variable updates * depending on out of band interrupt trigger are observed afterwards. */ gicd_set_icenabler(driver_data->gicd_base, id); dsbishst(); } /******************************************************************************* * This function sets the interrupt priority as supplied for the given interrupt * id. ******************************************************************************/ void gicv2_set_interrupt_priority(unsigned int id, unsigned int priority) { assert(driver_data != NULL); assert(driver_data->gicd_base != 0U); assert(id <= MAX_SPI_ID); gicd_set_ipriorityr(driver_data->gicd_base, id, priority); } /******************************************************************************* * This function assigns group for the interrupt identified by id. 
The group can * be any of GICV2_INTR_GROUP* ******************************************************************************/ void gicv2_set_interrupt_type(unsigned int id, unsigned int type) { assert(driver_data != NULL); assert(driver_data->gicd_base != 0U); assert(id <= MAX_SPI_ID); /* Serialize read-modify-write to Distributor registers */ spin_lock(&gic_lock); switch (type) { case GICV2_INTR_GROUP1: gicd_set_igroupr(driver_data->gicd_base, id); break; case GICV2_INTR_GROUP0: gicd_clr_igroupr(driver_data->gicd_base, id); break; default: assert(false); break; } spin_unlock(&gic_lock); } /******************************************************************************* * This function raises the specified SGI to requested targets. * * The proc_num parameter must be the linear index of the target PE in the * system. ******************************************************************************/ void gicv2_raise_sgi(int sgi_num, int proc_num) { unsigned int sgir_val, target; assert(driver_data != NULL); assert((unsigned int)proc_num < GICV2_MAX_TARGET_PE); assert(driver_data->gicd_base != 0U); /* * Target masks array must have been supplied, and the core position * should be valid. */ assert(driver_data->target_masks != NULL); assert((unsigned int)proc_num < driver_data->target_masks_num); /* Don't raise SGI if the mask hasn't been populated */ target = driver_data->target_masks[proc_num]; assert(target != 0U); sgir_val = GICV2_SGIR_VALUE(SGIR_TGT_SPECIFIC, target, sgi_num); /* * Ensure that any shared variable updates depending on out of band * interrupt trigger are observed before raising SGI. */ dsbishst(); gicd_write_sgir(driver_data->gicd_base, sgir_val); } /******************************************************************************* * This function sets the interrupt routing for the given SPI interrupt id. * The interrupt routing is specified in routing mode. The proc_num parameter is * linear index of the PE to target SPI. When proc_num < 0, the SPI may target * all PEs. ******************************************************************************/ void gicv2_set_spi_routing(unsigned int id, int proc_num) { unsigned int target; assert(driver_data != NULL); assert(driver_data->gicd_base != 0U); assert((id >= MIN_SPI_ID) && (id <= MAX_SPI_ID)); /* * Target masks array must have been supplied, and the core position * should be valid. */ assert(driver_data->target_masks != NULL); assert((unsigned int)proc_num < GICV2_MAX_TARGET_PE); assert((unsigned int)proc_num < driver_data->target_masks_num); if (proc_num < 0) { /* Target all PEs */ target = GIC_TARGET_CPU_MASK; } else { /* Don't route interrupt if the mask hasn't been populated */ target = driver_data->target_masks[proc_num]; assert(target != 0U); } gicd_set_itargetsr(driver_data->gicd_base, id, target); } /******************************************************************************* * This function clears the pending status of an interrupt identified by id. ******************************************************************************/ void gicv2_clear_interrupt_pending(unsigned int id) { assert(driver_data != NULL); assert(driver_data->gicd_base != 0U); /* SGIs can't be cleared pending */ assert(id >= MIN_PPI_ID); /* * Clear pending interrupt, and ensure that any shared variable updates * depending on out of band interrupt trigger are observed afterwards. 
*/ gicd_set_icpendr(driver_data->gicd_base, id); dsbishst(); } /******************************************************************************* * This function sets the pending status of an interrupt identified by id. ******************************************************************************/ void gicv2_set_interrupt_pending(unsigned int id) { assert(driver_data != NULL); assert(driver_data->gicd_base != 0U); /* SGIs can't be cleared pending */ assert(id >= MIN_PPI_ID); /* * Ensure that any shared variable updates depending on out of band * interrupt trigger are observed before setting interrupt pending. */ dsbishst(); gicd_set_ispendr(driver_data->gicd_base, id); } /******************************************************************************* * This function sets the PMR register with the supplied value. Returns the * original PMR. ******************************************************************************/ unsigned int gicv2_set_pmr(unsigned int mask) { unsigned int old_mask; assert(driver_data != NULL); assert(driver_data->gicc_base != 0U); old_mask = gicc_read_pmr(driver_data->gicc_base); /* * Order memory updates w.r.t. PMR write, and ensure they're visible * before potential out of band interrupt trigger because of PMR update. */ dmbishst(); gicc_write_pmr(driver_data->gicc_base, mask); dsbishst(); return old_mask; } /******************************************************************************* * This function updates single interrupt configuration to be level/edge * triggered ******************************************************************************/ void gicv2_interrupt_set_cfg(unsigned int id, unsigned int cfg) { gicd_set_icfgr(driver_data->gicd_base, id, cfg); }
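#if 0
/*******************************************************************************
 * Editor's note: illustrative sketch only, not part of the original driver.
 * It shows one plausible boot-time call sequence for the public API above,
 * following the usual TF-A platform pattern. The base addresses, the single
 * secure SGI in the property list and the "example_" names are hypothetical
 * placeholders that a real platform port would replace.
 ******************************************************************************/
#define EXAMPLE_GICD_BASE	0x2f000000U	/* hypothetical */
#define EXAMPLE_GICC_BASE	0x2c000000U	/* hypothetical */

static const interrupt_prop_t example_interrupt_props[] = {
	INTR_PROP_DESC(8U, GIC_HIGHEST_SEC_PRIORITY, GICV2_INTR_GROUP0,
		       GIC_INTR_CFG_EDGE),
};

static unsigned int example_target_masks[4];

static const gicv2_driver_data_t example_gic_data = {
	.gicd_base = EXAMPLE_GICD_BASE,
	.gicc_base = EXAMPLE_GICC_BASE,
	.interrupt_props = example_interrupt_props,
	.interrupt_props_num = ARRAY_SIZE(example_interrupt_props),
	.target_masks = example_target_masks,
	.target_masks_num = ARRAY_SIZE(example_target_masks),
};

static void example_gic_init(void)
{
	/* Primary CPU, cold boot: global setup, then per-CPU setup. */
	gicv2_driver_init(&example_gic_data);
	gicv2_distif_init();
	gicv2_pcpu_distif_init();
	gicv2_cpuif_enable();

	/* Record this core's GICD_ITARGETSR mask for later SGIs/SPI routing. */
	gicv2_set_pe_target_mask(plat_my_core_pos());
}
#endif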
464683.c
/* * Copyright (C) 2015-2017 Alibaba Group Holding Limited */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include "ali_crypto.h" #include "mbedtls/config.h" #include "mbedtls/net_sockets.h" #include "mbedtls/debug.h" #include "mbedtls/ssl.h" #include "mbedtls/error.h" #include "mbedtls/timing.h" #define DEBUG_LEVEL 1 #define LOCAL_TEST #if defined(LOCAL_TEST) #define SERVER_PORT "4433" #define SERVER_NAME "localhost" #define SERVER_ADDR "127.0.0.1" #define MESSAGE "Echo this" #define READ_TIMEOUT_MS 1000 #define MAX_RETRY 5 const char *dtls_test_ca_pem = "-----BEGIN CERTIFICATE-----\n" \ "MIIDhzCCAm+gAwIBAgIBADANBgkqhkiG9w0BAQUFADA7MQswCQYDVQQGEwJOTDER\n" \ "MA8GA1UEChMIUG9sYXJTU0wxGTAXBgNVBAMTEFBvbGFyU1NMIFRlc3QgQ0EwHhcN\n" \ "MTEwMjEyMTQ0NDAwWhcNMjEwMjEyMTQ0NDAwWjA7MQswCQYDVQQGEwJOTDERMA8G\n" \ "A1UEChMIUG9sYXJTU0wxGTAXBgNVBAMTEFBvbGFyU1NMIFRlc3QgQ0EwggEiMA0G\n" \ "CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDA3zf8F7vglp0/ht6WMn1EpRagzSHx\n" \ "mdTs6st8GFgIlKXsm8WL3xoemTiZhx57wI053zhdcHgH057Zk+i5clHFzqMwUqny\n" \ "50BwFMtEonILwuVA+T7lpg6z+exKY8C4KQB0nFc7qKUEkHHxvYPZP9al4jwqj+8n\n" \ "YMPGn8u67GB9t+aEMr5P+1gmIgNb1LTV+/Xjli5wwOQuvfwu7uJBVcA0Ln0kcmnL\n" \ "R7EUQIN9Z/SG9jGr8XmksrUuEvmEF/Bibyc+E1ixVA0hmnM3oTDPb5Lc9un8rNsu\n" \ "KNF+AksjoBXyOGVkCeoMbo4bF6BxyLObyavpw/LPh5aPgAIynplYb6LVAgMBAAGj\n" \ "gZUwgZIwDAYDVR0TBAUwAwEB/zAdBgNVHQ4EFgQUtFrkpbPe0lL2udWmlQ/rPrzH\n" \ "/f8wYwYDVR0jBFwwWoAUtFrkpbPe0lL2udWmlQ/rPrzH/f+hP6Q9MDsxCzAJBgNV\n" \ "BAYTAk5MMREwDwYDVQQKEwhQb2xhclNTTDEZMBcGA1UEAxMQUG9sYXJTU0wgVGVz\n" \ "dCBDQYIBADANBgkqhkiG9w0BAQUFAAOCAQEAuP1U2ABUkIslsCfdlc2i94QHHYeJ\n" \ "SsR4EdgHtdciUI5I62J6Mom+Y0dT/7a+8S6MVMCZP6C5NyNyXw1GWY/YR82XTJ8H\n" \ "DBJiCTok5DbZ6SzaONBzdWHXwWwmi5vg1dxn7YxrM9d0IjxM27WNKs4sDQhZBQkF\n" \ "pjmfs2cb4oPl4Y9T9meTx/lvdkRYEug61Jfn6cA+qHpyPYdTH+UshITnmp5/Ztkf\n" \ "m/UTSLBNFNHesiTZeH31NcxYGdHSme9Nc/gfidRa0FLOCfWxRlFqAI47zG9jAQCZ\n" \ "7Z2mCGDNMhjQc+BYcdnl0lPXjdDK6V0qCg1dVewhUBcW5gZKzV7e9+DpVA==\n" \ "-----END CERTIFICATE-----\n"; #endif /* LOCAL_TEST */ static int ssl_random(void *prng, unsigned char *output, size_t output_len) { struct timeval tv; (void)prng; gettimeofday(&tv, NULL); ali_seed((uint8_t *)&tv.tv_usec, sizeof(suseconds_t)); ali_rand_gen(output, output_len); return 0; } #if defined(MBEDTLS_DEBUG_C) static void ssl_debug(void *ctx, int level, const char *file, int line, const char *str) { (void)ctx; (void) level; printf("%s, line: %d: %s", file, line, str); return; } #endif int dtls_client_sample(void) { int ret, len; mbedtls_net_context server_fd; uint32_t flags; unsigned char buf[1024]; int retry_left = MAX_RETRY; mbedtls_ssl_context ssl; mbedtls_ssl_config conf; mbedtls_x509_crt cacert; mbedtls_timing_delay_context timer; #if defined(MBEDTLS_DEBUG_C) mbedtls_debug_set_threshold(DEBUG_LEVEL); #endif /* * 0. Initialize the session data */ mbedtls_net_init(&server_fd); mbedtls_ssl_init(&ssl); mbedtls_ssl_config_init(&conf); mbedtls_x509_crt_init(&cacert); /* * 0. Load certificates */ printf( " . Loading the CA root certificate ..." ); ret = mbedtls_x509_crt_parse(&cacert, (const unsigned char *)dtls_test_ca_pem, strlen(dtls_test_ca_pem) + 1); if (ret < 0) { printf(" failed\n ! mbedtls_x509_crt_parse returned -0x%x\n\n", -ret); goto exit; } printf( " ok (%d skipped)\n", ret ); /* * 1. Start the connection */ printf(" . Connecting to udp/%s/%s...", SERVER_NAME, SERVER_PORT); if ((ret = mbedtls_net_connect(&server_fd, SERVER_ADDR, SERVER_PORT, MBEDTLS_NET_PROTO_UDP)) != 0) { printf( " failed\n ! 
mbedtls_net_connect returned %d\n\n", ret ); goto exit; } printf(" ok\n"); /* * 2. Setup stuff */ printf(" . Setting up the DTLS structure..."); if ((ret = mbedtls_ssl_config_defaults(&conf, MBEDTLS_SSL_IS_CLIENT, MBEDTLS_SSL_TRANSPORT_DATAGRAM, MBEDTLS_SSL_PRESET_DEFAULT)) != 0) { printf(" failed\n ! mbedtls_ssl_config_defaults returned %d\n\n", ret); goto exit; } mbedtls_ssl_conf_authmode(&conf, MBEDTLS_SSL_VERIFY_REQUIRED); mbedtls_ssl_conf_ca_chain(&conf, &cacert, NULL); mbedtls_ssl_conf_rng(&conf, ssl_random, NULL); #if defined(MBEDTLS_DEBUG_C) mbedtls_ssl_conf_dbg(&conf, ssl_debug, NULL); #endif if ((ret = mbedtls_ssl_setup( &ssl, &conf)) != 0) { printf( " failed\n ! mbedtls_ssl_setup returned %d\n\n", ret ); goto exit; } if ((ret = mbedtls_ssl_set_hostname(&ssl, SERVER_NAME)) != 0) { printf( " failed\n ! mbedtls_ssl_set_hostname returned %d\n\n", ret ); goto exit; } mbedtls_ssl_set_bio(&ssl, &server_fd, mbedtls_net_send, mbedtls_net_recv, mbedtls_net_recv_timeout); mbedtls_ssl_set_timer_cb(&ssl, &timer, mbedtls_timing_set_delay, mbedtls_timing_get_delay); printf( " ok\n" ); /* * 4. Handshake */ printf( " . Performing the SSL/TLS handshake..." ); do ret = mbedtls_ssl_handshake( &ssl ); while( ret == MBEDTLS_ERR_SSL_WANT_READ || ret == MBEDTLS_ERR_SSL_WANT_WRITE ); if (ret != 0) { printf( " failed\n ! mbedtls_ssl_handshake returned -0x%x\n\n", -ret ); goto exit; } printf(" ok\n"); /* * 5. Verify the server certificate */ printf( " . Verifying peer X.509 certificate..." ); if ((flags = mbedtls_ssl_get_verify_result(&ssl)) != 0) { printf("verify result not confirmed - %d\n", flags); goto exit; } else { printf( " ok\n" ); } /* * 6. Write the echo request */ send_request: printf( " > Write to server:" ); len = sizeof( MESSAGE ) - 1; do ret = mbedtls_ssl_write( &ssl, (unsigned char *) MESSAGE, len ); while( ret == MBEDTLS_ERR_SSL_WANT_READ || ret == MBEDTLS_ERR_SSL_WANT_WRITE ); if( ret < 0 ) { printf( " failed\n ! mbedtls_ssl_write returned %d\n\n", ret ); goto exit; } len = ret; printf( " %d bytes written\n%s\n", len, MESSAGE ); /* * 7. Read the echo response */ printf( " < Read from server:" ); len = sizeof( buf ) - 1; memset( buf, 0, sizeof( buf ) ); do ret = mbedtls_ssl_read( &ssl, buf, len ); while( ret == MBEDTLS_ERR_SSL_WANT_READ || ret == MBEDTLS_ERR_SSL_WANT_WRITE ); if (ret <= 0) { switch(ret) { case MBEDTLS_ERR_SSL_TIMEOUT: printf( " timeout\n\n" ); if( retry_left-- > 0 ) goto send_request; goto exit; case MBEDTLS_ERR_SSL_PEER_CLOSE_NOTIFY: printf( " connection was closed gracefully\n" ); ret = 0; goto close_notify; default: printf( " mbedtls_ssl_read returned -0x%x\n\n", -ret ); goto exit; } } len = ret; printf( " %d bytes read\n%s\n", len, buf ); /* * 8. Done, cleanly close the connection */ close_notify: printf( " . Closing the connection..." ); /* No error checking, the connection might be closed already */ do ret = mbedtls_ssl_close_notify( &ssl ); while( ret == MBEDTLS_ERR_SSL_WANT_WRITE ); ret = 0; printf( " done\n" ); /* * 9. Final clean-ups and exit */ exit: mbedtls_net_free(&server_fd); mbedtls_x509_crt_free(&cacert); mbedtls_ssl_free(&ssl); mbedtls_ssl_config_free(&conf); return ret; }
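#if 0
/*
 * Editor's note: illustrative sketch only, not part of the original sample.
 * The sample above seeds ali_crypto from gettimeofday(), which is fine for a
 * demo but weak as a DRBG seed. A common alternative is mbed TLS's own
 * CTR_DRBG seeded from the entropy module, shown here assuming
 * MBEDTLS_ENTROPY_C and MBEDTLS_CTR_DRBG_C are enabled in the build.
 */
#include "mbedtls/entropy.h"
#include "mbedtls/ctr_drbg.h"

static mbedtls_entropy_context entropy;
static mbedtls_ctr_drbg_context ctr_drbg;

static int ssl_random_ctr_drbg_init(void)
{
    const char *pers = "dtls_client_sample";

    mbedtls_entropy_init(&entropy);
    mbedtls_ctr_drbg_init(&ctr_drbg);
    return mbedtls_ctr_drbg_seed(&ctr_drbg, mbedtls_entropy_func, &entropy,
                                 (const unsigned char *)pers, strlen(pers));
}

/* Then register it instead of ssl_random:
 *     mbedtls_ssl_conf_rng(&conf, mbedtls_ctr_drbg_random, &ctr_drbg);
 */
#endif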
92982.c
/* See COPYRIGHT for copyright information. */ #include <inc/x86.h> #include <inc/memlayout.h> #include <inc/kbdreg.h> #include <inc/string.h> #include <inc/assert.h> #include <kern/console.h> #include <kern/picirq.h> static void cons_intr(int (*proc)(void)); static void cons_putc(int c); // Stupid I/O delay routine necessitated by historical PC design flaws static void delay(void) { inb(0x84); inb(0x84); inb(0x84); inb(0x84); } /***** Serial I/O code *****/ #define COM1 0x3F8 #define COM_RX 0 // In: Receive buffer (DLAB=0) #define COM_TX 0 // Out: Transmit buffer (DLAB=0) #define COM_DLL 0 // Out: Divisor Latch Low (DLAB=1) #define COM_DLM 1 // Out: Divisor Latch High (DLAB=1) #define COM_IER 1 // Out: Interrupt Enable Register #define COM_IER_RDI 0x01 // Enable receiver data interrupt #define COM_IIR 2 // In: Interrupt ID Register #define COM_FCR 2 // Out: FIFO Control Register #define COM_LCR 3 // Out: Line Control Register #define COM_LCR_DLAB 0x80 // Divisor latch access bit #define COM_LCR_WLEN8 0x03 // Wordlength: 8 bits #define COM_MCR 4 // Out: Modem Control Register #define COM_MCR_RTS 0x02 // RTS complement #define COM_MCR_DTR 0x01 // DTR complement #define COM_MCR_OUT2 0x08 // Out2 complement #define COM_LSR 5 // In: Line Status Register #define COM_LSR_DATA 0x01 // Data available #define COM_LSR_TXRDY 0x20 // Transmit buffer avail #define COM_LSR_TSRE 0x40 // Transmitter off static bool serial_exists; static int serial_proc_data(void) { if (!(inb(COM1+COM_LSR) & COM_LSR_DATA)) return -1; return inb(COM1+COM_RX); } void serial_intr(void) { if (serial_exists) cons_intr(serial_proc_data); } static void serial_putc(int c) { int i; for (i = 0; !(inb(COM1 + COM_LSR) & COM_LSR_TXRDY) && i < 12800; i++) delay(); outb(COM1 + COM_TX, c); } static void serial_init(void) { // Turn off the FIFO outb(COM1+COM_FCR, 0); // Set speed; requires DLAB latch outb(COM1+COM_LCR, COM_LCR_DLAB); outb(COM1+COM_DLL, (uint8_t) (115200 / 9600)); outb(COM1+COM_DLM, 0); // 8 data bits, 1 stop bit, parity off; turn off DLAB latch outb(COM1+COM_LCR, COM_LCR_WLEN8 & ~COM_LCR_DLAB); // No modem controls outb(COM1+COM_MCR, 0); // Enable rcv interrupts outb(COM1+COM_IER, COM_IER_RDI); // Clear any preexisting overrun indications and interrupts // Serial port doesn't exist if COM_LSR returns 0xFF serial_exists = (inb(COM1+COM_LSR) != 0xFF); (void) inb(COM1+COM_IIR); (void) inb(COM1+COM_RX); } /***** Parallel port output code *****/ // For information on PC parallel port programming, see the class References // page. 
static void lpt_putc(int c) { int i; for (i = 0; !(inb(0x378+1) & 0x80) && i < 12800; i++) delay(); outb(0x378+0, c); outb(0x378+2, 0x08|0x04|0x01); outb(0x378+2, 0x08); } /***** Text-mode CGA/VGA display output *****/ static unsigned addr_6845; static uint16_t *crt_buf; static uint16_t crt_pos; static void cga_init(void) { volatile uint16_t *cp; uint16_t was; unsigned pos; cp = (uint16_t*) (KERNBASE + CGA_BUF); was = *cp; *cp = (uint16_t) 0xA55A; if (*cp != 0xA55A) { cp = (uint16_t*) (KERNBASE + MONO_BUF); addr_6845 = MONO_BASE; } else { *cp = was; addr_6845 = CGA_BASE; } /* Extract cursor location */ outb(addr_6845, 14); pos = inb(addr_6845 + 1) << 8; outb(addr_6845, 15); pos |= inb(addr_6845 + 1); crt_buf = (uint16_t*) cp; crt_pos = pos; } static void cga_putc(int c) { // if no attribute given, then use black on white if (!(c & ~0xFF)) c |= 0x0700; switch (c & 0xff) { case '\b': if (crt_pos > 0) { crt_pos--; crt_buf[crt_pos] = (c & ~0xff) | ' '; } break; case '\n': crt_pos += CRT_COLS; /* fallthru */ case '\r': crt_pos -= (crt_pos % CRT_COLS); break; case '\t': cons_putc(' '); cons_putc(' '); cons_putc(' '); cons_putc(' '); cons_putc(' '); break; default: crt_buf[crt_pos++] = c; /* write the character */ break; } // What is the purpose of this? if (crt_pos >= CRT_SIZE) { int i; memmove(crt_buf, crt_buf + CRT_COLS, (CRT_SIZE - CRT_COLS) * sizeof(uint16_t)); for (i = CRT_SIZE - CRT_COLS; i < CRT_SIZE; i++) crt_buf[i] = 0x0700 | ' '; crt_pos -= CRT_COLS; } /* move that little blinky thing */ outb(addr_6845, 14); outb(addr_6845 + 1, crt_pos >> 8); outb(addr_6845, 15); outb(addr_6845 + 1, crt_pos); } /***** Keyboard input code *****/ #define NO 0 #define SHIFT (1<<0) #define CTL (1<<1) #define ALT (1<<2) #define CAPSLOCK (1<<3) #define NUMLOCK (1<<4) #define SCROLLLOCK (1<<5) #define E0ESC (1<<6) static uint8_t shiftcode[256] = { [0x1D] = CTL, [0x2A] = SHIFT, [0x36] = SHIFT, [0x38] = ALT, [0x9D] = CTL, [0xB8] = ALT }; static uint8_t togglecode[256] = { [0x3A] = CAPSLOCK, [0x45] = NUMLOCK, [0x46] = SCROLLLOCK }; static uint8_t normalmap[256] = { NO, 0x1B, '1', '2', '3', '4', '5', '6', // 0x00 '7', '8', '9', '0', '-', '=', '\b', '\t', 'q', 'w', 'e', 'r', 't', 'y', 'u', 'i', // 0x10 'o', 'p', '[', ']', '\n', NO, 'a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l', ';', // 0x20 '\'', '`', NO, '\\', 'z', 'x', 'c', 'v', 'b', 'n', 'm', ',', '.', '/', NO, '*', // 0x30 NO, ' ', NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, '7', // 0x40 '8', '9', '-', '4', '5', '6', '+', '1', '2', '3', '0', '.', NO, NO, NO, NO, // 0x50 [0xC7] = KEY_HOME, [0x9C] = '\n' /*KP_Enter*/, [0xB5] = '/' /*KP_Div*/, [0xC8] = KEY_UP, [0xC9] = KEY_PGUP, [0xCB] = KEY_LF, [0xCD] = KEY_RT, [0xCF] = KEY_END, [0xD0] = KEY_DN, [0xD1] = KEY_PGDN, [0xD2] = KEY_INS, [0xD3] = KEY_DEL }; static uint8_t shiftmap[256] = { NO, 033, '!', '@', '#', '$', '%', '^', // 0x00 '&', '*', '(', ')', '_', '+', '\b', '\t', 'Q', 'W', 'E', 'R', 'T', 'Y', 'U', 'I', // 0x10 'O', 'P', '{', '}', '\n', NO, 'A', 'S', 'D', 'F', 'G', 'H', 'J', 'K', 'L', ':', // 0x20 '"', '~', NO, '|', 'Z', 'X', 'C', 'V', 'B', 'N', 'M', '<', '>', '?', NO, '*', // 0x30 NO, ' ', NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, '7', // 0x40 '8', '9', '-', '4', '5', '6', '+', '1', '2', '3', '0', '.', NO, NO, NO, NO, // 0x50 [0xC7] = KEY_HOME, [0x9C] = '\n' /*KP_Enter*/, [0xB5] = '/' /*KP_Div*/, [0xC8] = KEY_UP, [0xC9] = KEY_PGUP, [0xCB] = KEY_LF, [0xCD] = KEY_RT, [0xCF] = KEY_END, [0xD0] = KEY_DN, [0xD1] = KEY_PGDN, [0xD2] = KEY_INS, [0xD3] = KEY_DEL }; #define C(x) (x - '@') static 
uint8_t ctlmap[256] = { NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, C('Q'), C('W'), C('E'), C('R'), C('T'), C('Y'), C('U'), C('I'), C('O'), C('P'), NO, NO, '\r', NO, C('A'), C('S'), C('D'), C('F'), C('G'), C('H'), C('J'), C('K'), C('L'), NO, NO, NO, NO, C('\\'), C('Z'), C('X'), C('C'), C('V'), C('B'), C('N'), C('M'), NO, NO, C('/'), NO, NO, [0x97] = KEY_HOME, [0xB5] = C('/'), [0xC8] = KEY_UP, [0xC9] = KEY_PGUP, [0xCB] = KEY_LF, [0xCD] = KEY_RT, [0xCF] = KEY_END, [0xD0] = KEY_DN, [0xD1] = KEY_PGDN, [0xD2] = KEY_INS, [0xD3] = KEY_DEL }; static uint8_t *charcode[4] = { normalmap, shiftmap, ctlmap, ctlmap }; /* * Get data from the keyboard. If we finish a character, return it. Else 0. * Return -1 if no data. */ static int kbd_proc_data(void) { int c; uint8_t data; static uint32_t shift; if ((inb(KBSTATP) & KBS_DIB) == 0) return -1; data = inb(KBDATAP); if (data == 0xE0) { // E0 escape character shift |= E0ESC; return 0; } else if (data & 0x80) { // Key released data = (shift & E0ESC ? data : data & 0x7F); shift &= ~(shiftcode[data] | E0ESC); return 0; } else if (shift & E0ESC) { // Last character was an E0 escape; or with 0x80 data |= 0x80; shift &= ~E0ESC; } shift |= shiftcode[data]; shift ^= togglecode[data]; c = charcode[shift & (CTL | SHIFT)][data]; if (shift & CAPSLOCK) { if ('a' <= c && c <= 'z') c += 'A' - 'a'; else if ('A' <= c && c <= 'Z') c += 'a' - 'A'; } // Process special keys // Ctrl-Alt-Del: reboot if (!(~shift & (CTL | ALT)) && c == KEY_DEL) { cprintf("Rebooting!\n"); outb(0x92, 0x3); // courtesy of Chris Frost } return c; } void kbd_intr(void) { cons_intr(kbd_proc_data); } static void kbd_init(void) { // Drain the kbd buffer so that QEMU generates interrupts. kbd_intr(); irq_setmask_8259A(irq_mask_8259A & ~(1<<1)); } /***** General device-independent console code *****/ // Here we manage the console input buffer, // where we stash characters received from the keyboard or serial port // whenever the corresponding interrupt occurs. #define CONSBUFSIZE 512 static struct { uint8_t buf[CONSBUFSIZE]; uint32_t rpos; uint32_t wpos; } cons; // called by device interrupt routines to feed input characters // into the circular console input buffer. static void cons_intr(int (*proc)(void)) { int c; while ((c = (*proc)()) != -1) { if (c == 0) continue; cons.buf[cons.wpos++] = c; if (cons.wpos == CONSBUFSIZE) cons.wpos = 0; } } // return the next input character from the console, or 0 if none waiting int cons_getc(void) { int c; // poll for any pending input characters, // so that this function works even when interrupts are disabled // (e.g., when called from the kernel monitor). serial_intr(); kbd_intr(); // grab the next character from the input buffer. if (cons.rpos != cons.wpos) { c = cons.buf[cons.rpos++]; if (cons.rpos == CONSBUFSIZE) cons.rpos = 0; return c; } return 0; } // output a character to the console static void cons_putc(int c) { serial_putc(c); lpt_putc(c); cga_putc(c); } // initialize the console devices void cons_init(void) { cga_init(); kbd_init(); serial_init(); if (!serial_exists) cprintf("Serial port does not exist!\n"); } // `High'-level console I/O. Used by readline and cprintf. void cputchar(int c) { cons_putc(c); } int getchar(void) { int c; while ((c = cons_getc()) == 0) /* do nothing */; return c; } int iscons(int fdnum) { // used by readline return 1; }
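#if 0
/*
 * Editor's note: illustrative sketch only, not part of the original lab code.
 * cons_intr() above never checks for a full buffer, so unread input is
 * silently overwritten once CONSBUFSIZE characters back up. A bounded variant
 * would drop new characters instead, for example:
 */
static void
cons_intr_bounded(int (*proc)(void))
{
	int c;

	while ((c = (*proc)()) != -1) {
		if (c == 0)
			continue;
		/* Leave one slot free so rpos == wpos still means "empty". */
		if ((cons.wpos + 1) % CONSBUFSIZE == cons.rpos)
			break;	/* buffer full: drop the character */
		cons.buf[cons.wpos++] = c;
		if (cons.wpos == CONSBUFSIZE)
			cons.wpos = 0;
	}
}
#endif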
819440.c
// REQUIRES: aarch64-registered-target || arm-registered-target // RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon -ffreestanding -fsyntax-only -verify %s // RUN: %clang_cc1 -triple arm64-linux-gnu -target-feature +neon -ffreestanding -fsyntax-only -verify %s #include <arm_neon.h> void test_vext_8bit(int8x8_t small, int8x16_t big) { vext_s8(small, small, 7); vext_u8(small, small, 7); vext_p8(small, small, 7); vextq_s8(big, big, 15); vextq_u8(big, big, 15); vextq_p8(big, big, 15); vext_s8(small, small, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}} vext_u8(small, small, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}} vext_p8(small, small, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}} vextq_s8(big, big, 16); // expected-error-re {{argument value {{.*}} is outside the valid range}} vextq_u8(big, big, 16); // expected-error-re {{argument value {{.*}} is outside the valid range}} vextq_p8(big, big, 16); // expected-error-re {{argument value {{.*}} is outside the valid range}} } void test_mul_lane_f64(float64x1_t small, float64x2_t big, float64x2_t rhs) { vmul_lane_f64(small, small, 0); vmul_laneq_f64(small, big, 1); vmulq_lane_f64(big, small, 0); vmulq_laneq_f64(big, big, 1); vfma_lane_f64(small, small, small, 0); vfma_laneq_f64(small, small, big, 1); vfmaq_lane_f64(big, big, small, 0); vfmaq_laneq_f64(big, big, big, 1); vmul_lane_f64(small, small, 1); // expected-error-re {{argument value {{.*}} is outside the valid range}} vmul_laneq_f64(small, big, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} vfma_lane_f64(small, small, small, 1); // expected-error-re {{argument value {{.*}} is outside the valid range}} vfma_laneq_f64(small, small, big, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} vfmaq_laneq_f64(big, big, big, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} } void test_ld1st1(int8x8_t small, int8x16_t big, void *addr) { vld1_lane_s8(addr, small, 7); vld1_lane_s16(addr, small, 3); vld1_lane_s32(addr, small, 1); vld1_lane_s64(addr, small, 0); vld1q_lane_s8(addr, big, 15); vld1q_lane_s16(addr, big, 7); vld1q_lane_s32(addr, big, 3); vld1q_lane_s64(addr, big, 1); vld1_lane_s8(addr, small, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld1_lane_s16(addr, small, 4); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld1_lane_s32(addr, small, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld1_lane_s64(addr, small, 1); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld1q_lane_s8(addr, big, 16); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld1q_lane_s16(addr, big, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld1q_lane_s32(addr, big, 4); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld1q_lane_s64(addr, big, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst1_lane_s8(addr, small, 7); vst1_lane_s16(addr, small, 3); vst1_lane_s32(addr, small, 1); vst1_lane_s64(addr, small, 0); vst1q_lane_s8(addr, big, 15); vst1q_lane_s16(addr, big, 7); vst1q_lane_s32(addr, big, 3); vst1q_lane_s64(addr, big, 1); vst1_lane_s8(addr, small, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst1_lane_s16(addr, small, 4); // expected-error-re {{argument value 
{{.*}} is outside the valid range}} vst1_lane_s32(addr, small, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst1_lane_s64(addr, small, 1); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst1q_lane_s8(addr, big, 16); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst1q_lane_s16(addr, big, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst1q_lane_s32(addr, big, 4); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst1q_lane_s64(addr, big, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} } void test_ld2st2(int8x8x2_t small8, int8x16x2_t big8, int16x4x2_t small16, int16x8x2_t big16, int32x2x2_t small32, int32x4x2_t big32, int64x1x2_t small64, int64x2x2_t big64, void *addr) { vld2_lane_s8(addr, small8, 7); vld2_lane_s16(addr, small16, 3); vld2_lane_s32(addr, small32, 1); vld2_lane_s64(addr, small64, 0); vld2q_lane_s8(addr, big8, 15); vld2q_lane_s16(addr, big16, 7); vld2q_lane_s32(addr, big32, 3); vld2q_lane_s64(addr, big64, 1); vld2_lane_s8(addr, small8, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld2_lane_s16(addr, small16, 4); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld2_lane_s32(addr, small32, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld2_lane_s64(addr, small64, 1); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld2q_lane_s8(addr, big8, 16); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld2q_lane_s16(addr, big16, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld2q_lane_s32(addr, big32, 4); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld2q_lane_s64(addr, big64, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst2_lane_s8(addr, small8, 7); vst2_lane_s16(addr, small16, 3); vst2_lane_s32(addr, small32, 1); vst2_lane_s64(addr, small64, 0); vst2q_lane_s8(addr, big8, 15); vst2q_lane_s16(addr, big16, 7); vst2q_lane_s32(addr, big32, 3); vst2q_lane_s64(addr, big64, 1); vst2_lane_s8(addr, small8, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst2_lane_s16(addr, small16, 4); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst2_lane_s32(addr, small32, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst2_lane_s64(addr, small64, 1); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst2q_lane_s8(addr, big8, 16); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst2q_lane_s16(addr, big16, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst2q_lane_s32(addr, big32, 4); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst2q_lane_s64(addr, big64, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} } void test_ld3st3(int8x8x3_t small8, int8x16x3_t big8, int16x4x3_t small16, int16x8x3_t big16, int32x2x3_t small32, int32x4x3_t big32, int64x1x3_t small64, int64x2x3_t big64, void *addr) { vld3_lane_s8(addr, small8, 7); vld3_lane_s16(addr, small16, 3); vld3_lane_s32(addr, small32, 1); vld3_lane_s64(addr, small64, 0); vld3q_lane_s8(addr, big8, 15); vld3q_lane_s16(addr, big16, 7); vld3q_lane_s32(addr, big32, 3); vld3q_lane_s64(addr, big64, 1); vld3_lane_s8(addr, small8, 8); // 
expected-error-re {{argument value {{.*}} is outside the valid range}} vld3_lane_s16(addr, small16, 4); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld3_lane_s32(addr, small32, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld3_lane_s64(addr, small64, 1); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld3q_lane_s8(addr, big8, 16); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld3q_lane_s16(addr, big16, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld3q_lane_s32(addr, big32, 4); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld3q_lane_s64(addr, big64, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst3_lane_s8(addr, small8, 7); vst3_lane_s16(addr, small16, 3); vst3_lane_s32(addr, small32, 1); vst3_lane_s64(addr, small64, 0); vst3q_lane_s8(addr, big8, 15); vst3q_lane_s16(addr, big16, 7); vst3q_lane_s32(addr, big32, 3); vst3q_lane_s64(addr, big64, 1); vst3_lane_s8(addr, small8, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst3_lane_s16(addr, small16, 4); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst3_lane_s32(addr, small32, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst3_lane_s64(addr, small64, 1); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst3q_lane_s8(addr, big8, 16); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst3q_lane_s16(addr, big16, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst3q_lane_s32(addr, big32, 4); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst3q_lane_s64(addr, big64, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} } void test_ld4st4(int8x8x4_t small8, int8x16x4_t big8, int16x4x4_t small16, int16x8x4_t big16, int32x2x4_t small32, int32x4x4_t big32, int64x1x4_t small64, int64x2x4_t big64, void *addr) { vld4_lane_s8(addr, small8, 7); vld4_lane_s16(addr, small16, 3); vld4_lane_s32(addr, small32, 1); vld4_lane_s64(addr, small64, 0); vld4q_lane_s8(addr, big8, 15); vld4q_lane_s16(addr, big16, 7); vld4q_lane_s32(addr, big32, 3); vld4q_lane_s64(addr, big64, 1); vld4_lane_s8(addr, small8, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld4_lane_s16(addr, small16, 4); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld4_lane_s32(addr, small32, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld4_lane_s64(addr, small64, 1); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld4q_lane_s8(addr, big8, 16); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld4q_lane_s16(addr, big16, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld4q_lane_s32(addr, big32, 4); // expected-error-re {{argument value {{.*}} is outside the valid range}} vld4q_lane_s64(addr, big64, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst4_lane_s8(addr, small8, 7); vst4_lane_s16(addr, small16, 3); vst4_lane_s32(addr, small32, 1); vst4_lane_s64(addr, small64, 0); vst4q_lane_s8(addr, big8, 15); vst4q_lane_s16(addr, big16, 7); vst4q_lane_s32(addr, big32, 3); vst4q_lane_s64(addr, big64, 1); vst4_lane_s8(addr, small8, 8); // expected-error-re {{argument value {{.*}} is 
outside the valid range}} vst4_lane_s16(addr, small16, 4); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst4_lane_s32(addr, small32, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst4_lane_s64(addr, small64, 1); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst4q_lane_s8(addr, big8, 16); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst4q_lane_s16(addr, big16, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst4q_lane_s32(addr, big32, 4); // expected-error-re {{argument value {{.*}} is outside the valid range}} vst4q_lane_s64(addr, big64, 2); // expected-error-re {{argument value {{.*}} is outside the valid range}} }
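/*
 * Editorial sketch, not part of the test above: the same lane-index rule in
 * isolation.  Clang only accepts a constant lane in [0, lanes-1] for the
 * vector type, so int32x2_t takes lanes 0 and 1 only.  The function name is
 * made up for illustration, and the rejected call is left commented out so
 * the snippet compiles cleanly on an AArch64/NEON target.
 */
#include <arm_neon.h>

void lane_range_sketch(int32_t *addr, int32x2_t v) {
  vst1_lane_s32(addr, v, 0); /* lane 0: in range */
  vst1_lane_s32(addr, v, 1); /* lane 1: in range */
  /* vst1_lane_s32(addr, v, 2); would be rejected at compile time with a
     diagnostic like "argument value 2 is outside the valid range [0, 1]" */
}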
283733.c
// SPDX-License-Identifier: GPL-2.0 // // ALSA SoC CX20721/CX20723 codec driver // // Copyright: (C) 2017 Conexant Systems, Inc. // Author: Simon Ho, <[email protected]> // // TODO: add support for TDM mode. // #include <linux/acpi.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/jack.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/tlv.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include "cx2072x.h" #define PLL_OUT_HZ_48 (1024 * 3 * 48000) #define BITS_PER_SLOT 8 /* codec private data */ struct cx2072x_priv { struct regmap *regmap; struct clk *mclk; unsigned int mclk_rate; struct device *dev; struct snd_soc_component *codec; struct snd_soc_jack_gpio jack_gpio; struct mutex lock; unsigned int bclk_ratio; bool pll_changed; bool i2spcm_changed; int sample_size; int frame_size; int sample_rate; unsigned int dai_fmt; bool en_aec_ref; }; /* * DAC/ADC Volume * * max : 74 : 0 dB * ( in 1 dB step ) * min : 0 : -74 dB */ static const DECLARE_TLV_DB_SCALE(adc_tlv, -7400, 100, 0); static const DECLARE_TLV_DB_SCALE(dac_tlv, -7400, 100, 0); static const DECLARE_TLV_DB_SCALE(boost_tlv, 0, 1200, 0); struct cx2072x_eq_ctrl { u8 ch; u8 band; }; static const DECLARE_TLV_DB_RANGE(hpf_tlv, 0, 0, TLV_DB_SCALE_ITEM(120, 0, 0), 1, 63, TLV_DB_SCALE_ITEM(30, 30, 0) ); /* Lookup table for PRE_DIV */ static const struct { unsigned int mclk; unsigned int div; } mclk_pre_div[] = { { 6144000, 1 }, { 12288000, 2 }, { 19200000, 3 }, { 26000000, 4 }, { 28224000, 5 }, { 36864000, 6 }, { 36864000, 7 }, { 48000000, 8 }, { 49152000, 8 }, }; /* * cx2072x register cache. 
*/ static const struct reg_default cx2072x_reg_defaults[] = { { CX2072X_AFG_POWER_STATE, 0x00000003 }, { CX2072X_UM_RESPONSE, 0x00000000 }, { CX2072X_GPIO_DATA, 0x00000000 }, { CX2072X_GPIO_ENABLE, 0x00000000 }, { CX2072X_GPIO_DIRECTION, 0x00000000 }, { CX2072X_GPIO_WAKE, 0x00000000 }, { CX2072X_GPIO_UM_ENABLE, 0x00000000 }, { CX2072X_GPIO_STICKY_MASK, 0x00000000 }, { CX2072X_DAC1_CONVERTER_FORMAT, 0x00000031 }, { CX2072X_DAC1_AMP_GAIN_RIGHT, 0x0000004a }, { CX2072X_DAC1_AMP_GAIN_LEFT, 0x0000004a }, { CX2072X_DAC1_POWER_STATE, 0x00000433 }, { CX2072X_DAC1_CONVERTER_STREAM_CHANNEL, 0x00000000 }, { CX2072X_DAC1_EAPD_ENABLE, 0x00000000 }, { CX2072X_DAC2_CONVERTER_FORMAT, 0x00000031 }, { CX2072X_DAC2_AMP_GAIN_RIGHT, 0x0000004a }, { CX2072X_DAC2_AMP_GAIN_LEFT, 0x0000004a }, { CX2072X_DAC2_POWER_STATE, 0x00000433 }, { CX2072X_DAC2_CONVERTER_STREAM_CHANNEL, 0x00000000 }, { CX2072X_ADC1_CONVERTER_FORMAT, 0x00000031 }, { CX2072X_ADC1_AMP_GAIN_RIGHT_0, 0x0000004a }, { CX2072X_ADC1_AMP_GAIN_LEFT_0, 0x0000004a }, { CX2072X_ADC1_AMP_GAIN_RIGHT_1, 0x0000004a }, { CX2072X_ADC1_AMP_GAIN_LEFT_1, 0x0000004a }, { CX2072X_ADC1_AMP_GAIN_RIGHT_2, 0x0000004a }, { CX2072X_ADC1_AMP_GAIN_LEFT_2, 0x0000004a }, { CX2072X_ADC1_AMP_GAIN_RIGHT_3, 0x0000004a }, { CX2072X_ADC1_AMP_GAIN_LEFT_3, 0x0000004a }, { CX2072X_ADC1_AMP_GAIN_RIGHT_4, 0x0000004a }, { CX2072X_ADC1_AMP_GAIN_LEFT_4, 0x0000004a }, { CX2072X_ADC1_AMP_GAIN_RIGHT_5, 0x0000004a }, { CX2072X_ADC1_AMP_GAIN_LEFT_5, 0x0000004a }, { CX2072X_ADC1_AMP_GAIN_RIGHT_6, 0x0000004a }, { CX2072X_ADC1_AMP_GAIN_LEFT_6, 0x0000004a }, { CX2072X_ADC1_CONNECTION_SELECT_CONTROL, 0x00000000 }, { CX2072X_ADC1_POWER_STATE, 0x00000433 }, { CX2072X_ADC1_CONVERTER_STREAM_CHANNEL, 0x00000000 }, { CX2072X_ADC2_CONVERTER_FORMAT, 0x00000031 }, { CX2072X_ADC2_AMP_GAIN_RIGHT_0, 0x0000004a }, { CX2072X_ADC2_AMP_GAIN_LEFT_0, 0x0000004a }, { CX2072X_ADC2_AMP_GAIN_RIGHT_1, 0x0000004a }, { CX2072X_ADC2_AMP_GAIN_LEFT_1, 0x0000004a }, { CX2072X_ADC2_AMP_GAIN_RIGHT_2, 0x0000004a }, { CX2072X_ADC2_AMP_GAIN_LEFT_2, 0x0000004a }, { CX2072X_ADC2_CONNECTION_SELECT_CONTROL, 0x00000000 }, { CX2072X_ADC2_POWER_STATE, 0x00000433 }, { CX2072X_ADC2_CONVERTER_STREAM_CHANNEL, 0x00000000 }, { CX2072X_PORTA_CONNECTION_SELECT_CTRL, 0x00000000 }, { CX2072X_PORTA_POWER_STATE, 0x00000433 }, { CX2072X_PORTA_PIN_CTRL, 0x000000c0 }, { CX2072X_PORTA_UNSOLICITED_RESPONSE, 0x00000000 }, { CX2072X_PORTA_PIN_SENSE, 0x00000000 }, { CX2072X_PORTA_EAPD_BTL, 0x00000002 }, { CX2072X_PORTB_POWER_STATE, 0x00000433 }, { CX2072X_PORTB_PIN_CTRL, 0x00000000 }, { CX2072X_PORTB_UNSOLICITED_RESPONSE, 0x00000000 }, { CX2072X_PORTB_PIN_SENSE, 0x00000000 }, { CX2072X_PORTB_EAPD_BTL, 0x00000002 }, { CX2072X_PORTB_GAIN_RIGHT, 0x00000000 }, { CX2072X_PORTB_GAIN_LEFT, 0x00000000 }, { CX2072X_PORTC_POWER_STATE, 0x00000433 }, { CX2072X_PORTC_PIN_CTRL, 0x00000000 }, { CX2072X_PORTC_GAIN_RIGHT, 0x00000000 }, { CX2072X_PORTC_GAIN_LEFT, 0x00000000 }, { CX2072X_PORTD_POWER_STATE, 0x00000433 }, { CX2072X_PORTD_PIN_CTRL, 0x00000020 }, { CX2072X_PORTD_UNSOLICITED_RESPONSE, 0x00000000 }, { CX2072X_PORTD_PIN_SENSE, 0x00000000 }, { CX2072X_PORTD_GAIN_RIGHT, 0x00000000 }, { CX2072X_PORTD_GAIN_LEFT, 0x00000000 }, { CX2072X_PORTE_CONNECTION_SELECT_CTRL, 0x00000000 }, { CX2072X_PORTE_POWER_STATE, 0x00000433 }, { CX2072X_PORTE_PIN_CTRL, 0x00000040 }, { CX2072X_PORTE_UNSOLICITED_RESPONSE, 0x00000000 }, { CX2072X_PORTE_PIN_SENSE, 0x00000000 }, { CX2072X_PORTE_EAPD_BTL, 0x00000002 }, { CX2072X_PORTE_GAIN_RIGHT, 0x00000000 }, { CX2072X_PORTE_GAIN_LEFT, 0x00000000 }, 
{ CX2072X_PORTF_POWER_STATE, 0x00000433 }, { CX2072X_PORTF_PIN_CTRL, 0x00000000 }, { CX2072X_PORTF_UNSOLICITED_RESPONSE, 0x00000000 }, { CX2072X_PORTF_PIN_SENSE, 0x00000000 }, { CX2072X_PORTF_GAIN_RIGHT, 0x00000000 }, { CX2072X_PORTF_GAIN_LEFT, 0x00000000 }, { CX2072X_PORTG_POWER_STATE, 0x00000433 }, { CX2072X_PORTG_PIN_CTRL, 0x00000040 }, { CX2072X_PORTG_CONNECTION_SELECT_CTRL, 0x00000000 }, { CX2072X_PORTG_EAPD_BTL, 0x00000002 }, { CX2072X_PORTM_POWER_STATE, 0x00000433 }, { CX2072X_PORTM_PIN_CTRL, 0x00000000 }, { CX2072X_PORTM_CONNECTION_SELECT_CTRL, 0x00000000 }, { CX2072X_PORTM_EAPD_BTL, 0x00000002 }, { CX2072X_MIXER_POWER_STATE, 0x00000433 }, { CX2072X_MIXER_GAIN_RIGHT_0, 0x0000004a }, { CX2072X_MIXER_GAIN_LEFT_0, 0x0000004a }, { CX2072X_MIXER_GAIN_RIGHT_1, 0x0000004a }, { CX2072X_MIXER_GAIN_LEFT_1, 0x0000004a }, { CX2072X_SPKR_DRC_ENABLE_STEP, 0x040065a4 }, { CX2072X_SPKR_DRC_CONTROL, 0x007b0024 }, { CX2072X_SPKR_DRC_TEST, 0x00000000 }, { CX2072X_DIGITAL_BIOS_TEST0, 0x001f008a }, { CX2072X_DIGITAL_BIOS_TEST2, 0x00990026 }, { CX2072X_I2SPCM_CONTROL1, 0x00010001 }, { CX2072X_I2SPCM_CONTROL2, 0x00000000 }, { CX2072X_I2SPCM_CONTROL3, 0x00000000 }, { CX2072X_I2SPCM_CONTROL4, 0x00000000 }, { CX2072X_I2SPCM_CONTROL5, 0x00000000 }, { CX2072X_I2SPCM_CONTROL6, 0x00000000 }, { CX2072X_UM_INTERRUPT_CRTL_E, 0x00000000 }, { CX2072X_CODEC_TEST2, 0x00000000 }, { CX2072X_CODEC_TEST9, 0x00000004 }, { CX2072X_CODEC_TEST20, 0x00000600 }, { CX2072X_CODEC_TEST26, 0x00000208 }, { CX2072X_ANALOG_TEST4, 0x00000000 }, { CX2072X_ANALOG_TEST5, 0x00000000 }, { CX2072X_ANALOG_TEST6, 0x0000059a }, { CX2072X_ANALOG_TEST7, 0x000000a7 }, { CX2072X_ANALOG_TEST8, 0x00000017 }, { CX2072X_ANALOG_TEST9, 0x00000000 }, { CX2072X_ANALOG_TEST10, 0x00000285 }, { CX2072X_ANALOG_TEST11, 0x00000000 }, { CX2072X_ANALOG_TEST12, 0x00000000 }, { CX2072X_ANALOG_TEST13, 0x00000000 }, { CX2072X_DIGITAL_TEST1, 0x00000242 }, { CX2072X_DIGITAL_TEST11, 0x00000000 }, { CX2072X_DIGITAL_TEST12, 0x00000084 }, { CX2072X_DIGITAL_TEST15, 0x00000077 }, { CX2072X_DIGITAL_TEST16, 0x00000021 }, { CX2072X_DIGITAL_TEST17, 0x00000018 }, { CX2072X_DIGITAL_TEST18, 0x00000024 }, { CX2072X_DIGITAL_TEST19, 0x00000001 }, { CX2072X_DIGITAL_TEST20, 0x00000002 }, }; /* * register initialization */ static const struct reg_sequence cx2072x_reg_init[] = { { CX2072X_ANALOG_TEST9, 0x080 }, /* DC offset Calibration */ { CX2072X_CODEC_TEST26, 0x65f }, /* Disable the PA */ { CX2072X_ANALOG_TEST10, 0x289 }, /* Set the speaker output gain */ { CX2072X_CODEC_TEST20, 0xf05 }, { CX2072X_CODEC_TESTXX, 0x380 }, { CX2072X_CODEC_TEST26, 0xb90 }, { CX2072X_CODEC_TEST9, 0x001 }, /* Enable 30 Hz High pass filter */ { CX2072X_ANALOG_TEST3, 0x300 }, /* Disable PCBEEP pad */ { CX2072X_CODEC_TEST24, 0x100 }, /* Disable SnM mode */ { CX2072X_PORTD_PIN_CTRL, 0x020 }, /* Enable PortD input */ { CX2072X_GPIO_ENABLE, 0x040 }, /* Enable GPIO7 pin for button */ { CX2072X_GPIO_UM_ENABLE, 0x040 }, /* Enable UM for GPIO7 */ { CX2072X_UM_RESPONSE, 0x080 }, /* Enable button response */ { CX2072X_DIGITAL_TEST12, 0x0c4 }, /* Enable headset button */ { CX2072X_DIGITAL_TEST0, 0x415 }, /* Power down class-D during idle */ { CX2072X_I2SPCM_CONTROL2, 0x00f }, /* Enable I2S TX */ { CX2072X_I2SPCM_CONTROL3, 0x00f }, /* Enable I2S RX */ }; static unsigned int cx2072x_register_size(unsigned int reg) { switch (reg) { case CX2072X_VENDOR_ID: case CX2072X_REVISION_ID: case CX2072X_PORTA_PIN_SENSE: case CX2072X_PORTB_PIN_SENSE: case CX2072X_PORTD_PIN_SENSE: case CX2072X_PORTE_PIN_SENSE: case 
CX2072X_PORTF_PIN_SENSE: case CX2072X_I2SPCM_CONTROL1: case CX2072X_I2SPCM_CONTROL2: case CX2072X_I2SPCM_CONTROL3: case CX2072X_I2SPCM_CONTROL4: case CX2072X_I2SPCM_CONTROL5: case CX2072X_I2SPCM_CONTROL6: case CX2072X_UM_INTERRUPT_CRTL_E: case CX2072X_EQ_G_COEFF: case CX2072X_SPKR_DRC_CONTROL: case CX2072X_SPKR_DRC_TEST: case CX2072X_DIGITAL_BIOS_TEST0: case CX2072X_DIGITAL_BIOS_TEST2: return 4; case CX2072X_EQ_ENABLE_BYPASS: case CX2072X_EQ_B0_COEFF: case CX2072X_EQ_B1_COEFF: case CX2072X_EQ_B2_COEFF: case CX2072X_EQ_A1_COEFF: case CX2072X_EQ_A2_COEFF: case CX2072X_DAC1_CONVERTER_FORMAT: case CX2072X_DAC2_CONVERTER_FORMAT: case CX2072X_ADC1_CONVERTER_FORMAT: case CX2072X_ADC2_CONVERTER_FORMAT: case CX2072X_CODEC_TEST2: case CX2072X_CODEC_TEST9: case CX2072X_CODEC_TEST20: case CX2072X_CODEC_TEST26: case CX2072X_ANALOG_TEST3: case CX2072X_ANALOG_TEST4: case CX2072X_ANALOG_TEST5: case CX2072X_ANALOG_TEST6: case CX2072X_ANALOG_TEST7: case CX2072X_ANALOG_TEST8: case CX2072X_ANALOG_TEST9: case CX2072X_ANALOG_TEST10: case CX2072X_ANALOG_TEST11: case CX2072X_ANALOG_TEST12: case CX2072X_ANALOG_TEST13: case CX2072X_DIGITAL_TEST0: case CX2072X_DIGITAL_TEST1: case CX2072X_DIGITAL_TEST11: case CX2072X_DIGITAL_TEST12: case CX2072X_DIGITAL_TEST15: case CX2072X_DIGITAL_TEST16: case CX2072X_DIGITAL_TEST17: case CX2072X_DIGITAL_TEST18: case CX2072X_DIGITAL_TEST19: case CX2072X_DIGITAL_TEST20: return 2; default: return 1; } } static bool cx2072x_readable_register(struct device *dev, unsigned int reg) { switch (reg) { case CX2072X_VENDOR_ID: case CX2072X_REVISION_ID: case CX2072X_CURRENT_BCLK_FREQUENCY: case CX2072X_AFG_POWER_STATE: case CX2072X_UM_RESPONSE: case CX2072X_GPIO_DATA: case CX2072X_GPIO_ENABLE: case CX2072X_GPIO_DIRECTION: case CX2072X_GPIO_WAKE: case CX2072X_GPIO_UM_ENABLE: case CX2072X_GPIO_STICKY_MASK: case CX2072X_DAC1_CONVERTER_FORMAT: case CX2072X_DAC1_AMP_GAIN_RIGHT: case CX2072X_DAC1_AMP_GAIN_LEFT: case CX2072X_DAC1_POWER_STATE: case CX2072X_DAC1_CONVERTER_STREAM_CHANNEL: case CX2072X_DAC1_EAPD_ENABLE: case CX2072X_DAC2_CONVERTER_FORMAT: case CX2072X_DAC2_AMP_GAIN_RIGHT: case CX2072X_DAC2_AMP_GAIN_LEFT: case CX2072X_DAC2_POWER_STATE: case CX2072X_DAC2_CONVERTER_STREAM_CHANNEL: case CX2072X_ADC1_CONVERTER_FORMAT: case CX2072X_ADC1_AMP_GAIN_RIGHT_0: case CX2072X_ADC1_AMP_GAIN_LEFT_0: case CX2072X_ADC1_AMP_GAIN_RIGHT_1: case CX2072X_ADC1_AMP_GAIN_LEFT_1: case CX2072X_ADC1_AMP_GAIN_RIGHT_2: case CX2072X_ADC1_AMP_GAIN_LEFT_2: case CX2072X_ADC1_AMP_GAIN_RIGHT_3: case CX2072X_ADC1_AMP_GAIN_LEFT_3: case CX2072X_ADC1_AMP_GAIN_RIGHT_4: case CX2072X_ADC1_AMP_GAIN_LEFT_4: case CX2072X_ADC1_AMP_GAIN_RIGHT_5: case CX2072X_ADC1_AMP_GAIN_LEFT_5: case CX2072X_ADC1_AMP_GAIN_RIGHT_6: case CX2072X_ADC1_AMP_GAIN_LEFT_6: case CX2072X_ADC1_CONNECTION_SELECT_CONTROL: case CX2072X_ADC1_POWER_STATE: case CX2072X_ADC1_CONVERTER_STREAM_CHANNEL: case CX2072X_ADC2_CONVERTER_FORMAT: case CX2072X_ADC2_AMP_GAIN_RIGHT_0: case CX2072X_ADC2_AMP_GAIN_LEFT_0: case CX2072X_ADC2_AMP_GAIN_RIGHT_1: case CX2072X_ADC2_AMP_GAIN_LEFT_1: case CX2072X_ADC2_AMP_GAIN_RIGHT_2: case CX2072X_ADC2_AMP_GAIN_LEFT_2: case CX2072X_ADC2_CONNECTION_SELECT_CONTROL: case CX2072X_ADC2_POWER_STATE: case CX2072X_ADC2_CONVERTER_STREAM_CHANNEL: case CX2072X_PORTA_CONNECTION_SELECT_CTRL: case CX2072X_PORTA_POWER_STATE: case CX2072X_PORTA_PIN_CTRL: case CX2072X_PORTA_UNSOLICITED_RESPONSE: case CX2072X_PORTA_PIN_SENSE: case CX2072X_PORTA_EAPD_BTL: case CX2072X_PORTB_POWER_STATE: case CX2072X_PORTB_PIN_CTRL: case CX2072X_PORTB_UNSOLICITED_RESPONSE: case 
CX2072X_PORTB_PIN_SENSE: case CX2072X_PORTB_EAPD_BTL: case CX2072X_PORTB_GAIN_RIGHT: case CX2072X_PORTB_GAIN_LEFT: case CX2072X_PORTC_POWER_STATE: case CX2072X_PORTC_PIN_CTRL: case CX2072X_PORTC_GAIN_RIGHT: case CX2072X_PORTC_GAIN_LEFT: case CX2072X_PORTD_POWER_STATE: case CX2072X_PORTD_PIN_CTRL: case CX2072X_PORTD_UNSOLICITED_RESPONSE: case CX2072X_PORTD_PIN_SENSE: case CX2072X_PORTD_GAIN_RIGHT: case CX2072X_PORTD_GAIN_LEFT: case CX2072X_PORTE_CONNECTION_SELECT_CTRL: case CX2072X_PORTE_POWER_STATE: case CX2072X_PORTE_PIN_CTRL: case CX2072X_PORTE_UNSOLICITED_RESPONSE: case CX2072X_PORTE_PIN_SENSE: case CX2072X_PORTE_EAPD_BTL: case CX2072X_PORTE_GAIN_RIGHT: case CX2072X_PORTE_GAIN_LEFT: case CX2072X_PORTF_POWER_STATE: case CX2072X_PORTF_PIN_CTRL: case CX2072X_PORTF_UNSOLICITED_RESPONSE: case CX2072X_PORTF_PIN_SENSE: case CX2072X_PORTF_GAIN_RIGHT: case CX2072X_PORTF_GAIN_LEFT: case CX2072X_PORTG_POWER_STATE: case CX2072X_PORTG_PIN_CTRL: case CX2072X_PORTG_CONNECTION_SELECT_CTRL: case CX2072X_PORTG_EAPD_BTL: case CX2072X_PORTM_POWER_STATE: case CX2072X_PORTM_PIN_CTRL: case CX2072X_PORTM_CONNECTION_SELECT_CTRL: case CX2072X_PORTM_EAPD_BTL: case CX2072X_MIXER_POWER_STATE: case CX2072X_MIXER_GAIN_RIGHT_0: case CX2072X_MIXER_GAIN_LEFT_0: case CX2072X_MIXER_GAIN_RIGHT_1: case CX2072X_MIXER_GAIN_LEFT_1: case CX2072X_EQ_ENABLE_BYPASS: case CX2072X_EQ_B0_COEFF: case CX2072X_EQ_B1_COEFF: case CX2072X_EQ_B2_COEFF: case CX2072X_EQ_A1_COEFF: case CX2072X_EQ_A2_COEFF: case CX2072X_EQ_G_COEFF: case CX2072X_SPKR_DRC_ENABLE_STEP: case CX2072X_SPKR_DRC_CONTROL: case CX2072X_SPKR_DRC_TEST: case CX2072X_DIGITAL_BIOS_TEST0: case CX2072X_DIGITAL_BIOS_TEST2: case CX2072X_I2SPCM_CONTROL1: case CX2072X_I2SPCM_CONTROL2: case CX2072X_I2SPCM_CONTROL3: case CX2072X_I2SPCM_CONTROL4: case CX2072X_I2SPCM_CONTROL5: case CX2072X_I2SPCM_CONTROL6: case CX2072X_UM_INTERRUPT_CRTL_E: case CX2072X_CODEC_TEST2: case CX2072X_CODEC_TEST9: case CX2072X_CODEC_TEST20: case CX2072X_CODEC_TEST26: case CX2072X_ANALOG_TEST4: case CX2072X_ANALOG_TEST5: case CX2072X_ANALOG_TEST6: case CX2072X_ANALOG_TEST7: case CX2072X_ANALOG_TEST8: case CX2072X_ANALOG_TEST9: case CX2072X_ANALOG_TEST10: case CX2072X_ANALOG_TEST11: case CX2072X_ANALOG_TEST12: case CX2072X_ANALOG_TEST13: case CX2072X_DIGITAL_TEST0: case CX2072X_DIGITAL_TEST1: case CX2072X_DIGITAL_TEST11: case CX2072X_DIGITAL_TEST12: case CX2072X_DIGITAL_TEST15: case CX2072X_DIGITAL_TEST16: case CX2072X_DIGITAL_TEST17: case CX2072X_DIGITAL_TEST18: case CX2072X_DIGITAL_TEST19: case CX2072X_DIGITAL_TEST20: return true; default: return false; } } static bool cx2072x_volatile_register(struct device *dev, unsigned int reg) { switch (reg) { case CX2072X_VENDOR_ID: case CX2072X_REVISION_ID: case CX2072X_UM_INTERRUPT_CRTL_E: case CX2072X_DIGITAL_TEST11: case CX2072X_PORTA_PIN_SENSE: case CX2072X_PORTB_PIN_SENSE: case CX2072X_PORTD_PIN_SENSE: case CX2072X_PORTE_PIN_SENSE: case CX2072X_PORTF_PIN_SENSE: case CX2072X_EQ_G_COEFF: case CX2072X_EQ_BAND: return true; default: return false; } } static int cx2072x_reg_raw_write(struct i2c_client *client, unsigned int reg, const void *val, size_t val_count) { struct device *dev = &client->dev; u8 buf[2 + CX2072X_MAX_EQ_COEFF]; int ret; if (WARN_ON(val_count + 2 > sizeof(buf))) return -EINVAL; buf[0] = reg >> 8; buf[1] = reg & 0xff; memcpy(buf + 2, val, val_count); ret = i2c_master_send(client, buf, val_count + 2); if (ret != val_count + 2) { dev_err(dev, "I2C write failed, ret = %d\n", ret); return ret < 0 ? 
ret : -EIO; } return 0; } static int cx2072x_reg_write(void *context, unsigned int reg, unsigned int value) { __le32 raw_value; unsigned int size; size = cx2072x_register_size(reg); if (reg == CX2072X_UM_INTERRUPT_CRTL_E) { /* Update the MSB byte only */ reg += 3; size = 1; value >>= 24; } raw_value = cpu_to_le32(value); return cx2072x_reg_raw_write(context, reg, &raw_value, size); } static int cx2072x_reg_read(void *context, unsigned int reg, unsigned int *value) { struct i2c_client *client = context; struct device *dev = &client->dev; __le32 recv_buf = 0; struct i2c_msg msgs[2]; unsigned int size; u8 send_buf[2]; int ret; size = cx2072x_register_size(reg); send_buf[0] = reg >> 8; send_buf[1] = reg & 0xff; msgs[0].addr = client->addr; msgs[0].len = sizeof(send_buf); msgs[0].buf = send_buf; msgs[0].flags = 0; msgs[1].addr = client->addr; msgs[1].len = size; msgs[1].buf = (u8 *)&recv_buf; msgs[1].flags = I2C_M_RD; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) { dev_err(dev, "Failed to read register, ret = %d\n", ret); return ret < 0 ? ret : -EIO; } *value = le32_to_cpu(recv_buf); return 0; } /* get suggested pre_div valuce from mclk frequency */ static unsigned int get_div_from_mclk(unsigned int mclk) { unsigned int div = 8; int i; for (i = 0; i < ARRAY_SIZE(mclk_pre_div); i++) { if (mclk <= mclk_pre_div[i].mclk) { div = mclk_pre_div[i].div; break; } } return div; } static int cx2072x_config_pll(struct cx2072x_priv *cx2072x) { struct device *dev = cx2072x->dev; unsigned int pre_div; unsigned int pre_div_val; unsigned int pll_input; unsigned int pll_output; unsigned int int_div; unsigned int frac_div; u64 frac_num; unsigned int frac; unsigned int sample_rate = cx2072x->sample_rate; int pt_sample_per_sync = 2; int pt_clock_per_sample = 96; switch (sample_rate) { case 48000: case 32000: case 24000: case 16000: break; case 96000: pt_sample_per_sync = 1; pt_clock_per_sample = 48; break; case 192000: pt_sample_per_sync = 0; pt_clock_per_sample = 24; break; default: dev_err(dev, "Unsupported sample rate %d\n", sample_rate); return -EINVAL; } /* Configure PLL settings */ pre_div = get_div_from_mclk(cx2072x->mclk_rate); pll_input = cx2072x->mclk_rate / pre_div; pll_output = sample_rate * 3072; int_div = pll_output / pll_input; frac_div = pll_output - (int_div * pll_input); if (frac_div) { frac_div *= 1000; frac_div /= pll_input; frac_num = (u64)(4000 + frac_div) * ((1 << 20) - 4); do_div(frac_num, 7); frac = ((u32)frac_num + 499) / 1000; } pre_div_val = (pre_div - 1) * 2; regmap_write(cx2072x->regmap, CX2072X_ANALOG_TEST4, 0x40 | (pre_div_val << 8)); if (frac_div == 0) { /* Int mode */ regmap_write(cx2072x->regmap, CX2072X_ANALOG_TEST7, 0x100); } else { /* frac mode */ regmap_write(cx2072x->regmap, CX2072X_ANALOG_TEST6, frac & 0xfff); regmap_write(cx2072x->regmap, CX2072X_ANALOG_TEST7, (u8)(frac >> 12)); } int_div--; regmap_write(cx2072x->regmap, CX2072X_ANALOG_TEST8, int_div); /* configure PLL tracking */ if (frac_div == 0) { /* disable PLL tracking */ regmap_write(cx2072x->regmap, CX2072X_DIGITAL_TEST16, 0x00); } else { /* configure and enable PLL tracking */ regmap_write(cx2072x->regmap, CX2072X_DIGITAL_TEST16, (pt_sample_per_sync << 4) & 0xf0); regmap_write(cx2072x->regmap, CX2072X_DIGITAL_TEST17, pt_clock_per_sample); regmap_write(cx2072x->regmap, CX2072X_DIGITAL_TEST18, pt_clock_per_sample * 3 / 2); regmap_write(cx2072x->regmap, CX2072X_DIGITAL_TEST19, 0x01); regmap_write(cx2072x->regmap, CX2072X_DIGITAL_TEST20, 0x02); 
regmap_update_bits(cx2072x->regmap, CX2072X_DIGITAL_TEST16, 0x01, 0x01); } return 0; } static int cx2072x_config_i2spcm(struct cx2072x_priv *cx2072x) { struct device *dev = cx2072x->dev; unsigned int bclk_rate = 0; int is_i2s = 0; int has_one_bit_delay = 0; int is_frame_inv = 0; int is_bclk_inv = 0; int pulse_len; int frame_len = cx2072x->frame_size; int sample_size = cx2072x->sample_size; int i2s_right_slot; int i2s_right_pause_interval = 0; int i2s_right_pause_pos; int is_big_endian = 1; u64 div; unsigned int mod; union cx2072x_reg_i2spcm_ctrl_reg1 reg1; union cx2072x_reg_i2spcm_ctrl_reg2 reg2; union cx2072x_reg_i2spcm_ctrl_reg3 reg3; union cx2072x_reg_i2spcm_ctrl_reg4 reg4; union cx2072x_reg_i2spcm_ctrl_reg5 reg5; union cx2072x_reg_i2spcm_ctrl_reg6 reg6; union cx2072x_reg_digital_bios_test2 regdbt2; const unsigned int fmt = cx2072x->dai_fmt; if (frame_len <= 0) { dev_err(dev, "Incorrect frame len %d\n", frame_len); return -EINVAL; } if (sample_size <= 0) { dev_err(dev, "Incorrect sample size %d\n", sample_size); return -EINVAL; } dev_dbg(dev, "config_i2spcm set_dai_fmt- %08x\n", fmt); regdbt2.ulval = 0xac; /* set master/slave */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: reg2.r.tx_master = 1; reg3.r.rx_master = 1; dev_dbg(dev, "Sets Master mode\n"); break; case SND_SOC_DAIFMT_CBS_CFS: reg2.r.tx_master = 0; reg3.r.rx_master = 0; dev_dbg(dev, "Sets Slave mode\n"); break; default: dev_err(dev, "Unsupported DAI master mode\n"); return -EINVAL; } /* set format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: is_i2s = 1; has_one_bit_delay = 1; pulse_len = frame_len / 2; break; case SND_SOC_DAIFMT_RIGHT_J: is_i2s = 1; pulse_len = frame_len / 2; break; case SND_SOC_DAIFMT_LEFT_J: is_i2s = 1; pulse_len = frame_len / 2; break; default: dev_err(dev, "Unsupported DAI format\n"); return -EINVAL; } /* clock inversion */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: is_frame_inv = is_i2s; is_bclk_inv = is_i2s; break; case SND_SOC_DAIFMT_IB_IF: is_frame_inv = !is_i2s; is_bclk_inv = !is_i2s; break; case SND_SOC_DAIFMT_IB_NF: is_frame_inv = is_i2s; is_bclk_inv = !is_i2s; break; case SND_SOC_DAIFMT_NB_IF: is_frame_inv = !is_i2s; is_bclk_inv = is_i2s; break; default: dev_err(dev, "Unsupported DAI clock inversion\n"); return -EINVAL; } reg1.r.rx_data_one_line = 1; reg1.r.tx_data_one_line = 1; if (is_i2s) { i2s_right_slot = (frame_len / 2) / BITS_PER_SLOT; i2s_right_pause_interval = (frame_len / 2) % BITS_PER_SLOT; i2s_right_pause_pos = i2s_right_slot * BITS_PER_SLOT; } reg1.r.rx_ws_pol = is_frame_inv; reg1.r.rx_ws_wid = pulse_len - 1; reg1.r.rx_frm_len = frame_len / BITS_PER_SLOT - 1; reg1.r.rx_sa_size = (sample_size / BITS_PER_SLOT) - 1; reg1.r.tx_ws_pol = reg1.r.rx_ws_pol; reg1.r.tx_ws_wid = pulse_len - 1; reg1.r.tx_frm_len = reg1.r.rx_frm_len; reg1.r.tx_sa_size = reg1.r.rx_sa_size; reg2.r.tx_endian_sel = !is_big_endian; reg2.r.tx_dstart_dly = has_one_bit_delay; if (cx2072x->en_aec_ref) reg2.r.tx_dstart_dly = 0; reg3.r.rx_endian_sel = !is_big_endian; reg3.r.rx_dstart_dly = has_one_bit_delay; reg4.ulval = 0; if (is_i2s) { reg2.r.tx_slot_1 = 0; reg2.r.tx_slot_2 = i2s_right_slot; reg3.r.rx_slot_1 = 0; if (cx2072x->en_aec_ref) reg3.r.rx_slot_2 = 0; else reg3.r.rx_slot_2 = i2s_right_slot; reg6.r.rx_pause_start_pos = i2s_right_pause_pos; reg6.r.rx_pause_cycles = i2s_right_pause_interval; reg6.r.tx_pause_start_pos = i2s_right_pause_pos; reg6.r.tx_pause_cycles = i2s_right_pause_interval; } else { dev_err(dev, "TDM mode is not implemented 
yet\n"); return -EINVAL; } regdbt2.r.i2s_bclk_invert = is_bclk_inv; /* Configures the BCLK output */ bclk_rate = cx2072x->sample_rate * frame_len; reg5.r.i2s_pcm_clk_div_chan_en = 0; /* Disables bclk output before setting new value */ regmap_write(cx2072x->regmap, CX2072X_I2SPCM_CONTROL5, 0); if (reg2.r.tx_master) { /* Configures BCLK rate */ div = PLL_OUT_HZ_48; mod = do_div(div, bclk_rate); if (mod) { dev_err(dev, "Unsupported BCLK %dHz\n", bclk_rate); return -EINVAL; } dev_dbg(dev, "enables BCLK %dHz output\n", bclk_rate); reg5.r.i2s_pcm_clk_div = (u32)div - 1; reg5.r.i2s_pcm_clk_div_chan_en = 1; } regmap_write(cx2072x->regmap, CX2072X_I2SPCM_CONTROL1, reg1.ulval); regmap_update_bits(cx2072x->regmap, CX2072X_I2SPCM_CONTROL2, 0xffffffc0, reg2.ulval); regmap_update_bits(cx2072x->regmap, CX2072X_I2SPCM_CONTROL3, 0xffffffc0, reg3.ulval); regmap_write(cx2072x->regmap, CX2072X_I2SPCM_CONTROL4, reg4.ulval); regmap_write(cx2072x->regmap, CX2072X_I2SPCM_CONTROL6, reg6.ulval); regmap_write(cx2072x->regmap, CX2072X_I2SPCM_CONTROL5, reg5.ulval); regmap_write(cx2072x->regmap, CX2072X_DIGITAL_BIOS_TEST2, regdbt2.ulval); return 0; } static int afg_power_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *codec = snd_soc_dapm_to_component(w->dapm); struct cx2072x_priv *cx2072x = snd_soc_component_get_drvdata(codec); switch (event) { case SND_SOC_DAPM_POST_PMU: regmap_update_bits(cx2072x->regmap, CX2072X_DIGITAL_BIOS_TEST0, 0x00, 0x10); break; case SND_SOC_DAPM_PRE_PMD: regmap_update_bits(cx2072x->regmap, CX2072X_DIGITAL_BIOS_TEST0, 0x10, 0x10); break; } return 0; } static const struct snd_kcontrol_new cx2072x_snd_controls[] = { SOC_DOUBLE_R_TLV("PortD Boost Volume", CX2072X_PORTD_GAIN_LEFT, CX2072X_PORTD_GAIN_RIGHT, 0, 3, 0, boost_tlv), SOC_DOUBLE_R_TLV("PortC Boost Volume", CX2072X_PORTC_GAIN_LEFT, CX2072X_PORTC_GAIN_RIGHT, 0, 3, 0, boost_tlv), SOC_DOUBLE_R_TLV("PortB Boost Volume", CX2072X_PORTB_GAIN_LEFT, CX2072X_PORTB_GAIN_RIGHT, 0, 3, 0, boost_tlv), SOC_DOUBLE_R_TLV("PortD ADC1 Volume", CX2072X_ADC1_AMP_GAIN_LEFT_1, CX2072X_ADC1_AMP_GAIN_RIGHT_1, 0, 0x4a, 0, adc_tlv), SOC_DOUBLE_R_TLV("PortC ADC1 Volume", CX2072X_ADC1_AMP_GAIN_LEFT_2, CX2072X_ADC1_AMP_GAIN_RIGHT_2, 0, 0x4a, 0, adc_tlv), SOC_DOUBLE_R_TLV("PortB ADC1 Volume", CX2072X_ADC1_AMP_GAIN_LEFT_0, CX2072X_ADC1_AMP_GAIN_RIGHT_0, 0, 0x4a, 0, adc_tlv), SOC_DOUBLE_R_TLV("DAC1 Volume", CX2072X_DAC1_AMP_GAIN_LEFT, CX2072X_DAC1_AMP_GAIN_RIGHT, 0, 0x4a, 0, dac_tlv), SOC_DOUBLE_R("DAC1 Switch", CX2072X_DAC1_AMP_GAIN_LEFT, CX2072X_DAC1_AMP_GAIN_RIGHT, 7, 1, 0), SOC_DOUBLE_R_TLV("DAC2 Volume", CX2072X_DAC2_AMP_GAIN_LEFT, CX2072X_DAC2_AMP_GAIN_RIGHT, 0, 0x4a, 0, dac_tlv), SOC_SINGLE_TLV("HPF Freq", CX2072X_CODEC_TEST9, 0, 0x3f, 0, hpf_tlv), SOC_DOUBLE("HPF Switch", CX2072X_CODEC_TEST9, 8, 9, 1, 1), SOC_SINGLE("PortA HP Amp Switch", CX2072X_PORTA_PIN_CTRL, 7, 1, 0), }; static int cx2072x_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_component *codec = dai->component; struct cx2072x_priv *cx2072x = snd_soc_component_get_drvdata(codec); struct device *dev = codec->dev; const unsigned int sample_rate = params_rate(params); int sample_size, frame_size; /* Data sizes if not using TDM */ sample_size = params_width(params); if (sample_size < 0) return sample_size; frame_size = snd_soc_params_to_frame_size(params); if (frame_size < 0) return frame_size; if (cx2072x->mclk_rate == 0) { dev_err(dev, "Master clock rate is not 
configured\n"); return -EINVAL; } if (cx2072x->bclk_ratio) frame_size = cx2072x->bclk_ratio; switch (sample_rate) { case 48000: case 32000: case 24000: case 16000: case 96000: case 192000: break; default: dev_err(dev, "Unsupported sample rate %d\n", sample_rate); return -EINVAL; } dev_dbg(dev, "Sample size %d bits, frame = %d bits, rate = %d Hz\n", sample_size, frame_size, sample_rate); cx2072x->frame_size = frame_size; cx2072x->sample_size = sample_size; cx2072x->sample_rate = sample_rate; if (dai->id == CX2072X_DAI_DSP) { cx2072x->en_aec_ref = true; dev_dbg(cx2072x->dev, "enables aec reference\n"); regmap_write(cx2072x->regmap, CX2072X_ADC1_CONNECTION_SELECT_CONTROL, 3); } if (cx2072x->pll_changed) { cx2072x_config_pll(cx2072x); cx2072x->pll_changed = false; } if (cx2072x->i2spcm_changed) { cx2072x_config_i2spcm(cx2072x); cx2072x->i2spcm_changed = false; } return 0; } static int cx2072x_set_dai_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio) { struct snd_soc_component *codec = dai->component; struct cx2072x_priv *cx2072x = snd_soc_component_get_drvdata(codec); cx2072x->bclk_ratio = ratio; return 0; } static int cx2072x_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_component *codec = dai->component; struct cx2072x_priv *cx2072x = snd_soc_component_get_drvdata(codec); if (clk_set_rate(cx2072x->mclk, freq)) { dev_err(codec->dev, "set clk rate failed\n"); return -EINVAL; } cx2072x->mclk_rate = freq; return 0; } static int cx2072x_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct snd_soc_component *codec = dai->component; struct cx2072x_priv *cx2072x = snd_soc_component_get_drvdata(codec); struct device *dev = codec->dev; dev_dbg(dev, "set_dai_fmt- %08x\n", fmt); /* set master/slave */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: case SND_SOC_DAIFMT_CBS_CFS: break; default: dev_err(dev, "Unsupported DAI master mode\n"); return -EINVAL; } /* set format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: case SND_SOC_DAIFMT_RIGHT_J: case SND_SOC_DAIFMT_LEFT_J: break; default: dev_err(dev, "Unsupported DAI format\n"); return -EINVAL; } /* clock inversion */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: case SND_SOC_DAIFMT_IB_IF: case SND_SOC_DAIFMT_IB_NF: case SND_SOC_DAIFMT_NB_IF: break; default: dev_err(dev, "Unsupported DAI clock inversion\n"); return -EINVAL; } cx2072x->dai_fmt = fmt; return 0; } static const struct snd_kcontrol_new portaouten_ctl = SOC_DAPM_SINGLE("Switch", CX2072X_PORTA_PIN_CTRL, 6, 1, 0); static const struct snd_kcontrol_new porteouten_ctl = SOC_DAPM_SINGLE("Switch", CX2072X_PORTE_PIN_CTRL, 6, 1, 0); static const struct snd_kcontrol_new portgouten_ctl = SOC_DAPM_SINGLE("Switch", CX2072X_PORTG_PIN_CTRL, 6, 1, 0); static const struct snd_kcontrol_new portmouten_ctl = SOC_DAPM_SINGLE("Switch", CX2072X_PORTM_PIN_CTRL, 6, 1, 0); static const struct snd_kcontrol_new portbinen_ctl = SOC_DAPM_SINGLE("Switch", CX2072X_PORTB_PIN_CTRL, 5, 1, 0); static const struct snd_kcontrol_new portcinen_ctl = SOC_DAPM_SINGLE("Switch", CX2072X_PORTC_PIN_CTRL, 5, 1, 0); static const struct snd_kcontrol_new portdinen_ctl = SOC_DAPM_SINGLE("Switch", CX2072X_PORTD_PIN_CTRL, 5, 1, 0); static const struct snd_kcontrol_new porteinen_ctl = SOC_DAPM_SINGLE("Switch", CX2072X_PORTE_PIN_CTRL, 5, 1, 0); static const struct snd_kcontrol_new i2sadc1l_ctl = SOC_DAPM_SINGLE("Switch", CX2072X_I2SPCM_CONTROL2, 0, 1, 0); static const struct snd_kcontrol_new i2sadc1r_ctl = 
SOC_DAPM_SINGLE("Switch", CX2072X_I2SPCM_CONTROL2, 1, 1, 0); static const struct snd_kcontrol_new i2sadc2l_ctl = SOC_DAPM_SINGLE("Switch", CX2072X_I2SPCM_CONTROL2, 2, 1, 0); static const struct snd_kcontrol_new i2sadc2r_ctl = SOC_DAPM_SINGLE("Switch", CX2072X_I2SPCM_CONTROL2, 3, 1, 0); static const struct snd_kcontrol_new i2sdac1l_ctl = SOC_DAPM_SINGLE("Switch", CX2072X_I2SPCM_CONTROL3, 0, 1, 0); static const struct snd_kcontrol_new i2sdac1r_ctl = SOC_DAPM_SINGLE("Switch", CX2072X_I2SPCM_CONTROL3, 1, 1, 0); static const struct snd_kcontrol_new i2sdac2l_ctl = SOC_DAPM_SINGLE("Switch", CX2072X_I2SPCM_CONTROL3, 2, 1, 0); static const struct snd_kcontrol_new i2sdac2r_ctl = SOC_DAPM_SINGLE("Switch", CX2072X_I2SPCM_CONTROL3, 3, 1, 0); static const char * const dac_enum_text[] = { "DAC1 Switch", "DAC2 Switch", }; static const struct soc_enum porta_dac_enum = SOC_ENUM_SINGLE(CX2072X_PORTA_CONNECTION_SELECT_CTRL, 0, 2, dac_enum_text); static const struct snd_kcontrol_new porta_mux = SOC_DAPM_ENUM("PortA Mux", porta_dac_enum); static const struct soc_enum portg_dac_enum = SOC_ENUM_SINGLE(CX2072X_PORTG_CONNECTION_SELECT_CTRL, 0, 2, dac_enum_text); static const struct snd_kcontrol_new portg_mux = SOC_DAPM_ENUM("PortG Mux", portg_dac_enum); static const struct soc_enum porte_dac_enum = SOC_ENUM_SINGLE(CX2072X_PORTE_CONNECTION_SELECT_CTRL, 0, 2, dac_enum_text); static const struct snd_kcontrol_new porte_mux = SOC_DAPM_ENUM("PortE Mux", porte_dac_enum); static const struct soc_enum portm_dac_enum = SOC_ENUM_SINGLE(CX2072X_PORTM_CONNECTION_SELECT_CTRL, 0, 2, dac_enum_text); static const struct snd_kcontrol_new portm_mux = SOC_DAPM_ENUM("PortM Mux", portm_dac_enum); static const char * const adc1in_sel_text[] = { "PortB Switch", "PortD Switch", "PortC Switch", "Widget15 Switch", "PortE Switch", "PortF Switch", "PortH Switch" }; static const struct soc_enum adc1in_sel_enum = SOC_ENUM_SINGLE(CX2072X_ADC1_CONNECTION_SELECT_CONTROL, 0, 7, adc1in_sel_text); static const struct snd_kcontrol_new adc1_mux = SOC_DAPM_ENUM("ADC1 Mux", adc1in_sel_enum); static const char * const adc2in_sel_text[] = { "PortC Switch", "Widget15 Switch", "PortH Switch" }; static const struct soc_enum adc2in_sel_enum = SOC_ENUM_SINGLE(CX2072X_ADC2_CONNECTION_SELECT_CONTROL, 0, 3, adc2in_sel_text); static const struct snd_kcontrol_new adc2_mux = SOC_DAPM_ENUM("ADC2 Mux", adc2in_sel_enum); static const struct snd_kcontrol_new wid15_mix[] = { SOC_DAPM_SINGLE("DAC1L Switch", CX2072X_MIXER_GAIN_LEFT_0, 7, 1, 1), SOC_DAPM_SINGLE("DAC1R Switch", CX2072X_MIXER_GAIN_RIGHT_0, 7, 1, 1), SOC_DAPM_SINGLE("DAC2L Switch", CX2072X_MIXER_GAIN_LEFT_1, 7, 1, 1), SOC_DAPM_SINGLE("DAC2R Switch", CX2072X_MIXER_GAIN_RIGHT_1, 7, 1, 1), }; #define CX2072X_DAPM_SUPPLY_S(wname, wsubseq, wreg, wshift, wmask, won_val, \ woff_val, wevent, wflags) \ {.id = snd_soc_dapm_supply, .name = wname, .kcontrol_news = NULL, \ .num_kcontrols = 0, .reg = wreg, .shift = wshift, .mask = wmask, \ .on_val = won_val, .off_val = woff_val, \ .subseq = wsubseq, .event = wevent, .event_flags = wflags} #define CX2072X_DAPM_SWITCH(wname, wreg, wshift, wmask, won_val, woff_val, \ wevent, wflags) \ {.id = snd_soc_dapm_switch, .name = wname, .kcontrol_news = NULL, \ .num_kcontrols = 0, .reg = wreg, .shift = wshift, .mask = wmask, \ .on_val = won_val, .off_val = woff_val, \ .event = wevent, .event_flags = wflags} #define CX2072X_DAPM_SWITCH(wname, wreg, wshift, wmask, won_val, woff_val, \ wevent, wflags) \ {.id = snd_soc_dapm_switch, .name = wname, .kcontrol_news = NULL, \ .num_kcontrols = 0, 
.reg = wreg, .shift = wshift, .mask = wmask, \ .on_val = won_val, .off_val = woff_val, \ .event = wevent, .event_flags = wflags} #define CX2072X_DAPM_REG_E(wid, wname, wreg, wshift, wmask, won_val, woff_val, \ wevent, wflags) \ {.id = wid, .name = wname, .kcontrol_news = NULL, .num_kcontrols = 0, \ .reg = wreg, .shift = wshift, .mask = wmask, \ .on_val = won_val, .off_val = woff_val, \ .event = wevent, .event_flags = wflags} static const struct snd_soc_dapm_widget cx2072x_dapm_widgets[] = { /*Playback*/ SND_SOC_DAPM_AIF_IN("In AIF", "Playback", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_SWITCH("I2S DAC1L", SND_SOC_NOPM, 0, 0, &i2sdac1l_ctl), SND_SOC_DAPM_SWITCH("I2S DAC1R", SND_SOC_NOPM, 0, 0, &i2sdac1r_ctl), SND_SOC_DAPM_SWITCH("I2S DAC2L", SND_SOC_NOPM, 0, 0, &i2sdac2l_ctl), SND_SOC_DAPM_SWITCH("I2S DAC2R", SND_SOC_NOPM, 0, 0, &i2sdac2r_ctl), SND_SOC_DAPM_REG(snd_soc_dapm_dac, "DAC1", CX2072X_DAC1_POWER_STATE, 0, 0xfff, 0x00, 0x03), SND_SOC_DAPM_REG(snd_soc_dapm_dac, "DAC2", CX2072X_DAC2_POWER_STATE, 0, 0xfff, 0x00, 0x03), SND_SOC_DAPM_MUX("PortA Mux", SND_SOC_NOPM, 0, 0, &porta_mux), SND_SOC_DAPM_MUX("PortG Mux", SND_SOC_NOPM, 0, 0, &portg_mux), SND_SOC_DAPM_MUX("PortE Mux", SND_SOC_NOPM, 0, 0, &porte_mux), SND_SOC_DAPM_MUX("PortM Mux", SND_SOC_NOPM, 0, 0, &portm_mux), SND_SOC_DAPM_REG(snd_soc_dapm_supply, "PortA Power", CX2072X_PORTA_POWER_STATE, 0, 0xfff, 0x00, 0x03), SND_SOC_DAPM_REG(snd_soc_dapm_supply, "PortM Power", CX2072X_PORTM_POWER_STATE, 0, 0xfff, 0x00, 0x03), SND_SOC_DAPM_REG(snd_soc_dapm_supply, "PortG Power", CX2072X_PORTG_POWER_STATE, 0, 0xfff, 0x00, 0x03), CX2072X_DAPM_SUPPLY_S("AFG Power", 0, CX2072X_AFG_POWER_STATE, 0, 0xfff, 0x00, 0x03, afg_power_ev, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_SWITCH("PortA Out En", SND_SOC_NOPM, 0, 0, &portaouten_ctl), SND_SOC_DAPM_SWITCH("PortE Out En", SND_SOC_NOPM, 0, 0, &porteouten_ctl), SND_SOC_DAPM_SWITCH("PortG Out En", SND_SOC_NOPM, 0, 0, &portgouten_ctl), SND_SOC_DAPM_SWITCH("PortM Out En", SND_SOC_NOPM, 0, 0, &portmouten_ctl), SND_SOC_DAPM_OUTPUT("PORTA"), SND_SOC_DAPM_OUTPUT("PORTG"), SND_SOC_DAPM_OUTPUT("PORTE"), SND_SOC_DAPM_OUTPUT("PORTM"), SND_SOC_DAPM_OUTPUT("AEC REF"), /*Capture*/ SND_SOC_DAPM_AIF_OUT("Out AIF", "Capture", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_SWITCH("I2S ADC1L", SND_SOC_NOPM, 0, 0, &i2sadc1l_ctl), SND_SOC_DAPM_SWITCH("I2S ADC1R", SND_SOC_NOPM, 0, 0, &i2sadc1r_ctl), SND_SOC_DAPM_SWITCH("I2S ADC2L", SND_SOC_NOPM, 0, 0, &i2sadc2l_ctl), SND_SOC_DAPM_SWITCH("I2S ADC2R", SND_SOC_NOPM, 0, 0, &i2sadc2r_ctl), SND_SOC_DAPM_REG(snd_soc_dapm_adc, "ADC1", CX2072X_ADC1_POWER_STATE, 0, 0xff, 0x00, 0x03), SND_SOC_DAPM_REG(snd_soc_dapm_adc, "ADC2", CX2072X_ADC2_POWER_STATE, 0, 0xff, 0x00, 0x03), SND_SOC_DAPM_MUX("ADC1 Mux", SND_SOC_NOPM, 0, 0, &adc1_mux), SND_SOC_DAPM_MUX("ADC2 Mux", SND_SOC_NOPM, 0, 0, &adc2_mux), SND_SOC_DAPM_REG(snd_soc_dapm_supply, "PortB Power", CX2072X_PORTB_POWER_STATE, 0, 0xfff, 0x00, 0x03), SND_SOC_DAPM_REG(snd_soc_dapm_supply, "PortC Power", CX2072X_PORTC_POWER_STATE, 0, 0xfff, 0x00, 0x03), SND_SOC_DAPM_REG(snd_soc_dapm_supply, "PortD Power", CX2072X_PORTD_POWER_STATE, 0, 0xfff, 0x00, 0x03), SND_SOC_DAPM_REG(snd_soc_dapm_supply, "PortE Power", CX2072X_PORTE_POWER_STATE, 0, 0xfff, 0x00, 0x03), SND_SOC_DAPM_REG(snd_soc_dapm_supply, "Widget15 Power", CX2072X_MIXER_POWER_STATE, 0, 0xfff, 0x00, 0x03), SND_SOC_DAPM_MIXER("Widget15 Mixer", SND_SOC_NOPM, 0, 0, wid15_mix, ARRAY_SIZE(wid15_mix)), SND_SOC_DAPM_SWITCH("PortB In En", SND_SOC_NOPM, 0, 0, &portbinen_ctl), 
SND_SOC_DAPM_SWITCH("PortC In En", SND_SOC_NOPM, 0, 0, &portcinen_ctl), SND_SOC_DAPM_SWITCH("PortD In En", SND_SOC_NOPM, 0, 0, &portdinen_ctl), SND_SOC_DAPM_SWITCH("PortE In En", SND_SOC_NOPM, 0, 0, &porteinen_ctl), SND_SOC_DAPM_MICBIAS("Headset Bias", CX2072X_ANALOG_TEST11, 1, 0), SND_SOC_DAPM_MICBIAS("PortB Mic Bias", CX2072X_PORTB_PIN_CTRL, 2, 0), SND_SOC_DAPM_MICBIAS("PortD Mic Bias", CX2072X_PORTD_PIN_CTRL, 2, 0), SND_SOC_DAPM_MICBIAS("PortE Mic Bias", CX2072X_PORTE_PIN_CTRL, 2, 0), SND_SOC_DAPM_INPUT("PORTB"), SND_SOC_DAPM_INPUT("PORTC"), SND_SOC_DAPM_INPUT("PORTD"), SND_SOC_DAPM_INPUT("PORTEIN"), }; static const struct snd_soc_dapm_route cx2072x_intercon[] = { /* Playback */ {"In AIF", NULL, "AFG Power"}, {"I2S DAC1L", "Switch", "In AIF"}, {"I2S DAC1R", "Switch", "In AIF"}, {"I2S DAC2L", "Switch", "In AIF"}, {"I2S DAC2R", "Switch", "In AIF"}, {"DAC1", NULL, "I2S DAC1L"}, {"DAC1", NULL, "I2S DAC1R"}, {"DAC2", NULL, "I2S DAC2L"}, {"DAC2", NULL, "I2S DAC2R"}, {"PortA Mux", "DAC1 Switch", "DAC1"}, {"PortA Mux", "DAC2 Switch", "DAC2"}, {"PortG Mux", "DAC1 Switch", "DAC1"}, {"PortG Mux", "DAC2 Switch", "DAC2"}, {"PortE Mux", "DAC1 Switch", "DAC1"}, {"PortE Mux", "DAC2 Switch", "DAC2"}, {"PortM Mux", "DAC1 Switch", "DAC1"}, {"PortM Mux", "DAC2 Switch", "DAC2"}, {"Widget15 Mixer", "DAC1L Switch", "DAC1"}, {"Widget15 Mixer", "DAC1R Switch", "DAC2"}, {"Widget15 Mixer", "DAC2L Switch", "DAC1"}, {"Widget15 Mixer", "DAC2R Switch", "DAC2"}, {"Widget15 Mixer", NULL, "Widget15 Power"}, {"PortA Out En", "Switch", "PortA Mux"}, {"PortG Out En", "Switch", "PortG Mux"}, {"PortE Out En", "Switch", "PortE Mux"}, {"PortM Out En", "Switch", "PortM Mux"}, {"PortA Mux", NULL, "PortA Power"}, {"PortG Mux", NULL, "PortG Power"}, {"PortE Mux", NULL, "PortE Power"}, {"PortM Mux", NULL, "PortM Power"}, {"PortA Out En", NULL, "PortA Power"}, {"PortG Out En", NULL, "PortG Power"}, {"PortE Out En", NULL, "PortE Power"}, {"PortM Out En", NULL, "PortM Power"}, {"PORTA", NULL, "PortA Out En"}, {"PORTG", NULL, "PortG Out En"}, {"PORTE", NULL, "PortE Out En"}, {"PORTM", NULL, "PortM Out En"}, /* Capture */ {"PORTD", NULL, "Headset Bias"}, {"PortB In En", "Switch", "PORTB"}, {"PortC In En", "Switch", "PORTC"}, {"PortD In En", "Switch", "PORTD"}, {"PortE In En", "Switch", "PORTEIN"}, {"ADC1 Mux", "PortB Switch", "PortB In En"}, {"ADC1 Mux", "PortC Switch", "PortC In En"}, {"ADC1 Mux", "PortD Switch", "PortD In En"}, {"ADC1 Mux", "PortE Switch", "PortE In En"}, {"ADC1 Mux", "Widget15 Switch", "Widget15 Mixer"}, {"ADC2 Mux", "PortC Switch", "PortC In En"}, {"ADC2 Mux", "Widget15 Switch", "Widget15 Mixer"}, {"ADC1", NULL, "ADC1 Mux"}, {"ADC2", NULL, "ADC2 Mux"}, {"I2S ADC1L", "Switch", "ADC1"}, {"I2S ADC1R", "Switch", "ADC1"}, {"I2S ADC2L", "Switch", "ADC2"}, {"I2S ADC2R", "Switch", "ADC2"}, {"Out AIF", NULL, "I2S ADC1L"}, {"Out AIF", NULL, "I2S ADC1R"}, {"Out AIF", NULL, "I2S ADC2L"}, {"Out AIF", NULL, "I2S ADC2R"}, {"Out AIF", NULL, "AFG Power"}, {"AEC REF", NULL, "Out AIF"}, {"PortB In En", NULL, "PortB Power"}, {"PortC In En", NULL, "PortC Power"}, {"PortD In En", NULL, "PortD Power"}, {"PortE In En", NULL, "PortE Power"}, }; static int cx2072x_set_bias_level(struct snd_soc_component *codec, enum snd_soc_bias_level level) { struct cx2072x_priv *cx2072x = snd_soc_component_get_drvdata(codec); const enum snd_soc_bias_level old_level = snd_soc_component_get_bias_level(codec); if (level == SND_SOC_BIAS_STANDBY && old_level == SND_SOC_BIAS_OFF) regmap_write(cx2072x->regmap, CX2072X_AFG_POWER_STATE, 0); else if (level == 
SND_SOC_BIAS_OFF && old_level != SND_SOC_BIAS_OFF) regmap_write(cx2072x->regmap, CX2072X_AFG_POWER_STATE, 3); return 0; } /* * FIXME: the whole jack detection code below is pretty platform-specific; * it has lots of implicit assumptions about the pins, etc. * However, since we have no other code and reference, take this hard-coded * setup for now. Once when we have different platform implementations, * this needs to be rewritten in a more generic form, or moving into the * platform data. */ static void cx2072x_enable_jack_detect(struct snd_soc_component *codec) { struct cx2072x_priv *cx2072x = snd_soc_component_get_drvdata(codec); struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(codec); /* No-sticky input type */ regmap_write(cx2072x->regmap, CX2072X_GPIO_STICKY_MASK, 0x1f); /* Use GPOI0 as interrupt pin */ regmap_write(cx2072x->regmap, CX2072X_UM_INTERRUPT_CRTL_E, 0x12 << 24); /* Enables unsolitited message on PortA */ regmap_write(cx2072x->regmap, CX2072X_PORTA_UNSOLICITED_RESPONSE, 0x80); /* support both nokia and apple headset set. Monitor time = 275 ms */ regmap_write(cx2072x->regmap, CX2072X_DIGITAL_TEST15, 0x73); /* Disable TIP detection */ regmap_write(cx2072x->regmap, CX2072X_ANALOG_TEST12, 0x300); /* Switch MusicD3Live pin to GPIO */ regmap_write(cx2072x->regmap, CX2072X_DIGITAL_TEST1, 0); snd_soc_dapm_mutex_lock(dapm); snd_soc_dapm_force_enable_pin_unlocked(dapm, "PORTD"); snd_soc_dapm_force_enable_pin_unlocked(dapm, "Headset Bias"); snd_soc_dapm_force_enable_pin_unlocked(dapm, "PortD Mic Bias"); snd_soc_dapm_mutex_unlock(dapm); } static void cx2072x_disable_jack_detect(struct snd_soc_component *codec) { struct cx2072x_priv *cx2072x = snd_soc_component_get_drvdata(codec); regmap_write(cx2072x->regmap, CX2072X_UM_INTERRUPT_CRTL_E, 0); regmap_write(cx2072x->regmap, CX2072X_PORTA_UNSOLICITED_RESPONSE, 0); } static int cx2072x_jack_status_check(void *data) { struct snd_soc_component *codec = data; struct cx2072x_priv *cx2072x = snd_soc_component_get_drvdata(codec); unsigned int jack; unsigned int type = 0; int state = 0; mutex_lock(&cx2072x->lock); regmap_read(cx2072x->regmap, CX2072X_PORTA_PIN_SENSE, &jack); jack = jack >> 24; regmap_read(cx2072x->regmap, CX2072X_DIGITAL_TEST11, &type); if (jack == 0x80) { type = type >> 8; if (type & 0x8) { /* Apple headset */ state |= SND_JACK_HEADSET; if (type & 0x2) state |= SND_JACK_BTN_0; } else { /* * Nokia headset (type & 0x4) and * regular Headphone */ state |= SND_JACK_HEADPHONE; } } /* clear interrupt */ regmap_write(cx2072x->regmap, CX2072X_UM_INTERRUPT_CRTL_E, 0x12 << 24); mutex_unlock(&cx2072x->lock); dev_dbg(codec->dev, "CX2072X_HSDETECT type=0x%X,Jack state = %x\n", type, state); return state; } static const struct snd_soc_jack_gpio cx2072x_jack_gpio = { .name = "headset", .report = SND_JACK_HEADSET | SND_JACK_BTN_0, .debounce_time = 150, .wake = true, .jack_status_check = cx2072x_jack_status_check, }; static int cx2072x_set_jack(struct snd_soc_component *codec, struct snd_soc_jack *jack, void *data) { struct cx2072x_priv *cx2072x = snd_soc_component_get_drvdata(codec); int err; if (!jack) { cx2072x_disable_jack_detect(codec); return 0; } if (!cx2072x->jack_gpio.gpiod_dev) { cx2072x->jack_gpio = cx2072x_jack_gpio; cx2072x->jack_gpio.gpiod_dev = codec->dev; cx2072x->jack_gpio.data = codec; err = snd_soc_jack_add_gpios(jack, 1, &cx2072x->jack_gpio); if (err) { cx2072x->jack_gpio.gpiod_dev = NULL; return err; } } cx2072x_enable_jack_detect(codec); return 0; } static int cx2072x_probe(struct snd_soc_component *codec) { 
struct cx2072x_priv *cx2072x = snd_soc_component_get_drvdata(codec); cx2072x->codec = codec; /* * FIXME: below is, again, a very platform-specific init sequence, * but we keep the code here just for simplicity. It seems that all * existing hardware implementations require this, so there is no very * much reason to move this out of the codec driver to the platform * data. * But of course it's no "right" thing; if you are a good boy, don't * read and follow the code like this! */ pm_runtime_get_sync(codec->dev); regmap_write(cx2072x->regmap, CX2072X_AFG_POWER_STATE, 0); regmap_multi_reg_write(cx2072x->regmap, cx2072x_reg_init, ARRAY_SIZE(cx2072x_reg_init)); /* configure PortC as input device */ regmap_update_bits(cx2072x->regmap, CX2072X_PORTC_PIN_CTRL, 0x20, 0x20); regmap_update_bits(cx2072x->regmap, CX2072X_DIGITAL_BIOS_TEST2, 0x84, 0xff); regmap_write(cx2072x->regmap, CX2072X_AFG_POWER_STATE, 3); pm_runtime_put(codec->dev); return 0; } static const struct snd_soc_component_driver soc_codec_driver_cx2072x = { .probe = cx2072x_probe, .set_bias_level = cx2072x_set_bias_level, .set_jack = cx2072x_set_jack, .controls = cx2072x_snd_controls, .num_controls = ARRAY_SIZE(cx2072x_snd_controls), .dapm_widgets = cx2072x_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(cx2072x_dapm_widgets), .dapm_routes = cx2072x_intercon, .num_dapm_routes = ARRAY_SIZE(cx2072x_intercon), }; /* * DAI ops */ static const struct snd_soc_dai_ops cx2072x_dai_ops = { .set_sysclk = cx2072x_set_dai_sysclk, .set_fmt = cx2072x_set_dai_fmt, .hw_params = cx2072x_hw_params, .set_bclk_ratio = cx2072x_set_dai_bclk_ratio, }; static int cx2072x_dsp_dai_probe(struct snd_soc_dai *dai) { struct cx2072x_priv *cx2072x = snd_soc_component_get_drvdata(dai->component); cx2072x->en_aec_ref = true; return 0; } #define CX2072X_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE) static struct snd_soc_dai_driver soc_codec_cx2072x_dai[] = { { /* playback and capture */ .name = "cx2072x-hifi", .id = CX2072X_DAI_HIFI, .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = CX2072X_RATES_DSP, .formats = CX2072X_FORMATS, }, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = CX2072X_RATES_DSP, .formats = CX2072X_FORMATS, }, .ops = &cx2072x_dai_ops, .symmetric_rate = 1, }, { /* plabayck only, return echo reference to Conexant DSP chip */ .name = "cx2072x-dsp", .id = CX2072X_DAI_DSP, .probe = cx2072x_dsp_dai_probe, .playback = { .stream_name = "DSP Playback", .channels_min = 2, .channels_max = 2, .rates = CX2072X_RATES_DSP, .formats = CX2072X_FORMATS, }, .ops = &cx2072x_dai_ops, }, { /* plabayck only, return echo reference through I2S TX */ .name = "cx2072x-aec", .id = 3, .capture = { .stream_name = "AEC Capture", .channels_min = 2, .channels_max = 2, .rates = CX2072X_RATES_DSP, .formats = CX2072X_FORMATS, }, }, }; static const struct regmap_config cx2072x_regmap = { .reg_bits = 16, .val_bits = 32, .max_register = CX2072X_REG_MAX, .reg_defaults = cx2072x_reg_defaults, .num_reg_defaults = ARRAY_SIZE(cx2072x_reg_defaults), .cache_type = REGCACHE_RBTREE, .readable_reg = cx2072x_readable_register, .volatile_reg = cx2072x_volatile_register, /* Needs custom read/write functions for various register lengths */ .reg_read = cx2072x_reg_read, .reg_write = cx2072x_reg_write, }; static int __maybe_unused cx2072x_runtime_suspend(struct device *dev) { struct cx2072x_priv *cx2072x = dev_get_drvdata(dev); clk_disable_unprepare(cx2072x->mclk); return 0; } static int __maybe_unused 
cx2072x_runtime_resume(struct device *dev) { struct cx2072x_priv *cx2072x = dev_get_drvdata(dev); return clk_prepare_enable(cx2072x->mclk); } static int cx2072x_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct cx2072x_priv *cx2072x; unsigned int ven_id, rev_id; int ret; cx2072x = devm_kzalloc(&i2c->dev, sizeof(struct cx2072x_priv), GFP_KERNEL); if (!cx2072x) return -ENOMEM; cx2072x->regmap = devm_regmap_init(&i2c->dev, NULL, i2c, &cx2072x_regmap); if (IS_ERR(cx2072x->regmap)) return PTR_ERR(cx2072x->regmap); mutex_init(&cx2072x->lock); i2c_set_clientdata(i2c, cx2072x); cx2072x->dev = &i2c->dev; cx2072x->pll_changed = true; cx2072x->i2spcm_changed = true; cx2072x->bclk_ratio = 0; cx2072x->mclk = devm_clk_get(cx2072x->dev, "mclk"); if (IS_ERR(cx2072x->mclk)) { dev_err(cx2072x->dev, "Failed to get MCLK\n"); return PTR_ERR(cx2072x->mclk); } regmap_read(cx2072x->regmap, CX2072X_VENDOR_ID, &ven_id); regmap_read(cx2072x->regmap, CX2072X_REVISION_ID, &rev_id); dev_info(cx2072x->dev, "codec version: %08x,%08x\n", ven_id, rev_id); ret = devm_snd_soc_register_component(cx2072x->dev, &soc_codec_driver_cx2072x, soc_codec_cx2072x_dai, ARRAY_SIZE(soc_codec_cx2072x_dai)); if (ret < 0) return ret; pm_runtime_use_autosuspend(cx2072x->dev); pm_runtime_enable(cx2072x->dev); return 0; } static int cx2072x_i2c_remove(struct i2c_client *i2c) { pm_runtime_disable(&i2c->dev); return 0; } static const struct i2c_device_id cx2072x_i2c_id[] = { { "cx20721", 0 }, { "cx20723", 0 }, {} }; MODULE_DEVICE_TABLE(i2c, cx2072x_i2c_id); #ifdef CONFIG_ACPI static struct acpi_device_id cx2072x_acpi_match[] = { { "14F10720", 0 }, {}, }; MODULE_DEVICE_TABLE(acpi, cx2072x_acpi_match); #endif static const struct dev_pm_ops cx2072x_runtime_pm = { SET_RUNTIME_PM_OPS(cx2072x_runtime_suspend, cx2072x_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; static struct i2c_driver cx2072x_i2c_driver = { .driver = { .name = "cx2072x", .acpi_match_table = ACPI_PTR(cx2072x_acpi_match), .pm = &cx2072x_runtime_pm, }, .probe = cx2072x_i2c_probe, .remove = cx2072x_i2c_remove, .id_table = cx2072x_i2c_id, }; module_i2c_driver(cx2072x_i2c_driver); MODULE_DESCRIPTION("ASoC cx2072x Codec Driver"); MODULE_AUTHOR("Simon Ho <[email protected]>"); MODULE_LICENSE("GPL");
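/*
 * Editorial sketch, not part of the driver above: how a machine driver's
 * hw_params path might configure this codec DAI through the standard ASoC
 * helpers that land in cx2072x_dai_ops.  The 19.2 MHz MCLK and the 64-bit
 * frame are assumed board values for illustration only; clk_id is unused by
 * cx2072x_set_dai_sysclk(), which simply programs the MCLK rate.
 */
#include <sound/soc.h>

static int example_card_config_cx2072x(struct snd_soc_dai *codec_dai)
{
	int ret;

	/* I2S, normal clock polarity, codec as bit/frame clock consumer */
	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
				  SND_SOC_DAIFMT_NB_NF |
				  SND_SOC_DAIFMT_CBS_CFS);
	if (ret < 0)
		return ret;

	/* Assumed 19.2 MHz master clock supplied to the codec */
	ret = snd_soc_dai_set_sysclk(codec_dai, 0, 19200000, SND_SOC_CLOCK_IN);
	if (ret < 0)
		return ret;

	/* Fix the frame at 64 BCLKs regardless of the sample width */
	return snd_soc_dai_set_bclk_ratio(codec_dai, 64);
}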
674899.c
/* * Copyright 2017 Two Pore Guys, Inc. * All rights reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted providing that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <errno.h> #include <assert.h> #include "../linker_set.h" #include "../internal.h" static struct rpct_member *union_read_member(const char *decl, rpc_object_t obj, struct rpct_type *type) { struct rpct_member *member; const char *typedecl = NULL; const char *description = NULL; rpc_object_t constraints = NULL; rpc_object_unpack(obj, "{s,s,v}", "type", &typedecl, "description", &description, "constraints", &constraints); if (typedecl == NULL) { rpc_set_last_errorf(EINVAL, "%s: type key not provided or invalid", decl); return (NULL); } member = g_malloc0(sizeof(*member)); member->name = g_strdup(decl); member->description = description != NULL ? 
g_strdup(description) : NULL; member->origin = type; member->type = rpct_instantiate_type(typedecl, NULL, type, type->file); member->constraints = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, (GDestroyNotify)rpc_release_impl); if (constraints != NULL) { rpc_dictionary_apply(constraints, ^(const char *key, rpc_object_t value) { g_hash_table_insert(member->constraints, g_strdup(key), value); return ((bool)true); }); } return (member); } static bool union_validate(struct rpct_typei *typei, rpc_object_t obj, struct rpct_error_context *errctx) { __block struct rpct_typei *mtypei; __block rpc_object_t interior = NULL; bool ret; ret = rpct_members_apply(typei->type, ^(struct rpct_member *member) { struct rpct_error_context newctx = { .path = errctx->path, .errors = g_ptr_array_new() }; mtypei = rpct_typei_get_member_type(typei, member); interior = rpc_copy(obj); rpct_set_typei(mtypei, interior); if (rpct_validate_instance(mtypei, interior, &newctx)) { g_ptr_array_free(newctx.errors, true); return ((bool)false); } g_ptr_array_free(newctx.errors, true); return ((bool)true); }); if (!ret) { rpct_add_error(errctx, NULL, "None of the union branches matches the object"); return (false); } ret = rpct_run_validators(mtypei, interior, errctx); rpc_release(interior); return (ret); } static rpc_object_t union_serialize(rpc_object_t obj) { assert(obj != NULL); assert(obj->ro_typei != NULL); return (rpc_object_pack("{s,v}", RPCT_TYPE_FIELD, obj->ro_typei->canonical_form, RPCT_VALUE_FIELD, rpc_copy(obj))); } static rpc_object_t union_deserialize(rpc_object_t obj) { return (rpc_copy(rpc_dictionary_get_value(obj, RPCT_VALUE_FIELD))); } static struct rpct_class_handler union_class_handler = { .id = RPC_TYPING_UNION, .name = "union", .member_fn = union_read_member, .validate_fn = union_validate, .serialize_fn = union_serialize, .deserialize_fn = union_deserialize }; DECLARE_TYPE_CLASS(union_class_handler);
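/*
 * Editorial sketch, not part of the handler above: the union class wraps a
 * typed value into a two-key envelope on serialize and unwraps it again on
 * deserialize, so (assuming RPCT_TYPE_FIELD/RPCT_VALUE_FIELD expand to
 * "%type"/"%value") a string branch travels roughly as
 * { "%type": "<canonical union type>", "%value": "some text" }.  The helper
 * below is illustrative only and requires a value that already carries its
 * type information, as the assert in union_serialize() demands.
 */
static rpc_object_t
union_roundtrip_sketch(rpc_object_t typed_value)
{
	rpc_object_t wrapped = union_serialize(typed_value);
	rpc_object_t unwrapped = union_deserialize(wrapped);

	rpc_release(wrapped);
	return (unwrapped);
}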
112658.c
/* * Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved. * Copyright 2005 Nokia. All rights reserved. * * Licensed under the Apache License 2.0 (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include <stdio.h> #include "ssl_locl.h" #include "record/record_locl.h" #include "internal/ktls.h" #include "internal/cryptlib.h" #include <openssl/comp.h> #include <openssl/evp.h> #include <openssl/kdf.h> #include <openssl/rand.h> #include <openssl/obj_mac.h> #include <openssl/core_names.h> #include <openssl/trace.h> /* seed1 through seed5 are concatenated */ static int tls1_PRF(SSL *s, const void *seed1, size_t seed1_len, const void *seed2, size_t seed2_len, const void *seed3, size_t seed3_len, const void *seed4, size_t seed4_len, const void *seed5, size_t seed5_len, const unsigned char *sec, size_t slen, unsigned char *out, size_t olen, int fatal) { const EVP_MD *md = ssl_prf_md(s); EVP_KDF *kdf; EVP_KDF_CTX *kctx = NULL; OSSL_PARAM params[8], *p = params; const char *mdname; if (md == NULL) { /* Should never happen */ if (fatal) SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_PRF, ERR_R_INTERNAL_ERROR); else SSLerr(SSL_F_TLS1_PRF, ERR_R_INTERNAL_ERROR); return 0; } kdf = EVP_KDF_fetch(NULL, OSSL_KDF_NAME_TLS1_PRF, NULL); if (kdf == NULL) goto err; kctx = EVP_KDF_CTX_new(kdf); EVP_KDF_free(kdf); if (kctx == NULL) goto err; mdname = EVP_MD_name(md); *p++ = OSSL_PARAM_construct_utf8_string(OSSL_KDF_PARAM_DIGEST, (char *)mdname, strlen(mdname) + 1); *p++ = OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SECRET, (unsigned char *)sec, (size_t)slen); *p++ = OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SEED, (void *)seed1, (size_t)seed1_len); *p++ = OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SEED, (void *)seed2, (size_t)seed2_len); *p++ = OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SEED, (void *)seed3, (size_t)seed3_len); *p++ = OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SEED, (void *)seed4, (size_t)seed4_len); *p++ = OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SEED, (void *)seed5, (size_t)seed5_len); *p = OSSL_PARAM_construct_end(); if (EVP_KDF_CTX_set_params(kctx, params) && EVP_KDF_derive(kctx, out, olen)) { EVP_KDF_CTX_free(kctx); return 1; } err: if (fatal) SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_PRF, ERR_R_INTERNAL_ERROR); else SSLerr(SSL_F_TLS1_PRF, ERR_R_INTERNAL_ERROR); EVP_KDF_CTX_free(kctx); return 0; } static int tls1_generate_key_block(SSL *s, unsigned char *km, size_t num) { int ret; /* Calls SSLfatal() as required */ ret = tls1_PRF(s, TLS_MD_KEY_EXPANSION_CONST, TLS_MD_KEY_EXPANSION_CONST_SIZE, s->s3.server_random, SSL3_RANDOM_SIZE, s->s3.client_random, SSL3_RANDOM_SIZE, NULL, 0, NULL, 0, s->session->master_key, s->session->master_key_length, km, num, 1); return ret; } #ifndef OPENSSL_NO_KTLS /* * Count the number of records that were not processed yet from record boundary. * * This function assumes that there are only fully formed records read in the * record layer. If read_ahead is enabled, then this might be false and this * function will fail. 
*/ static int count_unprocessed_records(SSL *s) { SSL3_BUFFER *rbuf = RECORD_LAYER_get_rbuf(&s->rlayer); PACKET pkt, subpkt; int count = 0; if (!PACKET_buf_init(&pkt, rbuf->buf + rbuf->offset, rbuf->left)) return -1; while (PACKET_remaining(&pkt) > 0) { /* Skip record type and version */ if (!PACKET_forward(&pkt, 3)) return -1; /* Read until next record */ if (PACKET_get_length_prefixed_2(&pkt, &subpkt)) return -1; count += 1; } return count; } #endif int tls1_change_cipher_state(SSL *s, int which) { unsigned char *p, *mac_secret; unsigned char *ms, *key, *iv; EVP_CIPHER_CTX *dd; const EVP_CIPHER *c; #ifndef OPENSSL_NO_COMP const SSL_COMP *comp; #endif const EVP_MD *m; int mac_type; size_t *mac_secret_size; EVP_MD_CTX *mac_ctx; EVP_PKEY *mac_key; size_t n, i, j, k, cl; int reuse_dd = 0; #ifndef OPENSSL_NO_KTLS struct tls12_crypto_info_aes_gcm_128 crypto_info; BIO *bio; unsigned char geniv[12]; int count_unprocessed; int bit; #endif c = s->s3.tmp.new_sym_enc; m = s->s3.tmp.new_hash; mac_type = s->s3.tmp.new_mac_pkey_type; #ifndef OPENSSL_NO_COMP comp = s->s3.tmp.new_compression; #endif if (which & SSL3_CC_READ) { if (s->ext.use_etm) s->s3.flags |= TLS1_FLAGS_ENCRYPT_THEN_MAC_READ; else s->s3.flags &= ~TLS1_FLAGS_ENCRYPT_THEN_MAC_READ; if (s->s3.tmp.new_cipher->algorithm2 & TLS1_STREAM_MAC) s->mac_flags |= SSL_MAC_FLAG_READ_MAC_STREAM; else s->mac_flags &= ~SSL_MAC_FLAG_READ_MAC_STREAM; if (s->enc_read_ctx != NULL) { reuse_dd = 1; } else if ((s->enc_read_ctx = EVP_CIPHER_CTX_new()) == NULL) { SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_CHANGE_CIPHER_STATE, ERR_R_MALLOC_FAILURE); goto err; } else { /* * make sure it's initialised in case we exit later with an error */ EVP_CIPHER_CTX_reset(s->enc_read_ctx); } dd = s->enc_read_ctx; mac_ctx = ssl_replace_hash(&s->read_hash, NULL); if (mac_ctx == NULL) { SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_CHANGE_CIPHER_STATE, ERR_R_INTERNAL_ERROR); goto err; } #ifndef OPENSSL_NO_COMP COMP_CTX_free(s->expand); s->expand = NULL; if (comp != NULL) { s->expand = COMP_CTX_new(comp->method); if (s->expand == NULL) { SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_CHANGE_CIPHER_STATE, SSL_R_COMPRESSION_LIBRARY_ERROR); goto err; } } #endif /* * this is done by dtls1_reset_seq_numbers for DTLS */ if (!SSL_IS_DTLS(s)) RECORD_LAYER_reset_read_sequence(&s->rlayer); mac_secret = &(s->s3.read_mac_secret[0]); mac_secret_size = &(s->s3.read_mac_secret_size); } else { s->statem.enc_write_state = ENC_WRITE_STATE_INVALID; if (s->ext.use_etm) s->s3.flags |= TLS1_FLAGS_ENCRYPT_THEN_MAC_WRITE; else s->s3.flags &= ~TLS1_FLAGS_ENCRYPT_THEN_MAC_WRITE; if (s->s3.tmp.new_cipher->algorithm2 & TLS1_STREAM_MAC) s->mac_flags |= SSL_MAC_FLAG_WRITE_MAC_STREAM; else s->mac_flags &= ~SSL_MAC_FLAG_WRITE_MAC_STREAM; if (s->enc_write_ctx != NULL && !SSL_IS_DTLS(s)) { reuse_dd = 1; } else if ((s->enc_write_ctx = EVP_CIPHER_CTX_new()) == NULL) { SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_CHANGE_CIPHER_STATE, ERR_R_MALLOC_FAILURE); goto err; } dd = s->enc_write_ctx; if (SSL_IS_DTLS(s)) { mac_ctx = EVP_MD_CTX_new(); if (mac_ctx == NULL) { SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_CHANGE_CIPHER_STATE, ERR_R_MALLOC_FAILURE); goto err; } s->write_hash = mac_ctx; } else { mac_ctx = ssl_replace_hash(&s->write_hash, NULL); if (mac_ctx == NULL) { SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_CHANGE_CIPHER_STATE, ERR_R_MALLOC_FAILURE); goto err; } } #ifndef OPENSSL_NO_COMP COMP_CTX_free(s->compress); s->compress = NULL; if (comp != NULL) { s->compress = COMP_CTX_new(comp->method); if 
(s->compress == NULL) { SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_CHANGE_CIPHER_STATE, SSL_R_COMPRESSION_LIBRARY_ERROR); goto err; } } #endif /* * this is done by dtls1_reset_seq_numbers for DTLS */ if (!SSL_IS_DTLS(s)) RECORD_LAYER_reset_write_sequence(&s->rlayer); mac_secret = &(s->s3.write_mac_secret[0]); mac_secret_size = &(s->s3.write_mac_secret_size); } if (reuse_dd) EVP_CIPHER_CTX_reset(dd); p = s->s3.tmp.key_block; i = *mac_secret_size = s->s3.tmp.new_mac_secret_size; /* TODO(size_t): convert me */ cl = EVP_CIPHER_key_length(c); j = cl; /* Was j=(exp)?5:EVP_CIPHER_key_length(c); */ /* If GCM/CCM mode only part of IV comes from PRF */ if (EVP_CIPHER_mode(c) == EVP_CIPH_GCM_MODE) k = EVP_GCM_TLS_FIXED_IV_LEN; else if (EVP_CIPHER_mode(c) == EVP_CIPH_CCM_MODE) k = EVP_CCM_TLS_FIXED_IV_LEN; else k = EVP_CIPHER_iv_length(c); if ((which == SSL3_CHANGE_CIPHER_CLIENT_WRITE) || (which == SSL3_CHANGE_CIPHER_SERVER_READ)) { ms = &(p[0]); n = i + i; key = &(p[n]); n += j + j; iv = &(p[n]); n += k + k; } else { n = i; ms = &(p[n]); n += i + j; key = &(p[n]); n += j + k; iv = &(p[n]); n += k; } if (n > s->s3.tmp.key_block_length) { SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_CHANGE_CIPHER_STATE, ERR_R_INTERNAL_ERROR); goto err; } memcpy(mac_secret, ms, i); if (!(EVP_CIPHER_flags(c) & EVP_CIPH_FLAG_AEAD_CIPHER)) { /* TODO(size_t): Convert this function */ mac_key = EVP_PKEY_new_mac_key(mac_type, NULL, mac_secret, (int)*mac_secret_size); if (mac_key == NULL || EVP_DigestSignInit(mac_ctx, NULL, m, NULL, mac_key) <= 0) { EVP_PKEY_free(mac_key); SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_CHANGE_CIPHER_STATE, ERR_R_INTERNAL_ERROR); goto err; } EVP_PKEY_free(mac_key); } OSSL_TRACE_BEGIN(TLS) { BIO_printf(trc_out, "which = %04X, mac key:\n", which); BIO_dump_indent(trc_out, ms, i, 4); } OSSL_TRACE_END(TLS); if (EVP_CIPHER_mode(c) == EVP_CIPH_GCM_MODE) { if (!EVP_CipherInit_ex(dd, c, NULL, key, NULL, (which & SSL3_CC_WRITE)) || !EVP_CIPHER_CTX_ctrl(dd, EVP_CTRL_GCM_SET_IV_FIXED, (int)k, iv)) { SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_CHANGE_CIPHER_STATE, ERR_R_INTERNAL_ERROR); goto err; } } else if (EVP_CIPHER_mode(c) == EVP_CIPH_CCM_MODE) { int taglen; if (s->s3.tmp. 
new_cipher->algorithm_enc & (SSL_AES128CCM8 | SSL_AES256CCM8)) taglen = EVP_CCM8_TLS_TAG_LEN; else taglen = EVP_CCM_TLS_TAG_LEN; if (!EVP_CipherInit_ex(dd, c, NULL, NULL, NULL, (which & SSL3_CC_WRITE)) || !EVP_CIPHER_CTX_ctrl(dd, EVP_CTRL_AEAD_SET_IVLEN, 12, NULL) || !EVP_CIPHER_CTX_ctrl(dd, EVP_CTRL_AEAD_SET_TAG, taglen, NULL) || !EVP_CIPHER_CTX_ctrl(dd, EVP_CTRL_CCM_SET_IV_FIXED, (int)k, iv) || !EVP_CipherInit_ex(dd, NULL, NULL, key, NULL, -1)) { SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_CHANGE_CIPHER_STATE, ERR_R_INTERNAL_ERROR); goto err; } } else { if (!EVP_CipherInit_ex(dd, c, NULL, key, iv, (which & SSL3_CC_WRITE))) { SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_CHANGE_CIPHER_STATE, ERR_R_INTERNAL_ERROR); goto err; } } /* Needed for "composite" AEADs, such as RC4-HMAC-MD5 */ if ((EVP_CIPHER_flags(c) & EVP_CIPH_FLAG_AEAD_CIPHER) && *mac_secret_size && !EVP_CIPHER_CTX_ctrl(dd, EVP_CTRL_AEAD_SET_MAC_KEY, (int)*mac_secret_size, mac_secret)) { SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_CHANGE_CIPHER_STATE, ERR_R_INTERNAL_ERROR); goto err; } #ifndef OPENSSL_NO_KTLS if (s->compress) goto skip_ktls; if (((which & SSL3_CC_READ) && (s->mode & SSL_MODE_NO_KTLS_RX)) || ((which & SSL3_CC_WRITE) && (s->mode & SSL_MODE_NO_KTLS_TX))) goto skip_ktls; /* ktls supports only the maximum fragment size */ if (ssl_get_max_send_fragment(s) != SSL3_RT_MAX_PLAIN_LENGTH) goto skip_ktls; /* check that cipher is AES_GCM_128 */ if (EVP_CIPHER_nid(c) != NID_aes_128_gcm || EVP_CIPHER_mode(c) != EVP_CIPH_GCM_MODE || EVP_CIPHER_key_length(c) != TLS_CIPHER_AES_GCM_128_KEY_SIZE) goto skip_ktls; /* check version is 1.2 */ if (s->version != TLS1_2_VERSION) goto skip_ktls; if (which & SSL3_CC_WRITE) bio = s->wbio; else bio = s->rbio; if (!ossl_assert(bio != NULL)) { SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_CHANGE_CIPHER_STATE, ERR_R_INTERNAL_ERROR); goto err; } /* All future data will get encrypted by ktls. 
Flush the BIO or skip ktls */ if (which & SSL3_CC_WRITE) { if (BIO_flush(bio) <= 0) goto skip_ktls; } /* ktls doesn't support renegotiation */ if ((BIO_get_ktls_send(s->wbio) && (which & SSL3_CC_WRITE)) || (BIO_get_ktls_recv(s->rbio) && (which & SSL3_CC_READ))) { SSLfatal(s, SSL_AD_NO_RENEGOTIATION, SSL_F_TLS1_CHANGE_CIPHER_STATE, ERR_R_INTERNAL_ERROR); goto err; } memset(&crypto_info, 0, sizeof(crypto_info)); crypto_info.info.cipher_type = TLS_CIPHER_AES_GCM_128; crypto_info.info.version = s->version; EVP_CIPHER_CTX_ctrl(dd, EVP_CTRL_GET_IV, EVP_GCM_TLS_FIXED_IV_LEN + EVP_GCM_TLS_EXPLICIT_IV_LEN, geniv); memcpy(crypto_info.iv, geniv + EVP_GCM_TLS_FIXED_IV_LEN, TLS_CIPHER_AES_GCM_128_IV_SIZE); memcpy(crypto_info.salt, geniv, TLS_CIPHER_AES_GCM_128_SALT_SIZE); memcpy(crypto_info.key, key, EVP_CIPHER_key_length(c)); if (which & SSL3_CC_WRITE) memcpy(crypto_info.rec_seq, &s->rlayer.write_sequence, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE); else memcpy(crypto_info.rec_seq, &s->rlayer.read_sequence, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE); if (which & SSL3_CC_READ) { count_unprocessed = count_unprocessed_records(s); if (count_unprocessed < 0) goto skip_ktls; /* increment the crypto_info record sequence */ while (count_unprocessed) { for (bit = 7; bit >= 0; bit--) { /* increment */ ++crypto_info.rec_seq[bit]; if (crypto_info.rec_seq[bit] != 0) break; } count_unprocessed--; } } /* ktls works with user provided buffers directly */ if (BIO_set_ktls(bio, &crypto_info, which & SSL3_CC_WRITE)) { if (which & SSL3_CC_WRITE) ssl3_release_write_buffer(s); SSL_set_options(s, SSL_OP_NO_RENEGOTIATION); } skip_ktls: #endif /* OPENSSL_NO_KTLS */ s->statem.enc_write_state = ENC_WRITE_STATE_VALID; OSSL_TRACE_BEGIN(TLS) { BIO_printf(trc_out, "which = %04X, key:\n", which); BIO_dump_indent(trc_out, key, EVP_CIPHER_key_length(c), 4); BIO_printf(trc_out, "iv:\n"); BIO_dump_indent(trc_out, iv, k, 4); } OSSL_TRACE_END(TLS); return 1; err: return 0; } int tls1_setup_key_block(SSL *s) { unsigned char *p; const EVP_CIPHER *c; const EVP_MD *hash; SSL_COMP *comp; int mac_type = NID_undef; size_t num, mac_secret_size = 0; int ret = 0; if (s->s3.tmp.key_block_length != 0) return 1; if (!ssl_cipher_get_evp(s->session, &c, &hash, &mac_type, &mac_secret_size, &comp, s->ext.use_etm)) { SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_SETUP_KEY_BLOCK, SSL_R_CIPHER_OR_HASH_UNAVAILABLE); return 0; } s->s3.tmp.new_sym_enc = c; s->s3.tmp.new_hash = hash; s->s3.tmp.new_mac_pkey_type = mac_type; s->s3.tmp.new_mac_secret_size = mac_secret_size; num = EVP_CIPHER_key_length(c) + mac_secret_size + EVP_CIPHER_iv_length(c); num *= 2; ssl3_cleanup_key_block(s); if ((p = OPENSSL_malloc(num)) == NULL) { SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_SETUP_KEY_BLOCK, ERR_R_MALLOC_FAILURE); goto err; } s->s3.tmp.key_block_length = num; s->s3.tmp.key_block = p; OSSL_TRACE_BEGIN(TLS) { BIO_printf(trc_out, "client random\n"); BIO_dump_indent(trc_out, s->s3.client_random, SSL3_RANDOM_SIZE, 4); BIO_printf(trc_out, "server random\n"); BIO_dump_indent(trc_out, s->s3.server_random, SSL3_RANDOM_SIZE, 4); BIO_printf(trc_out, "master key\n"); BIO_dump_indent(trc_out, s->session->master_key, s->session->master_key_length, 4); } OSSL_TRACE_END(TLS); if (!tls1_generate_key_block(s, p, num)) { /* SSLfatal() already called */ goto err; } OSSL_TRACE_BEGIN(TLS) { BIO_printf(trc_out, "key block\n"); BIO_dump_indent(trc_out, p, num, 4); } OSSL_TRACE_END(TLS); if (!(s->options & SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS) && s->method->version <= TLS1_VERSION) { /* * enable 
vulnerability countermeasure for CBC ciphers with known-IV * problem (http://www.openssl.org/~bodo/tls-cbc.txt) */ s->s3.need_empty_fragments = 1; if (s->session->cipher != NULL) { if (s->session->cipher->algorithm_enc == SSL_eNULL) s->s3.need_empty_fragments = 0; #ifndef OPENSSL_NO_RC4 if (s->session->cipher->algorithm_enc == SSL_RC4) s->s3.need_empty_fragments = 0; #endif } } ret = 1; err: return ret; } size_t tls1_final_finish_mac(SSL *s, const char *str, size_t slen, unsigned char *out) { size_t hashlen; unsigned char hash[EVP_MAX_MD_SIZE]; if (!ssl3_digest_cached_records(s, 0)) { /* SSLfatal() already called */ return 0; } if (!ssl_handshake_hash(s, hash, sizeof(hash), &hashlen)) { /* SSLfatal() already called */ return 0; } if (!tls1_PRF(s, str, slen, hash, hashlen, NULL, 0, NULL, 0, NULL, 0, s->session->master_key, s->session->master_key_length, out, TLS1_FINISH_MAC_LENGTH, 1)) { /* SSLfatal() already called */ return 0; } OPENSSL_cleanse(hash, hashlen); return TLS1_FINISH_MAC_LENGTH; } int tls1_generate_master_secret(SSL *s, unsigned char *out, unsigned char *p, size_t len, size_t *secret_size) { if (s->session->flags & SSL_SESS_FLAG_EXTMS) { unsigned char hash[EVP_MAX_MD_SIZE * 2]; size_t hashlen; /* * Digest cached records keeping record buffer (if present): this wont * affect client auth because we're freezing the buffer at the same * point (after client key exchange and before certificate verify) */ if (!ssl3_digest_cached_records(s, 1) || !ssl_handshake_hash(s, hash, sizeof(hash), &hashlen)) { /* SSLfatal() already called */ return 0; } OSSL_TRACE_BEGIN(TLS) { BIO_printf(trc_out, "Handshake hashes:\n"); BIO_dump(trc_out, (char *)hash, hashlen); } OSSL_TRACE_END(TLS); if (!tls1_PRF(s, TLS_MD_EXTENDED_MASTER_SECRET_CONST, TLS_MD_EXTENDED_MASTER_SECRET_CONST_SIZE, hash, hashlen, NULL, 0, NULL, 0, NULL, 0, p, len, out, SSL3_MASTER_SECRET_SIZE, 1)) { /* SSLfatal() already called */ return 0; } OPENSSL_cleanse(hash, hashlen); } else { if (!tls1_PRF(s, TLS_MD_MASTER_SECRET_CONST, TLS_MD_MASTER_SECRET_CONST_SIZE, s->s3.client_random, SSL3_RANDOM_SIZE, NULL, 0, s->s3.server_random, SSL3_RANDOM_SIZE, NULL, 0, p, len, out, SSL3_MASTER_SECRET_SIZE, 1)) { /* SSLfatal() already called */ return 0; } } OSSL_TRACE_BEGIN(TLS) { BIO_printf(trc_out, "Premaster Secret:\n"); BIO_dump_indent(trc_out, p, len, 4); BIO_printf(trc_out, "Client Random:\n"); BIO_dump_indent(trc_out, s->s3.client_random, SSL3_RANDOM_SIZE, 4); BIO_printf(trc_out, "Server Random:\n"); BIO_dump_indent(trc_out, s->s3.server_random, SSL3_RANDOM_SIZE, 4); BIO_printf(trc_out, "Master Secret:\n"); BIO_dump_indent(trc_out, s->session->master_key, SSL3_MASTER_SECRET_SIZE, 4); } OSSL_TRACE_END(TLS); *secret_size = SSL3_MASTER_SECRET_SIZE; return 1; } int tls1_export_keying_material(SSL *s, unsigned char *out, size_t olen, const char *label, size_t llen, const unsigned char *context, size_t contextlen, int use_context) { unsigned char *val = NULL; size_t vallen = 0, currentvalpos; int rv; /* * construct PRF arguments we construct the PRF argument ourself rather * than passing separate values into the TLS PRF to ensure that the * concatenation of values does not create a prohibited label. 
*/ vallen = llen + SSL3_RANDOM_SIZE * 2; if (use_context) { vallen += 2 + contextlen; } val = OPENSSL_malloc(vallen); if (val == NULL) goto err2; currentvalpos = 0; memcpy(val + currentvalpos, (unsigned char *)label, llen); currentvalpos += llen; memcpy(val + currentvalpos, s->s3.client_random, SSL3_RANDOM_SIZE); currentvalpos += SSL3_RANDOM_SIZE; memcpy(val + currentvalpos, s->s3.server_random, SSL3_RANDOM_SIZE); currentvalpos += SSL3_RANDOM_SIZE; if (use_context) { val[currentvalpos] = (contextlen >> 8) & 0xff; currentvalpos++; val[currentvalpos] = contextlen & 0xff; currentvalpos++; if ((contextlen > 0) || (context != NULL)) { memcpy(val + currentvalpos, context, contextlen); } } /* * disallow prohibited labels note that SSL3_RANDOM_SIZE > max(prohibited * label len) = 15, so size of val > max(prohibited label len) = 15 and * the comparisons won't have buffer overflow */ if (memcmp(val, TLS_MD_CLIENT_FINISH_CONST, TLS_MD_CLIENT_FINISH_CONST_SIZE) == 0) goto err1; if (memcmp(val, TLS_MD_SERVER_FINISH_CONST, TLS_MD_SERVER_FINISH_CONST_SIZE) == 0) goto err1; if (memcmp(val, TLS_MD_MASTER_SECRET_CONST, TLS_MD_MASTER_SECRET_CONST_SIZE) == 0) goto err1; if (memcmp(val, TLS_MD_EXTENDED_MASTER_SECRET_CONST, TLS_MD_EXTENDED_MASTER_SECRET_CONST_SIZE) == 0) goto err1; if (memcmp(val, TLS_MD_KEY_EXPANSION_CONST, TLS_MD_KEY_EXPANSION_CONST_SIZE) == 0) goto err1; rv = tls1_PRF(s, val, vallen, NULL, 0, NULL, 0, NULL, 0, NULL, 0, s->session->master_key, s->session->master_key_length, out, olen, 0); goto ret; err1: SSLerr(SSL_F_TLS1_EXPORT_KEYING_MATERIAL, SSL_R_TLS_ILLEGAL_EXPORTER_LABEL); rv = 0; goto ret; err2: SSLerr(SSL_F_TLS1_EXPORT_KEYING_MATERIAL, ERR_R_MALLOC_FAILURE); rv = 0; ret: OPENSSL_clear_free(val, vallen); return rv; } int tls1_alert_code(int code) { switch (code) { case SSL_AD_CLOSE_NOTIFY: return SSL3_AD_CLOSE_NOTIFY; case SSL_AD_UNEXPECTED_MESSAGE: return SSL3_AD_UNEXPECTED_MESSAGE; case SSL_AD_BAD_RECORD_MAC: return SSL3_AD_BAD_RECORD_MAC; case SSL_AD_DECRYPTION_FAILED: return TLS1_AD_DECRYPTION_FAILED; case SSL_AD_RECORD_OVERFLOW: return TLS1_AD_RECORD_OVERFLOW; case SSL_AD_DECOMPRESSION_FAILURE: return SSL3_AD_DECOMPRESSION_FAILURE; case SSL_AD_HANDSHAKE_FAILURE: return SSL3_AD_HANDSHAKE_FAILURE; case SSL_AD_NO_CERTIFICATE: return -1; case SSL_AD_BAD_CERTIFICATE: return SSL3_AD_BAD_CERTIFICATE; case SSL_AD_UNSUPPORTED_CERTIFICATE: return SSL3_AD_UNSUPPORTED_CERTIFICATE; case SSL_AD_CERTIFICATE_REVOKED: return SSL3_AD_CERTIFICATE_REVOKED; case SSL_AD_CERTIFICATE_EXPIRED: return SSL3_AD_CERTIFICATE_EXPIRED; case SSL_AD_CERTIFICATE_UNKNOWN: return SSL3_AD_CERTIFICATE_UNKNOWN; case SSL_AD_ILLEGAL_PARAMETER: return SSL3_AD_ILLEGAL_PARAMETER; case SSL_AD_UNKNOWN_CA: return TLS1_AD_UNKNOWN_CA; case SSL_AD_ACCESS_DENIED: return TLS1_AD_ACCESS_DENIED; case SSL_AD_DECODE_ERROR: return TLS1_AD_DECODE_ERROR; case SSL_AD_DECRYPT_ERROR: return TLS1_AD_DECRYPT_ERROR; case SSL_AD_EXPORT_RESTRICTION: return TLS1_AD_EXPORT_RESTRICTION; case SSL_AD_PROTOCOL_VERSION: return TLS1_AD_PROTOCOL_VERSION; case SSL_AD_INSUFFICIENT_SECURITY: return TLS1_AD_INSUFFICIENT_SECURITY; case SSL_AD_INTERNAL_ERROR: return TLS1_AD_INTERNAL_ERROR; case SSL_AD_USER_CANCELLED: return TLS1_AD_USER_CANCELLED; case SSL_AD_NO_RENEGOTIATION: return TLS1_AD_NO_RENEGOTIATION; case SSL_AD_UNSUPPORTED_EXTENSION: return TLS1_AD_UNSUPPORTED_EXTENSION; case SSL_AD_CERTIFICATE_UNOBTAINABLE: return TLS1_AD_CERTIFICATE_UNOBTAINABLE; case SSL_AD_UNRECOGNIZED_NAME: return TLS1_AD_UNRECOGNIZED_NAME; case 
SSL_AD_BAD_CERTIFICATE_STATUS_RESPONSE: return TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE; case SSL_AD_BAD_CERTIFICATE_HASH_VALUE: return TLS1_AD_BAD_CERTIFICATE_HASH_VALUE; case SSL_AD_UNKNOWN_PSK_IDENTITY: return TLS1_AD_UNKNOWN_PSK_IDENTITY; case SSL_AD_INAPPROPRIATE_FALLBACK: return TLS1_AD_INAPPROPRIATE_FALLBACK; case SSL_AD_NO_APPLICATION_PROTOCOL: return TLS1_AD_NO_APPLICATION_PROTOCOL; case SSL_AD_CERTIFICATE_REQUIRED: return SSL_AD_HANDSHAKE_FAILURE; default: return -1; } }
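/*
 * Editor's sketch (not part of the upstream OpenSSL file): tls1_PRF() above
 * drives the "TLS1-PRF" EVP_KDF with a digest name, the secret, and up to
 * five seed fragments that the KDF concatenates.  The stripped-down helper
 * below repeats that call pattern for a single label/seed pair so the
 * parameter wiring is easier to see.  derive_with_tls1_prf() is a made-up
 * name, error handling is reduced to one cleanup path, and the same headers
 * included at the top of this file (evp.h, kdf.h, core_names.h) are assumed.
 */
static int derive_with_tls1_prf(const EVP_MD *md,
                                const unsigned char *secret, size_t secret_len,
                                const void *label, size_t label_len,
                                const void *seed, size_t seed_len,
                                unsigned char *out, size_t out_len)
{
    EVP_KDF *kdf;
    EVP_KDF_CTX *kctx;
    OSSL_PARAM params[5], *p = params;
    const char *mdname = EVP_MD_name(md);
    int ok;

    kdf = EVP_KDF_fetch(NULL, OSSL_KDF_NAME_TLS1_PRF, NULL);
    if (kdf == NULL)
        return 0;
    kctx = EVP_KDF_CTX_new(kdf);
    EVP_KDF_free(kdf);
    if (kctx == NULL)
        return 0;

    *p++ = OSSL_PARAM_construct_utf8_string(OSSL_KDF_PARAM_DIGEST,
                                            (char *)mdname,
                                            strlen(mdname) + 1);
    *p++ = OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SECRET,
                                             (unsigned char *)secret,
                                             secret_len);
    /* Every OSSL_KDF_PARAM_SEED entry is concatenated, as in tls1_PRF(). */
    *p++ = OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SEED,
                                             (void *)label, label_len);
    *p++ = OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SEED,
                                             (void *)seed, seed_len);
    *p = OSSL_PARAM_construct_end();

    ok = EVP_KDF_CTX_set_params(kctx, params)
         && EVP_KDF_derive(kctx, out, out_len);
    EVP_KDF_CTX_free(kctx);
    return ok;
}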
394582.c
#include <std.h> inherit ROOM; void create(){ ::create(); set_terrain(CITY); set_travel(PAVED_ROAD); set_property("light", 2); set_short("A Street in Daggerdale."); set("day long", @GIL %^ORANGE%^You are in the Southeast corner of the Market Square. The street runs North and West from here. A metal pole holds an unlit lantern aloft here. Several carts line the square here. A sign hangs outside of a shop to the East of here. GIL ); set("night long", @GIL %^YELLOW%^You are in the Southeast corner of the Market Square. The street runs North and West from here. A metal pole here holds a lit lantern aloft and brightens the night. Several carts line the square here. A sign hangs outside of a shop to the East of here. GIL ); set_smell("default", "You catch the scent of varnish."); set_listen("default", "You can hear the sounds of a saw cutting wood."); set_items(([ "pole" : "It's made of steel and holds a lantern to light the darkness", "street" : "It's made of clean polished cobblestones.", "lantern" : "It's a large oil lantern with crystal glass windows.", "building" : "It's made of perfectly carved granite blocks, it's Dwarven stone work.", "wall" : "It's 50 foot tall and made from huge polished granite blocks.", "shop" : "This is where you can have wood things repaired.", "sign" : "Ye Ole Woodwrights Shoppe --->", "carts" : "These carts sell many items not found in the shops.", "square" : "This is the central Square of Daggerdale, many people sell their wares here.", ])); set_exits(([ "north" : "/d/dagger/Daggerdale/streets/street22", "west" : "/d/dagger/Daggerdale/streets/street15", "east" : "/d/dagger/Daggerdale/shops/woodwright", ])); } void reset(){ ::reset(); if(!present("lantern pole")){ new("/d/dagger/Daggerdale/streets/items/objs/lantern_pole.c")->move(TO); } }
714126.c
/* * phenotype_jobs.c * * Created on: 6 Nov 2019 * Author: billy */ #include <string.h> #include "observation.h" #include "phenotype_jobs.h" #include "study.h" #include "streams.h" #include "study_jobs.h" #include "string_utils.h" #include "char_parameter.h" #include "string_parameter.h" #include "json_parameter.h" /* * Static variables */ static NamedParameterType S_PHENOTYPES_TABLE_COLUMN_DELIMITER = { "PH Data delimiter", PT_CHAR }; static NamedParameterType S_PHENOTYPES_TABLE = { "PH Upload", PT_JSON_TABLE}; static NamedParameterType S_STUDIES_LIST = { "PH Study", PT_STRING }; static const char S_DEFAULT_COLUMN_DELIMITER = '|'; static const char * const S_ROW_INDEX_TITLE_S = "Plot ID"; /* * Static declarations */ static Parameter *GetTableParameter (ParameterSet *param_set_p, ParameterGroup *group_p, const FieldTrialServiceData *data_p); static json_t *GetTableParameterHints (void); static bool AddPhenotypesFromJSON (ServiceJob *job_p, const json_t *phenotypes_json_p, Study *area_p, const FieldTrialServiceData *data_p); /* * API definitions */ bool AddSubmissionPhenotypeParams (ServiceData *data_p, ParameterSet *param_set_p, Resource *resource_p) { bool success_flag = false; Parameter *param_p = NULL; ParameterGroup *group_p = CreateAndAddParameterGroupToParameterSet ("Phenotypes", false, data_p, param_set_p); if ((param_p = EasyCreateAndAddStringParameterToParameterSet (data_p, param_set_p, group_p, S_STUDIES_LIST.npt_type, S_STUDIES_LIST.npt_name_s, "Study", "The Study that these phenotype are from", NULL, PL_ALL)) != NULL) { const FieldTrialServiceData *dfw_service_data_p = (FieldTrialServiceData *) data_p; if (SetUpStudiesListParameter (dfw_service_data_p, (StringParameter *) param_p, NULL, false)) { const char delim = S_DEFAULT_COLUMN_DELIMITER; if ((param_p = EasyCreateAndAddCharParameterToParameterSet (data_p, param_set_p, group_p, S_PHENOTYPES_TABLE_COLUMN_DELIMITER.npt_name_s, "Delimiter", "The character delimiting columns", &delim, PL_ADVANCED)) != NULL) { if ((param_p = GetTableParameter (param_set_p, group_p, dfw_service_data_p)) != NULL) { success_flag = true; } else { PrintErrors (STM_LEVEL_SEVERE, __FILE__, __LINE__, "GetTableParameter failed"); } } else { PrintErrors (STM_LEVEL_SEVERE, __FILE__, __LINE__, "Failed to add %s parameter", S_STUDIES_LIST.npt_name_s); } } else { PrintErrors (STM_LEVEL_SEVERE, __FILE__, __LINE__, "SetUpStudiesListParameter failed"); } } else { PrintErrors (STM_LEVEL_SEVERE, __FILE__, __LINE__, "Failed to add %s parameter", S_STUDIES_LIST.npt_name_s); } return success_flag; } bool GetSubmissionPhenotypesParameterTypeForNamedParameter (const char *param_name_s, ParameterType *pt_p) { bool success_flag = true; if (strcmp (param_name_s, S_STUDIES_LIST.npt_name_s) == 0) { *pt_p = S_STUDIES_LIST.npt_type; } else if (strcmp (param_name_s, S_PHENOTYPES_TABLE_COLUMN_DELIMITER.npt_name_s) == 0) { *pt_p = S_PHENOTYPES_TABLE_COLUMN_DELIMITER.npt_type; } else if (strcmp (param_name_s, S_PHENOTYPES_TABLE.npt_name_s) == 0) { *pt_p = S_PHENOTYPES_TABLE.npt_type; } else { success_flag = false; } return success_flag; } bool RunForSubmissionPhenotypesParams (FieldTrialServiceData *data_p, ParameterSet *param_set_p, ServiceJob *job_p) { bool job_done_flag = false; const json_t *phenotypes_json_p = NULL; if (GetCurrentJSONParameterValueFromParameterSet (param_set_p, S_PHENOTYPES_TABLE.npt_name_s, &phenotypes_json_p)) { /* * Has a spreadsheet been uploaded? 
*/ if ((phenotypes_json_p != NULL) && (json_array_size (phenotypes_json_p) > 0)) { OperationStatus status = OS_FAILED; /* * The data could be either an array of json objects * or a tabular string. so try it as json array first */ const char *study_id_s = NULL; if (GetCurrentStringParameterValueFromParameterSet (param_set_p, S_STUDIES_LIST.npt_name_s, &study_id_s)) { Study *study_p = GetStudyByIdString (study_id_s, VF_STORAGE, data_p); if (study_p) { if (AddPhenotypesFromJSON (job_p, phenotypes_json_p, study_p, data_p)) { status = OS_SUCCEEDED; } FreeStudy (study_p); } } SetServiceJobStatus (job_p, status); job_done_flag = true; } /* if (value.st_boolean_value) */ } /* if (GetParameterValueFromParameterSet (param_set_p, S_ADD_EXPERIMENTAL_AREA.npt_name_s, &value, true)) */ return job_done_flag; } /* * Static definitions */ static Parameter *GetTableParameter (ParameterSet *param_set_p, ParameterGroup *group_p, const FieldTrialServiceData *data_p) { Parameter *param_p = EasyCreateAndAddJSONParameterToParameterSet (& (data_p -> dftsd_base_data), param_set_p, group_p, S_PHENOTYPES_TABLE.npt_type, S_PHENOTYPES_TABLE.npt_name_s, "Plot data to upload", "The data to upload", NULL, PL_ALL); if (param_p) { bool success_flag = false; json_t *hints_p = GetTableParameterHints (); if (hints_p) { if (AddParameterKeyJSONValuePair (param_p, PA_TABLE_COLUMN_HEADINGS_S, hints_p)) { const char delim_s [2] = { S_DEFAULT_COLUMN_DELIMITER, '\0' }; if (AddParameterKeyStringValuePair (param_p, PA_TABLE_COLUMN_DELIMITER_S, delim_s)) { success_flag = true; } } json_decref (hints_p); } /* if (hints_p) */ if (!success_flag) { FreeParameter (param_p); param_p = NULL; } } /* if (param_p) */ return param_p; } static json_t *GetTableParameterHints (void) { json_t *hints_p = json_array (); if (hints_p) { if (AddColumnParameterHint (S_ROW_INDEX_TITLE_S, NULL, PT_TIME, true, hints_p)) { return hints_p; } json_decref (hints_p); } return NULL; } static bool AddPhenotypesFromJSON (ServiceJob *job_p, const json_t *phenotypes_json_p, Study *study_p, const FieldTrialServiceData *data_p) { OperationStatus status = OS_FAILED; bool success_flag = true; if (json_is_array (phenotypes_json_p)) { const size_t num_rows = json_array_size (phenotypes_json_p); size_t i; size_t num_imported = 0; bool imported_row_flag; for (i = 0; i < num_rows; ++ i) { const json_t *phenotype_json_p = json_array_get (phenotypes_json_p, i); json_int_t row_index = 0; imported_row_flag = false; if (GetJSONInteger (phenotype_json_p, S_ROW_INDEX_TITLE_S, &row_index)) { const char *key_s; json_t *value_p; const char *date_suffix_s = " date"; json_object_foreach (phenotype_json_p, key_s, value_p) { /* * Is it a measurement date or the treatment? 
*/ if (!DoesStringEndWith (key_s, date_suffix_s)) { char *date_key_s = ConcatenateStrings (key_s, date_suffix_s); if (date_key_s) { const char *date_s = GetJSONString (phenotype_json_p, date_key_s); if (date_s) { if (json_is_string (value_p)) { const char *treatment_s = json_string_value (value_p); if (treatment_s) { Observation *obs_p; //AddObservationToRow (); } } } FreeCopiedString (date_key_s); } } } } /* if (GetJSONInteger (phenotype_json_p, S_PLOT_INDEX_TITLE_S, &plot_index)) */ else { PrintJSONToErrors (STM_LEVEL_SEVERE, __FILE__, __LINE__, phenotype_json_p, "No \"%s\" key", S_ROW_INDEX_TITLE_S); } if (!imported_row_flag) { PrintJSONToErrors (STM_LEVEL_SEVERE, __FILE__, __LINE__, phenotype_json_p, "Failed to import plot data"); } } /* for (i = 0; i < num_rows; ++ i) */ if (num_imported == num_rows) { status = OS_SUCCEEDED; } else if (num_imported > 0) { status = OS_PARTIALLY_SUCCEEDED; } } /* if (json_is_array (plots_json_p)) */ SetServiceJobStatus (job_p, status); return success_flag; }
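/*
 * Editor's sketch (not part of the original service code): the import loop in
 * AddPhenotypesFromJSON() pairs each trait column "X" of an uploaded row with
 * a companion column named "X date".  The standalone helper below shows that
 * pairing with plain jansson and C string calls so the column convention is
 * explicit.  print_trait_observations() is a made-up name; real code would
 * create Observation objects for the Study rather than print.
 */
#include <stdio.h>
#include <string.h>
#include <jansson.h>

static void print_trait_observations (const json_t *row_p)
{
	const char *date_suffix_s = " date";
	const size_t suffix_length = strlen (date_suffix_s);
	const char *key_s;
	json_t *value_p;

	json_object_foreach ((json_t *) row_p, key_s, value_p)
		{
			const size_t key_length = strlen (key_s);

			/* Skip the "X date" columns themselves; they are looked up below. */
			if ((key_length > suffix_length) && (strcmp (key_s + key_length - suffix_length, date_suffix_s) == 0))
				{
					continue;
				}

			if (json_is_string (value_p))
				{
					char date_key_s [256];
					const char *date_s = NULL;

					/* Build "X date" and fetch the matching measurement date, if any. */
					snprintf (date_key_s, sizeof (date_key_s), "%s%s", key_s, date_suffix_s);
					date_s = json_string_value (json_object_get (row_p, date_key_s));

					printf ("trait \"%s\" = \"%s\" measured on %s\n", key_s, json_string_value (value_p), date_s ? date_s : "<no date>");
				}
		}
}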
733660.c
/* Area:	ffi_call, closure_call
   Purpose:	Check structure alignment of uint64.
   Limitations:	none.
   PR:		none.
   Originator:	<[email protected]> 20031203	 */

/* { dg-do run } */
/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */
#include "ffitest.h"

typedef struct cls_struct_align {
  unsigned char a;
  unsigned long long b;
  unsigned char c;
} cls_struct_align;

cls_struct_align cls_struct_align_fn(struct cls_struct_align a1,
                                     struct cls_struct_align a2)
{
  struct cls_struct_align result;

  result.a = a1.a + a2.a;
  result.b = a1.b + a2.b;
  result.c = a1.c + a2.c;

  printf("%d %" PRIdLL " %d %d %" PRIdLL " %d: %d %" PRIdLL " %d\n",
         a1.a, a1.b, a1.c, a2.a, a2.b, a2.c,
         result.a, result.b, result.c);

  return result;
}

static void
cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args,
                    void* userdata __UNUSED__)
{
  struct cls_struct_align a1, a2;

  a1 = *(struct cls_struct_align*)(args[0]);
  a2 = *(struct cls_struct_align*)(args[1]);

  *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2);
}

int main (void)
{
  ffi_cif cif;
  void *code;
  ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code);
  void* args_dbl[5];
  ffi_type* cls_struct_fields[4];
  ffi_type cls_struct_type;
  ffi_type* dbl_arg_types[5];

  struct cls_struct_align g_dbl = { 12, 4951, 127 };
  struct cls_struct_align f_dbl = { 1, 9320, 13 };
  struct cls_struct_align res_dbl;

  cls_struct_type.size = 0;
  cls_struct_type.alignment = 0;
  cls_struct_type.type = FFI_TYPE_STRUCT;
  cls_struct_type.elements = cls_struct_fields;

  cls_struct_fields[0] = &ffi_type_uchar;
  cls_struct_fields[1] = &ffi_type_uint64;
  cls_struct_fields[2] = &ffi_type_uchar;
  cls_struct_fields[3] = NULL;

  dbl_arg_types[0] = &cls_struct_type;
  dbl_arg_types[1] = &cls_struct_type;
  dbl_arg_types[2] = NULL;

  CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type,
                     dbl_arg_types) == FFI_OK);

  args_dbl[0] = &g_dbl;
  args_dbl[1] = &f_dbl;
  args_dbl[2] = NULL;

  ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl);
  /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */
  printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c);
  /* { dg-output "\nres: 13 14271 140" } */

  CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK);

  res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl);
  /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */
  printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c);
  /* { dg-output "\nres: 13 14271 140" } */

  exit(0);
}
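/*
 * Editor's sketch (not part of the libffi testsuite file above): the test
 * builds an ffi_type descriptor for a struct by hand before calling through
 * libffi.  For comparison, the fragment below performs the same
 * ffi_prep_cif()/ffi_call() round trip with plain scalar arguments, where
 * the built-in ffi_type descriptors can be used directly.  add_ints() and
 * call_add_ints_via_ffi() are made-up names for this illustration.
 */
#include <ffi.h>
#include <stdio.h>

static int add_ints(int a, int b)
{
  return a + b;
}

static void call_add_ints_via_ffi(void)
{
  ffi_cif cif;
  ffi_type *arg_types[2] = { &ffi_type_sint, &ffi_type_sint };
  int a = 2, b = 40;
  void *arg_values[2] = { &a, &b };
  ffi_arg result = 0;   /* return slot must be at least sizeof(ffi_arg) wide */

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint,
                   arg_types) == FFI_OK)
    {
      ffi_call(&cif, FFI_FN(add_ints), &result, arg_values);
      printf("add_ints returned %d\n", (int) result);   /* expect 42 */
    }
}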
807684.c
/*
 * Copyright 2019-2020 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * SEED low level APIs are deprecated for public use, but still ok for
 * internal use.
 */
#include "internal/deprecated.h"

#include "cipher_seed.h"

static int cipher_hw_seed_initkey(PROV_CIPHER_CTX *ctx,
                                  const unsigned char *key, size_t keylen)
{
    PROV_SEED_CTX *sctx = (PROV_SEED_CTX *)ctx;

    SEED_set_key(key, &(sctx->ks.ks));
    return 1;
}

# define PROV_CIPHER_HW_seed_mode(mode, UCMODE)                                \
IMPLEMENT_CIPHER_HW_##UCMODE(mode, seed, PROV_SEED_CTX, SEED_KEY_SCHEDULE,     \
                             SEED_##mode)                                      \
static const PROV_CIPHER_HW seed_##mode = {                                    \
    cipher_hw_seed_initkey,                                                    \
    cipher_hw_seed_##mode##_cipher                                             \
};                                                                             \
const PROV_CIPHER_HW *PROV_CIPHER_HW_seed_##mode(size_t keybits)               \
{                                                                              \
    return &seed_##mode;                                                       \
}

PROV_CIPHER_HW_seed_mode(cbc, CBC)
PROV_CIPHER_HW_seed_mode(ecb, ECB)
PROV_CIPHER_HW_seed_mode(ofb128, OFB)
PROV_CIPHER_HW_seed_mode(cfb128, CFB)
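/*
 * Editor's note (expansion sketch only, not compiled): substituting
 * mode = cbc, UCMODE = CBC into the PROV_CIPHER_HW_seed_mode() macro above
 * yields roughly the code below.  IMPLEMENT_CIPHER_HW_CBC comes from the
 * shared provider cipher headers and is what supplies
 * cipher_hw_seed_cbc_cipher(); its body is not visible in this file, so only
 * the part of the expansion that can be read off the macro text is shown.
 */
#if 0 /* illustration of one macro expansion */
IMPLEMENT_CIPHER_HW_CBC(cbc, seed, PROV_SEED_CTX, SEED_KEY_SCHEDULE, SEED_cbc)

static const PROV_CIPHER_HW seed_cbc = {
    cipher_hw_seed_initkey,        /* key schedule setup defined above */
    cipher_hw_seed_cbc_cipher      /* generated by IMPLEMENT_CIPHER_HW_CBC */
};

const PROV_CIPHER_HW *PROV_CIPHER_HW_seed_cbc(size_t keybits)
{
    return &seed_cbc;
}
#endif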