Dataset Viewer (auto-converted to Parquet)
Columns:
func (string) — source-code snippet; lengths range from 12 to 2.67k characters
cwe (string) — label column with 7 distinct classes
__index_level_0__ (int64) — row index, ranging from 0 to 20k
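Since the split is available as auto-converted Parquet, the schema above can be loaded directly. The following is a minimal sketch only, assuming the Hugging Face `datasets` library and a placeholder Parquet path (neither the library choice nor the file location is specified on this page):

```python
# Minimal sketch (assumptions: `datasets` is installed, and the Parquet file
# path below is a placeholder — substitute the dataset's actual Parquet files).
from datasets import load_dataset

ds = load_dataset(
    "parquet",
    data_files={"train": "path/to/train-*.parquet"},  # placeholder path
    split="train",
)

# Columns follow the schema listed above: `func` (code string),
# `cwe` (one of 7 class labels, e.g. "safe"), `__index_level_0__` (int64 index).
print(ds.column_names)
print(ds[0]["cwe"], len(ds[0]["func"]))

# Example filter: keep only rows whose label is not "safe".
not_safe = ds.filter(lambda row: row["cwe"] != "safe")
print(len(not_safe))
```

The rows below show the viewer's preview of the `func`, `cwe`, and `__index_level_0__` cells, one value per line.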
inline double stirling_approx_tail(double k) { static double kTailValues[] = {0.0810614667953272, 0.0413406959554092, 0.0276779256849983, 0.02079067210376509, 0.0166446911898211, 0.0138761288230707, 0.0118967099458917, 0.0104112652619720, 0.00925546218271273, 0.00833056343336287}; if (k <= 9) { return kTailValues[static_cast<int>(k)]; } double kp1sq = (k + 1) * (k + 1); return (1.0 / 12 - (1.0 / 360 - 1.0 / 1260 / kp1sq) / kp1sq) / (k + 1); }
safe
0
TEST(HeaderMapImplTest, IterateReverse) { TestHeaderMapImpl headers; headers.addCopy("hello", "world"); headers.addCopy("foo", "bar"); LowerCaseString world_key("world"); headers.setReferenceKey(world_key, "hello"); using MockCb = testing::MockFunction<void(const std::string&, const std::string&)>; MockCb cb; InSequence seq; EXPECT_CALL(cb, Call("world", "hello")); EXPECT_CALL(cb, Call("foo", "bar")); // no "hello" headers.iterateReverse( [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { static_cast<MockCb*>(cb_v)->Call(std::string(header.key().getStringView()), std::string(header.value().getStringView())); if (header.key().getStringView() != "foo") { return HeaderMap::Iterate::Continue; } else { return HeaderMap::Iterate::Break; } }, &cb); }
safe
1
inline static void _slurm_rpc_trigger_set(slurm_msg_t * msg) { int rc; uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurmctld_config.auth_info); gid_t gid = g_slurm_auth_get_gid(msg->auth_cred, slurmctld_config.auth_info); trigger_info_msg_t * trigger_ptr = (trigger_info_msg_t *) msg->data; DEF_TIMERS; START_TIMER; debug("Processing RPC: REQUEST_TRIGGER_SET from uid=%d", uid); rc = trigger_set(uid, gid, trigger_ptr); END_TIMER2("_slurm_rpc_trigger_set"); slurm_send_rc_msg(msg, rc); }
safe
2
static void uvesafb_check_limits(struct fb_var_screeninfo *var, struct fb_info *info) { const struct fb_videomode *mode; struct uvesafb_par *par = info->par; /* * If pixclock is set to 0, then we're using default BIOS timings * and thus don't have to perform any checks here. */ if (!var->pixclock) return; if (par->vbe_ib.vbe_version < 0x0300) { fb_get_mode(FB_VSYNCTIMINGS | FB_IGNOREMON, 60, var, info); return; } if (!fb_validate_mode(var, info)) return; mode = fb_find_best_mode(var, &info->modelist); if (mode) { if (mode->xres == var->xres && mode->yres == var->yres && !(mode->vmode & (FB_VMODE_INTERLACED | FB_VMODE_DOUBLE))) { fb_videomode_to_var(var, mode); return; } } if (info->monspecs.gtf && !fb_get_mode(FB_MAXTIMINGS, 0, var, info)) return; /* Use default refresh rate */ var->pixclock = 0; }
safe
3
mesh_state_delete(struct module_qstate* qstate) { struct mesh_area* mesh; struct mesh_state_ref* super, ref; struct mesh_state* mstate; if(!qstate) return; mstate = qstate->mesh_info; mesh = mstate->s.env->mesh; mesh_detach_subs(&mstate->s); if(mstate->list_select == mesh_forever_list) { mesh->num_forever_states --; mesh_list_remove(mstate, &mesh->forever_first, &mesh->forever_last); } else if(mstate->list_select == mesh_jostle_list) { mesh_list_remove(mstate, &mesh->jostle_first, &mesh->jostle_last); } if(!mstate->reply_list && !mstate->cb_list && mstate->super_set.count == 0) { log_assert(mesh->num_detached_states > 0); mesh->num_detached_states--; } if(mstate->reply_list || mstate->cb_list) { log_assert(mesh->num_reply_states > 0); mesh->num_reply_states--; } ref.node.key = &ref; ref.s = mstate; RBTREE_FOR(super, struct mesh_state_ref*, &mstate->super_set) { (void)rbtree_delete(&super->s->sub_set, &ref); } (void)rbtree_delete(&mesh->run, mstate); (void)rbtree_delete(&mesh->all, mstate); mesh_state_cleanup(mstate); }
safe
4
void CLASS nikon_coolscan_load_raw() { if(!image) throw LIBRAW_EXCEPTION_IO_CORRUPT; int bypp = tiff_bps <= 8 ? 1 : 2; int bufsize = width * 3 * bypp; if (tiff_bps <= 8) gamma_curve(1.0 / imgdata.params.coolscan_nef_gamma, 0., 1, 255); else gamma_curve(1.0/imgdata.params.coolscan_nef_gamma,0.,1,65535); fseek (ifp, data_offset, SEEK_SET); unsigned char *buf = (unsigned char*)malloc(bufsize); unsigned short *ubuf = (unsigned short *)buf; for(int row = 0; row < raw_height; row++) { int red = fread (buf, 1, bufsize, ifp); unsigned short (*ip)[4] = (unsigned short (*)[4]) image + row*width; if(tiff_bps <= 8) for(int col=0; col<width;col++) { ip[col][0] = curve[buf[col*3]]; ip[col][1] = curve[buf[col*3+1]]; ip[col][2] = curve[buf[col*3+2]]; ip[col][3]=0; } else for(int col=0; col<width;col++) { ip[col][0] = curve[ubuf[col*3]]; ip[col][1] = curve[ubuf[col*3+1]]; ip[col][2] = curve[ubuf[col*3+2]]; ip[col][3]=0; } } free(buf); }
safe
5
static int nl80211_msg_put_wmm_rules(struct sk_buff *msg, const struct ieee80211_reg_rule *rule) { int j; struct nlattr *nl_wmm_rules = nla_nest_start_noflag(msg, NL80211_FREQUENCY_ATTR_WMM); if (!nl_wmm_rules) goto nla_put_failure; for (j = 0; j < IEEE80211_NUM_ACS; j++) { struct nlattr *nl_wmm_rule = nla_nest_start_noflag(msg, j); if (!nl_wmm_rule) goto nla_put_failure; if (nla_put_u16(msg, NL80211_WMMR_CW_MIN, rule->wmm_rule.client[j].cw_min) || nla_put_u16(msg, NL80211_WMMR_CW_MAX, rule->wmm_rule.client[j].cw_max) || nla_put_u8(msg, NL80211_WMMR_AIFSN, rule->wmm_rule.client[j].aifsn) || nla_put_u16(msg, NL80211_WMMR_TXOP, rule->wmm_rule.client[j].cot)) goto nla_put_failure; nla_nest_end(msg, nl_wmm_rule); } nla_nest_end(msg, nl_wmm_rules); return 0; nla_put_failure: return -ENOBUFS; }
safe
6
add_index_file(ParsedURL *pu, URLFile *uf) { char *p, *q; TextList *index_file_list = NULL; TextListItem *ti; if (non_null(index_file)) index_file_list = make_domain_list(index_file); if (index_file_list == NULL) { uf->stream = NULL; return; } for (ti = index_file_list->first; ti; ti = ti->next) { p = Strnew_m_charp(pu->file, "/", file_quote(ti->ptr), NULL)->ptr; p = cleanupName(p); q = cleanupName(file_unquote(p)); examineFile(q, uf); if (uf->stream != NULL) { pu->file = p; pu->real_file = q; return; } } }
safe
7
static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc, u64 ns, bool matched) { struct kvm *kvm = vcpu->kvm; lockdep_assert_held(&kvm->arch.tsc_write_lock); /* * We also track th most recent recorded KHZ, write and time to * allow the matching interval to be extended at each write. */ kvm->arch.last_tsc_nsec = ns; kvm->arch.last_tsc_write = tsc; kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; kvm->arch.last_tsc_offset = offset; vcpu->arch.last_guest_tsc = tsc; kvm_vcpu_write_tsc_offset(vcpu, offset); if (!matched) { /* * We split periods of matched TSC writes into generations. * For each generation, we track the original measured * nanosecond time, offset, and write, so if TSCs are in * sync, we can match exact offset, and if not, we can match * exact software computation in compute_guest_tsc() * * These values are tracked in kvm->arch.cur_xxx variables. */ kvm->arch.cur_tsc_generation++; kvm->arch.cur_tsc_nsec = ns; kvm->arch.cur_tsc_write = tsc; kvm->arch.cur_tsc_offset = offset; kvm->arch.nr_vcpus_matched_tsc = 0; } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) { kvm->arch.nr_vcpus_matched_tsc++; } /* Keep track of which generation this VCPU has synchronized to */ vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; kvm_track_tsc_matching(vcpu); }
safe
8
static void trace_packet(const struct sk_buff *skb, unsigned int hook, const struct net_device *in, const struct net_device *out, const char *tablename, const struct xt_table_info *private, const struct ipt_entry *e) { const void *table_base; const struct ipt_entry *root; const char *hookname, *chainname, *comment; const struct ipt_entry *iter; unsigned int rulenum = 0; table_base = private->entries[smp_processor_id()]; root = get_entry(table_base, private->hook_entry[hook]); hookname = chainname = hooknames[hook]; comment = comments[NF_IP_TRACE_COMMENT_RULE]; xt_entry_foreach(iter, root, private->size - private->hook_entry[hook]) if (get_chainname_rulenum(iter, e, hookname, &chainname, &comment, &rulenum) != 0) break; nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", tablename, chainname, comment, rulenum); }
safe
9
static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, int type, struct igmpv3_grec **ppgr, unsigned int mtu) { struct net_device *dev = pmc->interface->dev; struct igmpv3_report *pih; struct igmpv3_grec *pgr; if (!skb) { skb = igmpv3_newpack(dev, mtu); if (!skb) return NULL; } pgr = skb_put(skb, sizeof(struct igmpv3_grec)); pgr->grec_type = type; pgr->grec_auxwords = 0; pgr->grec_nsrcs = 0; pgr->grec_mca = pmc->multiaddr; pih = igmpv3_report_hdr(skb); pih->ngrec = htons(ntohs(pih->ngrec)+1); *ppgr = pgr; return skb; }
safe
10
TfLiteStatus Subgraph::UndoAllDelegates() { // Return early if there is nothing to reset to. if (pre_delegation_execution_plan_.empty()) return kTfLiteOk; // First free all delegate nodes. for (int execution_plan_index = 0; execution_plan_index < execution_plan_.size(); ++execution_plan_index) { int node_index = execution_plan_[execution_plan_index]; TfLiteNode& node = nodes_and_registration_[node_index].first; if (node.delegate == nullptr) { continue; } CleanupNode(node_index); } // Reset execution plan. execution_plan_ = pre_delegation_execution_plan_; pre_delegation_execution_plan_.clear(); // Delegate nodes are appended to nodes_and_registration_. Therefore, // cleanup nodes_and_registration_ to only contain nodes from // pre_delegation_execution_plan_. int max_retained_node_index = 0; for (int execution_plan_index = 0; execution_plan_index < execution_plan_.size(); ++execution_plan_index) { max_retained_node_index = std::max(max_retained_node_index, execution_plan_[execution_plan_index]); } nodes_and_registration_.resize(max_retained_node_index + 1); // After undoing delegates, the graph is uninvokable, but mutable. state_ = kStateUninvokable; delegates_undone_ = true; return kTfLiteOk; }
safe
11
read_2007_section_appinfo (Bit_Chain *restrict dat, Dwg_Data *restrict dwg, r2007_section *restrict sections_map, r2007_page *restrict pages_map) { Bit_Chain old_dat, sec_dat = { 0 }; Bit_Chain *str_dat; Dwg_AppInfo *_obj = &dwg->appinfo; Dwg_Object *obj = NULL; int error = 0; BITCODE_RL rcount1 = 0, rcount2 = 0; // not compressed, page size: 0x80 error = read_data_section (&sec_dat, dat, sections_map, pages_map, SECTION_APPINFO); if (error >= DWG_ERR_CRITICAL || !sec_dat.chain) { LOG_INFO ("%s section not found\n", "AppInfo"); if (sec_dat.chain) free (sec_dat.chain); return error; } LOG_TRACE ("\nAppInfo (%lu)\n-------------------\n", sec_dat.size) old_dat = *dat; str_dat = dat = &sec_dat; // restrict in size bit_chain_set_version (&old_dat, dat); // clang-format off #include "appinfo.spec" // clang-format on LOG_TRACE ("\n") if (sec_dat.chain) free (sec_dat.chain); *dat = old_dat; // unrestrict return error; }
safe
12
static int selinux_syslog(int type) { int rc; switch (type) { case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */ case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */ rc = task_has_system(current, SYSTEM__SYSLOG_READ); break; case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */ case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */ /* Set level of messages printed to console */ case SYSLOG_ACTION_CONSOLE_LEVEL: rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE); break; case SYSLOG_ACTION_CLOSE: /* Close log */ case SYSLOG_ACTION_OPEN: /* Open log */ case SYSLOG_ACTION_READ: /* Read from log */ case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */ case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */ default: rc = task_has_system(current, SYSTEM__SYSLOG_MOD); break; } return rc; }
safe
13
int sas_smp_phy_control(struct domain_device *dev, int phy_id, enum phy_func phy_func, struct sas_phy_linkrates *rates) { u8 *pc_req; u8 *pc_resp; int res; pc_req = alloc_smp_req(PC_REQ_SIZE); if (!pc_req) return -ENOMEM; pc_resp = alloc_smp_resp(PC_RESP_SIZE); if (!pc_resp) { kfree(pc_req); return -ENOMEM; } pc_req[1] = SMP_PHY_CONTROL; pc_req[9] = phy_id; pc_req[10]= phy_func; if (rates) { pc_req[32] = rates->minimum_linkrate << 4; pc_req[33] = rates->maximum_linkrate << 4; } res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE); if (res) { pr_err("ex %016llx phy%02d PHY control failed: %d\n", SAS_ADDR(dev->sas_addr), phy_id, res); } else if (pc_resp[2] != SMP_RESP_FUNC_ACC) { pr_err("ex %016llx phy%02d PHY control failed: function result 0x%x\n", SAS_ADDR(dev->sas_addr), phy_id, pc_resp[2]); res = pc_resp[2]; } kfree(pc_resp); kfree(pc_req); return res; }
safe
14
OJPEGWriteStreamSof(TIFF* tif, void** mem, uint32* len) { OJPEGState* sp=(OJPEGState*)tif->tif_data; uint8 m; assert(OJPEG_BUFFER>=2+8+sp->samples_per_pixel_per_plane*3); assert(255>=8+sp->samples_per_pixel_per_plane*3); sp->out_buffer[0]=255; sp->out_buffer[1]=sp->sof_marker_id; /* Lf */ sp->out_buffer[2]=0; sp->out_buffer[3]=8+sp->samples_per_pixel_per_plane*3; /* P */ sp->out_buffer[4]=8; /* Y */ sp->out_buffer[5]=(uint8)(sp->sof_y>>8); sp->out_buffer[6]=(sp->sof_y&255); /* X */ sp->out_buffer[7]=(uint8)(sp->sof_x>>8); sp->out_buffer[8]=(sp->sof_x&255); /* Nf */ sp->out_buffer[9]=sp->samples_per_pixel_per_plane; for (m=0; m<sp->samples_per_pixel_per_plane; m++) { /* C */ sp->out_buffer[10+m*3]=sp->sof_c[sp->plane_sample_offset+m]; /* H and V */ sp->out_buffer[10+m*3+1]=sp->sof_hv[sp->plane_sample_offset+m]; /* Tq */ sp->out_buffer[10+m*3+2]=sp->sof_tq[sp->plane_sample_offset+m]; } *len=10+sp->samples_per_pixel_per_plane*3; *mem=(void*)sp->out_buffer; sp->out_state++; }
safe
15
struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht, int dd_data_size, bool xmit_can_sleep) { struct Scsi_Host *shost; struct iscsi_host *ihost; shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size); if (!shost) return NULL; ihost = shost_priv(shost); if (xmit_can_sleep) { snprintf(ihost->workq_name, sizeof(ihost->workq_name), "iscsi_q_%d", shost->host_no); ihost->workq = alloc_workqueue("%s", WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, 1, ihost->workq_name); if (!ihost->workq) goto free_host; } spin_lock_init(&ihost->lock); ihost->state = ISCSI_HOST_SETUP; ihost->num_sessions = 0; init_waitqueue_head(&ihost->session_removal_wq); return shost; free_host: scsi_host_put(shost); return NULL; }
safe
16
void lease_file_rewrite(void) { int index; unsigned short eport, iport; int proto; char iaddr[32]; char desc[64]; char rhost[40]; unsigned int timestamp; if (lease_file == NULL) return; remove(lease_file); for(index = 0; ; index++) { if(get_redirect_rule_by_index(index, 0/*ifname*/, &eport, iaddr, sizeof(iaddr), &iport, &proto, desc, sizeof(desc), rhost, sizeof(rhost), &timestamp, 0, 0) < 0) break; if(lease_file_add(eport, iaddr, iport, proto, desc, timestamp) < 0) break; } }
safe
17
void r_bin_mdmp_free(struct r_bin_mdmp_obj *obj) { if (!obj) { return; } r_list_free (obj->streams.ex_threads); r_list_free (obj->streams.memories); r_list_free (obj->streams.memories64.memories); r_list_free (obj->streams.memory_infos); r_list_free (obj->streams.modules); r_list_free (obj->streams.operations); r_list_free (obj->streams.thread_infos); r_list_free (obj->streams.threads); r_list_free (obj->streams.token_infos); r_list_free (obj->streams.unloaded_modules); free (obj->streams.exception); free (obj->streams.system_info); free (obj->streams.comments_a); free (obj->streams.comments_w); free (obj->streams.handle_data); free (obj->streams.function_table); free (obj->streams.misc_info.misc_info_1); r_list_free (obj->pe32_bins); r_list_free (obj->pe64_bins); r_buf_free (obj->b); free (obj->hdr); obj->b = NULL; free (obj); return; }
safe
18
GF_Err sdtp_box_read(GF_Box *s, GF_BitStream *bs) { GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox*)s; /*out-of-order sdtp, assume no padding at the end*/ if (!ptr->sampleCount) ptr->sampleCount = (u32) ptr->size; else if (ptr->sampleCount > (u32) ptr->size) return GF_ISOM_INVALID_FILE; ptr->sample_info = (u8 *) gf_malloc(sizeof(u8)*ptr->sampleCount); if (!ptr->sample_info) return GF_OUT_OF_MEM; ptr->sample_alloc = ptr->sampleCount; gf_bs_read_data(bs, (char*)ptr->sample_info, ptr->sampleCount); ISOM_DECREASE_SIZE(ptr, ptr->sampleCount); return GF_OK;
safe
19
get_keyblock_byfprint (KBNODE * ret_keyblock, const byte * fprint, size_t fprint_len) { int rc; if (fprint_len == 20 || fprint_len == 16) { struct getkey_ctx_s ctx; memset (&ctx, 0, sizeof ctx); ctx.not_allocated = 1; ctx.kr_handle = keydb_new (); ctx.nitems = 1; ctx.items[0].mode = (fprint_len == 16 ? KEYDB_SEARCH_MODE_FPR16 : KEYDB_SEARCH_MODE_FPR20); memcpy (ctx.items[0].u.fpr, fprint, fprint_len); rc = lookup (&ctx, ret_keyblock, 0); get_pubkey_end (&ctx); } else rc = GPG_ERR_GENERAL; /* Oops */ return rc; }
safe
20
GF_Err gf_isom_read_null_terminated_string(GF_Box *s, GF_BitStream *bs, u64 size, char **out_str) { u32 len=10; u32 i=0; *out_str = gf_malloc(sizeof(char)*len); while (1) { ISOM_DECREASE_SIZE(s, 1 ); (*out_str)[i] = gf_bs_read_u8(bs); if (!(*out_str)[i]) break; i++; if (i==len) { len += 10; *out_str = gf_realloc(*out_str, sizeof(char)*len); } if (gf_bs_available(bs) == 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] missing null character in null terminated string\n")); (*out_str)[i] = 0; return GF_OK; } if (i >= size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] string bigger than container, probably missing null character\n")); (*out_str)[i] = 0; return GF_OK; } } return GF_OK;
safe
21
void luaV_objlen (lua_State *L, StkId ra, const TValue *rb) { const TValue *tm; switch (ttypetag(rb)) { case LUA_VTABLE: { Table *h = hvalue(rb); tm = fasttm(L, h->metatable, TM_LEN); if (tm) break; /* metamethod? break switch to call it */ setivalue(s2v(ra), luaH_getn(h)); /* else primitive len */ return; } case LUA_VSHRSTR: { setivalue(s2v(ra), tsvalue(rb)->shrlen); return; } case LUA_VLNGSTR: { setivalue(s2v(ra), tsvalue(rb)->u.lnglen); return; } default: { /* try metamethod */ tm = luaT_gettmbyobj(L, rb, TM_LEN); if (l_unlikely(notm(tm))) /* no metamethod? */ luaG_typeerror(L, rb, "get length of"); break; } } luaT_callTMres(L, tm, rb, rb, ra); }
safe
22
bool detect_ramfs_rootfs(void) { FILE *f; char *p, *p2; char *line = NULL; size_t len = 0; int i; f = fopen("/proc/self/mountinfo", "r"); if (!f) return false; while (getline(&line, &len, f) != -1) { for (p = line, i = 0; p && i < 4; i++) p = strchr(p + 1, ' '); if (!p) continue; p2 = strchr(p + 1, ' '); if (!p2) continue; *p2 = '\0'; if (strcmp(p + 1, "/") == 0) { // this is '/'. is it the ramfs? p = strchr(p2 + 1, '-'); if (p && strncmp(p, "- rootfs rootfs ", 16) == 0) { free(line); fclose(f); return true; } } } free(line); fclose(f); return false; }
safe
23
static int MP4_ReadBox_dref( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_dref_t ); MP4_GETVERSIONFLAGS( p_box->data.p_dref ); MP4_GET4BYTES( p_box->data.p_dref->i_entry_count ); stream_Seek( p_stream, p_box->i_pos + mp4_box_headersize( p_box ) + 8 ); MP4_ReadBoxContainerRaw( p_stream, p_box ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"dref\" entry-count %d", p_box->data.p_dref->i_entry_count ); #endif MP4_READBOX_EXIT( 1 ); }
safe
24
void iscsi_session_teardown(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; struct module *owner = cls_session->transport->owner; struct Scsi_Host *shost = session->host; iscsi_pool_free(&session->cmdpool); iscsi_remove_session(cls_session); kfree(session->password); kfree(session->password_in); kfree(session->username); kfree(session->username_in); kfree(session->targetname); kfree(session->targetalias); kfree(session->initiatorname); kfree(session->boot_root); kfree(session->boot_nic); kfree(session->boot_target); kfree(session->ifacename); kfree(session->portal_type); kfree(session->discovery_parent_type); iscsi_free_session(cls_session); iscsi_host_dec_session_cnt(shost); module_put(owner); }
safe
25
map_token (const value_valuestring *token_map, guint8 codepage, guint8 token) { const value_string *vs; const char *s; if (token_map) { /* Found map */ if ((vs = val_to_valstr (codepage, token_map))) { /* Found codepage map */ s = try_val_to_str (token, vs); if (s) { /* Found valid token */ DebugLog(("map_token(codepage = %u, token = %u: [%s]\n", codepage, token, s)); return s; } /* No valid token mapping in specified code page of token map */ DebugLog(("map_token(codepage = %u, token = %u: " wbxml_UNDEFINED_TOKEN "\n", codepage, token)); return wbxml_UNDEFINED_TOKEN; } /* There is no token map entry for the requested code page */ DebugLog(("map_token(codepage = %u, token = %u: " wbxml_UNDEFINED_TOKEN_CODE_PAGE "\n", codepage, token)); return wbxml_UNDEFINED_TOKEN_CODE_PAGE; } /* The token map does not exist */ DebugLog(("map_token(codepage = %u, token = %u: " wbxml_UNDEFINED_TOKEN_MAP "\n", codepage, token)); return wbxml_UNDEFINED_TOKEN_MAP; }
safe
26
void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part) { struct hd_struct *part2 = NULL; unsigned long now = jiffies; unsigned int inflight[2]; int stats = 0; if (part->stamp != now) stats |= 1; if (part->partno) { part2 = &part_to_disk(part)->part0; if (part2->stamp != now) stats |= 2; } if (!stats) return; part_in_flight(q, part, inflight); if (stats & 2) part_round_stats_single(q, cpu, part2, now, inflight[1]); if (stats & 1) part_round_stats_single(q, cpu, part, now, inflight[0]); }
safe
27
struct platform_device *platform_device_register_full( const struct platform_device_info *pdevinfo) { int ret; struct platform_device *pdev; pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id); if (!pdev) return ERR_PTR(-ENOMEM); pdev->dev.parent = pdevinfo->parent; pdev->dev.fwnode = pdevinfo->fwnode; pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode)); pdev->dev.of_node_reused = pdevinfo->of_node_reused; if (pdevinfo->dma_mask) { pdev->platform_dma_mask = pdevinfo->dma_mask; pdev->dev.dma_mask = &pdev->platform_dma_mask; pdev->dev.coherent_dma_mask = pdevinfo->dma_mask; } ret = platform_device_add_resources(pdev, pdevinfo->res, pdevinfo->num_res); if (ret) goto err; ret = platform_device_add_data(pdev, pdevinfo->data, pdevinfo->size_data); if (ret) goto err; if (pdevinfo->properties) { ret = platform_device_add_properties(pdev, pdevinfo->properties); if (ret) goto err; } ret = platform_device_add(pdev); if (ret) { err: ACPI_COMPANION_SET(&pdev->dev, NULL); platform_device_put(pdev); return ERR_PTR(ret); } return pdev; }
safe
28
get_next_address( address_node *addr ) { const char addr_prefix[] = "192.168.0."; static int curr_addr_num = 1; #define ADDR_LENGTH 16 + 1 /* room for 192.168.1.255 */ char addr_string[ADDR_LENGTH]; sockaddr_u *final_addr; struct addrinfo *ptr; int gai_err; final_addr = emalloc(sizeof(*final_addr)); if (addr->type == T_String) { snprintf(addr_string, sizeof(addr_string), "%s%d", addr_prefix, curr_addr_num++); printf("Selecting ip address %s for hostname %s\n", addr_string, addr->address); gai_err = getaddrinfo(addr_string, "ntp", NULL, &ptr); } else { gai_err = getaddrinfo(addr->address, "ntp", NULL, &ptr); } if (gai_err) { fprintf(stderr, "ERROR!! Could not get a new address\n"); exit(1); } memcpy(final_addr, ptr->ai_addr, ptr->ai_addrlen); fprintf(stderr, "Successful in setting ip address of simulated server to: %s\n", stoa(final_addr)); freeaddrinfo(ptr); return final_addr; }
safe
29
static void tracing_start_tr(struct trace_array *tr) { struct ring_buffer *buffer; unsigned long flags; if (tracing_disabled) return; /* If global, we need to also start the max tracer */ if (tr->flags & TRACE_ARRAY_FL_GLOBAL) return tracing_start(); raw_spin_lock_irqsave(&tr->start_lock, flags); if (--tr->stop_count) { if (tr->stop_count < 0) { /* Someone screwed up their debugging */ WARN_ON_ONCE(1); tr->stop_count = 0; } goto out; } buffer = tr->trace_buffer.buffer; if (buffer) ring_buffer_record_enable(buffer); out: raw_spin_unlock_irqrestore(&tr->start_lock, flags); }
safe
30
} EXPORT_SYMBOL_GPL(iscsi_unblock_session); static void __iscsi_block_session(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, block_work); unsigned long flags; ISCSI_DBG_TRANS_SESSION(session, "Blocking session\n"); spin_lock_irqsave(&session->lock, flags); session->state = ISCSI_SESSION_FAILED; spin_unlock_irqrestore(&session->lock, flags); scsi_target_block(&session->dev); ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n"); if (session->recovery_tmo >= 0) queue_delayed_work(iscsi_eh_timer_workq,
safe
31
delete_nsec(dns_db_t *db, dns_dbversion_t *ver, dns_dbnode_t *node, dns_name_t *name, dns_diff_t *diff) { dns_rdataset_t rdataset; isc_result_t result; dns_rdataset_init(&rdataset); result = dns_db_findrdataset(db, node, ver, dns_rdatatype_nsec, 0, 0, &rdataset, NULL); if (result == ISC_R_NOTFOUND) return (ISC_R_SUCCESS); if (result != ISC_R_SUCCESS) return (result); for (result = dns_rdataset_first(&rdataset); result == ISC_R_SUCCESS; result = dns_rdataset_next(&rdataset)) { dns_rdata_t rdata = DNS_RDATA_INIT; dns_rdataset_current(&rdataset, &rdata); CHECK(update_one_rr(db, ver, diff, DNS_DIFFOP_DEL, name, rdataset.ttl, &rdata)); } if (result == ISC_R_NOMORE) result = ISC_R_SUCCESS; failure: dns_rdataset_disassociate(&rdataset); return (result); }
safe
32
static bool checkreturn pb_dec_bytes(pb_istream_t *stream, const pb_field_t *field, void *dest) { uint32_t size; size_t alloc_size; pb_bytes_array_t *bdest; if (!pb_decode_varint32(stream, &size)) return false; alloc_size = PB_BYTES_ARRAY_T_ALLOCSIZE(size); if (size > alloc_size) PB_RETURN_ERROR(stream, "size too large"); if (PB_ATYPE(field->type) == PB_ATYPE_POINTER) { #ifndef PB_ENABLE_MALLOC PB_RETURN_ERROR(stream, "no malloc support"); #else if (!allocate_field(stream, dest, alloc_size, 1)) return false; bdest = *(pb_bytes_array_t**)dest; #endif } else { if (alloc_size > field->data_size) PB_RETURN_ERROR(stream, "bytes overflow"); bdest = (pb_bytes_array_t*)dest; } bdest->size = size; return pb_read(stream, bdest->bytes, size); }
safe
33
aac_type_find_scan_loas_frames_ep (GstTypeFind * tf, DataScanCtx * scan_ctx, gint max_frames) { DataScanCtx c = *scan_ctx; guint16 snc; guint len; gint count = 0; do { if (!data_scan_ctx_ensure_data (tf, &c, 5)) break; /* EPAudioSyncStream */ len = ((c.data[2] & 0x0f) << 9) | (c.data[3] << 1) | ((c.data[4] & 0x80) >> 7); if (len == 0 || !data_scan_ctx_ensure_data (tf, &c, len + 2)) { GST_DEBUG ("Wrong sync or next frame not within reach, len=%u", len); break; } /* check length of frame */ snc = GST_READ_UINT16_BE (c.data + len); if (snc != 0x4de1) { GST_DEBUG ("No sync found at 0x%" G_GINT64_MODIFIER "x", c.offset + len); break; } ++count; GST_DEBUG ("Found LOAS syncword #%d at offset 0x%" G_GINT64_MODIFIER "x, " "framelen %u", count, c.offset, len); data_scan_ctx_advance (tf, &c, len); } while (count < max_frames && (c.offset - scan_ctx->offset) < 64 * 1024); GST_DEBUG ("found %d consecutive frames", count); return count; }
safe
34
_g_path_is_parent_of (const char *dirname, const char *filename) { int dirname_l, filename_l, separator_position; if ((dirname == NULL) || (filename == NULL)) return FALSE; dirname_l = strlen (dirname); filename_l = strlen (filename); if ((dirname_l == filename_l + 1) && (dirname[dirname_l - 1] == '/')) return FALSE; if ((filename_l == dirname_l + 1) && (filename[filename_l - 1] == '/')) return FALSE; if (dirname[dirname_l - 1] == '/') separator_position = dirname_l - 1; else separator_position = dirname_l; return ((filename_l > dirname_l) && (strncmp (dirname, filename, dirname_l) == 0) && (filename[separator_position] == '/')); }
safe
35
string_append_listele(uschar * list, uschar sep, const uschar * ele) { uschar * new = NULL; int sz = 0, off = 0; uschar * sp; if (list) { new = string_cat(new, &sz, &off, list, Ustrlen(list)); new = string_cat(new, &sz, &off, &sep, 1); } while((sp = Ustrchr(ele, sep))) { new = string_cat(new, &sz, &off, ele, sp-ele+1); new = string_cat(new, &sz, &off, &sep, 1); ele = sp+1; } new = string_cat(new, &sz, &off, ele, Ustrlen(ele)); new[off] = '\0'; return new; }
safe
36
xmlNewTextChild(xmlNodePtr parent, xmlNsPtr ns, const xmlChar *name, const xmlChar *content) { xmlNodePtr cur, prev; if (parent == NULL) { #ifdef DEBUG_TREE xmlGenericError(xmlGenericErrorContext, "xmlNewTextChild : parent == NULL\n"); #endif return(NULL); } if (name == NULL) { #ifdef DEBUG_TREE xmlGenericError(xmlGenericErrorContext, "xmlNewTextChild : name == NULL\n"); #endif return(NULL); } /* * Allocate a new node */ if (parent->type == XML_ELEMENT_NODE) { if (ns == NULL) cur = xmlNewDocRawNode(parent->doc, parent->ns, name, content); else cur = xmlNewDocRawNode(parent->doc, ns, name, content); } else if ((parent->type == XML_DOCUMENT_NODE) || (parent->type == XML_HTML_DOCUMENT_NODE)) { if (ns == NULL) cur = xmlNewDocRawNode((xmlDocPtr) parent, NULL, name, content); else cur = xmlNewDocRawNode((xmlDocPtr) parent, ns, name, content); } else if (parent->type == XML_DOCUMENT_FRAG_NODE) { cur = xmlNewDocRawNode( parent->doc, ns, name, content); } else { return(NULL); } if (cur == NULL) return(NULL); /* * add the new element at the end of the children list. */ cur->type = XML_ELEMENT_NODE; cur->parent = parent; cur->doc = parent->doc; if (parent->children == NULL) { parent->children = cur; parent->last = cur; } else { prev = parent->last; prev->next = cur; cur->prev = prev; parent->last = cur; } return(cur); }
safe
37
MODULE_ENTRY (fill_info) (GdkPixbufFormat *info) { static const GdkPixbufModulePattern signature[] = { { " \x1\x1", "x ", 100 }, { " \x1\x9", "x ", 100 }, { " \x2", "xz ", 99 }, /* only 99 since .CUR also matches this */ { " \x3", "xz ", 100 }, { " \xa", "xz ", 100 }, { " \xb", "xz ", 100 }, { NULL, NULL, 0 } }; static const gchar *mime_types[] = { "image/x-tga", NULL }; static const gchar *extensions[] = { "tga", "targa", NULL }; info->name = "tga"; info->signature = (GdkPixbufModulePattern *) signature; info->description = NC_("image format", "Targa"); info->mime_types = (gchar **) mime_types; info->extensions = (gchar **) extensions; info->flags = GDK_PIXBUF_FORMAT_THREADSAFE; info->license = "LGPL"; }
safe
38
longlong Field::convert_decimal2longlong(const my_decimal *val, bool unsigned_flag, int *err) { longlong i; if (unsigned_flag) { if (val->sign()) { set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1); i= 0; *err= 1; } else if (warn_if_overflow(my_decimal2int((E_DEC_ERROR & ~E_DEC_OVERFLOW & ~E_DEC_TRUNCATED), val, TRUE, &i))) { i= ~(longlong) 0; *err= 1; } } else if (warn_if_overflow(my_decimal2int((E_DEC_ERROR & ~E_DEC_OVERFLOW & ~E_DEC_TRUNCATED), val, FALSE, &i))) { i= (val->sign() ? LONGLONG_MIN : LONGLONG_MAX); *err= 1; } return i; }
safe
39
main(int argc, char *argv[]) { oid objid[MAX_OID_LEN]; int objidlen = MAX_OID_LEN; int count; netsnmp_variable_list variable; netsnmp_init_mib(); if (argc < 2) print_subtree(stdout, tree_head, 0); variable.type = ASN_INTEGER; variable.val.integer = 3; variable.val_len = 4; for (argc--; argc; argc--, argv++) { objidlen = MAX_OID_LEN; printf("read_objid(%s) = %d\n", argv[1], read_objid(argv[1], objid, &objidlen)); for (count = 0; count < objidlen; count++) printf("%d.", objid[count]); printf("\n"); print_variable(objid, objidlen, &variable); } }
safe
40
int fpm_unix_resolve_socket_premissions(struct fpm_worker_pool_s *wp) /* {{{ */ { struct fpm_worker_pool_config_s *c = wp->config; /* uninitialized */ wp->socket_uid = -1; wp->socket_gid = -1; wp->socket_mode = 0660; if (!c) { return 0; } if (c->listen_owner && *c->listen_owner) { struct passwd *pwd; pwd = getpwnam(c->listen_owner); if (!pwd) { zlog(ZLOG_SYSERROR, "[pool %s] cannot get uid for user '%s'", wp->config->name, c->listen_owner); return -1; } wp->socket_uid = pwd->pw_uid; wp->socket_gid = pwd->pw_gid; } if (c->listen_group && *c->listen_group) { struct group *grp; grp = getgrnam(c->listen_group); if (!grp) { zlog(ZLOG_SYSERROR, "[pool %s] cannot get gid for group '%s'", wp->config->name, c->listen_group); return -1; } wp->socket_gid = grp->gr_gid; } if (c->listen_mode && *c->listen_mode) { wp->socket_mode = strtoul(c->listen_mode, 0, 8); } return 0; }
safe
41
ex_tabs(exarg_T *eap UNUSED) { tabpage_T *tp; win_T *wp; int tabcount = 1; msg_start(); msg_scroll = TRUE; for (tp = first_tabpage; tp != NULL && !got_int; tp = tp->tp_next) { msg_putchar('\n'); vim_snprintf((char *)IObuff, IOSIZE, _("Tab page %d"), tabcount++); msg_outtrans_attr(IObuff, HL_ATTR(HLF_T)); out_flush(); /* output one line at a time */ ui_breakcheck(); if (tp == curtab) wp = firstwin; else wp = tp->tp_firstwin; for ( ; wp != NULL && !got_int; wp = wp->w_next) { msg_putchar('\n'); msg_putchar(wp == curwin ? '>' : ' '); msg_putchar(' '); msg_putchar(bufIsChanged(wp->w_buffer) ? '+' : ' '); msg_putchar(' '); if (buf_spname(wp->w_buffer) != NULL) vim_strncpy(IObuff, buf_spname(wp->w_buffer), IOSIZE - 1); else home_replace(wp->w_buffer, wp->w_buffer->b_fname, IObuff, IOSIZE, TRUE); msg_outtrans(IObuff); out_flush(); /* output one line at a time */ ui_breakcheck(); } } }
safe
42
static __init void vmx_set_cpu_caps(void) { kvm_set_cpu_caps(); /* CPUID 0x1 */ if (nested) kvm_cpu_cap_set(X86_FEATURE_VMX); /* CPUID 0x7 */ if (kvm_mpx_supported()) kvm_cpu_cap_check_and_set(X86_FEATURE_MPX); if (!cpu_has_vmx_invpcid()) kvm_cpu_cap_clear(X86_FEATURE_INVPCID); if (vmx_pt_mode_is_host_guest()) kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT); if (vmx_umip_emulated()) kvm_cpu_cap_set(X86_FEATURE_UMIP); /* CPUID 0xD.1 */ supported_xss = 0; if (!cpu_has_vmx_xsaves()) kvm_cpu_cap_clear(X86_FEATURE_XSAVES); /* CPUID 0x80000001 */ if (!cpu_has_vmx_rdtscp()) kvm_cpu_cap_clear(X86_FEATURE_RDTSCP); if (cpu_has_vmx_waitpkg()) kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG); }
safe
43
list_terms() { int i; char *line_buffer = gp_alloc(BUFSIZ, "list_terms"); int sort_idxs[TERMCOUNT]; /* sort terminal types alphabetically */ for( i = 0; i < TERMCOUNT; i++ ) sort_idxs[i] = i; qsort( sort_idxs, TERMCOUNT, sizeof(int), termcomp ); /* now sort_idxs[] contains the sorted indices */ StartOutput(); strcpy(line_buffer, "\nAvailable terminal types:\n"); OutLine(line_buffer); for (i = 0; i < TERMCOUNT; i++) { sprintf(line_buffer, " %15s %s\n", term_tbl[sort_idxs[i]].name, term_tbl[sort_idxs[i]].description); OutLine(line_buffer); } EndOutput(); free(line_buffer); }
safe
44
PHP_METHOD(HttpParams, offsetGet) { char *name_str; int name_len; zval **zparam, *zparams; if (SUCCESS != zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &name_str, &name_len)) { return; } zparams = php_http_ztyp(IS_ARRAY, zend_read_property(php_http_params_class_entry, getThis(), ZEND_STRL("params"), 0 TSRMLS_CC)); if (SUCCESS == zend_symtable_find(Z_ARRVAL_P(zparams), name_str, name_len + 1, (void *) &zparam)) { RETVAL_ZVAL(*zparam, 1, 0); } zval_ptr_dtor(&zparams); }
safe
45
void TCDeleteDeviceObject (PDEVICE_OBJECT DeviceObject, PEXTENSION Extension) { UNICODE_STRING Win32NameString; NTSTATUS ntStatus; Dump ("TCDeleteDeviceObject BEGIN\n"); if (Extension->bRootDevice) { RtlInitUnicodeString (&Win32NameString, (LPWSTR) DOS_ROOT_PREFIX); ntStatus = IoDeleteSymbolicLink (&Win32NameString); if (!NT_SUCCESS (ntStatus)) Dump ("IoDeleteSymbolicLink failed ntStatus = 0x%08x\n", ntStatus); RootDeviceObject = NULL; } else { if (Extension->peThread != NULL) TCStopVolumeThread (DeviceObject, Extension); if (Extension->UserSid) TCfree (Extension->UserSid); if (Extension->SecurityClientContextValid) { if (OsMajorVersion == 5 && OsMinorVersion == 0) { ObDereferenceObject (Extension->SecurityClientContext.ClientToken); } else { // Windows 2000 does not support PsDereferenceImpersonationToken() used by SeDeleteClientSecurity(). // TODO: Use only SeDeleteClientSecurity() once support for Windows 2000 is dropped. VOID (*PsDereferenceImpersonationTokenD) (PACCESS_TOKEN ImpersonationToken); UNICODE_STRING name; RtlInitUnicodeString (&name, L"PsDereferenceImpersonationToken"); PsDereferenceImpersonationTokenD = MmGetSystemRoutineAddress (&name); if (!PsDereferenceImpersonationTokenD) TC_BUG_CHECK (STATUS_NOT_IMPLEMENTED); # define PsDereferencePrimaryToken # define PsDereferenceImpersonationToken PsDereferenceImpersonationTokenD SeDeleteClientSecurity (&Extension->SecurityClientContext); # undef PsDereferencePrimaryToken # undef PsDereferenceImpersonationToken } } VirtualVolumeDeviceObjects[Extension->nDosDriveNo] = NULL; } IoDeleteDevice (DeviceObject); Dump ("TCDeleteDeviceObject END\n"); }
safe
46
static int nfc_genl_disable_se(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc; u32 idx, se_idx; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_SE_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); se_idx = nla_get_u32(info->attrs[NFC_ATTR_SE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; rc = nfc_disable_se(dev, se_idx); nfc_put_device(dev); return rc; }
safe
47
return err; } static int iscsi_logout_flashnode(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct Scsi_Host *shost; struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; uint32_t idx; int err = 0; if (!transport->logout_flashnode) { err = -ENOSYS; goto exit_logout_fnode; } shost = scsi_host_lookup(ev->u.logout_flashnode.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.logout_flashnode.host_no); err = -ENODEV; goto put_host; } idx = ev->u.logout_flashnode.flashnode_idx; fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", __func__, idx, ev->u.logout_flashnode.host_no); err = -ENODEV; goto put_host; } dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->logout_flashnode(fnode_sess, fnode_conn); put_device(dev); put_sess: put_device(&fnode_sess->dev); put_host: scsi_host_put(shost);
safe
48
struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt, const char *name, const struct open_flags *op) { struct nameidata nd; struct file *file; struct filename *filename; int flags = op->lookup_flags | LOOKUP_ROOT; nd.root.mnt = mnt; nd.root.dentry = dentry; if (d_is_symlink(dentry) && op->intent & LOOKUP_OPEN) return ERR_PTR(-ELOOP); filename = getname_kernel(name); if (unlikely(IS_ERR(filename))) return ERR_CAST(filename); file = path_openat(-1, filename, &nd, op, flags | LOOKUP_RCU); if (unlikely(file == ERR_PTR(-ECHILD))) file = path_openat(-1, filename, &nd, op, flags); if (unlikely(file == ERR_PTR(-ESTALE))) file = path_openat(-1, filename, &nd, op, flags | LOOKUP_REVAL); putname(filename); return file; }
safe
49
string_endswith(PyStringObject *self, PyObject *args) { Py_ssize_t start = 0; Py_ssize_t end = PY_SSIZE_T_MAX; PyObject *subobj; int result; if (!stringlib_parse_args_finds("endswith", args, &subobj, &start, &end)) return NULL; if (PyTuple_Check(subobj)) { Py_ssize_t i; for (i = 0; i < PyTuple_GET_SIZE(subobj); i++) { result = _string_tailmatch(self, PyTuple_GET_ITEM(subobj, i), start, end, +1); if (result == -1) return NULL; else if (result) { Py_RETURN_TRUE; } } Py_RETURN_FALSE; } result = _string_tailmatch(self, subobj, start, end, +1); if (result == -1) { if (PyErr_ExceptionMatches(PyExc_TypeError)) PyErr_Format(PyExc_TypeError, "endswith first arg must be str, " "unicode, or tuple, not %s", Py_TYPE(subobj)->tp_name); return NULL; } else return PyBool_FromLong(result); }
safe
50
static void test_conditional_updates() { json_t *object, *other; object = json_pack("{sisi}", "foo", 1, "bar", 2); other = json_pack("{sisi}", "foo", 3, "baz", 4); if(json_object_update_existing(object, other)) fail("json_object_update_existing failed"); if(json_object_size(object) != 2) fail("json_object_update_existing added new items"); if(json_integer_value(json_object_get(object, "foo")) != 3) fail("json_object_update_existing failed to update existing key"); if(json_integer_value(json_object_get(object, "bar")) != 2) fail("json_object_update_existing updated wrong key"); json_decref(object); object = json_pack("{sisi}", "foo", 1, "bar", 2); if(json_object_update_missing(object, other)) fail("json_object_update_missing failed"); if(json_object_size(object) != 3) fail("json_object_update_missing didn't add new items"); if(json_integer_value(json_object_get(object, "foo")) != 1) fail("json_object_update_missing updated existing key"); if(json_integer_value(json_object_get(object, "bar")) != 2) fail("json_object_update_missing updated wrong key"); if(json_integer_value(json_object_get(object, "baz")) != 4) fail("json_object_update_missing didn't add new items"); json_decref(object); json_decref(other); }
safe
51
b64flush(bs, outp) struct b64state *bs; u_char *outp; { int outlen = 0; if (bs->bs_offs == 8) { *outp++ = base64[(bs->bs_bits >> 2) & 0x3F]; *outp++ = base64[(bs->bs_bits << 4) & 0x3F]; outlen = 2; } else if (bs->bs_offs == 16) { *outp++ = base64[(bs->bs_bits >> 10) & 0x3F]; *outp++ = base64[(bs->bs_bits >> 4) & 0x3F]; *outp++ = base64[(bs->bs_bits << 2) & 0x3F]; outlen = 3; } bs->bs_offs = 0; bs->bs_bits = 0; return (outlen); }
safe
52
static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) { struct kvm_memslots *slots; struct kvm_memory_slot *slot; gfn_t gfn; kvm->arch.indirect_shadow_pages++; gfn = sp->gfn; slots = kvm_memslots_for_spte_role(kvm, sp->role); slot = __gfn_to_memslot(slots, gfn); /* the non-leaf shadow pages are keeping readonly. */ if (sp->role.level > PG_LEVEL_4K) return kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE); kvm_mmu_gfn_disallow_lpage(slot, gfn); }
safe
53
decode_NXAST_RAW_LEARN2(const struct nx_action_learn2 *nal, enum ofp_version ofp_version OVS_UNUSED, const struct vl_mff_map *vl_mff_map, uint64_t *tlv_bitmap, struct ofpbuf *ofpacts) { struct ofpbuf b = ofpbuf_const_initializer(nal, ntohs(nal->up.len)); struct ofpact_learn *learn; enum ofperr error; if (nal->pad2) { return OFPERR_NXBAC_MUST_BE_ZERO; } learn = ofpact_put_LEARN(ofpacts); error = decode_LEARN_common(&nal->up, NXAST_RAW_LEARN2, learn); if (error) { return error; } learn->limit = ntohl(nal->limit); if (learn->flags & ~(NX_LEARN_F_SEND_FLOW_REM | NX_LEARN_F_DELETE_LEARNED | NX_LEARN_F_WRITE_RESULT)) { return OFPERR_OFPBAC_BAD_ARGUMENT; } ofpbuf_pull(&b, sizeof *nal); if (learn->flags & NX_LEARN_F_WRITE_RESULT) { error = nx_pull_header(&b, vl_mff_map, &learn->result_dst.field, NULL); if (error) { return error; } if (!learn->result_dst.field->writable) { return OFPERR_OFPBAC_BAD_SET_ARGUMENT; } learn->result_dst.ofs = ntohs(nal->result_dst_ofs); learn->result_dst.n_bits = 1; } else if (nal->result_dst_ofs) { return OFPERR_OFPBAC_BAD_ARGUMENT; } return decode_LEARN_specs(b.data, (char *) nal + ntohs(nal->up.len), vl_mff_map, tlv_bitmap, ofpacts); }
safe
54
CMS_ContentInfo *CMS_encrypt(STACK_OF(X509) *certs, BIO *data, const EVP_CIPHER *cipher, unsigned int flags) { CMS_ContentInfo *cms; int i; X509 *recip; cms = CMS_EnvelopedData_create(cipher); if (!cms) goto merr; for (i = 0; i < sk_X509_num(certs); i++) { recip = sk_X509_value(certs, i); if (!CMS_add1_recipient_cert(cms, recip, flags)) { CMSerr(CMS_F_CMS_ENCRYPT, CMS_R_RECIPIENT_ERROR); goto err; } } if(!(flags & CMS_DETACHED)) CMS_set_detached(cms, 0); if ((flags & (CMS_STREAM|CMS_PARTIAL)) || CMS_final(cms, data, NULL, flags)) return cms; else goto err; merr: CMSerr(CMS_F_CMS_ENCRYPT, ERR_R_MALLOC_FAILURE); err: if (cms) CMS_ContentInfo_free(cms); return NULL; }
safe
55
flatpak_remote_state_lookup_cache (FlatpakRemoteState *self, const char *ref, guint64 *download_size, guint64 *installed_size, const char **metadata, GError **error) { g_autoptr(GVariant) cache_v = NULL; g_autoptr(GVariant) cache = NULL; g_autoptr(GVariant) res = NULL; g_autoptr(GVariant) refdata = NULL; int pos; if (!flatpak_remote_state_ensure_metadata (self, error)) return FALSE; cache_v = g_variant_lookup_value (self->metadata, "xa.cache", NULL); if (cache_v == NULL) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND, _("No flatpak cache in remote '%s' summary"), self->remote_name); return FALSE; } cache = g_variant_get_child_value (cache_v, 0); if (!flatpak_variant_bsearch_str (cache, ref, &pos)) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND, _("No entry for %s in remote '%s' summary flatpak cache "), ref, self->remote_name); return FALSE; } refdata = g_variant_get_child_value (cache, pos); res = g_variant_get_child_value (refdata, 1); if (installed_size) { guint64 v; g_variant_get_child (res, 0, "t", &v); *installed_size = GUINT64_FROM_BE (v); } if (download_size) { guint64 v; g_variant_get_child (res, 1, "t", &v); *download_size = GUINT64_FROM_BE (v); } if (metadata) g_variant_get_child (res, 2, "&s", metadata); return TRUE; }
safe
56
void jpc_qmfb_split_col(jpc_fix_t *a, int numrows, int stride, int parity) { int bufsize = JPC_CEILDIVPOW2(numrows, 1); jpc_fix_t splitbuf[QMFB_SPLITBUFSIZE]; jpc_fix_t *buf = splitbuf; register jpc_fix_t *srcptr; register jpc_fix_t *dstptr; register int n; register int m; int hstartrow; /* Get a buffer. */ if (bufsize > QMFB_SPLITBUFSIZE) { if (!(buf = jas_alloc2(bufsize, sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide in this case. */ abort(); } } if (numrows >= 2) { hstartrow = (numrows + 1 - parity) >> 1; // ORIGINAL (WRONG): m = (parity) ? hstartrow : (numrows - hstartrow); m = numrows - hstartrow; /* Save the samples destined for the highpass channel. */ n = m; dstptr = buf; srcptr = &a[(1 - parity) * stride]; while (n-- > 0) { *dstptr = *srcptr; ++dstptr; srcptr += stride << 1; } /* Copy the appropriate samples into the lowpass channel. */ dstptr = &a[(1 - parity) * stride]; srcptr = &a[(2 - parity) * stride]; n = numrows - m - (!parity); while (n-- > 0) { *dstptr = *srcptr; dstptr += stride; srcptr += stride << 1; } /* Copy the saved samples into the highpass channel. */ dstptr = &a[hstartrow * stride]; srcptr = buf; n = m; while (n-- > 0) { *dstptr = *srcptr; dstptr += stride; ++srcptr; } } /* If the split buffer was allocated on the heap, free this memory. */ if (buf != splitbuf) { jas_free(buf); } }
safe
57
_PRIVATE_ int ldb_register_extended_match_rules(struct ldb_context *ldb) { struct ldb_extended_match_rule *bitmask_and; struct ldb_extended_match_rule *bitmask_or; struct ldb_extended_match_rule *always_false; int ret; /* Register bitmask-and match */ bitmask_and = talloc_zero(ldb, struct ldb_extended_match_rule); if (bitmask_and == NULL) { return LDB_ERR_OPERATIONS_ERROR; } bitmask_and->oid = LDB_OID_COMPARATOR_AND; bitmask_and->callback = ldb_match_bitmask; ret = ldb_register_extended_match_rule(ldb, bitmask_and); if (ret != LDB_SUCCESS) { return ret; } /* Register bitmask-or match */ bitmask_or = talloc_zero(ldb, struct ldb_extended_match_rule); if (bitmask_or == NULL) { return LDB_ERR_OPERATIONS_ERROR; } bitmask_or->oid = LDB_OID_COMPARATOR_OR; bitmask_or->callback = ldb_match_bitmask; ret = ldb_register_extended_match_rule(ldb, bitmask_or); if (ret != LDB_SUCCESS) { return ret; } /* Register always-false match */ always_false = talloc_zero(ldb, struct ldb_extended_match_rule); if (always_false == NULL) { return LDB_ERR_OPERATIONS_ERROR; } always_false->oid = SAMBA_LDAP_MATCH_ALWAYS_FALSE; always_false->callback = ldb_comparator_false; ret = ldb_register_extended_match_rule(ldb, always_false); if (ret != LDB_SUCCESS) { return ret; } return LDB_SUCCESS; }
safe
58
void SSL_copy_session_id(SSL *t, const SSL *f) { CERT *tmp; /* Do we need to to SSL locking? */ SSL_set_session(t, SSL_get_session(f)); /* * what if we are setup as SSLv2 but want to talk SSLv3 or vice-versa */ if (t->method != f->method) { t->method->ssl_free(t); /* cleanup current */ t->method = f->method; /* change method */ t->method->ssl_new(t); /* setup new */ } tmp = t->cert; if (f->cert != NULL) { CRYPTO_add(&f->cert->references, 1, CRYPTO_LOCK_SSL_CERT); t->cert = f->cert; } else t->cert = NULL; if (tmp != NULL) ssl_cert_free(tmp); SSL_set_session_id_context(t, f->sid_ctx, f->sid_ctx_length); }
safe
59
cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref) { enum enum_check_fields save_count_cuted_fields= thd->count_cuted_fields; thd->count_cuted_fields= CHECK_FIELD_IGNORE; MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); bool result= 0; for (store_key **copy=ref->key_copy ; *copy ; copy++) { if ((*copy)->copy() & 1) { result= 1; break; } } thd->count_cuted_fields= save_count_cuted_fields; dbug_tmp_restore_column_map(&table->write_set, old_map); return result; }
safe
60
int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) { struct compat_timespec __user *ctv; int err; struct timespec ts; if (COMPAT_USE_64BIT_TIME) return sock_get_timestampns (sk, userstamp); ctv = (struct compat_timespec __user *) userstamp; err = -ENOENT; if (!sock_flag(sk, SOCK_TIMESTAMP)) sock_enable_timestamp(sk, SOCK_TIMESTAMP); ts = ktime_to_timespec(sk->sk_stamp); if (ts.tv_sec == -1) return err; if (ts.tv_sec == 0) { sk->sk_stamp = ktime_get_real(); ts = ktime_to_timespec(sk->sk_stamp); } err = 0; if (put_user(ts.tv_sec, &ctv->tv_sec) || put_user(ts.tv_nsec, &ctv->tv_nsec)) err = -EFAULT; return err; }
safe
61
ath6kl_usb_alloc_urb_from_pipe(struct ath6kl_usb_pipe *pipe) { struct ath6kl_urb_context *urb_context = NULL; unsigned long flags; /* bail if this pipe is not initialized */ if (!pipe->ar_usb) return NULL; spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); if (!list_empty(&pipe->urb_list_head)) { urb_context = list_first_entry(&pipe->urb_list_head, struct ath6kl_urb_context, link); list_del(&urb_context->link); pipe->urb_cnt--; } spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags); return urb_context; }
safe
62
static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case ARPT_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 0); break; case ARPT_SO_GET_ENTRIES: ret = get_entries(sock_net(sk), user, len); break; case ARPT_SO_GET_REVISION_TARGET: { struct xt_get_revision rev; if (*len != sizeof(rev)) { ret = -EINVAL; break; } if (copy_from_user(&rev, user, sizeof(rev)) != 0) { ret = -EFAULT; break; } rev.name[sizeof(rev.name)-1] = 0; try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name, rev.revision, 1, &ret), "arpt_%s", rev.name); break; } default: duprintf("do_arpt_get_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; }
safe
63
mysql_ssl_set(MYSQL *mysql __attribute__((unused)) , const char *key __attribute__((unused)), const char *cert __attribute__((unused)), const char *ca __attribute__((unused)), const char *capath __attribute__((unused)), const char *cipher __attribute__((unused))) { DBUG_ENTER("mysql_ssl_set"); #if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY) my_free(mysql->options.ssl_key); my_free(mysql->options.ssl_cert); my_free(mysql->options.ssl_ca); my_free(mysql->options.ssl_capath); my_free(mysql->options.ssl_cipher); mysql->options.ssl_key= strdup_if_not_null(key); mysql->options.ssl_cert= strdup_if_not_null(cert); mysql->options.ssl_ca= strdup_if_not_null(ca); mysql->options.ssl_capath= strdup_if_not_null(capath); mysql->options.ssl_cipher= strdup_if_not_null(cipher); mysql->options.use_ssl= TRUE; #endif /* HAVE_OPENSSL && !EMBEDDED_LIBRARY */ DBUG_RETURN(0); }
safe
64
static void fdctrl_handle_relative_seek_in(FDCtrl *fdctrl, int direction) { FDrive *cur_drv; SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK); cur_drv = get_cur_drv(fdctrl); if (fdctrl->fifo[2] + cur_drv->track >= cur_drv->max_track) { fd_seek(cur_drv, cur_drv->head, cur_drv->max_track - 1, cur_drv->sect, 1); } else { fd_seek(cur_drv, cur_drv->head, cur_drv->track + fdctrl->fifo[2], cur_drv->sect, 1); } fdctrl_to_command_phase(fdctrl); /* Raise Interrupt */ fdctrl->status0 |= FD_SR0_SEEK; fdctrl_raise_irq(fdctrl); }
safe
65
mono_image_get_generic_param_info (MonoReflectionGenericParam *gparam, guint32 owner, MonoDynamicImage *assembly) { GenericParamTableEntry *entry; /* * The GenericParam table must be sorted according to the `owner' field. * We need to do this sorting prior to writing the GenericParamConstraint * table, since we have to use the final GenericParam table indices there * and they must also be sorted. */ entry = g_new0 (GenericParamTableEntry, 1); entry->owner = owner; /* FIXME: track where gen_params should be freed and remove the GC root as well */ MONO_GC_REGISTER_ROOT_IF_MOVING (entry->gparam); entry->gparam = gparam; g_ptr_array_add (assembly->gen_params, entry); }
safe
66
TEST_P(DownstreamProtocolIntegrationTest, ManyRequestTrailersRejected) { // Default header (and trailer) count limit is 100. config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1()); config_helper_.addConfigModifier(setEnableUpstreamTrailersHttp1()); Http::TestRequestTrailerMapImpl request_trailers; for (int i = 0; i < 150; i++) { // TODO(alyssawilk) QUIC fails without the trailers being distinct because // the checks are done before transformation. Either make the transformation // use commas, or do QUIC checks before and after. request_trailers.addCopy(absl::StrCat("trailer", i), std::string(1, 'a')); } initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); auto encoder_decoder = codec_client_->startRequest(default_request_headers_); request_encoder_ = &encoder_decoder.first; auto response = std::move(encoder_decoder.second); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); EXPECT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); codec_client_->sendData(*request_encoder_, 1, false); codec_client_->sendTrailers(*request_encoder_, request_trailers); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); EXPECT_EQ("431", response->headers().getStatusValue()); } else { ASSERT_TRUE(response->waitForReset()); codec_client_->close(); } }
safe
67
static int sd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct gendisk *disk = bdev->bd_disk; struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdp = sdkp->device; void __user *p = (void __user *)arg; int error; SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, " "cmd=0x%x\n", disk->disk_name, cmd)); error = scsi_verify_blk_ioctl(bdev, cmd); if (error < 0) return error; /* * If we are in the middle of error recovery, don't let anyone * else try and use this device. Also, if error recovery fails, it * may try and take the device offline, in which case all further * access to the device is prohibited. */ error = scsi_nonblockable_ioctl(sdp, cmd, p, (mode & FMODE_NDELAY) != 0); if (!scsi_block_when_processing_errors(sdp) || !error) goto out; /* * Send SCSI addressing ioctls directly to mid level, send other * ioctls to block level and then onto mid level if they can't be * resolved. */ switch (cmd) { case SCSI_IOCTL_GET_IDLUN: case SCSI_IOCTL_GET_BUS_NUMBER: error = scsi_ioctl(sdp, cmd, p); break; default: error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p); if (error != -ENOTTY) break; error = scsi_ioctl(sdp, cmd, p); break; } out: return error; }
safe
68
PJ_DEF(unsigned) pjmedia_sdp_attr_remove_all(unsigned *count, pjmedia_sdp_attr *attr_array[], const char *name) { unsigned i, removed = 0; pj_str_t attr_name; PJ_ASSERT_RETURN(count && attr_array && name, PJ_EINVAL); PJ_ASSERT_RETURN(*count <= PJMEDIA_MAX_SDP_ATTR, PJ_ETOOMANY); attr_name.ptr = (char*)name; attr_name.slen = pj_ansi_strlen(name); for (i=0; i<*count; ) { if (pj_strcmp(&attr_array[i]->name, &attr_name)==0) { pj_array_erase(attr_array, sizeof(pjmedia_sdp_attr*), *count, i); --(*count); ++removed; } else { ++i; } } return removed; }
safe
69
static void aesni_gcm_dec_avx2(void *ctx, u8 *out, const u8 *in, unsigned long ciphertext_len, u8 *iv, u8 *hash_subkey, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len) { struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) { aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad, aad_len, auth_tag, auth_tag_len); } else if (ciphertext_len < AVX_GEN4_OPTSIZE) { aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad, aad_len, auth_tag, auth_tag_len); } else { aesni_gcm_precomp_avx_gen4(ctx, hash_subkey); aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad, aad_len, auth_tag, auth_tag_len); } }
safe
70
static void xgmac_enet_realize(DeviceState *dev, Error **errp) { SysBusDevice *sbd = SYS_BUS_DEVICE(dev); XgmacState *s = XGMAC(dev); memory_region_init_io(&s->iomem, OBJECT(s), &enet_mem_ops, s, "xgmac", 0x1000); sysbus_init_mmio(sbd, &s->iomem); sysbus_init_irq(sbd, &s->sbd_irq); sysbus_init_irq(sbd, &s->pmt_irq); sysbus_init_irq(sbd, &s->mci_irq); qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_xgmac_enet_info, &s->conf, object_get_typename(OBJECT(dev)), dev->id, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); s->regs[XGMAC_ADDR_HIGH(0)] = (s->conf.macaddr.a[5] << 8) | s->conf.macaddr.a[4]; s->regs[XGMAC_ADDR_LOW(0)] = (s->conf.macaddr.a[3] << 24) | (s->conf.macaddr.a[2] << 16) | (s->conf.macaddr.a[1] << 8) | s->conf.macaddr.a[0]; }
safe
71
void FoFiTrueType::cvtCharStrings(char **encoding, int *codeToGID, FoFiOutputFunc outputFunc, void *outputStream) { char *name; GooString *buf; char buf2[16]; int i, k; // always define '.notdef' (*outputFunc)(outputStream, "/CharStrings 256 dict dup begin\n", 32); (*outputFunc)(outputStream, "/.notdef 0 def\n", 15); // if there's no 'cmap' table, punt if (nCmaps == 0) { goto err; } // map char name to glyph index: // 1. use encoding to map name to char code // 2. use codeToGID to map char code to glyph index // N.B. We do this in reverse order because font subsets can have // weird encodings that use the same character name twice, and // the first definition is probably the one we want. k = 0; // make gcc happy for (i = 255; i >= 0; --i) { if (encoding) { name = encoding[i]; } else { sprintf(buf2, "c%02x", i); name = buf2; } if (name && strcmp(name, ".notdef")) { k = codeToGID[i]; // note: Distiller (maybe Adobe's PS interpreter in general) // doesn't like TrueType fonts that have CharStrings entries // which point to nonexistent glyphs, hence the (k < nGlyphs) // test if (k > 0 && k < nGlyphs) { (*outputFunc)(outputStream, "/", 1); (*outputFunc)(outputStream, name, strlen(name)); buf = GooString::format(" {0:d} def\n", k); (*outputFunc)(outputStream, buf->getCString(), buf->getLength()); delete buf; } } } err: (*outputFunc)(outputStream, "end readonly def\n", 17); }
safe
72
static int mxf_read_track(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset) { MXFTrack *track = arg; switch(tag) { case 0x4801: track->track_id = avio_rb32(pb); break; case 0x4804: avio_read(pb, track->track_number, 4); break; case 0x4802: mxf_read_utf16be_string(pb, size, &track->name); break; case 0x4b01: track->edit_rate.num = avio_rb32(pb); track->edit_rate.den = avio_rb32(pb); break; case 0x4803: avio_read(pb, track->sequence_ref, 16); break; } return 0; }
safe
73
int modbus_mask_write_register(modbus_t *ctx, int addr, uint16_t and_mask, uint16_t or_mask) { int rc; int req_length; /* The request length can not exceed _MIN_REQ_LENGTH - 2 and 4 bytes to * store the masks. The ugly substraction is there to remove the 'nb' value * (2 bytes) which is not used. */ uint8_t req[_MIN_REQ_LENGTH + 2]; req_length = ctx->backend->build_request_basis(ctx, MODBUS_FC_MASK_WRITE_REGISTER, addr, 0, req); /* HACKISH, count is not used */ req_length -= 2; req[req_length++] = and_mask >> 8; req[req_length++] = and_mask & 0x00ff; req[req_length++] = or_mask >> 8; req[req_length++] = or_mask & 0x00ff; rc = send_msg(ctx, req, req_length); if (rc > 0) { /* Used by write_bit and write_register */ uint8_t rsp[MAX_MESSAGE_LENGTH]; rc = _modbus_receive_msg(ctx, rsp, MSG_CONFIRMATION); if (rc == -1) return -1; rc = check_confirmation(ctx, req, rsp, rc); } return rc; }
safe
74
int orangefs_init_acl(struct inode *inode, struct inode *dir) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct posix_acl *default_acl, *acl; umode_t mode = inode->i_mode; int error = 0; ClearModeFlag(orangefs_inode); error = posix_acl_create(dir, &mode, &default_acl, &acl); if (error) return error; if (default_acl) { error = orangefs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); posix_acl_release(default_acl); } if (acl) { if (!error) error = orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS); posix_acl_release(acl); } /* If mode of the inode was changed, then do a forcible ->setattr */ if (mode != inode->i_mode) { SetModeFlag(orangefs_inode); inode->i_mode = mode; orangefs_flush_inode(inode); } return error; }
safe
75
lys_is_disabled(const struct lys_node *node, int recursive) { FUN_IN; int i; if (!node) { return NULL; } check: if (node->nodetype != LYS_INPUT && node->nodetype != LYS_OUTPUT) { /* input/output does not have if-feature, so skip them */ /* check local if-features */ for (i = 0; i < node->iffeature_size; i++) { if (!resolve_iffeature(&node->iffeature[i])) { return node; } } } if (!recursive) { return NULL; } /* go through parents */ if (node->nodetype == LYS_AUGMENT) { /* go to parent actually means go to the target node */ node = ((struct lys_node_augment *)node)->target; if (!node) { /* unresolved augment, let's say it's enabled */ return NULL; } } else if (node->nodetype == LYS_EXT) { return NULL; } else if (node->parent) { node = node->parent; } else { return NULL; } if (recursive == 2) { /* continue only if the node cannot have a data instance */ if (node->nodetype & (LYS_CONTAINER | LYS_LEAF | LYS_LEAFLIST | LYS_LIST)) { return NULL; } } goto check; }
safe
76
static CURLMcode add_next_timeout(struct timeval now, struct Curl_multi *multi, struct Curl_easy *d) { struct timeval *tv = &d->state.expiretime; struct curl_llist *list = d->state.timeoutlist; struct curl_llist_element *e; /* move over the timeout list for this specific handle and remove all timeouts that are now passed tense and store the next pending timeout in *tv */ for(e = list->head; e;) { struct curl_llist_element *n = e->next; long diff = curlx_tvdiff(*(struct timeval *)e->ptr, now); if(diff <= 0) /* remove outdated entry */ Curl_llist_remove(list, e, NULL); else /* the list is sorted so get out on the first mismatch */ break; e = n; } e = list->head; if(!e) { /* clear the expire times within the handles that we remove from the splay tree */ tv->tv_sec = 0; tv->tv_usec = 0; } else { /* copy the first entry to 'tv' */ memcpy(tv, e->ptr, sizeof(*tv)); /* remove first entry from list */ Curl_llist_remove(list, e, NULL); /* insert this node again into the splay */ multi->timetree = Curl_splayinsert(*tv, multi->timetree, &d->state.timenode); } return CURLM_OK; }
safe
77
static unsigned int xdr_shrink_pagelen(struct xdr_buf *buf, unsigned int len) { unsigned int shift, buflen = buf->len - buf->head->iov_len; WARN_ON_ONCE(len > buf->page_len); if (buf->head->iov_len >= buf->len || len > buflen) buflen = len; if (buf->page_len > buflen) { buf->buflen -= buf->page_len - buflen; buf->page_len = buflen; } if (len >= buf->page_len) return 0; shift = buf->page_len - len; xdr_buf_try_expand(buf, shift); xdr_buf_pages_shift_right(buf, len, buflen - len, shift); buf->page_len = len; buf->len -= shift; buf->buflen -= shift; return shift; }
safe
78
cmsSEQ* CMSEXPORT cmsAllocProfileSequenceDescription(cmsContext ContextID, cmsUInt32Number n) { cmsSEQ* Seq; cmsUInt32Number i; if (n == 0) return NULL; // In a absolutely arbitrary way, I hereby decide to allow a maxim of 255 profiles linked // in a devicelink. It makes not sense anyway and may be used for exploits, so let's close the door! if (n > 255) return NULL; Seq = (cmsSEQ*) _cmsMallocZero(ContextID, sizeof(cmsSEQ)); if (Seq == NULL) return NULL; Seq -> ContextID = ContextID; Seq -> seq = (cmsPSEQDESC*) _cmsCalloc(ContextID, n, sizeof(cmsPSEQDESC)); Seq -> n = n; if (Seq -> seq == NULL) { _cmsFree(ContextID, Seq); return NULL; } for (i=0; i < n; i++) { Seq -> seq[i].Manufacturer = NULL; Seq -> seq[i].Model = NULL; Seq -> seq[i].Description = NULL; } return Seq; }
safe
79
static PHP_NAMED_FUNCTION(zif_zip_read) { zval *zip_dp; zip_read_rsrc *zr_rsrc; int ret; zip_rsrc *rsrc_int; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zip_dp) == FAILURE) { return; } if ((rsrc_int = (zip_rsrc *)zend_fetch_resource(Z_RES_P(zip_dp), le_zip_dir_name, le_zip_dir)) == NULL) { RETURN_FALSE; } if (rsrc_int && rsrc_int->za) { if (rsrc_int->index_current >= rsrc_int->num_files) { RETURN_FALSE; } zr_rsrc = emalloc(sizeof(zip_read_rsrc)); ret = zip_stat_index(rsrc_int->za, rsrc_int->index_current, 0, &zr_rsrc->sb); if (ret != 0) { efree(zr_rsrc); RETURN_FALSE; } zr_rsrc->zf = zip_fopen_index(rsrc_int->za, rsrc_int->index_current, 0); if (zr_rsrc->zf) { rsrc_int->index_current++; RETURN_RES(zend_register_resource(zr_rsrc, le_zip_entry)); } else { efree(zr_rsrc); RETURN_FALSE; } } else { RETURN_FALSE; } }
safe
80
START_TEST(ratelimit_helpers) { struct ratelimit rl; unsigned int i, j; /* 10 attempts every 1000ms */ ratelimit_init(&rl, ms2us(1000), 10); for (j = 0; j < 3; ++j) { /* a burst of 9 attempts must succeed */ for (i = 0; i < 9; ++i) { ck_assert_int_eq(ratelimit_test(&rl), RATELIMIT_PASS); } /* the 10th attempt reaches the threshold */ ck_assert_int_eq(ratelimit_test(&rl), RATELIMIT_THRESHOLD); /* ..then further attempts must fail.. */ ck_assert_int_eq(ratelimit_test(&rl), RATELIMIT_EXCEEDED); /* ..regardless of how often we try. */ for (i = 0; i < 100; ++i) { ck_assert_int_eq(ratelimit_test(&rl), RATELIMIT_EXCEEDED); } /* ..even after waiting 20ms */ msleep(100); for (i = 0; i < 100; ++i) { ck_assert_int_eq(ratelimit_test(&rl), RATELIMIT_EXCEEDED); } /* but after 1000ms the counter is reset */ msleep(950); /* +50ms to account for time drifts */ } }
safe
81
static int compat_getdrvstat(int drive, bool poll, struct compat_floppy_drive_struct __user *arg) { struct compat_floppy_drive_struct v; memset(&v, 0, sizeof(struct compat_floppy_drive_struct)); mutex_lock(&floppy_mutex); if (poll) { if (lock_fdc(drive)) goto Eintr; if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) goto Eintr; process_fd_request(); } v.spinup_date = UDRS->spinup_date; v.select_date = UDRS->select_date; v.first_read_date = UDRS->first_read_date; v.probed_format = UDRS->probed_format; v.track = UDRS->track; v.maxblock = UDRS->maxblock; v.maxtrack = UDRS->maxtrack; v.generation = UDRS->generation; v.keep_data = UDRS->keep_data; v.fd_ref = UDRS->fd_ref; v.fd_device = UDRS->fd_device; v.last_checked = UDRS->last_checked; v.dmabuf = (uintptr_t)UDRS->dmabuf; v.bufblocks = UDRS->bufblocks; mutex_unlock(&floppy_mutex); if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_struct))) return -EFAULT; return 0; Eintr: mutex_unlock(&floppy_mutex); return -EINTR; }
safe
82
long posix_cpu_nsleep_restart(struct restart_block *restart_block) { clockid_t which_clock = restart_block->arg0; struct timespec __user *rmtp; struct timespec t; struct itimerspec it; int error; rmtp = (struct timespec __user *) restart_block->arg1; t.tv_sec = restart_block->arg2; t.tv_nsec = restart_block->arg3; restart_block->fn = do_no_restart_syscall; error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it); if (error == -ERESTART_RESTARTBLOCK) { /* * Report back to the user the time still remaining. */ if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp)) return -EFAULT; restart_block->fn = posix_cpu_nsleep_restart; restart_block->arg0 = which_clock; restart_block->arg1 = (unsigned long) rmtp; restart_block->arg2 = t.tv_sec; restart_block->arg3 = t.tv_nsec; } return error; }
safe
83
uint32 CSoundFile::CalculateXParam(PATTERNINDEX pat, ROWINDEX row, CHANNELINDEX chn, bool *isExtended) const { if(isExtended != nullptr) *isExtended = false; ROWINDEX maxCommands = 4; const ModCommand *m = Patterns[pat].GetpModCommand(row, chn); uint32 val = m->param; switch(m->command) { case CMD_OFFSET: // 24 bit command maxCommands = 2; break; case CMD_TEMPO: case CMD_PATTERNBREAK: case CMD_POSITIONJUMP: // 16 bit command maxCommands = 1; break; default: return val; } const bool xmTempoFix = m->command == CMD_TEMPO && GetType() == MOD_TYPE_XM; ROWINDEX numRows = std::min(Patterns[pat].GetNumRows() - row - 1, maxCommands); while(numRows > 0) { m += Patterns[pat].GetNumChannels(); if(m->command != CMD_XPARAM) { break; } if(xmTempoFix && val < 256) { // With XM, 0x20 is the lowest tempo. Anything below changes ticks per row. val -= 0x20; } val = (val << 8) | m->param; numRows--; if(isExtended != nullptr) *isExtended = true; } return val; }
safe
84
PJ_DEF(char*) pjsip_rx_data_get_info(pjsip_rx_data *rdata) { char obj_name[PJ_MAX_OBJ_NAME]; PJ_ASSERT_RETURN(rdata->msg_info.msg, "INVALID MSG"); if (rdata->msg_info.info) return rdata->msg_info.info; pj_ansi_strcpy(obj_name, "rdata"); pj_ansi_snprintf(obj_name+5, sizeof(obj_name)-5, "%p", rdata); rdata->msg_info.info = get_msg_info(rdata->tp_info.pool, obj_name, rdata->msg_info.msg); return rdata->msg_info.info; }
safe
85
static inline int user_regset_copyout_zero(unsigned int *pos, unsigned int *count, void **kbuf, void __user **ubuf, const int start_pos, const int end_pos) { if (*count == 0) return 0; BUG_ON(*pos < start_pos); if (end_pos < 0 || *pos < end_pos) { unsigned int copy = (end_pos < 0 ? *count : min(*count, end_pos - *pos)); if (*kbuf) { memset(*kbuf, 0, copy); *kbuf += copy; } else if (__clear_user(*ubuf, copy)) return -EFAULT; else *ubuf += copy; *pos += copy; *count -= copy; } return 0; }
safe
86
rb_str_buf_append(str, str2) VALUE str, str2; { long capa, len; rb_str_modify(str); if (FL_TEST(str, STR_ASSOC)) { FL_UNSET(str, STR_ASSOC); capa = RSTRING(str)->aux.capa = RSTRING(str)->len; } else { capa = RSTRING(str)->aux.capa; } len = RSTRING(str)->len+RSTRING(str2)->len; if (capa <= len) { while (len > capa) { capa = (capa + 1) * 2; } RESIZE_CAPA(str, capa); } memcpy(RSTRING(str)->ptr + RSTRING(str)->len, RSTRING(str2)->ptr, RSTRING(str2)->len); RSTRING(str)->len += RSTRING(str2)->len; RSTRING(str)->ptr[RSTRING(str)->len] = '\0'; /* sentinel */ OBJ_INFECT(str, str2); return str; }
safe
87
static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: return check_packet_access(env, regno, reg->off, access_size, zero_size_allowed); case PTR_TO_MAP_VALUE: if (check_map_access_type(env, regno, reg->off, access_size, meta && meta->raw_mode ? BPF_WRITE : BPF_READ)) return -EACCES; return check_map_access(env, regno, reg->off, access_size, zero_size_allowed); case PTR_TO_MEM: return check_mem_region_access(env, regno, reg->off, access_size, reg->mem_size, zero_size_allowed); case PTR_TO_RDONLY_BUF: if (meta && meta->raw_mode) return -EACCES; return check_buffer_access(env, reg, regno, reg->off, access_size, zero_size_allowed, "rdonly", &env->prog->aux->max_rdonly_access); case PTR_TO_RDWR_BUF: return check_buffer_access(env, reg, regno, reg->off, access_size, zero_size_allowed, "rdwr", &env->prog->aux->max_rdwr_access); default: /* scalar_value|ptr_to_stack or invalid ptr */ return check_stack_boundary(env, regno, access_size, zero_size_allowed, meta); } }
safe
88
static void *ffs_acquire_dev(const char *dev_name) { struct ffs_dev *ffs_dev; ENTER(); ffs_dev_lock(); ffs_dev = _ffs_find_dev(dev_name); if (!ffs_dev) ffs_dev = ERR_PTR(-ENOENT); else if (ffs_dev->mounted) ffs_dev = ERR_PTR(-EBUSY); else if (ffs_dev->ffs_acquire_dev_callback && ffs_dev->ffs_acquire_dev_callback(ffs_dev)) ffs_dev = ERR_PTR(-ENOENT); else ffs_dev->mounted = true; ffs_dev_unlock(); return ffs_dev; }
safe
89
static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi, void __user *arg) { int ret; struct cdrom_subchnl q; u_char requested, back; if (copy_from_user(&q, (struct cdrom_subchnl __user *)arg, sizeof(q))) return -EFAULT; requested = q.cdsc_format; if (!((requested == CDROM_MSF) || (requested == CDROM_LBA))) return -EINVAL; ret = cdrom_read_subchannel(cdi, &q, 0); if (ret) return ret; back = q.cdsc_format; /* local copy */ sanitize_format(&q.cdsc_absaddr, &back, requested); sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested); if (copy_to_user((struct cdrom_subchnl __user *)arg, &q, sizeof(q))) return -EFAULT; /* cd_dbg(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */ return 0; }
safe
90
void AsyncConnection::DelayedDelivery::flush() { stop_dispatch = true; center->submit_to( center->get_id(), [this] () mutable { std::lock_guard<std::mutex> l(delay_lock); while (!delay_queue.empty()) { Message *m = delay_queue.front().second; if (msgr->ms_can_fast_dispatch(m)) { dispatch_queue->fast_dispatch(m); } else { dispatch_queue->enqueue(m, m->get_priority(), conn_id); } delay_queue.pop_front(); } for (auto i : register_time_events) center->delete_time_event(i); register_time_events.clear(); stop_dispatch = false; }, true); }
safe
91
void addrconf_dad_failure(struct inet6_ifaddr *ifp) { struct inet6_dev *idev = ifp->idev; if (addrconf_dad_end(ifp)) { in6_ifa_put(ifp); return; } net_info_ratelimited("%s: IPv6 duplicate address %pI6c detected!\n", ifp->idev->dev->name, &ifp->addr); if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) { struct in6_addr addr; addr.s6_addr32[0] = htonl(0xfe800000); addr.s6_addr32[1] = 0; if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) && ipv6_addr_equal(&ifp->addr, &addr)) { /* DAD failed for link-local based on MAC address */ idev->cnf.disable_ipv6 = 1; pr_info("%s: IPv6 being disabled!\n", ifp->idev->dev->name); } } spin_lock_bh(&ifp->state_lock); /* transition from _POSTDAD to _ERRDAD */ ifp->state = INET6_IFADDR_STATE_ERRDAD; spin_unlock_bh(&ifp->state_lock); addrconf_mod_dad_work(ifp, 0); }
safe
92
static int buffer_want_with_caps(const git_remote_head *head, transport_smart_caps *caps, git_buf *buf) { git_buf str = GIT_BUF_INIT; char oid[GIT_OID_HEXSZ +1] = {0}; size_t len; /* Prefer multi_ack_detailed */ if (caps->multi_ack_detailed) git_buf_puts(&str, GIT_CAP_MULTI_ACK_DETAILED " "); else if (caps->multi_ack) git_buf_puts(&str, GIT_CAP_MULTI_ACK " "); /* Prefer side-band-64k if the server supports both */ if (caps->side_band_64k) git_buf_printf(&str, "%s ", GIT_CAP_SIDE_BAND_64K); else if (caps->side_band) git_buf_printf(&str, "%s ", GIT_CAP_SIDE_BAND); if (caps->include_tag) git_buf_puts(&str, GIT_CAP_INCLUDE_TAG " "); if (caps->thin_pack) git_buf_puts(&str, GIT_CAP_THIN_PACK " "); if (caps->ofs_delta) git_buf_puts(&str, GIT_CAP_OFS_DELTA " "); if (git_buf_oom(&str)) return -1; len = strlen("XXXXwant ") + GIT_OID_HEXSZ + 1 /* NUL */ + git_buf_len(&str) + 1 /* LF */; if (len > 0xffff) { giterr_set(GITERR_NET, "Tried to produce packet with invalid length %" PRIuZ, len); return -1; } git_buf_grow_by(buf, len); git_oid_fmt(oid, &head->oid); git_buf_printf(buf, "%04xwant %s %s\n", (unsigned int)len, oid, git_buf_cstr(&str)); git_buf_free(&str); GITERR_CHECK_ALLOC_BUF(buf); return 0; }
safe
93
template<typename tp, typename tf, typename tc, typename to> CImg<T>& draw_object3d(LibBoard::Board& board, const float x0, const float y0, const float z0, const CImg<tp>& vertices, const CImgList<tf>& primitives, const CImgList<tc>& colors, const CImg<to>& opacities, const unsigned int render_type=4, const bool is_double_sided=false, const float focale=700, const float lightx=0, const float lighty=0, const float lightz=-5e8, const float specular_lightness=0.2f, const float specular_shininess=0.1f, const float g_opacity=1) { return draw_object3d(board,x0,y0,z0,vertices,primitives,colors,opacities,render_type, is_double_sided,focale,lightx,lighty,lightz, specular_lightness,specular_shininess,g_opacity,CImg<floatT>::empty());
safe
94
GF_Err gf_isom_update_sample(GF_ISOFile *movie, u32 trackNumber, u32 sampleNumber, GF_ISOSample *sample, Bool data_only) { GF_Err e; GF_TrackBox *trak; e = CanAccessMovie(movie, GF_ISOM_OPEN_EDIT); if (e) return e; trak = gf_isom_get_track_from_file(movie, trackNumber); if (!trak) return GF_BAD_PARAM; e = unpack_track(trak); if (e) return e; //block for hint tracks if (trak->Media->handler->handlerType == GF_ISOM_MEDIA_HINT) return GF_BAD_PARAM; //REWRITE ANY OD STUFF if (trak->Media->handler->handlerType == GF_ISOM_MEDIA_OD) { GF_ISOSample *od_sample = NULL; e = Media_ParseODFrame(trak->Media, sample, &od_sample); if (!e) e = Media_UpdateSample(trak->Media, sampleNumber, od_sample, data_only); if (od_sample) gf_isom_sample_del(&od_sample); } else { e = Media_UpdateSample(trak->Media, sampleNumber, sample, data_only); } if (e) return e; if (!movie->keep_utc) trak->Media->mediaHeader->modificationTime = gf_isom_get_mp4time(); gf_isom_disable_inplace_rewrite(movie); return GF_OK; }
safe
95
void fslib_duplicate(const char *full_path) { assert(full_path); struct stat s; if (stat(full_path, &s) != 0 || s.st_uid != 0 || access(full_path, R_OK)) return; char *dest_dir = build_dest_dir(full_path); // don't copy it if the file is already there char *ptr = strrchr(full_path, '/'); if (!ptr) return; ptr++; if (*ptr == '\0') return; char *name; if (asprintf(&name, "%s/%s", dest_dir, ptr) == -1) errExit("asprintf"); if (stat(name, &s) == 0) { free(name); return; } free(name); if (arg_debug || arg_debug_private_lib) printf(" copying %s to private %s\n", full_path, dest_dir); sbox_run(SBOX_ROOT| SBOX_SECCOMP, 4, PATH_FCOPY, "--follow-link", full_path, dest_dir); report_duplication(full_path); lib_cnt++; }
safe
96
static int sdp_parse_fmtp_config_h264(AVFormatContext *s, AVStream *stream, PayloadContext *h264_data, const char *attr, const char *value) { AVCodecParameters *par = stream->codecpar; if (!strcmp(attr, "packetization-mode")) { av_log(s, AV_LOG_DEBUG, "RTP Packetization Mode: %d\n", atoi(value)); h264_data->packetization_mode = atoi(value); /* * Packetization Mode: * 0 or not present: Single NAL mode (Only nals from 1-23 are allowed) * 1: Non-interleaved Mode: 1-23, 24 (STAP-A), 28 (FU-A) are allowed. * 2: Interleaved Mode: 25 (STAP-B), 26 (MTAP16), 27 (MTAP24), 28 (FU-A), * and 29 (FU-B) are allowed. */ if (h264_data->packetization_mode > 1) av_log(s, AV_LOG_ERROR, "Interleaved RTP mode is not supported yet.\n"); } else if (!strcmp(attr, "profile-level-id")) { if (strlen(value) == 6) parse_profile_level_id(s, h264_data, value); } else if (!strcmp(attr, "sprop-parameter-sets")) { int ret; if (*value == 0 || value[strlen(value) - 1] == ',') { av_log(s, AV_LOG_WARNING, "Missing PPS in sprop-parameter-sets, ignoring\n"); return 0; } par->extradata_size = 0; av_freep(&par->extradata); ret = ff_h264_parse_sprop_parameter_sets(s, &par->extradata, &par->extradata_size, value); av_log(s, AV_LOG_DEBUG, "Extradata set to %p (size: %d)\n", par->extradata, par->extradata_size); return ret; } return 0; }
safe
97
nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla, u8 family, bool autoload) { const struct nft_chain_type *type; type = __nf_tables_chain_type_lookup(nla, family); if (type != NULL) return type; lockdep_nfnl_nft_mutex_not_held(); #ifdef CONFIG_MODULES if (autoload) { if (nft_request_module(net, "nft-chain-%u-%.*s", family, nla_len(nla), (const char *)nla_data(nla)) == -EAGAIN) return ERR_PTR(-EAGAIN); } #endif return ERR_PTR(-ENOENT); }
safe
98
parsecreateopres(netdissect_options *ndo, const uint32_t *dp, int verbose) { int er; if (!(dp = parsestatus(ndo, dp, &er))) return (0); if (er) dp = parse_wcc_data(ndo, dp, verbose); else { ND_TCHECK(dp[0]); if (!EXTRACT_32BITS(&dp[0])) return (dp + 1); dp++; if (!(dp = parsefh(ndo, dp, 1))) return (0); if (verbose) { if (!(dp = parse_post_op_attr(ndo, dp, verbose))) return (0); if (ndo->ndo_vflag > 1) { ND_PRINT((ndo, " dir attr:")); dp = parse_wcc_data(ndo, dp, verbose); } } } return (dp); trunc: return (NULL); }
safe
99

A minified, clean and annotated version of DiverseVul

Dataset Summary

This is a minified, cleaned, and deduplicated version of the DiverseVul dataset.
We publish this version to help practitioners in their code vulnerability detection research.

Data Structure & Overview

  • Number of samples: 23847
  • Features: func (the C/C++ code) and cwe (the CWE weakness; see the table below and the loading sketch that follows it)
  • Supported Programming Languages: C/C++
  • Supported CWE Weaknesses:
    Label    | Description
    ---------|------------
    CWE-119  | Improper Restriction of Operations within the Bounds of a Memory Buffer
    CWE-125  | Out-of-bounds Read
    CWE-20   | Improper Input Validation
    CWE-416  | Use After Free
    CWE-703  | Improper Check or Handling of Exceptional Conditions
    CWE-787  | Out-of-bounds Write
    safe     | Safe code
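
As a quick orientation, the snippet below sketches how the dataset can be loaded and its label distribution inspected with the Hugging Face datasets library. The repository id is the one this card is published under; the single "train" split is an assumption, so adjust it if the split layout differs.

```python
# Minimal loading sketch; the "train" split name is an assumption.
from collections import Counter
from datasets import load_dataset

ds = load_dataset("lemon42-ai/minified-diverseful-multilabels", split="train")

print(ds)                      # expected columns: func (C/C++ code) and cwe (label)
print(Counter(ds["cwe"]))      # per-label counts over the six CWEs plus "safe"

sample = ds[0]
print(sample["cwe"])
print(sample["func"][:300])    # first characters of the function body
```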

Cleaning & Preprocessing

The original dataset contains 18,945 vulnerable functions spanning 150 CWEs and 330,492 non-vulnerable functions extracted from 7,514 commits, according to the original paper.

We constructed the minified version through the following steps. First, we cleaned the original dataset by removing null values and dropping duplicates (keeping only the first occurrence). We then randomly sampled 19,271 'safe' functions with between 50 and 595 tokens (tokenized using ModernBERT-base's tokenizer, special tokens excluded). Most of these functions have between 200 and 595 tokens; a small fraction with between 50 and 200 tokens was randomly added for diversity. The token-length distribution of the safe label is shown below, and a sketch of the token-length filter follows the figure.

[Figure: token-length distribution of the safe functions]
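
For readers who want to reproduce or tweak this filtering step, the sketch below recomputes token lengths and applies the 50-595 window. The ModernBERT-base checkpoint id (answerdotai/ModernBERT-base) and the "train" split name are assumptions; the exclusion of special tokens follows the description above.

```python
# Sketch of the token-length filter described above (assumed checkpoint and split names).
from datasets import load_dataset
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")  # assumed Hub id

def add_token_count(example):
    # Count tokens without special tokens, matching the preprocessing described above.
    ids = tok(example["func"], add_special_tokens=False)["input_ids"]
    return {"n_tokens": len(ids)}

ds = load_dataset("lemon42-ai/minified-diverseful-multilabels", split="train")
ds = ds.map(add_token_count)
in_window = ds.filter(lambda ex: 50 <= ex["n_tokens"] <= 595)
print(f"{len(in_window)} of {len(ds)} functions fall inside the 50-595 token window")
```

Since the published dataset is already filtered, the count should cover nearly all rows; the same recipe can be pointed at the original DiverseVul release to rebuild the dataset with a different token window.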

To construct the unsafe labels, we only kept the six most frequent CWE weaknesses (see table above).
We also noticed that the unsafe functions are very long token-wise: the average token length was around 1200 and the median was around 512.
We built this dataset to fine-tune ModernBERT-base, so for compute reasons we decided to only keep functions with fewer than 595 tokens, just like the safe label. If you need different token lengths, our GitHub repository lets you reconstruct the dataset. The unsafe labels have the following token distribution:

[Figure: token-length distribution of the unsafe functions]

Application

We publish this minified dataset to help researchers with limited compute resources in their code vulnerability research projects.
This dataset was also used to fine-tune ThreatDetect-C-Cpp; a minimal fine-tuning sketch is given below.
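
To illustrate that use case, here is a minimal sketch of training ModernBERT-base as a seven-class classifier over the labels listed above. The checkpoint id, the "train" split name, the 90/10 split, and the hyperparameters are illustrative assumptions, not the exact recipe behind ThreatDetect-C-Cpp.

```python
# Illustrative fine-tuning sketch; checkpoint id, split and hyperparameters are assumptions.
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

labels = ["CWE-119", "CWE-125", "CWE-20", "CWE-416", "CWE-703", "CWE-787", "safe"]
label2id = {name: i for i, name in enumerate(labels)}
id2label = {i: name for name, i in label2id.items()}

tok = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")  # assumed Hub id

def preprocess(example):
    # Functions are already capped at 595 tokens, so truncation here is just a safeguard.
    enc = tok(example["func"], truncation=True, max_length=595)
    enc["labels"] = label2id[example["cwe"]]
    return enc

ds = load_dataset("lemon42-ai/minified-diverseful-multilabels", split="train")
ds = ds.map(preprocess, remove_columns=ds.column_names)
ds = ds.train_test_split(test_size=0.1, seed=42)

model = AutoModelForSequenceClassification.from_pretrained(
    "answerdotai/ModernBERT-base",
    num_labels=len(labels), id2label=id2label, label2id=label2id)

args = TrainingArguments(output_dir="modernbert-vuln-detect",
                         per_device_train_batch_size=8,
                         num_train_epochs=1)

trainer = Trainer(model=model, args=args,
                  train_dataset=ds["train"], eval_dataset=ds["test"],
                  tokenizer=tok)  # the default collator pads each batch dynamically
trainer.train()
```

In practice you would add evaluation metrics and possibly class weighting to handle the label imbalance; this sketch only shows the basic wiring.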

More Details & Acknowledgements

You can find all the code in our GitHub repository. We deeply thank the maintainers of the DiverseVul dataset.

