Dataset Viewer
Auto-converted to Parquet
Columns:
- CVE ID: string, 13 to 16 characters
- CVE Page: string, 45 to 48 characters
- CWE ID: string, 85 distinct values
- fixed_func: string, 14 to 241k characters
- func: string, 14 to 241k characters
- vul: int8, values 0 to 1
- __index_level_0__: int64, values 0 to 151k
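The column names and the two vul = 1 rows below suggest that each record pairs a function body (func) with its patched counterpart (fixed_func), plus a 0/1 vul label and CVE/CWE metadata; for vul = 0 rows the two bodies are identical. As a minimal sketch only, assuming the data is hosted on the Hugging Face Hub with these column names, the snippet below loads it with the datasets library and pulls out the vulnerable/fixed pairs. The repository id and the "train" split name are placeholders, not taken from this page.

```python
# Minimal sketch, not official usage: the repo id and split name are placeholders.
from datasets import load_dataset

ds = load_dataset("user/vuln-fix-pairs", split="train")  # hypothetical repo id

# Keep rows flagged as vulnerable; column names follow the schema above.
vulnerable = ds.filter(lambda row: row["vul"] == 1)

# Inspect a few CVE references and (truncated) code bodies.
for row in vulnerable.select(range(min(3, len(vulnerable)))):
    print(row["CVE ID"], row["CWE ID"], row["CVE Page"])
    print("func:      ", row["func"][:120], "...")        # original (vulnerable) body
    print("fixed_func:", row["fixed_func"][:120], "...")  # patched body
```

In each preview row below, the two unlabeled code lines are fixed_func followed by func; for vul = 0 rows they are identical.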
CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2017-7586/ | CWE ID: NOT_APPLICABLE
psf_get_date_str (char *str, int maxlen) { time_t current ; struct tm timedata, *tmptr ; time (&current) ; #if defined (HAVE_GMTIME_R) /* If the re-entrant version is available, use it. */ tmptr = gmtime_r (&current, &timedata) ; #elif defined (HAVE_GMTIME) /* Otherwise use the standard one and copy the data to local storage. */ tmptr = gmtime (&current) ; memcpy (&timedata, tmptr, sizeof (timedata)) ; #else tmptr = NULL ; #endif if (tmptr) snprintf (str, maxlen, "%4d-%02d-%02d %02d:%02d:%02d UTC", 1900 + timedata.tm_year, timedata.tm_mon, timedata.tm_mday, timedata.tm_hour, timedata.tm_min, timedata.tm_sec) ; else snprintf (str, maxlen, "Unknown date") ; return ; } /* psf_get_date_str */
psf_get_date_str (char *str, int maxlen) { time_t current ; struct tm timedata, *tmptr ; time (&current) ; #if defined (HAVE_GMTIME_R) /* If the re-entrant version is available, use it. */ tmptr = gmtime_r (&current, &timedata) ; #elif defined (HAVE_GMTIME) /* Otherwise use the standard one and copy the data to local storage. */ tmptr = gmtime (&current) ; memcpy (&timedata, tmptr, sizeof (timedata)) ; #else tmptr = NULL ; #endif if (tmptr) snprintf (str, maxlen, "%4d-%02d-%02d %02d:%02d:%02d UTC", 1900 + timedata.tm_year, timedata.tm_mon, timedata.tm_mday, timedata.tm_hour, timedata.tm_min, timedata.tm_sec) ; else snprintf (str, maxlen, "Unknown date") ; return ; } /* psf_get_date_str */
vul: 0 | __index_level_0__: 0

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2018-18352/ | CWE ID: NOT_APPLICABLE
void MultibufferDataSource::CreateResourceLoader(int64_t first_byte_position, int64_t last_byte_position) { DCHECK(render_task_runner_->BelongsToCurrentThread()); SetReader(new MultiBufferReader( url_data()->multibuffer(), first_byte_position, last_byte_position, base::Bind(&MultibufferDataSource::ProgressCallback, weak_ptr_))); reader_->SetIsClientAudioElement(is_client_audio_element_); UpdateBufferSizes(); }
void MultibufferDataSource::CreateResourceLoader(int64_t first_byte_position, int64_t last_byte_position) { DCHECK(render_task_runner_->BelongsToCurrentThread()); SetReader(new MultiBufferReader( url_data()->multibuffer(), first_byte_position, last_byte_position, base::Bind(&MultibufferDataSource::ProgressCallback, weak_ptr_))); reader_->SetIsClientAudioElement(is_client_audio_element_); UpdateBufferSizes(); }
vul: 0 | __index_level_0__: 1

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2010-1166/ | CWE ID: NOT_APPLICABLE
fbStore_a2r2g2b2 (FbBits *bits, const CARD32 *values, int x, int width, miIndexedPtr indexed) { int i; CARD8 *pixel = ((CARD8 *) bits) + x; for (i = 0; i < width; ++i) { Splita(READ(values + i)); WRITE(pixel++, ((a ) & 0xc0) | ((r >> 2) & 0x30) | ((g >> 4) & 0x0c) | ((b >> 6) )); } }
fbStore_a2r2g2b2 (FbBits *bits, const CARD32 *values, int x, int width, miIndexedPtr indexed) { int i; CARD8 *pixel = ((CARD8 *) bits) + x; for (i = 0; i < width; ++i) { Splita(READ(values + i)); WRITE(pixel++, ((a ) & 0xc0) | ((r >> 2) & 0x30) | ((g >> 4) & 0x0c) | ((b >> 6) )); } }
vul: 0 | __index_level_0__: 2

CVE ID: CVE-2017-11462 | CVE Page: https://www.cvedetails.com/cve/CVE-2017-11462/ | CWE ID: CWE-415
gss_get_mic (minor_status, context_handle, qop_req, message_buffer, msg_token) OM_uint32 * minor_status; gss_ctx_id_t context_handle; gss_qop_t qop_req; gss_buffer_t message_buffer; gss_buffer_t msg_token; { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_get_mic_args(minor_status, context_handle, qop_req, message_buffer, msg_token); if (status != GSS_S_COMPLETE) return (status); /* * select the approprate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_get_mic) { status = mech->gss_get_mic( minor_status, ctx->internal_ctx_id, qop_req, message_buffer, msg_token); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return(status); } return (GSS_S_BAD_MECH); }
gss_get_mic (minor_status, context_handle, qop_req, message_buffer, msg_token) OM_uint32 * minor_status; gss_ctx_id_t context_handle; gss_qop_t qop_req; gss_buffer_t message_buffer; gss_buffer_t msg_token; { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_get_mic_args(minor_status, context_handle, qop_req, message_buffer, msg_token); if (status != GSS_S_COMPLETE) return (status); /* * select the approprate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) context_handle; mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_get_mic) { status = mech->gss_get_mic( minor_status, ctx->internal_ctx_id, qop_req, message_buffer, msg_token); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return(status); } return (GSS_S_BAD_MECH); }
vul: 1 | __index_level_0__: 5

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2013-0910/ | CWE ID: NOT_APPLICABLE
void PluginServiceImpl::RegisterFilePathWatcher(FilePathWatcher* watcher, const FilePath& path) { bool result = watcher->Watch(path, false, base::Bind(&NotifyPluginDirChanged)); DCHECK(result); }
void PluginServiceImpl::RegisterFilePathWatcher(FilePathWatcher* watcher, const FilePath& path) { bool result = watcher->Watch(path, false, base::Bind(&NotifyPluginDirChanged)); DCHECK(result); }
vul: 0 | __index_level_0__: 6

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2015-5195/ | CWE ID: NOT_APPLICABLE
record_loop_stats( double offset, /* offset */ double freq, /* frequency (PPM) */ double jitter, /* jitter */ double wander, /* wander (PPM) */ int spoll ) { l_fp now; u_long day; if (!stats_control) return; get_systime(&now); filegen_setup(&loopstats, now.l_ui); day = now.l_ui / 86400 + MJD_1900; now.l_ui %= 86400; if (loopstats.fp != NULL) { fprintf(loopstats.fp, "%lu %s %.9f %.3f %.9f %.6f %d\n", day, ulfptoa(&now, 3), offset, freq * 1e6, jitter, wander * 1e6, spoll); fflush(loopstats.fp); } }
record_loop_stats( double offset, /* offset */ double freq, /* frequency (PPM) */ double jitter, /* jitter */ double wander, /* wander (PPM) */ int spoll ) { l_fp now; u_long day; if (!stats_control) return; get_systime(&now); filegen_setup(&loopstats, now.l_ui); day = now.l_ui / 86400 + MJD_1900; now.l_ui %= 86400; if (loopstats.fp != NULL) { fprintf(loopstats.fp, "%lu %s %.9f %.3f %.9f %.6f %d\n", day, ulfptoa(&now, 3), offset, freq * 1e6, jitter, wander * 1e6, spoll); fflush(loopstats.fp); } }
vul: 0 | __index_level_0__: 7

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2017-18222/ | CWE ID: NOT_APPLICABLE
static void hns_xgmac_exc_irq_en(struct mac_driver *drv, u32 en) { u32 clr_vlue = 0xfffffffful; u32 msk_vlue = en ? 0xfffffffful : 0; /*1 is en, 0 is dis*/ dsaf_write_dev(drv, XGMAC_INT_STATUS_REG, clr_vlue); dsaf_write_dev(drv, XGMAC_INT_ENABLE_REG, msk_vlue); }
static void hns_xgmac_exc_irq_en(struct mac_driver *drv, u32 en) { u32 clr_vlue = 0xfffffffful; u32 msk_vlue = en ? 0xfffffffful : 0; /*1 is en, 0 is dis*/ dsaf_write_dev(drv, XGMAC_INT_STATUS_REG, clr_vlue); dsaf_write_dev(drv, XGMAC_INT_ENABLE_REG, msk_vlue); }
vul: 0 | __index_level_0__: 8

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2018-6077/ | CWE ID: NOT_APPLICABLE
void BaseRenderingContext2D::clip(Path2D* dom_path, const String& winding_rule_string) { ClipInternal(dom_path->GetPath(), winding_rule_string); }
void BaseRenderingContext2D::clip(Path2D* dom_path, const String& winding_rule_string) { ClipInternal(dom_path->GetPath(), winding_rule_string); }
vul: 0 | __index_level_0__: 9

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2014-6269/ | CWE ID: NOT_APPLICABLE
int http_process_request(struct session *s, struct channel *req, int an_bit) { struct http_txn *txn = &s->txn; struct http_msg *msg = &txn->req; struct connection *cli_conn = objt_conn(req->prod->end); if (unlikely(msg->msg_state < HTTP_MSG_BODY)) { /* we need more data */ channel_dont_connect(req); return 0; } DPRINTF(stderr,"[%u] %s: session=%p b=%p, exp(r,w)=%u,%u bf=%08x bh=%d analysers=%02x\n", now_ms, __FUNCTION__, s, req, req->rex, req->wex, req->flags, req->buf->i, req->analysers); if (s->fe->comp || s->be->comp) select_compression_request_header(s, req->buf); /* * Right now, we know that we have processed the entire headers * and that unwanted requests have been filtered out. We can do * whatever we want with the remaining request. Also, now we * may have separate values for ->fe, ->be. */ /* * If HTTP PROXY is set we simply get remote server address parsing * incoming request. Note that this requires that a connection is * allocated on the server side. */ if ((s->be->options & PR_O_HTTP_PROXY) && !(s->flags & SN_ADDR_SET)) { struct connection *conn; char *path; /* Note that for now we don't reuse existing proxy connections */ if (unlikely((conn = si_alloc_conn(req->cons, 0)) == NULL)) { txn->req.msg_state = HTTP_MSG_ERROR; txn->status = 500; req->analysers = 0; stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_500)); if (!(s->flags & SN_ERR_MASK)) s->flags |= SN_ERR_RESOURCE; if (!(s->flags & SN_FINST_MASK)) s->flags |= SN_FINST_R; return 0; } path = http_get_path(txn); url2sa(req->buf->p + msg->sl.rq.u, path ? path - (req->buf->p + msg->sl.rq.u) : msg->sl.rq.u_l, &conn->addr.to, NULL); /* if the path was found, we have to remove everything between * req->buf->p + msg->sl.rq.u and path (excluded). If it was not * found, we need to replace from req->buf->p + msg->sl.rq.u for * u_l characters by a single "/". */ if (path) { char *cur_ptr = req->buf->p; char *cur_end = cur_ptr + txn->req.sl.rq.l; int delta; delta = buffer_replace2(req->buf, req->buf->p + msg->sl.rq.u, path, NULL, 0); http_msg_move_end(&txn->req, delta); cur_end += delta; if (http_parse_reqline(&txn->req, HTTP_MSG_RQMETH, cur_ptr, cur_end + 1, NULL, NULL) == NULL) goto return_bad_req; } else { char *cur_ptr = req->buf->p; char *cur_end = cur_ptr + txn->req.sl.rq.l; int delta; delta = buffer_replace2(req->buf, req->buf->p + msg->sl.rq.u, req->buf->p + msg->sl.rq.u + msg->sl.rq.u_l, "/", 1); http_msg_move_end(&txn->req, delta); cur_end += delta; if (http_parse_reqline(&txn->req, HTTP_MSG_RQMETH, cur_ptr, cur_end + 1, NULL, NULL) == NULL) goto return_bad_req; } } /* * 7: Now we can work with the cookies. * Note that doing so might move headers in the request, but * the fields will stay coherent and the URI will not move. * This should only be performed in the backend. */ if ((s->be->cookie_name || s->be->appsession_name || s->fe->capture_name) && !(txn->flags & (TX_CLDENY|TX_CLTARPIT))) manage_client_side_cookies(s, req); /* * 8: the appsession cookie was looked up very early in 1.2, * so let's do the same now. 
*/ /* It needs to look into the URI unless persistence must be ignored */ if ((txn->sessid == NULL) && s->be->appsession_name && !(s->flags & SN_IGNORE_PRST)) { get_srv_from_appsession(s, req->buf->p + msg->sl.rq.u, msg->sl.rq.u_l); } /* add unique-id if "header-unique-id" is specified */ if (!LIST_ISEMPTY(&s->fe->format_unique_id)) { if ((s->unique_id = pool_alloc2(pool2_uniqueid)) == NULL) goto return_bad_req; s->unique_id[0] = '\0'; build_logline(s, s->unique_id, UNIQUEID_LEN, &s->fe->format_unique_id); } if (s->fe->header_unique_id && s->unique_id) { chunk_printf(&trash, "%s: %s", s->fe->header_unique_id, s->unique_id); if (trash.len < 0) goto return_bad_req; if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.str, trash.len) < 0)) goto return_bad_req; } /* * 9: add X-Forwarded-For if either the frontend or the backend * asks for it. */ if ((s->fe->options | s->be->options) & PR_O_FWDFOR) { struct hdr_ctx ctx = { .idx = 0 }; if (!((s->fe->options | s->be->options) & PR_O_FF_ALWAYS) && http_find_header2(s->be->fwdfor_hdr_len ? s->be->fwdfor_hdr_name : s->fe->fwdfor_hdr_name, s->be->fwdfor_hdr_len ? s->be->fwdfor_hdr_len : s->fe->fwdfor_hdr_len, req->buf->p, &txn->hdr_idx, &ctx)) { /* The header is set to be added only if none is present * and we found it, so don't do anything. */ } else if (cli_conn && cli_conn->addr.from.ss_family == AF_INET) { /* Add an X-Forwarded-For header unless the source IP is * in the 'except' network range. */ if ((!s->fe->except_mask.s_addr || (((struct sockaddr_in *)&cli_conn->addr.from)->sin_addr.s_addr & s->fe->except_mask.s_addr) != s->fe->except_net.s_addr) && (!s->be->except_mask.s_addr || (((struct sockaddr_in *)&cli_conn->addr.from)->sin_addr.s_addr & s->be->except_mask.s_addr) != s->be->except_net.s_addr)) { int len; unsigned char *pn; pn = (unsigned char *)&((struct sockaddr_in *)&cli_conn->addr.from)->sin_addr; /* Note: we rely on the backend to get the header name to be used for * x-forwarded-for, because the header is really meant for the backends. * However, if the backend did not specify any option, we have to rely * on the frontend's header name. */ if (s->be->fwdfor_hdr_len) { len = s->be->fwdfor_hdr_len; memcpy(trash.str, s->be->fwdfor_hdr_name, len); } else { len = s->fe->fwdfor_hdr_len; memcpy(trash.str, s->fe->fwdfor_hdr_name, len); } len += snprintf(trash.str + len, trash.size - len, ": %d.%d.%d.%d", pn[0], pn[1], pn[2], pn[3]); if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.str, len) < 0)) goto return_bad_req; } } else if (cli_conn && cli_conn->addr.from.ss_family == AF_INET6) { /* FIXME: for the sake of completeness, we should also support * 'except' here, although it is mostly useless in this case. */ int len; char pn[INET6_ADDRSTRLEN]; inet_ntop(AF_INET6, (const void *)&((struct sockaddr_in6 *)(&cli_conn->addr.from))->sin6_addr, pn, sizeof(pn)); /* Note: we rely on the backend to get the header name to be used for * x-forwarded-for, because the header is really meant for the backends. * However, if the backend did not specify any option, we have to rely * on the frontend's header name. 
*/ if (s->be->fwdfor_hdr_len) { len = s->be->fwdfor_hdr_len; memcpy(trash.str, s->be->fwdfor_hdr_name, len); } else { len = s->fe->fwdfor_hdr_len; memcpy(trash.str, s->fe->fwdfor_hdr_name, len); } len += snprintf(trash.str + len, trash.size - len, ": %s", pn); if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.str, len) < 0)) goto return_bad_req; } } /* * 10: add X-Original-To if either the frontend or the backend * asks for it. */ if ((s->fe->options | s->be->options) & PR_O_ORGTO) { /* FIXME: don't know if IPv6 can handle that case too. */ if (cli_conn && cli_conn->addr.from.ss_family == AF_INET) { /* Add an X-Original-To header unless the destination IP is * in the 'except' network range. */ conn_get_to_addr(cli_conn); if (cli_conn->addr.to.ss_family == AF_INET && ((!s->fe->except_mask_to.s_addr || (((struct sockaddr_in *)&cli_conn->addr.to)->sin_addr.s_addr & s->fe->except_mask_to.s_addr) != s->fe->except_to.s_addr) && (!s->be->except_mask_to.s_addr || (((struct sockaddr_in *)&cli_conn->addr.to)->sin_addr.s_addr & s->be->except_mask_to.s_addr) != s->be->except_to.s_addr))) { int len; unsigned char *pn; pn = (unsigned char *)&((struct sockaddr_in *)&cli_conn->addr.to)->sin_addr; /* Note: we rely on the backend to get the header name to be used for * x-original-to, because the header is really meant for the backends. * However, if the backend did not specify any option, we have to rely * on the frontend's header name. */ if (s->be->orgto_hdr_len) { len = s->be->orgto_hdr_len; memcpy(trash.str, s->be->orgto_hdr_name, len); } else { len = s->fe->orgto_hdr_len; memcpy(trash.str, s->fe->orgto_hdr_name, len); } len += snprintf(trash.str + len, trash.size - len, ": %d.%d.%d.%d", pn[0], pn[1], pn[2], pn[3]); if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.str, len) < 0)) goto return_bad_req; } } } /* 11: add "Connection: close" or "Connection: keep-alive" if needed and not yet set. * If an "Upgrade" token is found, the header is left untouched in order not to have * to deal with some servers bugs : some of them fail an Upgrade if anything but * "Upgrade" is present in the Connection header. */ if (!(txn->flags & TX_HDR_CONN_UPG) && (((txn->flags & TX_CON_WANT_MSK) != TX_CON_WANT_TUN) || ((s->fe->options & PR_O_HTTP_MODE) == PR_O_HTTP_PCL || (s->be->options & PR_O_HTTP_MODE) == PR_O_HTTP_PCL))) { unsigned int want_flags = 0; if (msg->flags & HTTP_MSGF_VER_11) { if (((txn->flags & TX_CON_WANT_MSK) >= TX_CON_WANT_SCL || ((s->fe->options & PR_O_HTTP_MODE) == PR_O_HTTP_PCL || (s->be->options & PR_O_HTTP_MODE) == PR_O_HTTP_PCL)) && !((s->fe->options2|s->be->options2) & PR_O2_FAKE_KA)) want_flags |= TX_CON_CLO_SET; } else { if (((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_KAL && ((s->fe->options & PR_O_HTTP_MODE) != PR_O_HTTP_PCL && (s->be->options & PR_O_HTTP_MODE) != PR_O_HTTP_PCL)) || ((s->fe->options2|s->be->options2) & PR_O2_FAKE_KA)) want_flags |= TX_CON_KAL_SET; } if (want_flags != (txn->flags & (TX_CON_CLO_SET|TX_CON_KAL_SET))) http_change_connection_header(txn, msg, want_flags); } /* If we have no server assigned yet and we're balancing on url_param * with a POST request, we may be interested in checking the body for * that parameter. This will be done in another analyser. 
*/ if (!(s->flags & (SN_ASSIGNED|SN_DIRECT)) && s->txn.meth == HTTP_METH_POST && s->be->url_param_name != NULL && (msg->flags & (HTTP_MSGF_CNT_LEN|HTTP_MSGF_TE_CHNK))) { channel_dont_connect(req); req->analysers |= AN_REQ_HTTP_BODY; } if (msg->flags & HTTP_MSGF_XFER_LEN) { req->analysers |= AN_REQ_HTTP_XFER_BODY; #ifdef TCP_QUICKACK /* We expect some data from the client. Unless we know for sure * we already have a full request, we have to re-enable quick-ack * in case we previously disabled it, otherwise we might cause * the client to delay further data. */ if ((s->listener->options & LI_O_NOQUICKACK) && cli_conn && conn_ctrl_ready(cli_conn) && ((msg->flags & HTTP_MSGF_TE_CHNK) || (msg->body_len > req->buf->i - txn->req.eoh - 2))) setsockopt(cli_conn->t.sock.fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one)); #endif } /************************************************************* * OK, that's finished for the headers. We have done what we * * could. Let's switch to the DATA state. * ************************************************************/ req->analyse_exp = TICK_ETERNITY; req->analysers &= ~an_bit; /* if the server closes the connection, we want to immediately react * and close the socket to save packets and syscalls. */ if (!(req->analysers & AN_REQ_HTTP_XFER_BODY)) req->cons->flags |= SI_FL_NOHALF; s->logs.tv_request = now; /* OK let's go on with the BODY now */ return 1; return_bad_req: /* let's centralize all bad requests */ if (unlikely(msg->msg_state == HTTP_MSG_ERROR) || msg->err_pos >= 0) { /* we detected a parsing error. We want to archive this request * in the dedicated proxy area for later troubleshooting. */ http_capture_bad_message(&s->fe->invalid_req, s, msg, msg->msg_state, s->fe); } txn->req.msg_state = HTTP_MSG_ERROR; txn->status = 400; req->analysers = 0; stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_400)); s->fe->fe_counters.failed_req++; if (s->listener->counters) s->listener->counters->failed_req++; if (!(s->flags & SN_ERR_MASK)) s->flags |= SN_ERR_PRXCOND; if (!(s->flags & SN_FINST_MASK)) s->flags |= SN_FINST_R; return 0; }
int http_process_request(struct session *s, struct channel *req, int an_bit) { struct http_txn *txn = &s->txn; struct http_msg *msg = &txn->req; struct connection *cli_conn = objt_conn(req->prod->end); if (unlikely(msg->msg_state < HTTP_MSG_BODY)) { /* we need more data */ channel_dont_connect(req); return 0; } DPRINTF(stderr,"[%u] %s: session=%p b=%p, exp(r,w)=%u,%u bf=%08x bh=%d analysers=%02x\n", now_ms, __FUNCTION__, s, req, req->rex, req->wex, req->flags, req->buf->i, req->analysers); if (s->fe->comp || s->be->comp) select_compression_request_header(s, req->buf); /* * Right now, we know that we have processed the entire headers * and that unwanted requests have been filtered out. We can do * whatever we want with the remaining request. Also, now we * may have separate values for ->fe, ->be. */ /* * If HTTP PROXY is set we simply get remote server address parsing * incoming request. Note that this requires that a connection is * allocated on the server side. */ if ((s->be->options & PR_O_HTTP_PROXY) && !(s->flags & SN_ADDR_SET)) { struct connection *conn; char *path; /* Note that for now we don't reuse existing proxy connections */ if (unlikely((conn = si_alloc_conn(req->cons, 0)) == NULL)) { txn->req.msg_state = HTTP_MSG_ERROR; txn->status = 500; req->analysers = 0; stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_500)); if (!(s->flags & SN_ERR_MASK)) s->flags |= SN_ERR_RESOURCE; if (!(s->flags & SN_FINST_MASK)) s->flags |= SN_FINST_R; return 0; } path = http_get_path(txn); url2sa(req->buf->p + msg->sl.rq.u, path ? path - (req->buf->p + msg->sl.rq.u) : msg->sl.rq.u_l, &conn->addr.to, NULL); /* if the path was found, we have to remove everything between * req->buf->p + msg->sl.rq.u and path (excluded). If it was not * found, we need to replace from req->buf->p + msg->sl.rq.u for * u_l characters by a single "/". */ if (path) { char *cur_ptr = req->buf->p; char *cur_end = cur_ptr + txn->req.sl.rq.l; int delta; delta = buffer_replace2(req->buf, req->buf->p + msg->sl.rq.u, path, NULL, 0); http_msg_move_end(&txn->req, delta); cur_end += delta; if (http_parse_reqline(&txn->req, HTTP_MSG_RQMETH, cur_ptr, cur_end + 1, NULL, NULL) == NULL) goto return_bad_req; } else { char *cur_ptr = req->buf->p; char *cur_end = cur_ptr + txn->req.sl.rq.l; int delta; delta = buffer_replace2(req->buf, req->buf->p + msg->sl.rq.u, req->buf->p + msg->sl.rq.u + msg->sl.rq.u_l, "/", 1); http_msg_move_end(&txn->req, delta); cur_end += delta; if (http_parse_reqline(&txn->req, HTTP_MSG_RQMETH, cur_ptr, cur_end + 1, NULL, NULL) == NULL) goto return_bad_req; } } /* * 7: Now we can work with the cookies. * Note that doing so might move headers in the request, but * the fields will stay coherent and the URI will not move. * This should only be performed in the backend. */ if ((s->be->cookie_name || s->be->appsession_name || s->fe->capture_name) && !(txn->flags & (TX_CLDENY|TX_CLTARPIT))) manage_client_side_cookies(s, req); /* * 8: the appsession cookie was looked up very early in 1.2, * so let's do the same now. 
*/ /* It needs to look into the URI unless persistence must be ignored */ if ((txn->sessid == NULL) && s->be->appsession_name && !(s->flags & SN_IGNORE_PRST)) { get_srv_from_appsession(s, req->buf->p + msg->sl.rq.u, msg->sl.rq.u_l); } /* add unique-id if "header-unique-id" is specified */ if (!LIST_ISEMPTY(&s->fe->format_unique_id)) { if ((s->unique_id = pool_alloc2(pool2_uniqueid)) == NULL) goto return_bad_req; s->unique_id[0] = '\0'; build_logline(s, s->unique_id, UNIQUEID_LEN, &s->fe->format_unique_id); } if (s->fe->header_unique_id && s->unique_id) { chunk_printf(&trash, "%s: %s", s->fe->header_unique_id, s->unique_id); if (trash.len < 0) goto return_bad_req; if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.str, trash.len) < 0)) goto return_bad_req; } /* * 9: add X-Forwarded-For if either the frontend or the backend * asks for it. */ if ((s->fe->options | s->be->options) & PR_O_FWDFOR) { struct hdr_ctx ctx = { .idx = 0 }; if (!((s->fe->options | s->be->options) & PR_O_FF_ALWAYS) && http_find_header2(s->be->fwdfor_hdr_len ? s->be->fwdfor_hdr_name : s->fe->fwdfor_hdr_name, s->be->fwdfor_hdr_len ? s->be->fwdfor_hdr_len : s->fe->fwdfor_hdr_len, req->buf->p, &txn->hdr_idx, &ctx)) { /* The header is set to be added only if none is present * and we found it, so don't do anything. */ } else if (cli_conn && cli_conn->addr.from.ss_family == AF_INET) { /* Add an X-Forwarded-For header unless the source IP is * in the 'except' network range. */ if ((!s->fe->except_mask.s_addr || (((struct sockaddr_in *)&cli_conn->addr.from)->sin_addr.s_addr & s->fe->except_mask.s_addr) != s->fe->except_net.s_addr) && (!s->be->except_mask.s_addr || (((struct sockaddr_in *)&cli_conn->addr.from)->sin_addr.s_addr & s->be->except_mask.s_addr) != s->be->except_net.s_addr)) { int len; unsigned char *pn; pn = (unsigned char *)&((struct sockaddr_in *)&cli_conn->addr.from)->sin_addr; /* Note: we rely on the backend to get the header name to be used for * x-forwarded-for, because the header is really meant for the backends. * However, if the backend did not specify any option, we have to rely * on the frontend's header name. */ if (s->be->fwdfor_hdr_len) { len = s->be->fwdfor_hdr_len; memcpy(trash.str, s->be->fwdfor_hdr_name, len); } else { len = s->fe->fwdfor_hdr_len; memcpy(trash.str, s->fe->fwdfor_hdr_name, len); } len += snprintf(trash.str + len, trash.size - len, ": %d.%d.%d.%d", pn[0], pn[1], pn[2], pn[3]); if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.str, len) < 0)) goto return_bad_req; } } else if (cli_conn && cli_conn->addr.from.ss_family == AF_INET6) { /* FIXME: for the sake of completeness, we should also support * 'except' here, although it is mostly useless in this case. */ int len; char pn[INET6_ADDRSTRLEN]; inet_ntop(AF_INET6, (const void *)&((struct sockaddr_in6 *)(&cli_conn->addr.from))->sin6_addr, pn, sizeof(pn)); /* Note: we rely on the backend to get the header name to be used for * x-forwarded-for, because the header is really meant for the backends. * However, if the backend did not specify any option, we have to rely * on the frontend's header name. 
*/ if (s->be->fwdfor_hdr_len) { len = s->be->fwdfor_hdr_len; memcpy(trash.str, s->be->fwdfor_hdr_name, len); } else { len = s->fe->fwdfor_hdr_len; memcpy(trash.str, s->fe->fwdfor_hdr_name, len); } len += snprintf(trash.str + len, trash.size - len, ": %s", pn); if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.str, len) < 0)) goto return_bad_req; } } /* * 10: add X-Original-To if either the frontend or the backend * asks for it. */ if ((s->fe->options | s->be->options) & PR_O_ORGTO) { /* FIXME: don't know if IPv6 can handle that case too. */ if (cli_conn && cli_conn->addr.from.ss_family == AF_INET) { /* Add an X-Original-To header unless the destination IP is * in the 'except' network range. */ conn_get_to_addr(cli_conn); if (cli_conn->addr.to.ss_family == AF_INET && ((!s->fe->except_mask_to.s_addr || (((struct sockaddr_in *)&cli_conn->addr.to)->sin_addr.s_addr & s->fe->except_mask_to.s_addr) != s->fe->except_to.s_addr) && (!s->be->except_mask_to.s_addr || (((struct sockaddr_in *)&cli_conn->addr.to)->sin_addr.s_addr & s->be->except_mask_to.s_addr) != s->be->except_to.s_addr))) { int len; unsigned char *pn; pn = (unsigned char *)&((struct sockaddr_in *)&cli_conn->addr.to)->sin_addr; /* Note: we rely on the backend to get the header name to be used for * x-original-to, because the header is really meant for the backends. * However, if the backend did not specify any option, we have to rely * on the frontend's header name. */ if (s->be->orgto_hdr_len) { len = s->be->orgto_hdr_len; memcpy(trash.str, s->be->orgto_hdr_name, len); } else { len = s->fe->orgto_hdr_len; memcpy(trash.str, s->fe->orgto_hdr_name, len); } len += snprintf(trash.str + len, trash.size - len, ": %d.%d.%d.%d", pn[0], pn[1], pn[2], pn[3]); if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.str, len) < 0)) goto return_bad_req; } } } /* 11: add "Connection: close" or "Connection: keep-alive" if needed and not yet set. * If an "Upgrade" token is found, the header is left untouched in order not to have * to deal with some servers bugs : some of them fail an Upgrade if anything but * "Upgrade" is present in the Connection header. */ if (!(txn->flags & TX_HDR_CONN_UPG) && (((txn->flags & TX_CON_WANT_MSK) != TX_CON_WANT_TUN) || ((s->fe->options & PR_O_HTTP_MODE) == PR_O_HTTP_PCL || (s->be->options & PR_O_HTTP_MODE) == PR_O_HTTP_PCL))) { unsigned int want_flags = 0; if (msg->flags & HTTP_MSGF_VER_11) { if (((txn->flags & TX_CON_WANT_MSK) >= TX_CON_WANT_SCL || ((s->fe->options & PR_O_HTTP_MODE) == PR_O_HTTP_PCL || (s->be->options & PR_O_HTTP_MODE) == PR_O_HTTP_PCL)) && !((s->fe->options2|s->be->options2) & PR_O2_FAKE_KA)) want_flags |= TX_CON_CLO_SET; } else { if (((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_KAL && ((s->fe->options & PR_O_HTTP_MODE) != PR_O_HTTP_PCL && (s->be->options & PR_O_HTTP_MODE) != PR_O_HTTP_PCL)) || ((s->fe->options2|s->be->options2) & PR_O2_FAKE_KA)) want_flags |= TX_CON_KAL_SET; } if (want_flags != (txn->flags & (TX_CON_CLO_SET|TX_CON_KAL_SET))) http_change_connection_header(txn, msg, want_flags); } /* If we have no server assigned yet and we're balancing on url_param * with a POST request, we may be interested in checking the body for * that parameter. This will be done in another analyser. 
*/ if (!(s->flags & (SN_ASSIGNED|SN_DIRECT)) && s->txn.meth == HTTP_METH_POST && s->be->url_param_name != NULL && (msg->flags & (HTTP_MSGF_CNT_LEN|HTTP_MSGF_TE_CHNK))) { channel_dont_connect(req); req->analysers |= AN_REQ_HTTP_BODY; } if (msg->flags & HTTP_MSGF_XFER_LEN) { req->analysers |= AN_REQ_HTTP_XFER_BODY; #ifdef TCP_QUICKACK /* We expect some data from the client. Unless we know for sure * we already have a full request, we have to re-enable quick-ack * in case we previously disabled it, otherwise we might cause * the client to delay further data. */ if ((s->listener->options & LI_O_NOQUICKACK) && cli_conn && conn_ctrl_ready(cli_conn) && ((msg->flags & HTTP_MSGF_TE_CHNK) || (msg->body_len > req->buf->i - txn->req.eoh - 2))) setsockopt(cli_conn->t.sock.fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one)); #endif } /************************************************************* * OK, that's finished for the headers. We have done what we * * could. Let's switch to the DATA state. * ************************************************************/ req->analyse_exp = TICK_ETERNITY; req->analysers &= ~an_bit; /* if the server closes the connection, we want to immediately react * and close the socket to save packets and syscalls. */ if (!(req->analysers & AN_REQ_HTTP_XFER_BODY)) req->cons->flags |= SI_FL_NOHALF; s->logs.tv_request = now; /* OK let's go on with the BODY now */ return 1; return_bad_req: /* let's centralize all bad requests */ if (unlikely(msg->msg_state == HTTP_MSG_ERROR) || msg->err_pos >= 0) { /* we detected a parsing error. We want to archive this request * in the dedicated proxy area for later troubleshooting. */ http_capture_bad_message(&s->fe->invalid_req, s, msg, msg->msg_state, s->fe); } txn->req.msg_state = HTTP_MSG_ERROR; txn->status = 400; req->analysers = 0; stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_400)); s->fe->fe_counters.failed_req++; if (s->listener->counters) s->listener->counters->failed_req++; if (!(s->flags & SN_ERR_MASK)) s->flags |= SN_ERR_PRXCOND; if (!(s->flags & SN_FINST_MASK)) s->flags |= SN_FINST_R; return 0; }
vul: 0 | __index_level_0__: 10

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2013-7421/ | CWE ID: NOT_APPLICABLE
void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) { aes_enc_blk(ctx, dst, src); }
void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) { aes_enc_blk(ctx, dst, src); }
vul: 0 | __index_level_0__: 11

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2016-5219/ | CWE ID: NOT_APPLICABLE
error::Error GLES2DecoderPassthroughImpl::DoDeletePathsCHROMIUM(GLuint path, GLsizei range) { NOTIMPLEMENTED(); return error::kNoError; }
error::Error GLES2DecoderPassthroughImpl::DoDeletePathsCHROMIUM(GLuint path, GLsizei range) { NOTIMPLEMENTED(); return error::kNoError; }
vul: 0 | __index_level_0__: 12

CVE ID: CVE-2018-6063 | CVE Page: https://www.cvedetails.com/cve/CVE-2018-6063/ | CWE ID: CWE-787
void VideoCaptureImpl::OnBufferCreated(int32_t buffer_id, mojo::ScopedSharedBufferHandle handle) { DVLOG(1) << __func__ << " buffer_id: " << buffer_id; DCHECK(io_thread_checker_.CalledOnValidThread()); DCHECK(handle.is_valid()); base::SharedMemoryHandle memory_handle; size_t memory_size = 0; mojo::UnwrappedSharedMemoryHandleProtection protection; const MojoResult result = mojo::UnwrapSharedMemoryHandle( std::move(handle), &memory_handle, &memory_size, &protection); DCHECK_EQ(MOJO_RESULT_OK, result); DCHECK_GT(memory_size, 0u); // TODO(https://crbug.com/803136): We should also be able to assert that the // unwrapped handle was shared for read-only mapping. That condition is not // currently guaranteed to be met. std::unique_ptr<base::SharedMemory> shm( new base::SharedMemory(memory_handle, true /* read_only */)); if (!shm->Map(memory_size)) { DLOG(ERROR) << "OnBufferCreated: Map failed."; return; } const bool inserted = client_buffers_ .insert(std::make_pair(buffer_id, new ClientBuffer(std::move(shm), memory_size))) .second; DCHECK(inserted); }
void VideoCaptureImpl::OnBufferCreated(int32_t buffer_id, mojo::ScopedSharedBufferHandle handle) { DVLOG(1) << __func__ << " buffer_id: " << buffer_id; DCHECK(io_thread_checker_.CalledOnValidThread()); DCHECK(handle.is_valid()); base::SharedMemoryHandle memory_handle; size_t memory_size = 0; bool read_only_flag = false; const MojoResult result = mojo::UnwrapSharedMemoryHandle( std::move(handle), &memory_handle, &memory_size, &read_only_flag); DCHECK_EQ(MOJO_RESULT_OK, result); DCHECK_GT(memory_size, 0u); std::unique_ptr<base::SharedMemory> shm( new base::SharedMemory(memory_handle, true /* read_only */)); if (!shm->Map(memory_size)) { DLOG(ERROR) << "OnBufferCreated: Map failed."; return; } const bool inserted = client_buffers_ .insert(std::make_pair(buffer_id, new ClientBuffer(std::move(shm), memory_size))) .second; DCHECK(inserted); }
vul: 1 | __index_level_0__: 13

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2011-2843/ | CWE ID: NOT_APPLICABLE
void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) { scoped_refptr<VideoFrame> video_frame; AVPacket packet; av_init_packet(&packet); packet.data = const_cast<uint8*>(buffer->GetData()); packet.size = buffer->GetDataSize(); PipelineStatistics statistics; statistics.video_bytes_decoded = buffer->GetDataSize(); codec_context_->reordered_opaque = buffer->GetTimestamp().InMicroseconds(); av_frame_->reordered_opaque = codec_context_->reordered_opaque; int frame_decoded = 0; int result = avcodec_decode_video2(codec_context_, av_frame_.get(), &frame_decoded, &packet); if (result < 0) { LOG(ERROR) << "Error decoding a video frame with timestamp: " << buffer->GetTimestamp().InMicroseconds() << " us, duration: " << buffer->GetDuration().InMicroseconds() << " us, packet size: " << buffer->GetDataSize() << " bytes"; event_handler_->OnError(); return; } if (frame_decoded == 0) { if (buffer->IsEndOfStream()) { // We had started flushing. event_handler_->ConsumeVideoFrame(video_frame, statistics); output_eos_reached_ = true; } else { ReadInput(); } return; } if (!av_frame_->data[VideoFrame::kYPlane] || !av_frame_->data[VideoFrame::kUPlane] || !av_frame_->data[VideoFrame::kVPlane]) { event_handler_->OnError(); return; } DCHECK_LE(av_frame_->repeat_pict, 2); // Sanity check. AVRational doubled_time_base; doubled_time_base.num = frame_rate_denominator_; doubled_time_base.den = frame_rate_numerator_ * 2; base::TimeDelta timestamp = base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque); base::TimeDelta duration = ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict); DCHECK(frame_queue_available_.size()); video_frame = frame_queue_available_.front(); frame_queue_available_.pop_front(); size_t height = codec_context_->height; CopyPlane(VideoFrame::kYPlane, video_frame.get(), av_frame_.get(), height); CopyPlane(VideoFrame::kUPlane, video_frame.get(), av_frame_.get(), height); CopyPlane(VideoFrame::kVPlane, video_frame.get(), av_frame_.get(), height); video_frame->SetTimestamp(timestamp); video_frame->SetDuration(duration); pending_output_buffers_--; event_handler_->ConsumeVideoFrame(video_frame, statistics); }
void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) { scoped_refptr<VideoFrame> video_frame; AVPacket packet; av_init_packet(&packet); packet.data = const_cast<uint8*>(buffer->GetData()); packet.size = buffer->GetDataSize(); PipelineStatistics statistics; statistics.video_bytes_decoded = buffer->GetDataSize(); codec_context_->reordered_opaque = buffer->GetTimestamp().InMicroseconds(); av_frame_->reordered_opaque = codec_context_->reordered_opaque; int frame_decoded = 0; int result = avcodec_decode_video2(codec_context_, av_frame_.get(), &frame_decoded, &packet); if (result < 0) { LOG(ERROR) << "Error decoding a video frame with timestamp: " << buffer->GetTimestamp().InMicroseconds() << " us, duration: " << buffer->GetDuration().InMicroseconds() << " us, packet size: " << buffer->GetDataSize() << " bytes"; event_handler_->OnError(); return; } if (frame_decoded == 0) { if (buffer->IsEndOfStream()) { // We had started flushing. event_handler_->ConsumeVideoFrame(video_frame, statistics); output_eos_reached_ = true; } else { ReadInput(); } return; } if (!av_frame_->data[VideoFrame::kYPlane] || !av_frame_->data[VideoFrame::kUPlane] || !av_frame_->data[VideoFrame::kVPlane]) { event_handler_->OnError(); return; } DCHECK_LE(av_frame_->repeat_pict, 2); // Sanity check. AVRational doubled_time_base; doubled_time_base.num = frame_rate_denominator_; doubled_time_base.den = frame_rate_numerator_ * 2; base::TimeDelta timestamp = base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque); base::TimeDelta duration = ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict); DCHECK(frame_queue_available_.size()); video_frame = frame_queue_available_.front(); frame_queue_available_.pop_front(); size_t height = codec_context_->height; CopyPlane(VideoFrame::kYPlane, video_frame.get(), av_frame_.get(), height); CopyPlane(VideoFrame::kUPlane, video_frame.get(), av_frame_.get(), height); CopyPlane(VideoFrame::kVPlane, video_frame.get(), av_frame_.get(), height); video_frame->SetTimestamp(timestamp); video_frame->SetDuration(duration); pending_output_buffers_--; event_handler_->ConsumeVideoFrame(video_frame, statistics); }
vul: 0 | __index_level_0__: 14

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2018-6159/ | CWE ID: NOT_APPLICABLE
void Performance::BuildJSONValue(V8ObjectBuilder& builder) const { builder.AddNumber("timeOrigin", timeOrigin()); }
void Performance::BuildJSONValue(V8ObjectBuilder& builder) const { builder.AddNumber("timeOrigin", timeOrigin()); }
vul: 0 | __index_level_0__: 15

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2011-2861/ | CWE ID: NOT_APPLICABLE
void RenderView::OnRedo() { if (!webview()) return; webview()->focusedFrame()->executeCommand(WebString::fromUTF8("Redo")); }
void RenderView::OnRedo() { if (!webview()) return; webview()->focusedFrame()->executeCommand(WebString::fromUTF8("Redo")); }
vul: 0 | __index_level_0__: 16

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2017-5016/ | CWE ID: NOT_APPLICABLE
void HTMLFormControlElement::didChangeForm() { ListedElement::didChangeForm(); formOwnerSetNeedsValidityCheck(); if (formOwner() && isConnected() && canBeSuccessfulSubmitButton()) formOwner()->invalidateDefaultButtonStyle(); }
void HTMLFormControlElement::didChangeForm() { ListedElement::didChangeForm(); formOwnerSetNeedsValidityCheck(); if (formOwner() && isConnected() && canBeSuccessfulSubmitButton()) formOwner()->invalidateDefaultButtonStyle(); }
vul: 0 | __index_level_0__: 18

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2016-0824/ | CWE ID: NOT_APPLICABLE
IMPEG2D_ERROR_CODES_T impeg2d_pre_pic_dec_proc(dec_state_t *ps_dec) { WORD32 u4_get_disp; pic_buf_t *ps_disp_pic; IMPEG2D_ERROR_CODES_T e_error = (IMPEG2D_ERROR_CODES_T)IVD_ERROR_NONE; u4_get_disp = 0; ps_disp_pic = NULL; /* Field Picture */ if(ps_dec->u2_picture_structure != FRAME_PICTURE) { ps_dec->u2_num_vert_mb = (ps_dec->u2_vertical_size + 31) >> 5; if(ps_dec->u2_num_flds_decoded == 0) { pic_buf_t *ps_pic_buf; u4_get_disp = 1; ps_pic_buf = impeg2_buf_mgr_get_next_free(ps_dec->pv_pic_buf_mg, &ps_dec->i4_cur_buf_id); if (NULL == ps_pic_buf) { return IMPEG2D_NO_FREE_BUF_ERR; } impeg2_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mg, ps_dec->i4_cur_buf_id, BUF_MGR_DISP); impeg2_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mg, ps_dec->i4_cur_buf_id, BUF_MGR_REF); ps_pic_buf->u4_ts = ps_dec->u4_inp_ts; ps_pic_buf->e_pic_type = ps_dec->e_pic_type; ps_dec->ps_cur_pic = ps_pic_buf; ps_dec->s_cur_frm_buf.pu1_y = ps_pic_buf->pu1_y; ps_dec->s_cur_frm_buf.pu1_u = ps_pic_buf->pu1_u; ps_dec->s_cur_frm_buf.pu1_v = ps_pic_buf->pu1_v; } if(ps_dec->u2_picture_structure == TOP_FIELD) { ps_dec->u2_fld_parity = TOP; } else { ps_dec->u2_fld_parity = BOTTOM; } ps_dec->u2_field_dct = 0; ps_dec->u2_read_dct_type = 0; ps_dec->u2_read_motion_type = 1; ps_dec->u2_fld_pic = 1; ps_dec->u2_frm_pic = 0; ps_dec->ps_func_forw_or_back = gas_impeg2d_func_fld_fw_or_bk; ps_dec->ps_func_bi_direct = gas_impeg2d_func_fld_bi_direct; } /* Frame Picture */ else { pic_buf_t *ps_pic_buf; ps_dec->u2_num_vert_mb = (ps_dec->u2_vertical_size + 15) >> 4; u4_get_disp = 1; ps_pic_buf = impeg2_buf_mgr_get_next_free(ps_dec->pv_pic_buf_mg, &ps_dec->i4_cur_buf_id); if (NULL == ps_pic_buf) { return IMPEG2D_NO_FREE_BUF_ERR; } impeg2_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mg, ps_dec->i4_cur_buf_id, BUF_MGR_DISP); impeg2_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mg, ps_dec->i4_cur_buf_id, BUF_MGR_REF); ps_pic_buf->u4_ts = ps_dec->u4_inp_ts; ps_pic_buf->e_pic_type = ps_dec->e_pic_type; ps_dec->ps_cur_pic = ps_pic_buf; ps_dec->s_cur_frm_buf.pu1_y = ps_pic_buf->pu1_y; ps_dec->s_cur_frm_buf.pu1_u = ps_pic_buf->pu1_u; ps_dec->s_cur_frm_buf.pu1_v = ps_pic_buf->pu1_v; if(ps_dec->u2_frame_pred_frame_dct == 0) { ps_dec->u2_read_dct_type = 1; ps_dec->u2_read_motion_type = 1; } else { ps_dec->u2_read_dct_type = 0; ps_dec->u2_read_motion_type = 0; ps_dec->u2_motion_type = 2; ps_dec->u2_field_dct = 0; } ps_dec->u2_fld_parity = TOP; ps_dec->u2_fld_pic = 0; ps_dec->u2_frm_pic = 1; ps_dec->ps_func_forw_or_back = gas_impeg2d_func_frm_fw_or_bk; ps_dec->ps_func_bi_direct = gas_impeg2d_func_frm_bi_direct; } ps_dec->u2_def_dc_pred[Y_LUMA] = 128 << ps_dec->u2_intra_dc_precision; ps_dec->u2_def_dc_pred[U_CHROMA] = 128 << ps_dec->u2_intra_dc_precision; ps_dec->u2_def_dc_pred[V_CHROMA] = 128 << ps_dec->u2_intra_dc_precision; ps_dec->u2_num_mbs_left = ps_dec->u2_num_horiz_mb * ps_dec->u2_num_vert_mb; if(u4_get_disp) { if(ps_dec->u4_num_frames_decoded > 1) { ps_disp_pic = impeg2_disp_mgr_get(&ps_dec->s_disp_mgr, &ps_dec->i4_disp_buf_id); } ps_dec->ps_disp_pic = ps_disp_pic; if(ps_disp_pic) { if(1 == ps_dec->u4_share_disp_buf) { ps_dec->ps_disp_frm_buf->pv_y_buf = ps_disp_pic->pu1_y; if(IV_YUV_420P == ps_dec->i4_chromaFormat) { ps_dec->ps_disp_frm_buf->pv_u_buf = ps_disp_pic->pu1_u; ps_dec->ps_disp_frm_buf->pv_v_buf = ps_disp_pic->pu1_v; } else { UWORD8 *pu1_buf; pu1_buf = ps_dec->as_disp_buffers[ps_disp_pic->i4_buf_id].pu1_bufs[1]; ps_dec->ps_disp_frm_buf->pv_u_buf = pu1_buf; pu1_buf = 
ps_dec->as_disp_buffers[ps_disp_pic->i4_buf_id].pu1_bufs[2]; ps_dec->ps_disp_frm_buf->pv_v_buf = pu1_buf; } } } } switch(ps_dec->e_pic_type) { case I_PIC: { ps_dec->pf_decode_slice = impeg2d_dec_i_slice; break; } case D_PIC: { ps_dec->pf_decode_slice = impeg2d_dec_d_slice; break; } case P_PIC: { ps_dec->pf_decode_slice = impeg2d_dec_p_b_slice; ps_dec->pu2_mb_type = gau2_impeg2d_p_mb_type; break; } case B_PIC: { ps_dec->pf_decode_slice = impeg2d_dec_p_b_slice; ps_dec->pu2_mb_type = gau2_impeg2d_b_mb_type; break; } default: return IMPEG2D_INVALID_PIC_TYPE; } /*************************************************************************/ /* Set the reference pictures */ /*************************************************************************/ /* Error resilience: If forward and backward pictures are going to be NULL*/ /* then assign both to the current */ /* if one of them NULL then we will assign the non null to the NULL one */ if(ps_dec->e_pic_type == P_PIC) { if (NULL == ps_dec->as_recent_fld[1][0].pu1_y) { ps_dec->as_recent_fld[1][0] = ps_dec->s_cur_frm_buf; } if (NULL == ps_dec->as_recent_fld[1][1].pu1_y) { impeg2d_get_bottom_field_buf(&ps_dec->s_cur_frm_buf, &ps_dec->as_recent_fld[1][1], ps_dec->u2_frame_width); } ps_dec->as_ref_buf[FORW][TOP] = ps_dec->as_recent_fld[1][0]; ps_dec->as_ref_buf[FORW][BOTTOM] = ps_dec->as_recent_fld[1][1]; } else if(ps_dec->e_pic_type == B_PIC) { if((NULL == ps_dec->as_recent_fld[1][0].pu1_y) && (NULL == ps_dec->as_recent_fld[0][0].pu1_y)) { ps_dec->as_recent_fld[1][0] = ps_dec->s_cur_frm_buf; impeg2d_get_bottom_field_buf(&ps_dec->s_cur_frm_buf, &ps_dec->as_recent_fld[1][1], ps_dec->u2_frame_width); ps_dec->as_recent_fld[0][0] = ps_dec->s_cur_frm_buf; ps_dec->as_recent_fld[0][1] = ps_dec->as_recent_fld[1][1]; } else if ((NULL != ps_dec->as_recent_fld[1][0].pu1_y) && (NULL == ps_dec->as_recent_fld[0][0].pu1_y)) { ps_dec->as_recent_fld[0][0] = ps_dec->as_recent_fld[1][0]; ps_dec->as_recent_fld[0][1] = ps_dec->as_recent_fld[1][1]; } else if ((NULL == ps_dec->as_recent_fld[1][0].pu1_y) && (NULL != ps_dec->as_recent_fld[0][0].pu1_y)) { ps_dec->as_recent_fld[1][0] = ps_dec->as_recent_fld[0][0]; ps_dec->as_recent_fld[1][1] = ps_dec->as_recent_fld[0][1]; } ps_dec->as_ref_buf[FORW][TOP] = ps_dec->as_recent_fld[0][0]; ps_dec->as_ref_buf[FORW][BOTTOM] = ps_dec->as_recent_fld[0][1]; ps_dec->as_ref_buf[BACK][TOP] = ps_dec->as_recent_fld[1][0]; ps_dec->as_ref_buf[BACK][BOTTOM] = ps_dec->as_recent_fld[1][1]; } return e_error; }
IMPEG2D_ERROR_CODES_T impeg2d_pre_pic_dec_proc(dec_state_t *ps_dec) { WORD32 u4_get_disp; pic_buf_t *ps_disp_pic; IMPEG2D_ERROR_CODES_T e_error = (IMPEG2D_ERROR_CODES_T)IVD_ERROR_NONE; u4_get_disp = 0; ps_disp_pic = NULL; /* Field Picture */ if(ps_dec->u2_picture_structure != FRAME_PICTURE) { ps_dec->u2_num_vert_mb = (ps_dec->u2_vertical_size + 31) >> 5; if(ps_dec->u2_num_flds_decoded == 0) { pic_buf_t *ps_pic_buf; u4_get_disp = 1; ps_pic_buf = impeg2_buf_mgr_get_next_free(ps_dec->pv_pic_buf_mg, &ps_dec->i4_cur_buf_id); if (NULL == ps_pic_buf) { return IMPEG2D_NO_FREE_BUF_ERR; } impeg2_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mg, ps_dec->i4_cur_buf_id, BUF_MGR_DISP); impeg2_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mg, ps_dec->i4_cur_buf_id, BUF_MGR_REF); ps_pic_buf->u4_ts = ps_dec->u4_inp_ts; ps_pic_buf->e_pic_type = ps_dec->e_pic_type; ps_dec->ps_cur_pic = ps_pic_buf; ps_dec->s_cur_frm_buf.pu1_y = ps_pic_buf->pu1_y; ps_dec->s_cur_frm_buf.pu1_u = ps_pic_buf->pu1_u; ps_dec->s_cur_frm_buf.pu1_v = ps_pic_buf->pu1_v; } if(ps_dec->u2_picture_structure == TOP_FIELD) { ps_dec->u2_fld_parity = TOP; } else { ps_dec->u2_fld_parity = BOTTOM; } ps_dec->u2_field_dct = 0; ps_dec->u2_read_dct_type = 0; ps_dec->u2_read_motion_type = 1; ps_dec->u2_fld_pic = 1; ps_dec->u2_frm_pic = 0; ps_dec->ps_func_forw_or_back = gas_impeg2d_func_fld_fw_or_bk; ps_dec->ps_func_bi_direct = gas_impeg2d_func_fld_bi_direct; } /* Frame Picture */ else { pic_buf_t *ps_pic_buf; ps_dec->u2_num_vert_mb = (ps_dec->u2_vertical_size + 15) >> 4; u4_get_disp = 1; ps_pic_buf = impeg2_buf_mgr_get_next_free(ps_dec->pv_pic_buf_mg, &ps_dec->i4_cur_buf_id); if (NULL == ps_pic_buf) { return IMPEG2D_NO_FREE_BUF_ERR; } impeg2_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mg, ps_dec->i4_cur_buf_id, BUF_MGR_DISP); impeg2_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mg, ps_dec->i4_cur_buf_id, BUF_MGR_REF); ps_pic_buf->u4_ts = ps_dec->u4_inp_ts; ps_pic_buf->e_pic_type = ps_dec->e_pic_type; ps_dec->ps_cur_pic = ps_pic_buf; ps_dec->s_cur_frm_buf.pu1_y = ps_pic_buf->pu1_y; ps_dec->s_cur_frm_buf.pu1_u = ps_pic_buf->pu1_u; ps_dec->s_cur_frm_buf.pu1_v = ps_pic_buf->pu1_v; if(ps_dec->u2_frame_pred_frame_dct == 0) { ps_dec->u2_read_dct_type = 1; ps_dec->u2_read_motion_type = 1; } else { ps_dec->u2_read_dct_type = 0; ps_dec->u2_read_motion_type = 0; ps_dec->u2_motion_type = 2; ps_dec->u2_field_dct = 0; } ps_dec->u2_fld_parity = TOP; ps_dec->u2_fld_pic = 0; ps_dec->u2_frm_pic = 1; ps_dec->ps_func_forw_or_back = gas_impeg2d_func_frm_fw_or_bk; ps_dec->ps_func_bi_direct = gas_impeg2d_func_frm_bi_direct; } ps_dec->u2_def_dc_pred[Y_LUMA] = 128 << ps_dec->u2_intra_dc_precision; ps_dec->u2_def_dc_pred[U_CHROMA] = 128 << ps_dec->u2_intra_dc_precision; ps_dec->u2_def_dc_pred[V_CHROMA] = 128 << ps_dec->u2_intra_dc_precision; ps_dec->u2_num_mbs_left = ps_dec->u2_num_horiz_mb * ps_dec->u2_num_vert_mb; if(u4_get_disp) { if(ps_dec->u4_num_frames_decoded > 1) { ps_disp_pic = impeg2_disp_mgr_get(&ps_dec->s_disp_mgr, &ps_dec->i4_disp_buf_id); } ps_dec->ps_disp_pic = ps_disp_pic; if(ps_disp_pic) { if(1 == ps_dec->u4_share_disp_buf) { ps_dec->ps_disp_frm_buf->pv_y_buf = ps_disp_pic->pu1_y; if(IV_YUV_420P == ps_dec->i4_chromaFormat) { ps_dec->ps_disp_frm_buf->pv_u_buf = ps_disp_pic->pu1_u; ps_dec->ps_disp_frm_buf->pv_v_buf = ps_disp_pic->pu1_v; } else { UWORD8 *pu1_buf; pu1_buf = ps_dec->as_disp_buffers[ps_disp_pic->i4_buf_id].pu1_bufs[1]; ps_dec->ps_disp_frm_buf->pv_u_buf = pu1_buf; pu1_buf = 
ps_dec->as_disp_buffers[ps_disp_pic->i4_buf_id].pu1_bufs[2]; ps_dec->ps_disp_frm_buf->pv_v_buf = pu1_buf; } } } } switch(ps_dec->e_pic_type) { case I_PIC: { ps_dec->pf_decode_slice = impeg2d_dec_i_slice; break; } case D_PIC: { ps_dec->pf_decode_slice = impeg2d_dec_d_slice; break; } case P_PIC: { ps_dec->pf_decode_slice = impeg2d_dec_p_b_slice; ps_dec->pu2_mb_type = gau2_impeg2d_p_mb_type; break; } case B_PIC: { ps_dec->pf_decode_slice = impeg2d_dec_p_b_slice; ps_dec->pu2_mb_type = gau2_impeg2d_b_mb_type; break; } default: return IMPEG2D_INVALID_PIC_TYPE; } /*************************************************************************/ /* Set the reference pictures */ /*************************************************************************/ /* Error resilience: If forward and backward pictures are going to be NULL*/ /* then assign both to the current */ /* if one of them NULL then we will assign the non null to the NULL one */ if(ps_dec->e_pic_type == P_PIC) { if (NULL == ps_dec->as_recent_fld[1][0].pu1_y) { ps_dec->as_recent_fld[1][0] = ps_dec->s_cur_frm_buf; } if (NULL == ps_dec->as_recent_fld[1][1].pu1_y) { impeg2d_get_bottom_field_buf(&ps_dec->s_cur_frm_buf, &ps_dec->as_recent_fld[1][1], ps_dec->u2_frame_width); } ps_dec->as_ref_buf[FORW][TOP] = ps_dec->as_recent_fld[1][0]; ps_dec->as_ref_buf[FORW][BOTTOM] = ps_dec->as_recent_fld[1][1]; } else if(ps_dec->e_pic_type == B_PIC) { if((NULL == ps_dec->as_recent_fld[1][0].pu1_y) && (NULL == ps_dec->as_recent_fld[0][0].pu1_y)) { ps_dec->as_recent_fld[1][0] = ps_dec->s_cur_frm_buf; impeg2d_get_bottom_field_buf(&ps_dec->s_cur_frm_buf, &ps_dec->as_recent_fld[1][1], ps_dec->u2_frame_width); ps_dec->as_recent_fld[0][0] = ps_dec->s_cur_frm_buf; ps_dec->as_recent_fld[0][1] = ps_dec->as_recent_fld[1][1]; } else if ((NULL != ps_dec->as_recent_fld[1][0].pu1_y) && (NULL == ps_dec->as_recent_fld[0][0].pu1_y)) { ps_dec->as_recent_fld[0][0] = ps_dec->as_recent_fld[1][0]; ps_dec->as_recent_fld[0][1] = ps_dec->as_recent_fld[1][1]; } else if ((NULL == ps_dec->as_recent_fld[1][0].pu1_y) && (NULL != ps_dec->as_recent_fld[0][0].pu1_y)) { ps_dec->as_recent_fld[1][0] = ps_dec->as_recent_fld[0][0]; ps_dec->as_recent_fld[1][1] = ps_dec->as_recent_fld[0][1]; } ps_dec->as_ref_buf[FORW][TOP] = ps_dec->as_recent_fld[0][0]; ps_dec->as_ref_buf[FORW][BOTTOM] = ps_dec->as_recent_fld[0][1]; ps_dec->as_ref_buf[BACK][TOP] = ps_dec->as_recent_fld[1][0]; ps_dec->as_ref_buf[BACK][BOTTOM] = ps_dec->as_recent_fld[1][1]; } return e_error; }
vul: 0 | __index_level_0__: 19

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2013-1929/ | CWE ID: NOT_APPLICABLE
static void tg3_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); if (dev) { struct tg3 *tp = netdev_priv(dev); release_firmware(tp->fw); tg3_reset_task_cancel(tp); if (tg3_flag(tp, USE_PHYLIB)) { tg3_phy_fini(tp); tg3_mdio_fini(tp); } unregister_netdev(dev); if (tp->aperegs) { iounmap(tp->aperegs); tp->aperegs = NULL; } if (tp->regs) { iounmap(tp->regs); tp->regs = NULL; } free_netdev(dev); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } }
static void tg3_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); if (dev) { struct tg3 *tp = netdev_priv(dev); release_firmware(tp->fw); tg3_reset_task_cancel(tp); if (tg3_flag(tp, USE_PHYLIB)) { tg3_phy_fini(tp); tg3_mdio_fini(tp); } unregister_netdev(dev); if (tp->aperegs) { iounmap(tp->aperegs); tp->aperegs = NULL; } if (tp->regs) { iounmap(tp->regs); tp->regs = NULL; } free_netdev(dev); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } }
vul: 0 | __index_level_0__: 20

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2016-10165/ | CWE ID: NOT_APPLICABLE
void *Type_LUT16_Read(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, cmsUInt32Number* nItems, cmsUInt32Number SizeOfTag) { cmsUInt8Number InputChannels, OutputChannels, CLUTpoints; cmsPipeline* NewLUT = NULL; cmsUInt32Number nTabSize; cmsFloat64Number Matrix[3*3]; cmsUInt16Number InputEntries, OutputEntries; *nItems = 0; if (!_cmsReadUInt8Number(io, &InputChannels)) return NULL; if (!_cmsReadUInt8Number(io, &OutputChannels)) return NULL; if (!_cmsReadUInt8Number(io, &CLUTpoints)) return NULL; // 255 maximum if (!_cmsReadUInt8Number(io, NULL)) return NULL; if (InputChannels > cmsMAXCHANNELS) goto Error; if (OutputChannels > cmsMAXCHANNELS) goto Error; NewLUT = cmsPipelineAlloc(self ->ContextID, InputChannels, OutputChannels); if (NewLUT == NULL) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[0])) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[1])) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[2])) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[3])) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[4])) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[5])) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[6])) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[7])) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[8])) goto Error; if ((InputChannels == 3) && !_cmsMAT3isIdentity((cmsMAT3*) Matrix)) { if (!cmsPipelineInsertStage(NewLUT, cmsAT_END, cmsStageAllocMatrix(self ->ContextID, 3, 3, Matrix, NULL))) goto Error; } if (!_cmsReadUInt16Number(io, &InputEntries)) goto Error; if (!_cmsReadUInt16Number(io, &OutputEntries)) goto Error; if (InputEntries > 0x7FFF || OutputEntries > 0x7FFF) goto Error; if (CLUTpoints == 1) goto Error; // Impossible value, 0 for no CLUT and then 2 at least if (!Read16bitTables(self ->ContextID, io, NewLUT, InputChannels, InputEntries)) goto Error; nTabSize = uipow(OutputChannels, CLUTpoints, InputChannels); if (nTabSize == (cmsUInt32Number) -1) goto Error; if (nTabSize > 0) { cmsUInt16Number *T; T = (cmsUInt16Number*) _cmsCalloc(self ->ContextID, nTabSize, sizeof(cmsUInt16Number)); if (T == NULL) goto Error; if (!_cmsReadUInt16Array(io, nTabSize, T)) { _cmsFree(self ->ContextID, T); goto Error; } if (!cmsPipelineInsertStage(NewLUT, cmsAT_END, cmsStageAllocCLut16bit(self ->ContextID, CLUTpoints, InputChannels, OutputChannels, T))) { _cmsFree(self ->ContextID, T); goto Error; } _cmsFree(self ->ContextID, T); } if (!Read16bitTables(self ->ContextID, io, NewLUT, OutputChannels, OutputEntries)) goto Error; *nItems = 1; return NewLUT; Error: if (NewLUT != NULL) cmsPipelineFree(NewLUT); return NULL; cmsUNUSED_PARAMETER(SizeOfTag); }
void *Type_LUT16_Read(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, cmsUInt32Number* nItems, cmsUInt32Number SizeOfTag) { cmsUInt8Number InputChannels, OutputChannels, CLUTpoints; cmsPipeline* NewLUT = NULL; cmsUInt32Number nTabSize; cmsFloat64Number Matrix[3*3]; cmsUInt16Number InputEntries, OutputEntries; *nItems = 0; if (!_cmsReadUInt8Number(io, &InputChannels)) return NULL; if (!_cmsReadUInt8Number(io, &OutputChannels)) return NULL; if (!_cmsReadUInt8Number(io, &CLUTpoints)) return NULL; // 255 maximum if (!_cmsReadUInt8Number(io, NULL)) return NULL; if (InputChannels > cmsMAXCHANNELS) goto Error; if (OutputChannels > cmsMAXCHANNELS) goto Error; NewLUT = cmsPipelineAlloc(self ->ContextID, InputChannels, OutputChannels); if (NewLUT == NULL) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[0])) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[1])) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[2])) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[3])) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[4])) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[5])) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[6])) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[7])) goto Error; if (!_cmsRead15Fixed16Number(io, &Matrix[8])) goto Error; if ((InputChannels == 3) && !_cmsMAT3isIdentity((cmsMAT3*) Matrix)) { if (!cmsPipelineInsertStage(NewLUT, cmsAT_END, cmsStageAllocMatrix(self ->ContextID, 3, 3, Matrix, NULL))) goto Error; } if (!_cmsReadUInt16Number(io, &InputEntries)) goto Error; if (!_cmsReadUInt16Number(io, &OutputEntries)) goto Error; if (InputEntries > 0x7FFF || OutputEntries > 0x7FFF) goto Error; if (CLUTpoints == 1) goto Error; // Impossible value, 0 for no CLUT and then 2 at least if (!Read16bitTables(self ->ContextID, io, NewLUT, InputChannels, InputEntries)) goto Error; nTabSize = uipow(OutputChannels, CLUTpoints, InputChannels); if (nTabSize == (cmsUInt32Number) -1) goto Error; if (nTabSize > 0) { cmsUInt16Number *T; T = (cmsUInt16Number*) _cmsCalloc(self ->ContextID, nTabSize, sizeof(cmsUInt16Number)); if (T == NULL) goto Error; if (!_cmsReadUInt16Array(io, nTabSize, T)) { _cmsFree(self ->ContextID, T); goto Error; } if (!cmsPipelineInsertStage(NewLUT, cmsAT_END, cmsStageAllocCLut16bit(self ->ContextID, CLUTpoints, InputChannels, OutputChannels, T))) { _cmsFree(self ->ContextID, T); goto Error; } _cmsFree(self ->ContextID, T); } if (!Read16bitTables(self ->ContextID, io, NewLUT, OutputChannels, OutputEntries)) goto Error; *nItems = 1; return NewLUT; Error: if (NewLUT != NULL) cmsPipelineFree(NewLUT); return NULL; cmsUNUSED_PARAMETER(SizeOfTag); }
vul: 0 | __index_level_0__: 21

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2016-3839/ | CWE ID: NOT_APPLICABLE
static int start_audio_datapath(struct a2dp_stream_common *common) { INFO("state %d", common->state); if (common->ctrl_fd == AUDIO_SKT_DISCONNECTED) { INFO("%s AUDIO_SKT_DISCONNECTED", __func__); return -1; } int oldstate = common->state; common->state = AUDIO_A2DP_STATE_STARTING; int a2dp_status = a2dp_command(common, A2DP_CTRL_CMD_START); if (a2dp_status < 0) { ERROR("%s Audiopath start failed (status %d)", __func__, a2dp_status); common->state = oldstate; return -1; } else if (a2dp_status == A2DP_CTRL_ACK_INCALL_FAILURE) { ERROR("%s Audiopath start failed - in call, move to suspended", __func__); common->state = oldstate; return -1; } /* connect socket if not yet connected */ if (common->audio_fd == AUDIO_SKT_DISCONNECTED) { common->audio_fd = skt_connect(A2DP_DATA_PATH, common->buffer_sz); if (common->audio_fd < 0) { common->state = oldstate; return -1; } common->state = AUDIO_A2DP_STATE_STARTED; } return 0; }
static int start_audio_datapath(struct a2dp_stream_common *common) { INFO("state %d", common->state); if (common->ctrl_fd == AUDIO_SKT_DISCONNECTED) { INFO("%s AUDIO_SKT_DISCONNECTED", __func__); return -1; } int oldstate = common->state; common->state = AUDIO_A2DP_STATE_STARTING; int a2dp_status = a2dp_command(common, A2DP_CTRL_CMD_START); if (a2dp_status < 0) { ERROR("%s Audiopath start failed (status %d)", __func__, a2dp_status); common->state = oldstate; return -1; } else if (a2dp_status == A2DP_CTRL_ACK_INCALL_FAILURE) { ERROR("%s Audiopath start failed - in call, move to suspended", __func__); common->state = oldstate; return -1; } /* connect socket if not yet connected */ if (common->audio_fd == AUDIO_SKT_DISCONNECTED) { common->audio_fd = skt_connect(A2DP_DATA_PATH, common->buffer_sz); if (common->audio_fd < 0) { common->state = oldstate; return -1; } common->state = AUDIO_A2DP_STATE_STARTED; } return 0; }
vul: 0 | __index_level_0__: 22

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2017-10971/ | CWE ID: NOT_APPLICABLE
UpdateTouchesForGrab(DeviceIntPtr mouse) { int i; if (!mouse->touch || mouse->deviceGrab.fromPassiveGrab) return; for (i = 0; i < mouse->touch->num_touches; i++) { TouchPointInfoPtr ti = mouse->touch->touches + i; TouchListener *listener = &ti->listeners[0]; GrabPtr grab = mouse->deviceGrab.grab; if (ti->active && CLIENT_BITS(listener->listener) == grab->resource) { listener->listener = grab->resource; listener->level = grab->grabtype; listener->state = LISTENER_IS_OWNER; listener->window = grab->window; if (grab->grabtype == CORE || grab->grabtype == XI || !xi2mask_isset(grab->xi2mask, mouse, XI_TouchBegin)) listener->type = LISTENER_POINTER_GRAB; else listener->type = LISTENER_GRAB; if (listener->grab) FreeGrab(listener->grab); listener->grab = AllocGrab(grab); } } }
UpdateTouchesForGrab(DeviceIntPtr mouse) { int i; if (!mouse->touch || mouse->deviceGrab.fromPassiveGrab) return; for (i = 0; i < mouse->touch->num_touches; i++) { TouchPointInfoPtr ti = mouse->touch->touches + i; TouchListener *listener = &ti->listeners[0]; GrabPtr grab = mouse->deviceGrab.grab; if (ti->active && CLIENT_BITS(listener->listener) == grab->resource) { listener->listener = grab->resource; listener->level = grab->grabtype; listener->state = LISTENER_IS_OWNER; listener->window = grab->window; if (grab->grabtype == CORE || grab->grabtype == XI || !xi2mask_isset(grab->xi2mask, mouse, XI_TouchBegin)) listener->type = LISTENER_POINTER_GRAB; else listener->type = LISTENER_GRAB; if (listener->grab) FreeGrab(listener->grab); listener->grab = AllocGrab(grab); } } }
vul: 0 | __index_level_0__: 23

CVE ID: NOT_APPLICABLE | CVE Page: https://www.cvedetails.com/cve/CVE-2013-0910/ | CWE ID: NOT_APPLICABLE
virtual ResourceContext* GetResourceContext() { return context_; }
virtual ResourceContext* GetResourceContext() { return context_; }
vul: 0 | __index_level_0__: 24
End of preview.
Downloads last month: 41