query (string, lengths 8–6.75k) | document (string, lengths 9–1.89M) | negatives (list, length 19) | metadata (dict) |
---|---|---|---|
addPlanToResponse creates a physicalPlanInfo that adds parent as the parent of info.
|
func addPlanToResponse(parent PhysicalPlan, info *physicalPlanInfo) *physicalPlanInfo {
	np := parent.Copy()
	np.SetChildren(info.p)
	ret := &physicalPlanInfo{p: np, cost: info.cost, count: info.count, reliable: info.reliable}
	if _, ok := parent.(*MaxOneRow); ok {
		ret.count = 1
		ret.reliable = true
	}
	return ret
}
|
[
"func CreateCreateExecutionPlanResponse() (response *CreateExecutionPlanResponse) {\n\tresponse = &CreateExecutionPlanResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func (a *MealPlanningApiService) AddToMealPlan(ctx context.Context, username string) ApiAddToMealPlanRequest {\n\treturn ApiAddToMealPlanRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tusername: username,\n\t}\n}",
"func createPlanTest(e *httpexpect.Expect, t *testing.T) (pID int) {\n\ttestCases := []testCase{\n\t\tnotAdminTestCase,\n\t\t{\n\t\t\tToken: testCtx.Admin.Token,\n\t\t\tStatus: http.StatusInternalServerError,\n\n\t\t\tSent: []byte(`{Plu}`),\n\t\t\tBodyContains: []string{\"Création de plan, décodage :\"}},\n\t\t{\n\t\t\tToken: testCtx.Admin.Token,\n\t\t\tStatus: http.StatusBadRequest,\n\t\t\tSent: []byte(`{\"Descript\":null}`),\n\t\t\tBodyContains: []string{\"Création d'un plan : Name incorrect\"}},\n\t\t{\n\t\t\tToken: testCtx.Admin.Token,\n\t\t\tStatus: http.StatusCreated,\n\t\t\tIDName: `\"id\"`,\n\t\t\tSent: []byte(`{\"name\":\"Essai de plan\", \"descript\":\"Essai de description\",\"first_year\":2015,\"last_year\":2025}`),\n\t\t\tBodyContains: []string{\"Plan\", `\"name\":\"Essai de plan\"`, `\"first_year\":2015`,\n\t\t\t\t`\"last_year\":2025`, `\"descript\":\"Essai de description\"`}},\n\t}\n\tf := func(tc testCase) *httpexpect.Response {\n\t\treturn e.POST(\"/api/plans\").WithHeader(\"Authorization\", \"Bearer \"+tc.Token).\n\t\t\tWithBytes(tc.Sent).Expect()\n\t}\n\tfor _, r := range chkTestCases(testCases, f, \"CreatePlan\", &pID) {\n\t\tt.Error(r)\n\t}\n\treturn pID\n}",
"func (c *Client) Plan(environment, module, planname string) (*Plan, error) {\n\tpayload := &Plan{}\n\treq := c.resty.R().\n\t\tSetResult(payload).\n\t\tSetPathParams(map[string]string{\n\t\t\t\"module\": module,\n\t\t\t\"planname\": planname,\n\t\t})\n\tif environment != \"\" {\n\t\treq.SetQueryParam(\"environment\", environment)\n\t}\n\n\treplacer := strings.NewReplacer(\"{module}\", module, \"{planname}\", planname)\n\n\tr, err := req.Get(orchPlanName)\n\tif err = ProcessError(r, err, fmt.Sprintf(\"%s error: %s\", replacer.Replace(orchPlanName), r.Status())); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn payload, nil\n}",
"func (spc *InsuranceContract) LinkPolicyToPlan(ctx contractapi.TransactionContextInterface, policyID string, planID string) (string, error) {\n\t// get plan info\n\tvar plan Plans\n\tplanBytes, err := ctx.GetStub().GetState(planID)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to read from world state: %v\", err)\n\t}\n\t//check if ID already exists (return the state of the ID by checking the world state)\n\tif planBytes == nil {\n\t\treturn \"\", fmt.Errorf(\"plan does not exists for planID %s\", planID)\n\t}\n\n\terr = json.Unmarshal(planBytes, &plan)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unmarshall planbytes error: %v\", err)\n\t}\n\t// // get policy info\n\t// var policy Policy\n\t// policybytes, err := ctx.GetStub().GetState(policyID)\n\t// if err != nil {\n\t// \treturn \"\", fmt.Errorf(\"failed to read from world state: %v\", err)\n\t// }\n\t// //check if ID already exists (return the state of the ID by checking the world state)\n\t// if policybytes != nil {\n\t// \treturn \"\", fmt.Errorf(\"confirmed the plan already exists for planID %s\", planID)\n\t// }\n\n\t// json.Unmarshal(policybytes, &policy)\n\n\tplan.PlanOptions = append(plan.PlanOptions, policyID)\n\tplanBytes, err = json.Marshal(plan)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = ctx.GetStub().PutState(planID, planBytes)\n\tif err != nil {\n\t\treturn \"\", err\n\n\t}\n\n\treturn \"Policy addded to Plan\", nil\n}",
"func (o *ReconciliationTargetResource) HasPlan() bool {\n\tif o != nil && o.Plan != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (k msgServer) CreateRatioPlan(goCtx context.Context, msg *types.MsgCreateRatioPlan) (*types.MsgCreateRatioPlanResponse, error) {\n\tctx := sdk.UnwrapSDKContext(goCtx)\n\n\tif err := msg.ValidateBasic(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tratioPlan := k.Keeper.CreateRatioPlan(ctx, msg, types.PlanTypePrivate)\n\n\tctx.EventManager().EmitEvents(sdk.Events{\n\t\tsdk.NewEvent(\n\t\t\ttypes.EventTypeCreateRatioPlan,\n\t\t\tsdk.NewAttribute(types.AttributeKeyFarmingPoolAddress, msg.GetFarmingPoolAddress()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyRewardPoolAddress, ratioPlan.RewardPoolAddress),\n\t\t\tsdk.NewAttribute(types.AttributeKeyStartTime, msg.StartTime.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyEndTime, msg.EndTime.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyEpochRatio, fmt.Sprint(msg.EpochRatio)),\n\t\t),\n\t})\n\n\treturn &types.MsgCreateRatioPlanResponse{}, nil\n}",
"func (s *Server) SavePlan() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar requestDto Plan\n\t\tdecodeErr := json.NewDecoder(r.Body).Decode(&requestDto)\n\t\tif decodeErr != nil {\n\t\t\tSendErrorJSON(http.StatusBadRequest, decodeErr.Error(), w)\n\t\t\treturn\n\t\t}\n\t\tdbSession := s.Session.Copy()\n\t\tdefer dbSession.Close()\n\t\tplans := dbSession.DB(PLAN_DB_NAME).C(PLAN_COLLECTION_NAME)\n\n\t\t//Check if plan with this title already exists\n\t\tsameTitlePlans := []Plan{}\n\t\tcheckTitleErr := plans.Find(bson.M{\"createdBy\": requestDto.CreatedBy, \"title\": requestDto.Title}).All(&sameTitlePlans)\n\t\tif checkTitleErr != nil || len(sameTitlePlans) > 0 {\n\t\t\tSendErrorJSON(http.StatusBadRequest, ERR_TITLE_ALREADY_EXISTS, w)\n\t\t\treturn\n\t\t}\n\n\t\t//Save plan with generated id\n\t\trequestDto.Id = bson.NewObjectId()\n\t\tsaveError := plans.Insert(&requestDto)\n\t\tif saveError != nil {\n\t\t\tSendErrorJSON(http.StatusBadRequest, ERR_NO_PLAN_FOUND, w)\n\t\t} else {\n\t\t\tNewResponse(http.StatusOK, requestDto).SendJSON(w)\n\t\t}\n\t\treturn\n\t}\n}",
"func (o *OrganizationRequest) GetPlanOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Plan, true\n}",
"func NewAddResponse(result int) *goa_starterpb.AddResponse {\n\tmessage := &goa_starterpb.AddResponse{}\n\tmessage.Field = int32(result)\n\treturn message\n}",
"func NewPlanCommand(cfgFactory config.Factory) *cobra.Command {\n\tplanCmd := &cobra.Command{\n\t\tUse: \"plan\",\n\t\tShort: \"List phases\",\n\t\tLong: cmdLong[1:],\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcfg, err := cfgFactory()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tp := &phase.Cmd{Config: cfg}\n\t\t\tphases, err := p.Plan()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttw := util.NewTabWriter(cmd.OutOrStdout())\n\t\t\tdefer tw.Flush()\n\t\t\tfmt.Fprintf(tw, \"GROUP\\tPHASE\\n\")\n\t\t\tfor group, phaseList := range phases {\n\t\t\t\tfmt.Fprintf(tw, \"%s\\t\\n\", group)\n\t\t\t\tfor _, phase := range phaseList {\n\t\t\t\t\tfmt.Fprintf(tw, \"\\t%s\\n\", phase)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\treturn planCmd\n}",
"func (s *TxnStore) AddResponse(id uint64, res *http.Response, body []byte, edited bool) error {\n\t// Body is already read and closed, we will add it later\n\tresDump, err := httputil.DumpResponse(res, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresDump = append(resDump, body...)\n\n\terr = s.Update(func(txn *badger.Txn) error {\n\t\t// TODO: what if the key already exists\n\t\treturn txn.Set(Key{ID: id, Type: ResType, Edited: edited}.Bytes(), resDump)\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif s.OnUpdate != nil {\n\t\ts.OnUpdate(id)\n\t}\n\treturn nil\n}",
"func (m *MockPoller) AddMockResponse(repo string, in pollingv1.PollStatus, out pollingv1.PollStatus) {\n\tm.responses[mockKey(repo, in)] = out\n}",
"func (s *addOnsImpl) Create(planCode string, a AddOn) (*Response, *AddOn, error) {\n\taction := fmt.Sprintf(\"plans/%s/add_ons\", planCode)\n\treq, err := s.client.newRequest(\"POST\", action, nil, a)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar dst AddOn\n\tresp, err := s.client.do(req, &dst)\n\n\treturn resp, &dst, err\n}",
"func StorePlan(plan *Plan) (err error) {\n\tos.Remove(\"/tmp/plan\")\n\tf, err := os.Create(\"/tmp/plan\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\tjson.NewEncoder(f).Encode(plan)\n\treturn err\n}",
"func (a *MealPlanningApiService) AddMealPlanTemplate(ctx context.Context, username string) ApiAddMealPlanTemplateRequest {\n\treturn ApiAddMealPlanTemplateRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tusername: username,\n\t}\n}",
"func (m *VirtualEndpointServicePlansCloudPcServicePlanItemRequestBuilder) ToPatchRequestInformation(ctx context.Context, body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CloudPcServicePlanable, requestConfiguration *VirtualEndpointServicePlansCloudPcServicePlanItemRequestBuilderPatchRequestConfiguration)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {\n requestInfo := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewRequestInformation()\n requestInfo.UrlTemplate = m.BaseRequestBuilder.UrlTemplate\n requestInfo.PathParameters = m.BaseRequestBuilder.PathParameters\n requestInfo.Method = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.PATCH\n requestInfo.Headers.Add(\"Accept\", \"application/json\")\n err := requestInfo.SetContentFromParsable(ctx, m.BaseRequestBuilder.RequestAdapter, \"application/json\", body)\n if err != nil {\n return nil, err\n }\n if requestConfiguration != nil {\n requestInfo.Headers.AddAll(requestConfiguration.Headers)\n requestInfo.AddRequestOptions(requestConfiguration.Options)\n }\n return requestInfo, nil\n}",
"func (o *ReconciliationTargetResource) SetPlan(v []ReconciliationTargetPlan) {\n\to.Plan = v\n}",
"func AddMealPlan(q Queryable, mp *mpdata.MealPlan) (err error) {\n\tresult, err := q.Exec(\"INSERT INTO mealplan VALUES (NULL, ?, ?, ?)\", mp.Notes, mp.StartDate, mp.EndDate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmpID, err := result.LastInsertId()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmp.ID = uint64(mpID)\n\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
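The row above pairs a one-line docstring with the TiDB planner helper it describes. As a minimal, self-contained sketch of the statistics propagation that addPlanToResponse implements — the types below are simplified stand-ins, not the real TiDB PhysicalPlan/physicalPlanInfo definitions:

```go
package main

import "fmt"

// Simplified stand-in for physicalPlanInfo (an assumption, not the TiDB type).
type physicalPlanInfo struct {
	op       string
	cost     float64
	count    float64
	reliable bool
}

// wrap mimics addPlanToResponse: the new parent inherits the child's
// cost/count estimates, and a MaxOneRow-style parent forces the count
// to 1 and marks the estimate reliable.
func wrap(op string, maxOneRow bool, info *physicalPlanInfo) *physicalPlanInfo {
	ret := &physicalPlanInfo{op: op, cost: info.cost, count: info.count, reliable: info.reliable}
	if maxOneRow {
		ret.count = 1
		ret.reliable = true
	}
	return ret
}

func main() {
	scan := &physicalPlanInfo{op: "TableScan", cost: 100, count: 42}
	fmt.Printf("%+v\n", *wrap("MaxOneRow", true, scan)) // count forced to 1, reliable set
}
```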
enforceProperty creates a physicalPlanInfo that satisfies the required property by adding a Sort or Limit as the parent of the given physical plan.
|
func enforceProperty(prop *requiredProperty, info *physicalPlanInfo) *physicalPlanInfo {
	if info.p == nil {
		return info
	}
	if len(prop.props) != 0 {
		items := make([]*ByItems, 0, len(prop.props))
		for _, col := range prop.props {
			items = append(items, &ByItems{Expr: col.col, Desc: col.desc})
		}
		sort := Sort{
			ByItems:   items,
			ExecLimit: prop.limit,
		}.init(info.p.Allocator(), info.p.context())
		sort.SetSchema(info.p.Schema())
		info = addPlanToResponse(sort, info)
		count := info.count
		if prop.limit != nil {
			count = float64(prop.limit.Offset + prop.limit.Count)
			info.reliable = true
		}
		info.cost += sortCost(count)
	} else if prop.limit != nil {
		limit := Limit{Offset: prop.limit.Offset, Count: prop.limit.Count}.init(info.p.Allocator(), info.p.context())
		limit.SetSchema(info.p.Schema())
		info = addPlanToResponse(limit, info)
		info.reliable = true
	}
	if prop.limit != nil && float64(prop.limit.Count) < info.count {
		info.count = float64(prop.limit.Count)
	}
	return info
}
|
[
"func (p *Selection) convert2PhysicalPlanEnforce(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tchild := p.children[0].(LogicalPlan)\n\tinfo, err := child.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif prop.limit != nil && len(prop.props) > 0 {\n\t\tif t, ok := info.p.(physicalDistSQLPlan); ok {\n\t\t\tt.addTopN(p.ctx, prop)\n\t\t} else if _, ok := info.p.(*Selection); !ok {\n\t\t\tinfo = p.appendSelToInfo(info)\n\t\t}\n\t\tinfo = enforceProperty(prop, info)\n\t} else if len(prop.props) != 0 {\n\t\tinfo = &physicalPlanInfo{cost: math.MaxFloat64}\n\t}\n\treturn info, nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tplanInfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif planInfo != nil {\n\t\treturn planInfo, nil\n\t}\n\tlimit := prop.limit\n\tif len(prop.props) == 0 {\n\t\tplanInfo, err = p.convert2PhysicalPlanHash()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tstreamInfo, err := p.convert2PhysicalPlanStream(removeLimit(prop))\n\tif planInfo == nil || streamInfo.cost < planInfo.cost {\n\t\tplanInfo = streamInfo\n\t}\n\tplanInfo = enforceProperty(limitProperty(limit), planInfo)\n\terr = p.storePlanInfo(prop, planInfo)\n\treturn planInfo, errors.Trace(err)\n}",
"func (p *LogicalJoin) convert2PhysicalPlanLeft(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tSmallTable: 1,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = LeftOuterJoin\n\t}\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tvar lInfo *physicalPlanInfo\n\tvar err error\n\tif innerJoin {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(removeLimit(lProp))\n\t} else {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(convertLimitOffsetToCount(lProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *DataSource) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tinfo, err = p.tryToConvert2DummyScan(prop)\n\tif info != nil || err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tclient := p.ctx.GetClient()\n\tmemDB := infoschema.IsMemoryDB(p.DBName.L)\n\tisDistReq := !memDB && client != nil && client.SupportRequestType(kv.ReqTypeSelect, 0)\n\tif !isDistReq {\n\t\tmemTable := PhysicalMemTable{\n\t\t\tDBName: p.DBName,\n\t\t\tTable: p.tableInfo,\n\t\t\tColumns: p.Columns,\n\t\t\tTableAsName: p.TableAsName,\n\t\t}.init(p.allocator, p.ctx)\n\t\tmemTable.SetSchema(p.schema)\n\t\trb := &ranger.Builder{Sc: p.ctx.GetSessionVars().StmtCtx}\n\t\tmemTable.Ranges = rb.BuildTableRanges(ranger.FullRange)\n\t\tinfo = &physicalPlanInfo{p: memTable}\n\t\tinfo = enforceProperty(prop, info)\n\t\tp.storePlanInfo(prop, info)\n\t\treturn info, nil\n\t}\n\tindices, includeTableScan := availableIndices(p.indexHints, p.tableInfo)\n\tif includeTableScan {\n\t\tinfo, err = p.convert2TableScan(prop)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tif !includeTableScan || p.need2ConsiderIndex(prop) {\n\t\tfor _, index := range indices {\n\t\t\tindexInfo, err := p.convert2IndexScan(prop, index)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tif info == nil || indexInfo.cost < info.cost {\n\t\t\t\tinfo = indexInfo\n\t\t\t}\n\t\t}\n\t}\n\treturn info, errors.Trace(p.storePlanInfo(prop, info))\n}",
"func addPlanToResponse(parent PhysicalPlan, info *physicalPlanInfo) *physicalPlanInfo {\n\tnp := parent.Copy()\n\tnp.SetChildren(info.p)\n\tret := &physicalPlanInfo{p: np, cost: info.cost, count: info.count, reliable: info.reliable}\n\tif _, ok := parent.(*MaxOneRow); ok {\n\t\tret.count = 1\n\t\tret.reliable = true\n\t}\n\treturn ret\n}",
"func MakePhysicalPlan(infra *PhysicalInfrastructure) PhysicalPlan {\n\treturn PhysicalPlan{\n\t\tPhysicalInfrastructure: infra,\n\t}\n}",
"func removeLimit(prop *requiredProperty) *requiredProperty {\n\tret := &requiredProperty{\n\t\tprops: prop.props,\n\t\tsortKeyLen: prop.sortKeyLen,\n\t}\n\treturn ret\n}",
"func (p *LogicalJoin) convert2PhysicalPlanSemi(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashSemiJoin{\n\t\tWithAux: LeftOuterSemiJoin == p.JoinType,\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tAnti: p.anti,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tif p.JoinType == SemiJoin {\n\t\tlProp = removeLimit(lProp)\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif p.JoinType == SemiJoin {\n\t\tresultInfo.count = lInfo.count * selectionFactor\n\t} else {\n\t\tresultInfo.count = lInfo.count\n\t}\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else if p.JoinType == SemiJoin {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanStream(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tfor _, aggFunc := range p.AggFuncs {\n\t\tif aggFunc.GetMode() == expression.FinalMode {\n\t\t\treturn &physicalPlanInfo{cost: math.MaxFloat64}, nil\n\t\t}\n\t}\n\tagg := PhysicalAggregation{\n\t\tAggType: StreamedAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\t// TODO: Consider distinct key.\n\tinfo := &physicalPlanInfo{cost: math.MaxFloat64}\n\tgbyCols := p.groupByCols\n\tif len(gbyCols) != len(p.GroupByItems) {\n\t\t// group by a + b is not interested in any order.\n\t\treturn info, nil\n\t}\n\tisSortKey := make([]bool, len(gbyCols))\n\tnewProp := &requiredProperty{\n\t\tprops: make([]*columnProp, 0, len(gbyCols)),\n\t}\n\tfor _, pro := range prop.props {\n\t\tidx := p.getGbyColIndex(pro.col)\n\t\tif idx == -1 {\n\t\t\treturn info, nil\n\t\t}\n\t\tisSortKey[idx] = true\n\t\t// We should add columns in aggregation in order to keep index right.\n\t\tnewProp.props = append(newProp.props, &columnProp{col: gbyCols[idx], desc: pro.desc})\n\t}\n\tnewProp.sortKeyLen = len(newProp.props)\n\tfor i, col := range gbyCols {\n\t\tif !isSortKey[i] {\n\t\t\tnewProp.props = append(newProp.props, &columnProp{col: col})\n\t\t}\n\t}\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(newProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tinfo = addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * cpuFactor\n\tinfo.count = info.count * aggFactor\n\treturn info, nil\n}",
"func (p *LogicalApply) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tif p.JoinType == InnerJoin || p.JoinType == LeftOuterJoin {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanLeft(&requiredProperty{}, p.JoinType == InnerJoin)\n\t} else {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanSemi(&requiredProperty{})\n\t}\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tswitch info.p.(type) {\n\tcase *PhysicalHashJoin, *PhysicalHashSemiJoin:\n\t\tap := PhysicalApply{\n\t\t\tPhysicalJoin: info.p,\n\t\t\tOuterSchema: p.corCols,\n\t\t}.init(p.allocator, p.ctx)\n\t\tap.SetChildren(info.p.Children()...)\n\t\tap.SetSchema(info.p.Schema())\n\t\tinfo.p = ap\n\tdefault:\n\t\tinfo.cost = math.MaxFloat64\n\t\tinfo.p = nil\n\t}\n\tinfo = enforceProperty(prop, info)\n\tp.storePlanInfo(prop, info)\n\treturn info, nil\n}",
"func (mq *metadataQuery) makePlan() (*models.PhysicalPlan, error) {\n\t//FIXME need using storage's replica state ???\n\tstorageNodes, err := mq.runtime.stateMgr.GetQueryableReplicas(mq.database)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstorageNodesLen := len(storageNodes)\n\tif storageNodesLen == 0 {\n\t\treturn nil, constants.ErrReplicaNotFound\n\t}\n\tcurBroker := mq.runtime.stateMgr.GetCurrentNode()\n\tcurBrokerIndicator := curBroker.Indicator()\n\tphysicalPlan := &models.PhysicalPlan{\n\t\tDatabase: mq.database,\n\t\tRoot: models.Root{\n\t\t\tIndicator: curBrokerIndicator,\n\t\t\tNumOfTask: int32(storageNodesLen),\n\t\t},\n\t}\n\treceivers := []models.StatelessNode{curBroker}\n\tfor storageNode, shardIDs := range storageNodes {\n\t\tphysicalPlan.AddLeaf(models.Leaf{\n\t\t\tBaseNode: models.BaseNode{\n\t\t\t\tParent: curBrokerIndicator,\n\t\t\t\tIndicator: storageNode,\n\t\t\t},\n\t\t\tShardIDs: shardIDs,\n\t\t\tReceivers: receivers,\n\t\t})\n\t}\n\treturn physicalPlan, nil\n}",
"func CreatePhysicalNode(id NodeID, spec PhysicalProcedureSpec) *PhysicalPlanNode {\n\treturn &PhysicalPlanNode{\n\t\tid: id,\n\t\tSpec: spec,\n\t}\n}",
"func (p *LogicalJoin) convert2PhysicalPlanRight(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallRight := true\n\tfor _, col := range prop.props {\n\t\tif !rChild.Schema().Contains(col.col) {\n\t\t\tallRight = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = RightOuterJoin\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trProp := prop\n\tif !allRight {\n\t\trProp = &requiredProperty{}\n\t} else {\n\t\trProp = replaceColsInPropBySchema(rProp, rChild.Schema())\n\t}\n\tvar rInfo *physicalPlanInfo\n\tif innerJoin {\n\t\trInfo, err = rChild.convert2PhysicalPlan(removeLimit(rProp))\n\t} else {\n\t\trInfo, err = rChild.convert2PhysicalPlan(convertLimitOffsetToCount(rProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allRight {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func TurnNominalSortIntoProj(p PhysicalPlan, onlyColumn bool, orderByItems []*util.ByItems) PhysicalPlan {\n\tif onlyColumn {\n\t\treturn p.Children()[0]\n\t}\n\n\tnumOrderByItems := len(orderByItems)\n\tchildPlan := p.Children()[0]\n\n\tbottomProjSchemaCols := make([]*expression.Column, 0, len(childPlan.Schema().Columns)+numOrderByItems)\n\tbottomProjExprs := make([]expression.Expression, 0, len(childPlan.Schema().Columns)+numOrderByItems)\n\tfor _, col := range childPlan.Schema().Columns {\n\t\tnewCol := col.Clone().(*expression.Column)\n\t\tnewCol.Index = childPlan.Schema().ColumnIndex(newCol)\n\t\tbottomProjSchemaCols = append(bottomProjSchemaCols, newCol)\n\t\tbottomProjExprs = append(bottomProjExprs, newCol)\n\t}\n\n\tfor _, item := range orderByItems {\n\t\titemExpr := item.Expr\n\t\tif _, isScalarFunc := itemExpr.(*expression.ScalarFunction); !isScalarFunc {\n\t\t\tcontinue\n\t\t}\n\t\tbottomProjExprs = append(bottomProjExprs, itemExpr)\n\t\tnewArg := &expression.Column{\n\t\t\tUniqueID: p.SCtx().GetSessionVars().AllocPlanColumnID(),\n\t\t\tRetType: itemExpr.GetType(),\n\t\t\tIndex: len(bottomProjSchemaCols),\n\t\t}\n\t\tbottomProjSchemaCols = append(bottomProjSchemaCols, newArg)\n\t}\n\n\tchildProp := p.GetChildReqProps(0).CloneEssentialFields()\n\tbottomProj := PhysicalProjection{\n\t\tExprs: bottomProjExprs,\n\t\tAvoidColumnEvaluator: false,\n\t}.Init(p.SCtx(), childPlan.StatsInfo().ScaleByExpectCnt(childProp.ExpectedCnt), p.SelectBlockOffset(), childProp)\n\tbottomProj.SetSchema(expression.NewSchema(bottomProjSchemaCols...))\n\tbottomProj.SetChildren(childPlan)\n\n\ttopProjExprs := make([]expression.Expression, 0, childPlan.Schema().Len())\n\tfor i := range childPlan.Schema().Columns {\n\t\tcol := childPlan.Schema().Columns[i].Clone().(*expression.Column)\n\t\tcol.Index = i\n\t\ttopProjExprs = append(topProjExprs, col)\n\t}\n\ttopProj := PhysicalProjection{\n\t\tExprs: topProjExprs,\n\t\tAvoidColumnEvaluator: false,\n\t}.Init(p.SCtx(), childPlan.StatsInfo().ScaleByExpectCnt(childProp.ExpectedCnt), p.SelectBlockOffset(), childProp)\n\ttopProj.SetSchema(childPlan.Schema().Clone())\n\ttopProj.SetChildren(bottomProj)\n\n\tif origChildProj, isChildProj := childPlan.(*PhysicalProjection); isChildProj {\n\t\trefine4NeighbourProj(bottomProj, origChildProj)\n\t}\n\n\treturn topProj\n}",
"func (plan *Plan) AssignPropertiesFromPlan(source *v1alpha1api20201201storage.Plan) error {\n\n\t// Name\n\tif source.Name != nil {\n\t\tname := *source.Name\n\t\tplan.Name = &name\n\t} else {\n\t\tplan.Name = nil\n\t}\n\n\t// Product\n\tif source.Product != nil {\n\t\tproduct := *source.Product\n\t\tplan.Product = &product\n\t} else {\n\t\tplan.Product = nil\n\t}\n\n\t// PromotionCode\n\tif source.PromotionCode != nil {\n\t\tpromotionCode := *source.PromotionCode\n\t\tplan.PromotionCode = &promotionCode\n\t} else {\n\t\tplan.PromotionCode = nil\n\t}\n\n\t// Publisher\n\tif source.Publisher != nil {\n\t\tpublisher := *source.Publisher\n\t\tplan.Publisher = &publisher\n\t} else {\n\t\tplan.Publisher = nil\n\t}\n\n\t// No error\n\treturn nil\n}",
"func (configuration immutableConfiguration) newProperty(requiredName string, optionalValue interface{}, orphanFlag bool) Property {\n\n\tvalue := optionalValue\n\treturn immutableProperty{iName: requiredName, iValue: value, iOrphan: orphanFlag}\n}",
"func convertLimitOffsetToCount(prop *requiredProperty) *requiredProperty {\n\tret := &requiredProperty{\n\t\tprops: prop.props,\n\t\tsortKeyLen: prop.sortKeyLen,\n\t}\n\tif prop.limit != nil {\n\t\tret.limit = &Limit{\n\t\t\tCount: prop.limit.Offset + prop.limit.Count,\n\t\t}\n\t}\n\treturn ret\n}",
"func (configuration *mutableConfiguration) newProperty(requiredName string, optionalValue interface{}, orphanFlag bool) Property {\n\n\tvalue := optionalValue\n\treturn &mutableProperty{iName: requiredName, iValue: value, iOrphan: orphanFlag}\n}",
"func validateHeapProperty(queue GoQueue) error {\n\n\tqLen := len(queue.items)\n\n\tif len(queue.items) <= 1 {\n\t\treturn nil\n\t}\n\n\tfor i, item := range queue.items {\n\t\t//The root item has no parent. We'll apply a different check.\n\t\tif i == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\titemParent := queue.items[computeParentIdx(i)]\n\t\tif comp, err := item.k.CompareTo(itemParent.k); err == nil && comp < 0 {\n\t\t\treturn fmt.Errorf(\"Heap property violated: item %+v had parent %+v.\", item, itemParent)\n\t\t}\n\t}\n\n\t//Checks for the root element\n\tlIdx, rIdx := computeChildIndices(0)\n\tif lIdx <= qLen {\n\t\tif comp, err := queue.items[0].k.CompareTo(queue.items[lIdx].k); err == nil && comp > 0 {\n\t\t\treturn fmt.Errorf(\"Heap property violated: root %+v had child %+v\", queue.items[0], queue.items[lIdx])\n\t\t}\n\n\t}\n\tif rIdx <= qLen {\n\t\tif comp, err := queue.items[0].k.CompareTo(queue.items[rIdx].k); err == nil && comp > 0 {\n\t\t\treturn fmt.Errorf(\"Heap property violated: root %+v had child %+v\", queue.items[0], queue.items[rIdx])\n\t\t}\n\t}\n\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
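enforceProperty's branch structure is worth spelling out: a required sort order is satisfied with a Sort that absorbs any limit as an executor-side top-N, a bare limit gets a Limit operator, and in both cases the row-count estimate is clamped to the limit's count. A self-contained sketch of that decision logic, again with stand-in types rather than TiDB's API:

```go
package main

import "fmt"

// Stand-ins (assumptions, not the real TiDB types).
type limitSpec struct{ offset, count uint64 }

type planInfo struct {
	ops      []string
	count    float64
	reliable bool
}

// enforce mirrors enforceProperty's shape: sort keys win and absorb the
// limit as a top-N; otherwise a bare limit adds a Limit operator; either
// way the count estimate is clamped to the limit's count.
func enforce(sortKeys int, limit *limitSpec, info *planInfo) *planInfo {
	if sortKeys > 0 {
		info.ops = append(info.ops, "Sort") // ExecLimit carries the limit, if any
		if limit != nil {
			info.reliable = true
		}
	} else if limit != nil {
		info.ops = append(info.ops, "Limit")
		info.reliable = true
	}
	if limit != nil && float64(limit.count) < info.count {
		info.count = float64(limit.count)
	}
	return info
}

func main() {
	info := &planInfo{ops: []string{"TableScan"}, count: 1000}
	fmt.Printf("%+v\n", *enforce(1, &limitSpec{offset: 0, count: 10}, info))
	// {ops:[TableScan Sort] count:10 reliable:true}
}
```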
removeLimit removes the limit from prop.
|
func removeLimit(prop *requiredProperty) *requiredProperty {
	ret := &requiredProperty{
		props:      prop.props,
		sortKeyLen: prop.sortKeyLen,
	}
	return ret
}
|
[
"func (o *GetRecofrienderV2DismissedPageParams) SetLimit(limit int64) {\n\to.Limit = limit\n}",
"func (m *ComparatorMutation) ClearComparatorrulelimit() {\n\tm.clearedcomparatorrulelimit = true\n}",
"func (l *Limiter) Remove(id string) error {\n\treturn l.removeLimit(id)\n}",
"func (m *RuleMutation) ClearRulelimitrule() {\n\tm.clearedrulelimitrule = true\n}",
"func (m *ComparatorMutation) RemoveComparatorrulelimitIDs(ids ...int) {\n\tif m.removedcomparatorrulelimit == nil {\n\t\tm.removedcomparatorrulelimit = make(map[int]struct{})\n\t}\n\tfor i := range ids {\n\t\tm.removedcomparatorrulelimit[ids[i]] = struct{}{}\n\t}\n}",
"func (l *Limiter) QPSUnlimit(label string) {\n\tl.qpsLimiter.Delete(label)\n}",
"func (p *Parameter) setLimit(limit int) *Parameter {\n\tp.Limit = limit\n\treturn p\n}",
"func (in *ActionIncomingPaymentIndexInput) SetLimit(value int64) *ActionIncomingPaymentIndexInput {\n\tin.Limit = value\n\n\tif in._selectedParameters == nil {\n\t\tin._selectedParameters = make(map[string]interface{})\n\t}\n\n\tin._selectedParameters[\"Limit\"] = nil\n\treturn in\n}",
"func (m *RuleMutation) RemoveRulelimitruleIDs(ids ...int) {\n\tif m.removedrulelimitrule == nil {\n\t\tm.removedrulelimitrule = make(map[int]struct{})\n\t}\n\tfor i := range ids {\n\t\tm.removedrulelimitrule[ids[i]] = struct{}{}\n\t}\n}",
"func PullLimit(v int) option {\n\treturn func(o *Options) option {\n\t\tprevious := o.pullLimit\n\t\to.pullLimit = v\n\t\treturn PullLimit(previous)\n\t}\n}",
"func MaxAllowedLimit(maxAllowed int) func(*listingConfig) {\n\treturn func(l *listingConfig) {\n\t\to := listing.DecodeMaxAllowedLimit(maxAllowed)\n\t\tl.optionsDecoder = append(l.optionsDecoder, o)\n\t}\n}",
"func (o *GetChamberStateParams) SetLimit(limit *int64) {\n\to.Limit = limit\n}",
"func (s *PreciseStrategy) SetLimit(limit int) {\n\tif limit < 1 {\n\t\tlimit = 1\n\t}\n\ts.mu.Lock()\n\ts.limit = int32(limit)\n\ts.mu.Unlock()\n}",
"func (o *FilteredListParams) SetLimit(limit *int32) {\n\to.Limit = limit\n}",
"func (o *QueryRulesMixin0Params) SetLimit(limit *int64) {\n\to.Limit = limit\n}",
"func (o QuotaExceededInfoResponseOutput) Limit() pulumi.Float64Output {\n\treturn o.ApplyT(func(v QuotaExceededInfoResponse) float64 { return v.Limit }).(pulumi.Float64Output)\n}",
"func (n *ClusterMetric) Limit(limit int) {\n\n\tif limit == 0 {\n\t\treturn\n\t}\n\n\tfor metricType, samples := range n.Metrics {\n\t\tif len(samples) < limit {\n\t\t\tcontinue\n\t\t}\n\t\tn.Metrics[metricType] = samples[:limit]\n\t}\n}",
"func (d *Dao) DelLimitUser(c context.Context, mid int64) (affect int64, err error) {\n\tres, err := d.db.Exec(c, _delLimitUserSQL, mid)\n\tif err != nil {\n\t\tlog.Error(\"del limit user(%d) error(%v)\", mid, err)\n\t\treturn\n\t}\n\treturn res.RowsAffected()\n}",
"func (opts *PqParams) SetPopLimit(v int64) *PqParams {\n\tif v < 0 {\n\t\tpanic(\"Value must be positive\")\n\t}\n\topts.popLimit = v\n\treturn opts\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
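removeLimit keeps the ordering requirement (props and sortKeyLen) and drops only the limit; the nil zero value of the limit field does the work. A trivial stand-alone illustration, with stand-in types assumed as before:

```go
package main

import "fmt"

// Stand-ins for requiredProperty/Limit (assumptions, not the TiDB types).
type limit struct{ offset, count uint64 }

type requiredProp struct {
	props      []string
	sortKeyLen int
	limit      *limit
}

// stripLimit mirrors removeLimit: the ordering requirement is copied,
// while the limit field is left at its nil zero value.
func stripLimit(p *requiredProp) *requiredProp {
	return &requiredProp{props: p.props, sortKeyLen: p.sortKeyLen}
}

func main() {
	p := &requiredProp{props: []string{"a", "b"}, sortKeyLen: 2, limit: &limit{10, 5}}
	fmt.Println(stripLimit(p).limit == nil) // true: only the limit is dropped
}
```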
convertLimitOffsetToCount changes the limit(offset, count) in prop to limit(0, offset + count).
|
func convertLimitOffsetToCount(prop *requiredProperty) *requiredProperty {
	ret := &requiredProperty{
		props:      prop.props,
		sortKeyLen: prop.sortKeyLen,
	}
	if prop.limit != nil {
		ret.limit = &Limit{
			Count: prop.limit.Offset + prop.limit.Count,
		}
	}
	return ret
}
|
[
"func nextOffsetLimit(totalCount, offset, limit int64) (int64, int64) {\n\tnextLimit := limit\n\tif totalCount < offset+limit {\n\t\tnextLimit = totalCount - offset\n\t}\n\tnextOffset := offset + nextLimit\n\treturn nextOffset, nextLimit\n}",
"func prevOffsetLimit(totalCount, offset, limit int64) (int64, int64) {\n\tvar prevOffset int64\n\tif offset > limit {\n\t\tprevOffset = offset - limit\n\t}\n\tprevLimit := limit\n\tif offset-limit <= 0 {\n\t\tprevLimit = offset\n\t}\n\treturn prevOffset, prevLimit\n}",
"func listLimitCount(count int32, limit uint32) int32 {\n\t// requested count is zero?\n\t// this should not happen but let's return the max range than\n\tif count == 0 {\n\t\treturn int32(limit)\n\t}\n\n\t// is the count already inside the limit range (e.g. correct input)?\n\t// return the valid original\n\tif (count > 0 && uint32(count) < limit) || (count < 0 && count > -int32(limit)) {\n\t\treturn count\n\t}\n\n\t// the count is over the limit\n\t// so we return the limit being the max. value allowed\n\t// adjusted to the original direction\n\tif count < 0 {\n\t\treturn -int32(limit)\n\t}\n\n\treturn int32(limit)\n}",
"func LimitOffset(limit string, offset string) string {\n\tlimits, _ := strconv.Atoi(limit)\n\toffsets, _ := strconv.Atoi(offset)\n\tif limit == \"\" && offsets > 0 {\n\t\treturn fmt.Sprintf(\" OFFSET %s\", offset)\n\t}\n\tif offset == \"\" && limits > 0 {\n\t\treturn fmt.Sprintf(\" LIMIT %s\", limit)\n\t}\n\tif limits > 0 && offsets > 0 {\n\t\tlimit := strconv.Itoa(limits)\n\t\toffset := strconv.Itoa(offsets)\n\t\treturn fmt.Sprintf(\" LIMIT %s OFFSET %s\", limit, offset)\n\t}\n\treturn \"\"\n}",
"func parseOffsetLimit(q url.Values) (int64, int64) {\n\tsLimit := q.Get(\"limit\")\n\tif sLimit == \"\" {\n\t\tsLimit = \"100\"\n\t}\n\tsOffset := q.Get(\"offset\")\n\tif sOffset == \"\" {\n\t\tsOffset = \"0\"\n\t}\n\tlimit, err := strconv.ParseInt(sLimit, 10, 64)\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\toffset, err := strconv.ParseInt(sOffset, 10, 64)\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\treturn offset, limit\n}",
"func effectiveLimit(limit int, offset int, total int) int {\n\tswitch {\n\tcase limit == 0:\n\t\treturn -1\n\tcase limit > total-offset:\n\t\treturn total - offset\n\tdefault:\n\t\treturn limit\n\t}\n}",
"func (p *PhysicalPlan) AddLimit(count int64, offset int64, exprCtx ExprContext) error {\n\tif count < 0 {\n\t\treturn errors.Errorf(\"negative limit\")\n\t}\n\tif offset < 0 {\n\t\treturn errors.Errorf(\"negative offset\")\n\t}\n\t// limitZero is set to true if the limit is a legitimate LIMIT 0 requested by\n\t// the user. This needs to be tracked as a separate condition because DistSQL\n\t// uses count=0 to mean no limit, not a limit of 0. Normally, DistSQL will\n\t// short circuit 0-limit plans, but wrapped local planNodes sometimes need to\n\t// be fully-executed despite having 0 limit, so if we do in fact have a\n\t// limit-0 case when there's local planNodes around, we add an empty plan\n\t// instead of completely eliding the 0-limit plan.\n\tlimitZero := false\n\tif count == 0 {\n\t\tcount = 1\n\t\tlimitZero = true\n\t}\n\n\tif len(p.ResultRouters) == 1 {\n\t\t// We only have one processor producing results. Just update its PostProcessSpec.\n\t\t// SELECT FROM (SELECT OFFSET 10 LIMIT 1000) OFFSET 5 LIMIT 20 becomes\n\t\t// SELECT OFFSET 10+5 LIMIT min(1000, 20).\n\t\tpost := p.GetLastStagePost()\n\t\tif offset != 0 {\n\t\t\tswitch {\n\t\t\tcase post.Limit > 0 && post.Limit <= uint64(offset):\n\t\t\t\t// The previous limit is not enough to reach the offset; we know there\n\t\t\t\t// will be no results. For example:\n\t\t\t\t// SELECT * FROM (SELECT * FROM .. LIMIT 5) OFFSET 10\n\t\t\t\tcount = 1\n\t\t\t\tlimitZero = true\n\n\t\t\tcase post.Offset > math.MaxUint64-uint64(offset):\n\t\t\t\t// The sum of the offsets would overflow. There is no way we'll ever\n\t\t\t\t// generate enough rows.\n\t\t\t\tcount = 1\n\t\t\t\tlimitZero = true\n\n\t\t\tdefault:\n\t\t\t\t// If we're collapsing an offset into a stage that already has a limit,\n\t\t\t\t// we have to be careful, since offsets always are applied first, before\n\t\t\t\t// limits. So, if the last stage already has a limit, we subtract the\n\t\t\t\t// offset from that limit to preserve correctness.\n\t\t\t\t//\n\t\t\t\t// As an example, consider the requirement of applying an offset of 3 on\n\t\t\t\t// top of a limit of 10. In this case, we need to emit 7 result rows. But\n\t\t\t\t// just propagating the offset blindly would produce 10 result rows, an\n\t\t\t\t// incorrect result.\n\t\t\t\tpost.Offset += uint64(offset)\n\t\t\t\tif post.Limit > 0 {\n\t\t\t\t\t// Note that this can't fall below 1 - we would have already caught this\n\t\t\t\t\t// case above.\n\t\t\t\t\tpost.Limit -= uint64(offset)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif count != math.MaxInt64 && (post.Limit == 0 || post.Limit > uint64(count)) {\n\t\t\tpost.Limit = uint64(count)\n\t\t}\n\t\tp.SetLastStagePost(post, p.GetResultTypes())\n\t\tif limitZero {\n\t\t\tif err := p.AddFilter(tree.DBoolFalse, exprCtx, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\t// We have multiple processors producing results. We will add a single\n\t// processor stage that limits. 
As an optimization, we also set a\n\t// \"local\" limit on each processor producing results.\n\tif count != math.MaxInt64 {\n\t\tpost := p.GetLastStagePost()\n\t\t// If we have OFFSET 10 LIMIT 5, we may need as much as 15 rows from any\n\t\t// processor.\n\t\tlocalLimit := uint64(count + offset)\n\t\tif post.Limit == 0 || post.Limit > localLimit {\n\t\t\tpost.Limit = localLimit\n\t\t\tp.SetLastStagePost(post, p.GetResultTypes())\n\t\t}\n\t}\n\n\tpost := execinfrapb.PostProcessSpec{\n\t\tOffset: uint64(offset),\n\t}\n\tif count != math.MaxInt64 {\n\t\tpost.Limit = uint64(count)\n\t}\n\tp.AddSingleGroupStage(\n\t\tp.GatewayNodeID,\n\t\texecinfrapb.ProcessorCoreUnion{Noop: &execinfrapb.NoopCoreSpec{}},\n\t\tpost,\n\t\tp.GetResultTypes(),\n\t)\n\tif limitZero {\n\t\tif err := p.AddFilter(tree.DBoolFalse, exprCtx, nil); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (d *Dao) LimitUserCount(c context.Context) (count int64, err error) {\n\trow := d.db.QueryRow(c, _LimitUserCountSQL)\n\tif err = row.Scan(&count); err != nil {\n\t\tlog.Error(\"cacu limit user count error(%v)\", err)\n\t\tif err == sql.ErrNoRows {\n\t\t\terr = nil\n\t\t}\n\t}\n\treturn\n}",
"func (b *Builder) buildLimitOffset(ev memo.ExprView) (execPlan, error) {\n\tinput, err := b.buildRelational(ev.Child(0))\n\tif err != nil {\n\t\treturn execPlan{}, err\n\t}\n\tvalueExpr := ev.Child(1)\n\tif valueExpr.Operator() != opt.ConstOp {\n\t\treturn execPlan{}, errors.Errorf(\"only constant LIMIT/OFFSET supported\")\n\t}\n\tdatum := valueExpr.Private().(tree.Datum)\n\tvalue, ok := datum.(*tree.DInt)\n\tif !ok {\n\t\treturn execPlan{}, errors.Errorf(\"non-integer LIMIT/OFFSET\")\n\t}\n\tvar limit, offset int64\n\tif ev.Operator() == opt.LimitOp {\n\t\tlimit, offset = int64(*value), 0\n\t} else {\n\t\tlimit, offset = math.MaxInt64, int64(*value)\n\t}\n\tnode, err := b.factory.ConstructLimit(input.root, limit, offset)\n\tif err != nil {\n\t\treturn execPlan{}, err\n\t}\n\treturn execPlan{root: node, outputCols: input.outputCols}, nil\n}",
"func (s *SelectStmt) LimitOffset(limit, offset int) *SelectStmt {\n\tvar t = *s\n\tt.limit = &limit\n\tt.offset = &offset\n\treturn &t\n}",
"func (p *StreamGetParams) Limit(bytes, count int) *StreamGetParams {\n\tp = p.clone()\n\n\tif bytes < 0 {\n\t\tbytes = 0\n\t}\n\tif count < 0 {\n\t\tcount = 0\n\t}\n\n\tp.r.ByteCount, p.r.LogCount = int32(bytes), int32(count)\n\treturn p\n}",
"func (qs *QueryStatement) parseLimit(limitClause *parser.LimitClauseContext) {\n\tif nil != limitClause {\n\t\tlimit, _ := strconv.ParseInt(limitClause.L_INT().GetText(), 10, 32)\n\t\tqs.limit = int32(limit)\n\t}\n}",
"func (d *Dao) ResLimitCount(c context.Context, state int32) (count int64, err error) {\n\trow := d.db.QueryRow(c, _resLimitCountSQL, state)\n\tif err = row.Scan(&count); err != nil {\n\t\tlog.Error(\"CountNotice row.Scan err (%v)\", err)\n\t\tif err == sql.ErrNoRows {\n\t\t\terr = nil\n\t\t}\n\t}\n\treturn\n}",
"func (c *LevelCollection) Limit(limit int) *LevelCollection {\n\treturn &LevelCollection{\n\t\tData: c.Data,\n\t\tPagination: c.Pagination,\n\t\tlimit: limit,\n\t}\n}",
"func NewCountOffsetParams(count int, offsets ...int) url.Values {\n\tif count == 0 {\n\t\tcount = 1\n\t}\n\tparam := url.Values{\n\t\t\"count\": {fmt.Sprint(count)},\n\t}\n\tif len(offsets) > 0 && offsets[0] != 0 {\n\t\tparam.Set(\"offset\", fmt.Sprint(offsets[0]))\n\t}\n\treturn param\n}",
"func (offsetsBySource OffsetsBySource) LimitAge(limit wal.Offset) OffsetsBySource {\n\tresult := make(OffsetsBySource, len(offsetsBySource))\n\tfor source, offset := range offsetsBySource {\n\t\tif limit.After(offset) {\n\t\t\tresult[source] = limit\n\t\t} else {\n\t\t\tresult[source] = offset\n\t\t}\n\t}\n\treturn result\n}",
"func GetOffsetLimit(ctx echo.Context) (offset, limit int, has bool) {\n\thas = false\n\toffset = 0\n\tlimit = 0\n\tstrOffset := ctx.QueryParam(\"offset\")\n\tstrLimit := ctx.QueryParam(\"limit\")\n\tif len(strOffset) == 0 || len(strLimit) == 0 {\n\n\t\thas = false\n\t\treturn\n\t}\n\tvar err error\n\toffset, err = strconv.Atoi(strOffset)\n\tif err != nil {\n\t\toffset = 0\n\t\treturn\n\t}\n\tlimit, err = strconv.Atoi(strLimit)\n\tif err != nil {\n\t\tlimit = 0\n\t\treturn\n\t}\n\thas = true\n\treturn\n}",
"func (n *ClusterMetric) Limit(limit int) {\n\n\tif limit == 0 {\n\t\treturn\n\t}\n\n\tfor metricType, samples := range n.Metrics {\n\t\tif len(samples) < limit {\n\t\t\tcontinue\n\t\t}\n\t\tn.Metrics[metricType] = samples[:limit]\n\t}\n}",
"func (cq *CollectionQuery) Limit(n int) *CollectionQuery {\n\tc := *cq\n\n\tc.pipes = append(c.pipes, &bson.M{\n\t\t\"$limit\": n,\n\t})\n\n\treturn &c\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
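The rewrite in this row matters when a limit is pushed below an operator that cannot itself skip rows: the child must produce the first offset+count rows so that the parent can still apply the offset. A small stand-alone illustration, with a stand-in Limit type assumed as before:

```go
package main

import "fmt"

// Stand-in for the planner's Limit spec (an assumption, not TiDB's type).
type limit struct{ offset, count uint64 }

// pushDown mirrors convertLimitOffsetToCount: limit(offset, count)
// becomes limit(0, offset+count) for the child, since the offset can
// only be applied once the parent sees the rows.
func pushDown(l *limit) *limit {
	if l == nil {
		return nil
	}
	return &limit{count: l.offset + l.count}
}

func main() {
	fmt.Printf("%+v\n", *pushDown(&limit{offset: 10, count: 5})) // {offset:0 count:15}
}
```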
convert2PhysicalPlanSemi converts the semi join to a physicalPlanInfo.
|
func (p *LogicalJoin) convert2PhysicalPlanSemi(prop *requiredProperty) (*physicalPlanInfo, error) {
	lChild := p.children[0].(LogicalPlan)
	rChild := p.children[1].(LogicalPlan)
	allLeft := true
	for _, col := range prop.props {
		if !lChild.Schema().Contains(col.col) {
			allLeft = false
		}
	}
	join := PhysicalHashSemiJoin{
		WithAux:         LeftOuterSemiJoin == p.JoinType,
		EqualConditions: p.EqualConditions,
		LeftConditions:  p.LeftConditions,
		RightConditions: p.RightConditions,
		OtherConditions: p.OtherConditions,
		Anti:            p.anti,
	}.init(p.allocator, p.ctx)
	join.SetSchema(p.schema)
	lProp := prop
	if !allLeft {
		lProp = &requiredProperty{}
	}
	if p.JoinType == SemiJoin {
		lProp = removeLimit(lProp)
	}
	lInfo, err := lChild.convert2PhysicalPlan(lProp)
	if err != nil {
		return nil, errors.Trace(err)
	}
	rInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})
	if err != nil {
		return nil, errors.Trace(err)
	}
	resultInfo := join.matchProperty(prop, lInfo, rInfo)
	if p.JoinType == SemiJoin {
		resultInfo.count = lInfo.count * selectionFactor
	} else {
		resultInfo.count = lInfo.count
	}
	if !allLeft {
		resultInfo = enforceProperty(prop, resultInfo)
	} else if p.JoinType == SemiJoin {
		resultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)
	}
	return resultInfo, nil
}
|
[
"func (p *LogicalApply) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tif p.JoinType == InnerJoin || p.JoinType == LeftOuterJoin {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanLeft(&requiredProperty{}, p.JoinType == InnerJoin)\n\t} else {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanSemi(&requiredProperty{})\n\t}\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tswitch info.p.(type) {\n\tcase *PhysicalHashJoin, *PhysicalHashSemiJoin:\n\t\tap := PhysicalApply{\n\t\t\tPhysicalJoin: info.p,\n\t\t\tOuterSchema: p.corCols,\n\t\t}.init(p.allocator, p.ctx)\n\t\tap.SetChildren(info.p.Children()...)\n\t\tap.SetSchema(info.p.Schema())\n\t\tinfo.p = ap\n\tdefault:\n\t\tinfo.cost = math.MaxFloat64\n\t\tinfo.p = nil\n\t}\n\tinfo = enforceProperty(prop, info)\n\tp.storePlanInfo(prop, info)\n\treturn info, nil\n}",
"func newSemiJoin(lhs, rhs logicalPlan, vars map[string]int) *semiJoin {\n\treturn &semiJoin{\n\t\trhs: rhs,\n\t\tlhs: lhs,\n\t\tvars: vars,\n\t}\n}",
"func (p *LogicalJoin) convert2PhysicalPlanLeft(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tSmallTable: 1,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = LeftOuterJoin\n\t}\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tvar lInfo *physicalPlanInfo\n\tvar err error\n\tif innerJoin {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(removeLimit(lProp))\n\t} else {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(convertLimitOffsetToCount(lProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tplanInfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif planInfo != nil {\n\t\treturn planInfo, nil\n\t}\n\tlimit := prop.limit\n\tif len(prop.props) == 0 {\n\t\tplanInfo, err = p.convert2PhysicalPlanHash()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tstreamInfo, err := p.convert2PhysicalPlanStream(removeLimit(prop))\n\tif planInfo == nil || streamInfo.cost < planInfo.cost {\n\t\tplanInfo = streamInfo\n\t}\n\tplanInfo = enforceProperty(limitProperty(limit), planInfo)\n\terr = p.storePlanInfo(prop, planInfo)\n\treturn planInfo, errors.Trace(err)\n}",
"func (p *LogicalJoin) convert2PhysicalPlanRight(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallRight := true\n\tfor _, col := range prop.props {\n\t\tif !rChild.Schema().Contains(col.col) {\n\t\t\tallRight = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = RightOuterJoin\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trProp := prop\n\tif !allRight {\n\t\trProp = &requiredProperty{}\n\t} else {\n\t\trProp = replaceColsInPropBySchema(rProp, rChild.Schema())\n\t}\n\tvar rInfo *physicalPlanInfo\n\tif innerJoin {\n\t\trInfo, err = rChild.convert2PhysicalPlan(removeLimit(rProp))\n\t} else {\n\t\trInfo, err = rChild.convert2PhysicalPlan(convertLimitOffsetToCount(rProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allRight {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *DataSource) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tinfo, err = p.tryToConvert2DummyScan(prop)\n\tif info != nil || err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tclient := p.ctx.GetClient()\n\tmemDB := infoschema.IsMemoryDB(p.DBName.L)\n\tisDistReq := !memDB && client != nil && client.SupportRequestType(kv.ReqTypeSelect, 0)\n\tif !isDistReq {\n\t\tmemTable := PhysicalMemTable{\n\t\t\tDBName: p.DBName,\n\t\t\tTable: p.tableInfo,\n\t\t\tColumns: p.Columns,\n\t\t\tTableAsName: p.TableAsName,\n\t\t}.init(p.allocator, p.ctx)\n\t\tmemTable.SetSchema(p.schema)\n\t\trb := &ranger.Builder{Sc: p.ctx.GetSessionVars().StmtCtx}\n\t\tmemTable.Ranges = rb.BuildTableRanges(ranger.FullRange)\n\t\tinfo = &physicalPlanInfo{p: memTable}\n\t\tinfo = enforceProperty(prop, info)\n\t\tp.storePlanInfo(prop, info)\n\t\treturn info, nil\n\t}\n\tindices, includeTableScan := availableIndices(p.indexHints, p.tableInfo)\n\tif includeTableScan {\n\t\tinfo, err = p.convert2TableScan(prop)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tif !includeTableScan || p.need2ConsiderIndex(prop) {\n\t\tfor _, index := range indices {\n\t\t\tindexInfo, err := p.convert2IndexScan(prop, index)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tif info == nil || indexInfo.cost < info.cost {\n\t\t\t\tinfo = indexInfo\n\t\t\t}\n\t\t}\n\t}\n\treturn info, errors.Trace(p.storePlanInfo(prop, info))\n}",
"func (p *Selection) convert2PhysicalPlanEnforce(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tchild := p.children[0].(LogicalPlan)\n\tinfo, err := child.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif prop.limit != nil && len(prop.props) > 0 {\n\t\tif t, ok := info.p.(physicalDistSQLPlan); ok {\n\t\t\tt.addTopN(p.ctx, prop)\n\t\t} else if _, ok := info.p.(*Selection); !ok {\n\t\t\tinfo = p.appendSelToInfo(info)\n\t\t}\n\t\tinfo = enforceProperty(prop, info)\n\t} else if len(prop.props) != 0 {\n\t\tinfo = &physicalPlanInfo{cost: math.MaxFloat64}\n\t}\n\treturn info, nil\n}",
"func (p *LogicalJoin) convert2PhysicalMergeJoin(parentProp *requiredProperty, lProp *requiredProperty, rProp *requiredProperty, condIndex int, joinType JoinType) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\n\tnewEQConds := make([]*expression.ScalarFunction, 0, len(p.EqualConditions)-1)\n\tfor i, cond := range p.EqualConditions {\n\t\tif i == condIndex {\n\t\t\tcontinue\n\t\t}\n\t\t// prevent further index contamination\n\t\tnewCond := cond.Clone()\n\t\tnewCond.ResolveIndices(p.schema)\n\t\tnewEQConds = append(newEQConds, newCond.(*expression.ScalarFunction))\n\t}\n\teqCond := p.EqualConditions[condIndex]\n\n\totherFilter := append(expression.ScalarFuncs2Exprs(newEQConds), p.OtherConditions...)\n\n\tjoin := PhysicalMergeJoin{\n\t\tEqualConditions: []*expression.ScalarFunction{eqCond},\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: otherFilter,\n\t\tDefaultValues: p.DefaultValues,\n\t\t// Assume order for both side are the same\n\t\tDesc: lProp.props[0].desc,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tjoin.JoinType = joinType\n\n\tvar lInfo *physicalPlanInfo\n\tvar rInfo *physicalPlanInfo\n\n\t// Try no sort first\n\tlInfoEnforceSort, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tlInfoEnforceSort = enforceProperty(lProp, lInfoEnforceSort)\n\n\tlInfoNoSorted, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif lInfoNoSorted.cost < lInfoEnforceSort.cost {\n\t\tlInfo = lInfoNoSorted\n\t} else {\n\t\tlInfo = lInfoEnforceSort\n\t}\n\n\trInfoEnforceSort, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfoEnforceSort = enforceProperty(rProp, rInfoEnforceSort)\n\n\trInfoNoSorted, err := rChild.convert2PhysicalPlan(rProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif rInfoEnforceSort.cost < rInfoNoSorted.cost {\n\t\trInfo = rInfoEnforceSort\n\t} else {\n\t\trInfo = rInfoNoSorted\n\t}\n\tparentProp = join.tryConsumeOrder(parentProp, eqCond)\n\n\tresultInfo := join.matchProperty(parentProp, lInfo, rInfo)\n\t// TODO: Considering keeping order in join to remove at least\n\t// one ordering property\n\tresultInfo = enforceProperty(parentProp, resultInfo)\n\treturn resultInfo, nil\n}",
"func (mq *metadataQuery) makePlan() (*models.PhysicalPlan, error) {\n\t//FIXME need using storage's replica state ???\n\tstorageNodes, err := mq.runtime.stateMgr.GetQueryableReplicas(mq.database)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstorageNodesLen := len(storageNodes)\n\tif storageNodesLen == 0 {\n\t\treturn nil, constants.ErrReplicaNotFound\n\t}\n\tcurBroker := mq.runtime.stateMgr.GetCurrentNode()\n\tcurBrokerIndicator := curBroker.Indicator()\n\tphysicalPlan := &models.PhysicalPlan{\n\t\tDatabase: mq.database,\n\t\tRoot: models.Root{\n\t\t\tIndicator: curBrokerIndicator,\n\t\t\tNumOfTask: int32(storageNodesLen),\n\t\t},\n\t}\n\treceivers := []models.StatelessNode{curBroker}\n\tfor storageNode, shardIDs := range storageNodes {\n\t\tphysicalPlan.AddLeaf(models.Leaf{\n\t\t\tBaseNode: models.BaseNode{\n\t\t\t\tParent: curBrokerIndicator,\n\t\t\t\tIndicator: storageNode,\n\t\t\t},\n\t\t\tShardIDs: shardIDs,\n\t\t\tReceivers: receivers,\n\t\t})\n\t}\n\treturn physicalPlan, nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanHash() (*physicalPlanInfo, error) {\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tdistinct := false\n\tfor _, fun := range p.AggFuncs {\n\t\tif fun.IsDistinct() {\n\t\t\tdistinct = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !distinct {\n\t\tif x, ok := childInfo.p.(physicalDistSQLPlan); ok {\n\t\t\tinfo := p.convert2PhysicalPlanFinalHash(x, childInfo)\n\t\t\tif info != nil {\n\t\t\t\treturn info, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn p.convert2PhysicalPlanCompleteHash(childInfo), nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanStream(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tfor _, aggFunc := range p.AggFuncs {\n\t\tif aggFunc.GetMode() == expression.FinalMode {\n\t\t\treturn &physicalPlanInfo{cost: math.MaxFloat64}, nil\n\t\t}\n\t}\n\tagg := PhysicalAggregation{\n\t\tAggType: StreamedAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\t// TODO: Consider distinct key.\n\tinfo := &physicalPlanInfo{cost: math.MaxFloat64}\n\tgbyCols := p.groupByCols\n\tif len(gbyCols) != len(p.GroupByItems) {\n\t\t// group by a + b is not interested in any order.\n\t\treturn info, nil\n\t}\n\tisSortKey := make([]bool, len(gbyCols))\n\tnewProp := &requiredProperty{\n\t\tprops: make([]*columnProp, 0, len(gbyCols)),\n\t}\n\tfor _, pro := range prop.props {\n\t\tidx := p.getGbyColIndex(pro.col)\n\t\tif idx == -1 {\n\t\t\treturn info, nil\n\t\t}\n\t\tisSortKey[idx] = true\n\t\t// We should add columns in aggregation in order to keep index right.\n\t\tnewProp.props = append(newProp.props, &columnProp{col: gbyCols[idx], desc: pro.desc})\n\t}\n\tnewProp.sortKeyLen = len(newProp.props)\n\tfor i, col := range gbyCols {\n\t\tif !isSortKey[i] {\n\t\t\tnewProp.props = append(newProp.props, &columnProp{col: col})\n\t\t}\n\t}\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(newProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tinfo = addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * cpuFactor\n\tinfo.count = info.count * aggFactor\n\treturn info, nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanFinalHash(x physicalDistSQLPlan, childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: FinalAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.SetSchema(p.schema)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tschema := x.addAggregation(p.ctx, agg)\n\tif schema.Len() == 0 {\n\t\treturn nil\n\t}\n\tx.(PhysicalPlan).SetSchema(schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.count = info.count * aggFactor\n\t// if we build the final aggregation, it must be the best plan.\n\tinfo.cost = 0\n\treturn info\n}",
"func MakePhysicalPlan(infra *PhysicalInfrastructure) PhysicalPlan {\n\treturn PhysicalPlan{\n\t\tPhysicalInfrastructure: infra,\n\t}\n}",
"func (n *Node) transformSemiTreeToList() *Node {\n\n\tif n.Token() != token.SEMI {\n\t\treturn n\n\t}\n\n\t// rename \";\" to \"stmts\", cuz it's just a chain of statements, actually.\n\t// (; ...) ==> (stmts ...)\n\tn.lexeme = n.lexeme.Rewrite(token.STMTS, \"stmts\")\n\n\tfirst := n.firstChild()\n\n\t// unnest statements\n\t// (stmts (; (; (:= a 1) (:= b 2)) (:= c 3)))\n\t// ==> (stmts (:= a 1) (:= b 2) (:= c 3))\n\tif first != nil && (first.Token() == token.SEMI || first.Token() == token.STMTS) {\n\t\tn = n.raiseFirstChildren()\n\t}\n\n\tif len(n.children) == 1 {\n\t\treturn n.children[0]\n\t}\n\n\treturn n\n}",
"func TracePlan(v *Vertex, smoothing bool) *common.Plan {\n\tbranch := make([]*Edge, 0)\n\tif v == nil {\n\t\treturn nil\n\t}\n\tif v.ParentEdge == nil {\n\t\tPrintError(\"Nil parent edge\")\n\t}\n\tif v.ParentEdge.Start == nil {\n\t\tPrintError(\"Nil parent edge Start\")\n\t}\n\n\tif smoothing {\n\t\t// smoothing\n\t\tv.ParentEdge.Smooth()\n\t}\n\n\t// only cycle should be in Start vertex\n\tfor cur := v; cur.ParentEdge.Start != cur; cur = cur.ParentEdge.Start {\n\t\tbranch = append(branch, cur.ParentEdge)\n\t}\n\n\t// reverse the Plan order (this might look dumb)\n\ts := branch\n\tfor i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {\n\t\ts[i], s[j] = s[j], s[i]\n\t}\n\tbranch = s\n\n\tif Verbose && len(branch) > 0 {\n\t\tPrintLog(\"Current tree: \")\n\t\tPrintLog(branch[0].Start.State.String())\n\t\tfor _, x := range branch {\n\t\t\tPrintLog(x.End.State.String())\n\t\t}\n\t\tPrintLog(\"Done printing tree.\")\n\t}\n\n\tp := new(common.Plan)\n\tp.Start = Start\n\t//LastPlan = make([]*Vertex, len(branch))\n\t// p.AppendState(&Start) // yes this is necessary\n\tfor _, e := range branch {\n\t\t//LastPlan[i] = e.End\n\t\tp.AppendPlan(GetPlan(e))\n\t\tp.AppendState(e.End.State)\n\t}\n\treturn p\n}",
"func (spc *InsuranceContract) LinkPolicyToPlan(ctx contractapi.TransactionContextInterface, policyID string, planID string) (string, error) {\n\t// get plan info\n\tvar plan Plans\n\tplanBytes, err := ctx.GetStub().GetState(planID)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to read from world state: %v\", err)\n\t}\n\t//check if ID already exists (return the state of the ID by checking the world state)\n\tif planBytes == nil {\n\t\treturn \"\", fmt.Errorf(\"plan does not exists for planID %s\", planID)\n\t}\n\n\terr = json.Unmarshal(planBytes, &plan)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unmarshall planbytes error: %v\", err)\n\t}\n\t// // get policy info\n\t// var policy Policy\n\t// policybytes, err := ctx.GetStub().GetState(policyID)\n\t// if err != nil {\n\t// \treturn \"\", fmt.Errorf(\"failed to read from world state: %v\", err)\n\t// }\n\t// //check if ID already exists (return the state of the ID by checking the world state)\n\t// if policybytes != nil {\n\t// \treturn \"\", fmt.Errorf(\"confirmed the plan already exists for planID %s\", planID)\n\t// }\n\n\t// json.Unmarshal(policybytes, &policy)\n\n\tplan.PlanOptions = append(plan.PlanOptions, policyID)\n\tplanBytes, err = json.Marshal(plan)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = ctx.GetStub().PutState(planID, planBytes)\n\tif err != nil {\n\t\treturn \"\", err\n\n\t}\n\n\treturn \"Policy addded to Plan\", nil\n}",
"func (p *PhysicalHashJoin) convertPartitionKeysIfNeed(lTask, rTask *mppTask) (*mppTask, *mppTask) {\n\tlp := lTask.p\n\tif _, ok := lp.(*PhysicalExchangeReceiver); ok {\n\t\tlp = lp.Children()[0].Children()[0]\n\t}\n\trp := rTask.p\n\tif _, ok := rp.(*PhysicalExchangeReceiver); ok {\n\t\trp = rp.Children()[0].Children()[0]\n\t}\n\t// to mark if any partition key needs to convert\n\tlMask := make([]bool, len(lTask.hashCols))\n\trMask := make([]bool, len(rTask.hashCols))\n\tcTypes := make([]*types.FieldType, len(lTask.hashCols))\n\tlChanged := false\n\trChanged := false\n\tfor i := range lTask.hashCols {\n\t\tlKey := lTask.hashCols[i]\n\t\trKey := rTask.hashCols[i]\n\t\tcType, lConvert, rConvert := negotiateCommonType(lKey.Col.RetType, rKey.Col.RetType)\n\t\tif lConvert {\n\t\t\tlMask[i] = true\n\t\t\tcTypes[i] = cType\n\t\t\tlChanged = true\n\t\t}\n\t\tif rConvert {\n\t\t\trMask[i] = true\n\t\t\tcTypes[i] = cType\n\t\t\trChanged = true\n\t\t}\n\t}\n\tif !lChanged && !rChanged {\n\t\treturn lTask, rTask\n\t}\n\tvar lProj, rProj *PhysicalProjection\n\tif lChanged {\n\t\tlProj = getProj(p.SCtx(), lp)\n\t\tlp = lProj\n\t}\n\tif rChanged {\n\t\trProj = getProj(p.SCtx(), rp)\n\t\trp = rProj\n\t}\n\n\tlPartKeys := make([]*property.MPPPartitionColumn, 0, len(rTask.hashCols))\n\trPartKeys := make([]*property.MPPPartitionColumn, 0, len(lTask.hashCols))\n\tfor i := range lTask.hashCols {\n\t\tlKey := lTask.hashCols[i]\n\t\trKey := rTask.hashCols[i]\n\t\tif lMask[i] {\n\t\t\tcType := cTypes[i].Clone()\n\t\t\tcType.SetFlag(lKey.Col.RetType.GetFlag())\n\t\t\tlCast := expression.BuildCastFunction(p.SCtx(), lKey.Col, cType)\n\t\t\tlKey = &property.MPPPartitionColumn{Col: appendExpr(lProj, lCast), CollateID: lKey.CollateID}\n\t\t}\n\t\tif rMask[i] {\n\t\t\tcType := cTypes[i].Clone()\n\t\t\tcType.SetFlag(rKey.Col.RetType.GetFlag())\n\t\t\trCast := expression.BuildCastFunction(p.SCtx(), rKey.Col, cType)\n\t\t\trKey = &property.MPPPartitionColumn{Col: appendExpr(rProj, rCast), CollateID: rKey.CollateID}\n\t\t}\n\t\tlPartKeys = append(lPartKeys, lKey)\n\t\trPartKeys = append(rPartKeys, rKey)\n\t}\n\t// if left or right child changes, we need to add enforcer.\n\tif lChanged {\n\t\tnlTask := lTask.copy().(*mppTask)\n\t\tnlTask.p = lProj\n\t\tnlTask = nlTask.enforceExchanger(&property.PhysicalProperty{\n\t\t\tTaskTp: property.MppTaskType,\n\t\t\tMPPPartitionTp: property.HashType,\n\t\t\tMPPPartitionCols: lPartKeys,\n\t\t})\n\t\tlTask = nlTask\n\t}\n\tif rChanged {\n\t\tnrTask := rTask.copy().(*mppTask)\n\t\tnrTask.p = rProj\n\t\tnrTask = nrTask.enforceExchanger(&property.PhysicalProperty{\n\t\t\tTaskTp: property.MppTaskType,\n\t\t\tMPPPartitionTp: property.HashType,\n\t\t\tMPPPartitionCols: rPartKeys,\n\t\t})\n\t\trTask = nrTask\n\t}\n\treturn lTask, rTask\n}",
"func (p *DataSource) tryToConvert2DummyScan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tsel, isSel := p.parents[0].(*Selection)\n\tif !isSel {\n\t\treturn nil, nil\n\t}\n\n\tfor _, cond := range sel.Conditions {\n\t\tif con, ok := cond.(*expression.Constant); ok {\n\t\t\tresult, err := expression.EvalBool([]expression.Expression{con}, nil, p.ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tif !result {\n\t\t\t\tdual := TableDual{}.init(p.allocator, p.ctx)\n\t\t\t\tdual.SetSchema(p.schema)\n\t\t\t\tinfo := &physicalPlanInfo{p: dual}\n\t\t\t\tp.storePlanInfo(prop, info)\n\t\t\t\treturn info, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, nil\n}",
"func (rq SQLQuery) FetchPlan(plan FetchPlan) SQLQuery {\n\trq.plan = string(plan)\n\treturn rq\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
convert2PhysicalPlanLeft converts the join (a left outer join, or an inner join driven from the left side) to a physicalPlanInfo.
|
func (p *LogicalJoin) convert2PhysicalPlanLeft(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {
lChild := p.children[0].(LogicalPlan)
rChild := p.children[1].(LogicalPlan)
allLeft := true
for _, col := range prop.props {
if !lChild.Schema().Contains(col.col) {
allLeft = false
}
}
join := PhysicalHashJoin{
EqualConditions: p.EqualConditions,
LeftConditions: p.LeftConditions,
RightConditions: p.RightConditions,
OtherConditions: p.OtherConditions,
SmallTable: 1,
// TODO: decide concurrency by data size.
Concurrency: JoinConcurrency,
DefaultValues: p.DefaultValues,
}.init(p.allocator, p.ctx)
join.SetSchema(p.schema)
if innerJoin {
join.JoinType = InnerJoin
} else {
join.JoinType = LeftOuterJoin
}
lProp := prop
if !allLeft {
lProp = &requiredProperty{}
}
var lInfo *physicalPlanInfo
var err error
if innerJoin {
lInfo, err = lChild.convert2PhysicalPlan(removeLimit(lProp))
} else {
lInfo, err = lChild.convert2PhysicalPlan(convertLimitOffsetToCount(lProp))
}
if err != nil {
return nil, errors.Trace(err)
}
rInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})
if err != nil {
return nil, errors.Trace(err)
}
resultInfo := join.matchProperty(prop, lInfo, rInfo)
if !allLeft {
resultInfo = enforceProperty(prop, resultInfo)
} else {
resultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)
}
return resultInfo, nil
}
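// For context, a minimal, self-contained sketch of the two limit helpers
// referenced above (removeLimit and convertLimitOffsetToCount). The stand-in
// types below are assumptions for illustration only; the real planner types
// also carry ordering columns, omitted here. The idea: an inner join cannot
// push a limit to its child at all, while an outer join can push a limit to
// the preserved side only after folding offset into count, because offset
// rows cannot be skipped before the join runs.

package main

import "fmt"

// Limit and requiredProperty are hypothetical stand-ins for the planner types.
type Limit struct{ Offset, Count uint64 }

type requiredProperty struct{ limit *Limit }

// removeLimit drops the limit entirely (sketch).
func removeLimit(prop *requiredProperty) *requiredProperty {
	return &requiredProperty{}
}

// convertLimitOffsetToCount keeps a limit but folds offset into count (sketch).
func convertLimitOffsetToCount(prop *requiredProperty) *requiredProperty {
	ret := &requiredProperty{}
	if prop.limit != nil {
		ret.limit = &Limit{Count: prop.limit.Offset + prop.limit.Count}
	}
	return ret
}

func main() {
	p := &requiredProperty{limit: &Limit{Offset: 10, Count: 5}}
	fmt.Println(convertLimitOffsetToCount(p).limit.Count) // 15: the child must produce offset+count rows
	fmt.Println(removeLimit(p).limit == nil)              // true: inner-join children see no limit
}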
|
[
"func convertToLeftJoin(ajoin *sqlparser.JoinTableExpr) {\n\tnewRHS := ajoin.LeftExpr\n\t// If the LHS is a join, we have to parenthesize it.\n\t// Otherwise, it can be used as is.\n\tif _, ok := newRHS.(*sqlparser.JoinTableExpr); ok {\n\t\tnewRHS = &sqlparser.ParenTableExpr{\n\t\t\tExprs: sqlparser.TableExprs{newRHS},\n\t\t}\n\t}\n\tajoin.LeftExpr, ajoin.RightExpr = ajoin.RightExpr, newRHS\n\tajoin.Join = sqlparser.LeftJoinType\n}",
"func (p *LogicalJoin) convert2PhysicalPlanSemi(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashSemiJoin{\n\t\tWithAux: LeftOuterSemiJoin == p.JoinType,\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tAnti: p.anti,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tif p.JoinType == SemiJoin {\n\t\tlProp = removeLimit(lProp)\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif p.JoinType == SemiJoin {\n\t\tresultInfo.count = lInfo.count * selectionFactor\n\t} else {\n\t\tresultInfo.count = lInfo.count\n\t}\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else if p.JoinType == SemiJoin {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalApply) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tif p.JoinType == InnerJoin || p.JoinType == LeftOuterJoin {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanLeft(&requiredProperty{}, p.JoinType == InnerJoin)\n\t} else {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanSemi(&requiredProperty{})\n\t}\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tswitch info.p.(type) {\n\tcase *PhysicalHashJoin, *PhysicalHashSemiJoin:\n\t\tap := PhysicalApply{\n\t\t\tPhysicalJoin: info.p,\n\t\t\tOuterSchema: p.corCols,\n\t\t}.init(p.allocator, p.ctx)\n\t\tap.SetChildren(info.p.Children()...)\n\t\tap.SetSchema(info.p.Schema())\n\t\tinfo.p = ap\n\tdefault:\n\t\tinfo.cost = math.MaxFloat64\n\t\tinfo.p = nil\n\t}\n\tinfo = enforceProperty(prop, info)\n\tp.storePlanInfo(prop, info)\n\treturn info, nil\n}",
"func (ds *joinDataSet) leftJoin() joinDataSet {\n\texp := ResultChunk{\n\t\tColumns: ds.expResults.Columns,\n\t\tValues: append(ds.expResults.Values, ds.expLeftJoin.Values...),\n\t\toffsets: append(ds.expResults.offsets, ds.expLeftJoin.offsets...),\n\t\tFacts: append(ds.expResults.Facts, ds.expLeftJoin.Facts...),\n\t}\n\tleft := joinDataSet{\n\t\tname: ds.name + \"_left\",\n\t\tjoinVars: ds.joinVars,\n\t\tjoinType: parser.MatchOptional,\n\t\tleft: ds.left,\n\t\tright: ds.right,\n\t\tinputBinder: ds.inputBinder,\n\t\texpResults: exp,\n\t}\n\treturn left\n}",
"func (p *LogicalJoin) convert2PhysicalPlanRight(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallRight := true\n\tfor _, col := range prop.props {\n\t\tif !rChild.Schema().Contains(col.col) {\n\t\t\tallRight = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = RightOuterJoin\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trProp := prop\n\tif !allRight {\n\t\trProp = &requiredProperty{}\n\t} else {\n\t\trProp = replaceColsInPropBySchema(rProp, rChild.Schema())\n\t}\n\tvar rInfo *physicalPlanInfo\n\tif innerJoin {\n\t\trInfo, err = rChild.convert2PhysicalPlan(removeLimit(rProp))\n\t} else {\n\t\trInfo, err = rChild.convert2PhysicalPlan(convertLimitOffsetToCount(rProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allRight {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalJoin) convert2PhysicalMergeJoin(parentProp *requiredProperty, lProp *requiredProperty, rProp *requiredProperty, condIndex int, joinType JoinType) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\n\tnewEQConds := make([]*expression.ScalarFunction, 0, len(p.EqualConditions)-1)\n\tfor i, cond := range p.EqualConditions {\n\t\tif i == condIndex {\n\t\t\tcontinue\n\t\t}\n\t\t// prevent further index contamination\n\t\tnewCond := cond.Clone()\n\t\tnewCond.ResolveIndices(p.schema)\n\t\tnewEQConds = append(newEQConds, newCond.(*expression.ScalarFunction))\n\t}\n\teqCond := p.EqualConditions[condIndex]\n\n\totherFilter := append(expression.ScalarFuncs2Exprs(newEQConds), p.OtherConditions...)\n\n\tjoin := PhysicalMergeJoin{\n\t\tEqualConditions: []*expression.ScalarFunction{eqCond},\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: otherFilter,\n\t\tDefaultValues: p.DefaultValues,\n\t\t// Assume order for both side are the same\n\t\tDesc: lProp.props[0].desc,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tjoin.JoinType = joinType\n\n\tvar lInfo *physicalPlanInfo\n\tvar rInfo *physicalPlanInfo\n\n\t// Try no sort first\n\tlInfoEnforceSort, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tlInfoEnforceSort = enforceProperty(lProp, lInfoEnforceSort)\n\n\tlInfoNoSorted, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif lInfoNoSorted.cost < lInfoEnforceSort.cost {\n\t\tlInfo = lInfoNoSorted\n\t} else {\n\t\tlInfo = lInfoEnforceSort\n\t}\n\n\trInfoEnforceSort, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfoEnforceSort = enforceProperty(rProp, rInfoEnforceSort)\n\n\trInfoNoSorted, err := rChild.convert2PhysicalPlan(rProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif rInfoEnforceSort.cost < rInfoNoSorted.cost {\n\t\trInfo = rInfoEnforceSort\n\t} else {\n\t\trInfo = rInfoNoSorted\n\t}\n\tparentProp = join.tryConsumeOrder(parentProp, eqCond)\n\n\tresultInfo := join.matchProperty(parentProp, lInfo, rInfo)\n\t// TODO: Considering keeping order in join to remove at least\n\t// one ordering property\n\tresultInfo = enforceProperty(parentProp, resultInfo)\n\treturn resultInfo, nil\n}",
"func (hj *hashJoiner) leftJoin(ctx context.Context) {\n\tleftIdentityKeysUsed := make(map[string]struct{})\n\n\thj.runEqJoin(func(identityKey string, offset uint32, fs FactSet, rowValues []Value) {\n\t\tleftIdentityKeysUsed[identityKey] = struct{}{}\n\t\thj.outputTo.add(ctx, offset, fs, rowValues)\n\t})\n\n\tfor key, factsets := range hj.leftJoinValues {\n\t\tif _, exists := leftIdentityKeysUsed[key]; !exists {\n\t\t\t// this list of FactSets from the left wasn't joined to any\n\t\t\t// right factSets, so emit the left join version of them\n\t\t\tfor _, left := range factsets {\n\t\t\t\thj.outputTo.add(ctx, left.offset, left.fact, hj.joiner(left.vals, nil))\n\t\t\t}\n\t\t}\n\t}\n}",
"func (p *LogicalAggregation) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tplanInfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif planInfo != nil {\n\t\treturn planInfo, nil\n\t}\n\tlimit := prop.limit\n\tif len(prop.props) == 0 {\n\t\tplanInfo, err = p.convert2PhysicalPlanHash()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tstreamInfo, err := p.convert2PhysicalPlanStream(removeLimit(prop))\n\tif planInfo == nil || streamInfo.cost < planInfo.cost {\n\t\tplanInfo = streamInfo\n\t}\n\tplanInfo = enforceProperty(limitProperty(limit), planInfo)\n\terr = p.storePlanInfo(prop, planInfo)\n\treturn planInfo, errors.Trace(err)\n}",
"func (p *DataSource) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tinfo, err = p.tryToConvert2DummyScan(prop)\n\tif info != nil || err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tclient := p.ctx.GetClient()\n\tmemDB := infoschema.IsMemoryDB(p.DBName.L)\n\tisDistReq := !memDB && client != nil && client.SupportRequestType(kv.ReqTypeSelect, 0)\n\tif !isDistReq {\n\t\tmemTable := PhysicalMemTable{\n\t\t\tDBName: p.DBName,\n\t\t\tTable: p.tableInfo,\n\t\t\tColumns: p.Columns,\n\t\t\tTableAsName: p.TableAsName,\n\t\t}.init(p.allocator, p.ctx)\n\t\tmemTable.SetSchema(p.schema)\n\t\trb := &ranger.Builder{Sc: p.ctx.GetSessionVars().StmtCtx}\n\t\tmemTable.Ranges = rb.BuildTableRanges(ranger.FullRange)\n\t\tinfo = &physicalPlanInfo{p: memTable}\n\t\tinfo = enforceProperty(prop, info)\n\t\tp.storePlanInfo(prop, info)\n\t\treturn info, nil\n\t}\n\tindices, includeTableScan := availableIndices(p.indexHints, p.tableInfo)\n\tif includeTableScan {\n\t\tinfo, err = p.convert2TableScan(prop)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tif !includeTableScan || p.need2ConsiderIndex(prop) {\n\t\tfor _, index := range indices {\n\t\t\tindexInfo, err := p.convert2IndexScan(prop, index)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tif info == nil || indexInfo.cost < info.cost {\n\t\t\t\tinfo = indexInfo\n\t\t\t}\n\t\t}\n\t}\n\treturn info, errors.Trace(p.storePlanInfo(prop, info))\n}",
"func LeftOuterJoin(baseTable string, baseColumn string, joinTable string, joinColumn string) qm.QueryMod {\n\treturn qm.LeftOuterJoin(fmt.Sprintf(\"%s ON %s.%s=%s.%s\",\n\t\tjoinTable,\n\t\tjoinTable,\n\t\tjoinColumn,\n\t\tbaseTable,\n\t\tbaseColumn))\n}",
"func LeftJoin(table Table, predicates ...Predicate) JoinTable {\n\treturn JoinTable{\n\t\tJoinType: JoinTypeLeft,\n\t\tTable: table,\n\t\tOnPredicates: VariadicPredicate{\n\t\t\tPredicates: predicates,\n\t\t},\n\t}\n}",
"func (r readableTableInterfaceImpl) LEFT_JOIN(table ReadableTable, onCondition BoolExpression) joinSelectUpdateTable {\n\treturn newJoinTable(r.parent, table, jet.LeftJoin, onCondition)\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanHash() (*physicalPlanInfo, error) {\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tdistinct := false\n\tfor _, fun := range p.AggFuncs {\n\t\tif fun.IsDistinct() {\n\t\t\tdistinct = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !distinct {\n\t\tif x, ok := childInfo.p.(physicalDistSQLPlan); ok {\n\t\t\tinfo := p.convert2PhysicalPlanFinalHash(x, childInfo)\n\t\t\tif info != nil {\n\t\t\t\treturn info, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn p.convert2PhysicalPlanCompleteHash(childInfo), nil\n}",
"func CaseSQLByPassLeftJoin(t *testing.T) {\n\ta := assert.New(t)\n\ttrainSQL := `SELECT f1.user_id, f1.fea1, f2.fea2\nFROM standard_join_test.user_fea1 AS f1 LEFT OUTER JOIN standard_join_test.user_fea2 AS f2\nON f1.user_id = f2.user_id\nWHERE f1.user_id < 3;`\n\n\tconn, err := createRPCConn()\n\ta.NoError(err)\n\tdefer conn.Close()\n\tcli := pb.NewSQLFlowClient(conn)\n\n\tctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)\n\tdefer cancel()\n\n\tstream, err := cli.Run(ctx, sqlRequest(trainSQL))\n\tif err != nil {\n\t\ta.Fail(\"Check if the server started successfully. %v\", err)\n\t}\n\t// wait train finish\n\t_, _, _, e := ParseResponse(stream)\n\ta.NoError(e)\n}",
"func (m *Map) rotateLeft(n, p *node) (*node, *node) {\n\t//_ = assertOn && assert(n == p.rn, \"new node is not right child of new parent\")\n\t//_ = assertOn && assert(p == nil || n == p.ln,\n\t//\t\"node is not the left child of parent\")\n\tvar r = n.rn //assume n.rn is already a copy.\n\n\tif p != nil {\n\t\tif n.isLeftChildOf(p) {\n\t\t\tp.ln = r\n\t\t} else {\n\t\t\tp.rn = r\n\t\t}\n\t} /* else {\n\t\tm.root = r\n\t} */\n\n\tn.rn = r.ln //handle anticipated orphaned node\n\tr.ln = n //now orphan it\n\n\treturn n, r\n}",
"func (_this *FrameData) LeftProjectionMatrix() *javascript.Float32Array {\n\tvar ret *javascript.Float32Array\n\tvalue := _this.Value_JS.Get(\"leftProjectionMatrix\")\n\tret = javascript.Float32ArrayFromJS(value)\n\treturn ret\n}",
"func (jc *JoinCondition) LeftJoin(table interface{}) *JoinCondition {\n\treturn jc.join(LeftJoin, table)\n}",
"func NewLeftJoin(table Table, condition Expression) Join {\n\treturn NewJoin(types.LeftJoin, table, condition)\n}",
"func (mq *metadataQuery) makePlan() (*models.PhysicalPlan, error) {\n\t//FIXME need using storage's replica state ???\n\tstorageNodes, err := mq.runtime.stateMgr.GetQueryableReplicas(mq.database)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstorageNodesLen := len(storageNodes)\n\tif storageNodesLen == 0 {\n\t\treturn nil, constants.ErrReplicaNotFound\n\t}\n\tcurBroker := mq.runtime.stateMgr.GetCurrentNode()\n\tcurBrokerIndicator := curBroker.Indicator()\n\tphysicalPlan := &models.PhysicalPlan{\n\t\tDatabase: mq.database,\n\t\tRoot: models.Root{\n\t\t\tIndicator: curBrokerIndicator,\n\t\t\tNumOfTask: int32(storageNodesLen),\n\t\t},\n\t}\n\treceivers := []models.StatelessNode{curBroker}\n\tfor storageNode, shardIDs := range storageNodes {\n\t\tphysicalPlan.AddLeaf(models.Leaf{\n\t\t\tBaseNode: models.BaseNode{\n\t\t\t\tParent: curBrokerIndicator,\n\t\t\t\tIndicator: storageNode,\n\t\t\t},\n\t\t\tShardIDs: shardIDs,\n\t\t\tReceivers: receivers,\n\t\t})\n\t}\n\treturn physicalPlan, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
replaceColsInPropBySchema replaces the columns in the original prop with the corresponding columns in schema.
|
func replaceColsInPropBySchema(prop *requiredProperty, schema *expression.Schema) *requiredProperty {
newProps := make([]*columnProp, 0, len(prop.props))
for _, p := range prop.props {
		idx := schema.ColumnIndex(p.col)
		if idx == -1 {
			// Skip a column that cannot be resolved against the schema;
			// indexing schema.Columns with -1 would panic here.
			log.Errorf("Can't find column %s in schema", p.col)
			continue
		}
		newProps = append(newProps, &columnProp{col: schema.Columns[idx], desc: p.desc})
}
return &requiredProperty{
props: newProps,
sortKeyLen: prop.sortKeyLen,
limit: prop.limit,
}
}
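// A minimal runnable sketch of the re-indexing idea above, using hypothetical
// stand-in types: a required ordering is expressed against the join's output
// schema, but when it is pushed down to one child it must reference that
// child's own column objects, located by position via ColumnIndex.

package main

import "fmt"

type Column struct{ Name string }

type Schema struct{ Columns []*Column }

// ColumnIndex mirrors the lookup used above: -1 when the column is absent.
func (s *Schema) ColumnIndex(c *Column) int {
	for i, col := range s.Columns {
		if col.Name == c.Name {
			return i
		}
	}
	return -1
}

func main() {
	child := &Schema{Columns: []*Column{{Name: "b"}, {Name: "a"}}}
	want := &Column{Name: "a"} // column object taken from the parent's schema
	if idx := child.ColumnIndex(want); idx != -1 {
		fmt.Println("replaced with child column at index", idx) // index 1
	}
}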
|
[
"func (*Plan) ReplaceExprColumns(_ map[string]*expression.Column) {}",
"func (c Cols) WithSchema(schema string) Cols {\n\tc2 := c.clone()\n\n\tfor _, col := range c2 {\n\t\tcol.schema = schema\n\t}\n\n\treturn c2\n}",
"func condenseSchema(currentSchema spec.Schema, openapiSpec map[string]common.OpenAPIDefinition) spec.Schema {\n\tcurrentSchemaProperties := currentSchema.SchemaProps.Properties\n\tfor property, propertySchema := range currentSchemaProperties {\n\n\t\tif propertySchema.SchemaProps.Type.Contains(\"array\") {\n\t\t\tref := propertySchema.Items.Schema.SchemaProps.Ref.String()\n\t\t\tif ref != \"\" {\n\t\t\t\treferencedSchema := getReferenceSchema(ref, propertySchema, openapiSpec)\n\t\t\t\tcondensedRefSchema := condenseSchema(referencedSchema, openapiSpec)\n\t\t\t\tpropertySchema.SchemaProps.Items.Schema.SchemaProps = condensedRefSchema.SchemaProps\n\t\t\t\tcurrentSchemaProperties[property] = propertySchema\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tref := propertySchema.SchemaProps.Ref.String()\n\t\tif ref != \"\" {\n\t\t\treferencedSchema := getReferenceSchema(ref, propertySchema, openapiSpec)\n\t\t\tpropertySchema.SchemaProps = referencedSchema.SchemaProps\n\t\t\tcurrentSchemaProperties[property] = propertySchema\n\t\t\tcondenseSchema(propertySchema, openapiSpec)\n\t\t}\n\t}\n\t// Apply fixes for certain known issues.\n\tcurrentSchema.AdditionalProperties = nil\n\treturn currentSchema\n}",
"func reorderRowBySchema(row Row, rowSchema []Column, newSchema []Column) Row {\n columnSet := make(map[Column]int, len(rowSchema))\n // column -> index in indexSchema\n for j, col := range rowSchema {\n columnSet[col] = j\n }\n\n newRow := make(Row, 0, len(newSchema))\n for _, col := range newSchema {\n if i, exists := columnSet[col]; exists {\n // insert it\n newRow = append(newRow, row[i])\n } else {\n break\n }\n }\n\n return newRow\n}",
"func mergeSchemasField(new, old interface{}) (string, error) {\n\t// Since new and old have the same data structures(a set of strings separated by `schemasFieldSeparator`).\n\t// So same logic is applied to both.\n\tfields := []interface{}{new, old}\n\tvar strFields []string\n\n\t// Iterates over fields to convert each into a string and appends it to `strFields` for later usage.\n\tfor _, field := range fields {\n\t\tswitch field.(type) {\n\t\tcase string:\n\t\t\tstrFields = append(strFields, field.(string))\n\t\tcase nil:\n\t\t\tcontinue\n\t\tdefault:\n\t\t\treturn \"\", fmt.Errorf(\"unexpected schema field type: %v\", reflect.TypeOf(field))\n\t\t}\n\t}\n\n\t// Schemas cache, used to keep track schemas to be returned(without duplications).\n\tschemas := make(map[string]string, 0)\n\n\t// Iterates over `strFields` to convert each into a slice([]string), then iterates over it in order to\n\t// add each to `schemas` if not present already.\n\tfor _, strField := range strFields {\n\t\tif strField == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tsf := strings.Split(strField, schemasFieldSeparator)\n\t\tfor _, s := range sf {\n\t\t\tif _, found := schemas[s]; !found {\n\t\t\t\tschemas[s] = s\n\t\t\t}\n\t\t}\n\t}\n\n\tvar result []string\n\tfor k, _ := range schemas {\n\t\tresult = append(result, k)\n\t}\n\n\t// Returns a string which contains all the schemas separated by `schemasFieldSeparator`.\n\treturn strings.Join(result, schemasFieldSeparator), nil\n}",
"func preparePossibleProperties(g *memo.Group, propertyMap map[*memo.Group][][]*expression.Column) [][]*expression.Column {\n\tif prop, ok := propertyMap[g]; ok {\n\t\treturn prop\n\t}\n\tgroupPropertyMap := make(map[string][]*expression.Column)\n\tfor elem := g.Equivalents.Front(); elem != nil; elem = elem.Next() {\n\t\texpr := elem.Value.(*memo.GroupExpr)\n\t\tchildrenProperties := make([][][]*expression.Column, len(expr.Children))\n\t\tfor i, child := range expr.Children {\n\t\t\tchildrenProperties[i] = preparePossibleProperties(child, propertyMap)\n\t\t}\n\t\texprProperties := expr.ExprNode.PreparePossibleProperties(expr.Schema(), childrenProperties...)\n\t\tfor _, newPropCols := range exprProperties {\n\t\t\t// Check if the prop has already been in `groupPropertyMap`.\n\t\t\tnewProp := property.PhysicalProperty{SortItems: property.SortItemsFromCols(newPropCols, true)}\n\t\t\tkey := newProp.HashCode()\n\t\t\tif _, ok := groupPropertyMap[string(key)]; !ok {\n\t\t\t\tgroupPropertyMap[string(key)] = newPropCols\n\t\t\t}\n\t\t}\n\t}\n\tresultProps := make([][]*expression.Column, 0, len(groupPropertyMap))\n\tfor _, prop := range groupPropertyMap {\n\t\tresultProps = append(resultProps, prop)\n\t}\n\tpropertyMap[g] = resultProps\n\treturn resultProps\n}",
"func (fc *FacebookConversionAPI) PatchTableSchema(schemaToAdd *Table) error {\n\treturn nil\n}",
"func rewriteSchemaDescs(schemas []*schemadesc.Mutable, descriptorRewrites DescRewriteMap) error {\n\tfor _, sc := range schemas {\n\t\trewrite, ok := descriptorRewrites[sc.ID]\n\t\tif !ok {\n\t\t\treturn errors.Errorf(\"missing rewrite for schema %d\", sc.ID)\n\t\t}\n\t\t// Reset the version and modification time on this new descriptor.\n\t\tsc.Version = 1\n\t\tsc.ModificationTime = hlc.Timestamp{}\n\n\t\tsc.ID = rewrite.ID\n\t\tsc.ParentID = rewrite.ParentID\n\t}\n\treturn nil\n}",
"func (s Schema) DeepCopy() Schema {\n\tresult := make(Schema, 0, len(s))\n\tfor _, col := range s {\n\t\tnewCol := *col\n\t\tresult = append(result, &newCol)\n\t}\n\treturn result\n}",
"func schemaToDDL(conv *internal.Conv) error {\n\tfor _, srcTable := range conv.SrcSchema {\n\t\tspTableName, err := internal.GetSpannerTable(conv, srcTable.Name)\n\t\tif err != nil {\n\t\t\tconv.Unexpected(fmt.Sprintf(\"Couldn't map source table %s to Spanner: %s\", srcTable.Name, err))\n\t\t\tcontinue\n\t\t}\n\t\tvar spColNames []string\n\t\tspColDef := make(map[string]ddl.ColumnDef)\n\t\tconv.Issues[srcTable.Name] = make(map[string][]internal.SchemaIssue)\n\t\t// Iterate over columns using ColNames order.\n\t\tfor _, srcColName := range srcTable.ColNames {\n\t\t\tsrcCol := srcTable.ColDefs[srcColName]\n\t\t\tcolName, err := internal.GetSpannerCol(conv, srcTable.Name, srcCol.Name, false)\n\t\t\tif err != nil {\n\t\t\t\tconv.Unexpected(fmt.Sprintf(\"Couldn't map source column %s of table %s to Spanner: %s\", srcTable.Name, srcCol.Name, err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tspColNames = append(spColNames, colName)\n\t\t\tty, issues := toSpannerType(conv, srcCol.Type.Name, srcCol.Type.Mods)\n\n\t\t\tif conv.TargetDb == \"experimental_postgres\" { //TODO : Use constant instead. Using string to prevent import cycle\n\t\t\t\tty = overrideExperimentalType(srcCol, ty)\n\t\t\t} else {\n\t\t\t\tif len(srcCol.Type.ArrayBounds) > 1 {\n\t\t\t\t\tty = ddl.Type{Name: ddl.String, Len: ddl.MaxLength}\n\t\t\t\t\tissues = append(issues, internal.MultiDimensionalArray)\n\t\t\t\t}\n\t\t\t\tty.IsArray = len(srcCol.Type.ArrayBounds) == 1\n\t\t\t}\n\t\t\t// TODO: add issues for all elements of srcCol.Ignored.\n\t\t\tif srcCol.Ignored.ForeignKey {\n\t\t\t\tissues = append(issues, internal.ForeignKey)\n\t\t\t}\n\t\t\tif srcCol.Ignored.Default {\n\t\t\t\tissues = append(issues, internal.DefaultValue)\n\t\t\t}\n\t\t\tif len(issues) > 0 {\n\t\t\t\tconv.Issues[srcTable.Name][srcCol.Name] = issues\n\t\t\t}\n\n\t\t\tspColDef[colName] = ddl.ColumnDef{\n\t\t\t\tName: colName,\n\t\t\t\tT: ty,\n\t\t\t\tNotNull: srcCol.NotNull,\n\t\t\t\tComment: \"From: \" + quoteIfNeeded(srcCol.Name) + \" \" + srcCol.Type.Print(),\n\t\t\t}\n\t\t}\n\t\tcomment := \"Spanner schema for source table \" + quoteIfNeeded(srcTable.Name)\n\t\tconv.SpSchema[spTableName] = ddl.CreateTable{\n\t\t\tName: spTableName,\n\t\t\tColNames: spColNames,\n\t\t\tColDefs: spColDef,\n\t\t\tPks: cvtPrimaryKeys(conv, srcTable.Name, srcTable.PrimaryKeys),\n\t\t\tFks: cvtForeignKeys(conv, srcTable.Name, srcTable.ForeignKeys),\n\t\t\tIndexes: cvtIndexes(conv, spTableName, srcTable.Name, srcTable.Indexes),\n\t\t\tComment: comment}\n\t}\n\tinternal.ResolveRefs(conv)\n\treturn nil\n}",
"func fillTableColumns(db *sql.DB, dialect Dialect, schemaName string, tables *map[string]*Table) error {\n\tvar qs string\n\tswitch dialect {\n\tcase DIALECT_MYSQL:\n\t\tqs = `\nSELECT c.TABLE_NAME, c.COLUMN_NAME\nFROM INFORMATION_SCHEMA.COLUMNS AS c\nJOIN INFORMATION_SCHEMA.TABLES AS t\n ON t.TABLE_SCHEMA = c.TABLE_SCHEMA\n AND t.TABLE_NAME = c.TABLE_NAME\nWHERE c.TABLE_SCHEMA = ?\nAND t.TABLE_TYPE = 'BASE TABLE'\nORDER BY c.TABLE_NAME, c.COLUMN_NAME\n`\n\tcase DIALECT_POSTGRESQL:\n\t\tqs = `\nSELECT c.TABLE_NAME, c.COLUMN_NAME\nFROM INFORMATION_SCHEMA.COLUMNS AS c\nJOIN INFORMATION_SCHEMA.TABLES AS t\n ON t.TABLE_SCHEMA = c.TABLE_SCHEMA\n AND t.TABLE_NAME = c.TABLE_NAME\nWHERE c.TABLE_SCHEMA = 'public'\nAND c.TABLE_CATALOG = $1\nAND t.TABLE_TYPE = 'BASE TABLE'\nORDER BY c.TABLE_NAME, c.COLUMN_NAME\n`\n\t}\n\trows, err := db.Query(qs, schemaName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar t *Table\n\tfor rows.Next() {\n\t\tvar tname string\n\t\tvar cname string\n\t\terr = rows.Scan(&tname, &cname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tt = (*tables)[tname]\n\t\tif t.columns == nil {\n\t\t\tt.columns = make([]*Column, 0)\n\t\t}\n\t\tc := &Column{tbl: t, name: cname}\n\t\tt.columns = append(t.columns, c)\n\t}\n\treturn nil\n}",
"func (c *Column) DiffSchema(old *Column) *Column {\n\tcopyCol := func(change Diff) *Column {\n\t\tcol := *c\n\t\tcol.SetDiffMode(change)\n\t\treturn &col\n\t}\n\tif old == nil {\n\t\treturn copyCol(Added)\n\t}\n\tif old.SQLType != c.SQLType || old.Default != c.Default {\n\t\treturn copyCol(Changed)\n\t}\n\treturn nil\n}",
"func Replace(old, new interface{}) {\n\tif reflect.ValueOf(old).Kind() != reflect.Ptr {\n\t\tpanic(\"old must be a pointer to a struct\")\n\t}\n\tif reflect.ValueOf(new).Kind() != reflect.Ptr {\n\t\tpanic(\"new must be a pointer to a struct\")\n\t}\n\tif reflect.ValueOf(old).Elem().Kind() != reflect.Struct {\n\t\tpanic(\"old must be a pointer to a struct\")\n\t}\n\tif reflect.ValueOf(new).Elem().Kind() != reflect.Struct {\n\t\tpanic(\"new must be a pointer to a struct\")\n\t}\n\toldReflect := reflect.ValueOf(old).Elem()\n\toldField := reflect.TypeOf(old).Elem()\n\tnewField := reflect.TypeOf(new).Elem()\n\tnewReflect := reflect.ValueOf(new).Elem()\n\tfor i, n := 0, newField.NumField(); i < n; i++ {\n\t\tname := newField.Field(i).Name\n\t\t_, oldHave := oldField.FieldByName(name)\n\t\tif oldHave && !newReflect.FieldByName(name).IsNil() {\n\t\t\toldReflect.FieldByName(name).Set(newReflect.FieldByName(name).Elem())\n\t\t}\n\t}\n}",
"func (s *Split) ApplyToSchema(schema *serializers.Schema, migrationRepo migrations.Repository, _idempotently bool) error {\n\tfor i, candidate := range schema.Splits { // Replace\n\t\tif candidate.Name == *s.name {\n\t\t\tschemaWeights, err := WeightsFromYAML(candidate.Weights)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tschemaWeights.Merge(*s.weights)\n\t\t\tschema.Splits[i].Decided = false\n\t\t\tschema.Splits[i].Weights = schemaWeights.ToYAML()\n\t\t\treturn nil\n\t\t}\n\t}\n\tif s.migrationVersion != nil { // Revive weights from old migration\n\t\tsplit := MostRecentNamed(*s.name, *s.migrationVersion, migrationRepo)\n\t\tif split != nil {\n\t\t\tweights := split.Weights()\n\t\t\tweights.Merge(*s.weights)\n\t\t\tschema.Splits = append(schema.Splits, serializers.SchemaSplit{\n\t\t\t\tName: *s.name,\n\t\t\t\tWeights: weights.ToYAML(),\n\t\t\t\tDecided: false,\n\t\t\t})\n\t\t\treturn nil\n\t\t}\n\t}\n\tschemaSplit := serializers.SchemaSplit{ // Create\n\t\tName: *s.name,\n\t\tWeights: s.weights.ToYAML(),\n\t\tDecided: false,\n\t}\n\tschema.Splits = append(schema.Splits, schemaSplit)\n\treturn nil\n}",
"func (*partitionProcessor) reconstructTableColNames(ds *DataSource) ([]*types.FieldName, error) {\n\tnames := make([]*types.FieldName, 0, len(ds.TblCols))\n\t// Use DeletableCols to get all the columns.\n\tcolsInfo := ds.table.DeletableCols()\n\tcolsInfoMap := make(map[int64]*table.Column, len(colsInfo))\n\tfor _, c := range colsInfo {\n\t\tcolsInfoMap[c.ID] = c\n\t}\n\tfor _, colExpr := range ds.TblCols {\n\t\tif colExpr.ID == model.ExtraHandleID {\n\t\t\tnames = append(names, &types.FieldName{\n\t\t\t\tDBName: ds.DBName,\n\t\t\t\tTblName: ds.tableInfo.Name,\n\t\t\t\tColName: model.ExtraHandleName,\n\t\t\t\tOrigColName: model.ExtraHandleName,\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t\tif colExpr.ID == model.ExtraPidColID {\n\t\t\tnames = append(names, &types.FieldName{\n\t\t\t\tDBName: ds.DBName,\n\t\t\t\tTblName: ds.tableInfo.Name,\n\t\t\t\tColName: model.ExtraPartitionIdName,\n\t\t\t\tOrigColName: model.ExtraPartitionIdName,\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t\tif colExpr.ID == model.ExtraPhysTblID {\n\t\t\tnames = append(names, &types.FieldName{\n\t\t\t\tDBName: ds.DBName,\n\t\t\t\tTblName: ds.tableInfo.Name,\n\t\t\t\tColName: model.ExtraPhysTblIdName,\n\t\t\t\tOrigColName: model.ExtraPhysTblIdName,\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t\tif colInfo, found := colsInfoMap[colExpr.ID]; found {\n\t\t\tnames = append(names, &types.FieldName{\n\t\t\t\tDBName: ds.DBName,\n\t\t\t\tTblName: ds.tableInfo.Name,\n\t\t\t\tColName: colInfo.Name,\n\t\t\t\tOrigTblName: ds.tableInfo.Name,\n\t\t\t\tOrigColName: colInfo.Name,\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t\treturn nil, errors.Trace(fmt.Errorf(\"information of column %v is not found\", colExpr.String()))\n\t}\n\treturn names, nil\n}",
"func migrateProposals(store storetypes.KVStore, cdc codec.BinaryCodec) error {\n\tpropStore := prefix.NewStore(store, v1.ProposalsKeyPrefix)\n\n\titer := propStore.Iterator(nil, nil)\n\tdefer iter.Close()\n\n\tfor ; iter.Valid(); iter.Next() {\n\t\tvar oldProp govv1beta1.Proposal\n\t\terr := cdc.Unmarshal(iter.Value(), &oldProp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tnewProp, err := convertToNewProposal(oldProp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbz, err := cdc.Marshal(&newProp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Set new value on store.\n\t\tpropStore.Set(iter.Key(), bz)\n\t}\n\n\treturn nil\n}",
"func (d *Driver) Schema(obj interface{}) error {\n\n\tnSliceTyp := reflect.SliceOf(reflect.TypeOf(obj))\n\n\t//newSlice := reflect.MakeSlice(nSliceTyp, 0, 1)\n\tnewSlice := reflect.New(nSliceTyp)\n\n\td.CollPtr = newSlice.Interface() // Pointer\n\n\treturn nil\n}",
"func Migrate(old *Table, new interface{}, tableName string, safe bool) (res []string, newMap Table, err error) {\n\tnewMap, err = StructMap(new)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif old == nil {\n\t\treturn []string{CreateTable(new, tableName)}, newMap, nil\n\t}\n\toldMap := *old\n\tnewMap, err = StructMap(new)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tqueries := []string{}\n\tfor key, oldval := range oldMap {\n\t\tnewval, ok := newMap[key]\n\t\tswitch {\n\t\tcase !ok:\n\t\t\t//doesnt exist in new\n\t\t\toldIsField, _ := isField(oldval.DB)\n\t\t\tif !safe && oldIsField {\n\t\t\t\tqueries = append(queries, fmt.Sprintf(\"alter table %v drop column %q\", tableName, oldval.JSON))\n\t\t\t}\n\t\tcase ok:\n\t\t\t//exists in both\n\t\t\tif oldval.JSON != newval.JSON { // name changed\n\n\t\t\t\toldIsField, _ := isField(oldval.DB)\n\t\t\t\tnewIsField, _ := isField(newval.DB)\n\t\t\t\tif oldIsField && newIsField {\n\t\t\t\t\t//rename\n\t\t\t\t\tqueries = append(queries, fmt.Sprintf(\"alter table %v rename column %q to %q\", tableName, oldval.JSON, newval.JSON))\n\t\t\t\t}\n\t\t\t}\n\t\t\tif oldval.DB != newval.DB {\n\t\t\t\t// fmt.Println(\"parsing db changes for\", key)\n\t\t\t\tqs, err := parseMigration(tableName, newval.JSON, oldval.DB, newval.DB, safe)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, nil, err\n\t\t\t\t}\n\t\t\t\tqueries = append(queries, qs...)\n\t\t\t}\n\t\t}\n\t}\n\tfor key, newval := range newMap {\n\t\tif _, ok := oldMap[key]; !ok {\n\t\t\tok, field := isField(newval.DB)\n\t\t\tif ok {\n\t\t\t\tqueries = append(queries, fmt.Sprintf(\"alter table %v add column %q %v\", tableName, newval.JSON, field))\n\t\t\t}\n\t\t}\n\t}\n\treturn queries, newMap, nil\n}",
"func TestGetCleanTableSchema(t *testing.T) {\n\tcases := []struct {\n\t\tname string\n\t\trows *mockRows\n\t\texpect map[string]interface{}\n\t}{\n\t\t// mysql 5\n\t\t{\n\t\t\tname: \"mysql_5_table_schema\",\n\t\t\trows: &mockRows{\n\t\t\t\tt: t,\n\t\t\t\tdata: [][]interface{}{\n\t\t\t\t\t{\"table_schema\", \"mysqlslap\"},\n\t\t\t\t\t{\"table_name\", \"t1\"},\n\t\t\t\t\t{\"table_type\", \"BASE TABLE\"},\n\t\t\t\t\t{\"engine\", \"InnoDB\"},\n\t\t\t\t\t{\"version\", \"10\"},\n\t\t\t\t\t{\"table_rows\", \"571\"},\n\t\t\t\t\t{\"data_length\", \"13156352\"},\n\t\t\t\t\t{\"index_length\", \"0\"},\n\t\t\t\t\t{\"data_free\", \"4194304\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpect: map[string]interface{}{\n\t\t\t\t\"table_schema\": \"mysqlslap\",\n\t\t\t\t\"table_name\": \"t1\",\n\t\t\t\t\"table_type\": \"BASE TABLE\",\n\t\t\t\t\"engine\": \"InnoDB\",\n\t\t\t\t\"version\": \"10\",\n\t\t\t\t\"table_rows\": \"571\",\n\t\t\t\t\"data_length\": \"13156352\",\n\t\t\t\t\"index_length\": \"0\",\n\t\t\t\t\"data_free\": \"4194304\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tres := getCleanSchemaData(tc.rows)\n\n\t\t\tfor k, v := range tc.expect {\n\t\t\t\tswitch x := v.(type) {\n\t\t\t\tcase int64:\n\t\t\t\t\ttu.Assert(t, res[k] != nil, \"key %s should not be nil\", k)\n\t\t\t\t\ttu.Equals(t, x, res[k].(int64))\n\t\t\t\tcase string:\n\t\t\t\t\ttu.Equals(t, x, res[k].(string))\n\t\t\t\tdefault:\n\t\t\t\t\tt.Logf(\"%s is type %s\", k, reflect.TypeOf(v))\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
convert2PhysicalPlanRight converts the join (a right outer join, or an inner join driven from the right side) to a physicalPlanInfo.
|
func (p *LogicalJoin) convert2PhysicalPlanRight(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {
lChild := p.children[0].(LogicalPlan)
rChild := p.children[1].(LogicalPlan)
allRight := true
for _, col := range prop.props {
if !rChild.Schema().Contains(col.col) {
allRight = false
}
}
join := PhysicalHashJoin{
EqualConditions: p.EqualConditions,
LeftConditions: p.LeftConditions,
RightConditions: p.RightConditions,
OtherConditions: p.OtherConditions,
// TODO: decide concurrency by data size.
Concurrency: JoinConcurrency,
DefaultValues: p.DefaultValues,
}.init(p.allocator, p.ctx)
join.SetSchema(p.schema)
if innerJoin {
join.JoinType = InnerJoin
} else {
join.JoinType = RightOuterJoin
}
lInfo, err := lChild.convert2PhysicalPlan(&requiredProperty{})
if err != nil {
return nil, errors.Trace(err)
}
rProp := prop
if !allRight {
rProp = &requiredProperty{}
} else {
rProp = replaceColsInPropBySchema(rProp, rChild.Schema())
}
var rInfo *physicalPlanInfo
if innerJoin {
rInfo, err = rChild.convert2PhysicalPlan(removeLimit(rProp))
} else {
rInfo, err = rChild.convert2PhysicalPlan(convertLimitOffsetToCount(rProp))
}
if err != nil {
return nil, errors.Trace(err)
}
resultInfo := join.matchProperty(prop, lInfo, rInfo)
if !allRight {
resultInfo = enforceProperty(prop, resultInfo)
} else {
resultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)
}
return resultInfo, nil
}
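// Note on the asymmetry with convert2PhysicalPlanLeft: in the join's output
// schema the left child's columns come first, so a property resolved against
// the join schema already lines up with the left child. The right child's
// columns sit after them, at an offset, which is presumably why the code above
// re-resolves the property with replaceColsInPropBySchema before pushing it
// down. A tiny illustration with hypothetical column positions:

package main

import "fmt"

func main() {
	leftCols := []string{"l0", "l1"}
	rightCols := []string{"r0", "r1"}
	joinSchema := append(append([]string{}, leftCols...), rightCols...)
	// "r0" is at index 2 in the join schema but index 0 in the right child,
	// so the property's columns must be swapped for the right child's own.
	fmt.Println(joinSchema[2], "==", rightCols[0])
}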
|
[
"func (p *LogicalJoin) convert2PhysicalPlanLeft(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tSmallTable: 1,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = LeftOuterJoin\n\t}\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tvar lInfo *physicalPlanInfo\n\tvar err error\n\tif innerJoin {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(removeLimit(lProp))\n\t} else {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(convertLimitOffsetToCount(lProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalJoin) convert2PhysicalMergeJoin(parentProp *requiredProperty, lProp *requiredProperty, rProp *requiredProperty, condIndex int, joinType JoinType) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\n\tnewEQConds := make([]*expression.ScalarFunction, 0, len(p.EqualConditions)-1)\n\tfor i, cond := range p.EqualConditions {\n\t\tif i == condIndex {\n\t\t\tcontinue\n\t\t}\n\t\t// prevent further index contamination\n\t\tnewCond := cond.Clone()\n\t\tnewCond.ResolveIndices(p.schema)\n\t\tnewEQConds = append(newEQConds, newCond.(*expression.ScalarFunction))\n\t}\n\teqCond := p.EqualConditions[condIndex]\n\n\totherFilter := append(expression.ScalarFuncs2Exprs(newEQConds), p.OtherConditions...)\n\n\tjoin := PhysicalMergeJoin{\n\t\tEqualConditions: []*expression.ScalarFunction{eqCond},\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: otherFilter,\n\t\tDefaultValues: p.DefaultValues,\n\t\t// Assume order for both side are the same\n\t\tDesc: lProp.props[0].desc,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tjoin.JoinType = joinType\n\n\tvar lInfo *physicalPlanInfo\n\tvar rInfo *physicalPlanInfo\n\n\t// Try no sort first\n\tlInfoEnforceSort, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tlInfoEnforceSort = enforceProperty(lProp, lInfoEnforceSort)\n\n\tlInfoNoSorted, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif lInfoNoSorted.cost < lInfoEnforceSort.cost {\n\t\tlInfo = lInfoNoSorted\n\t} else {\n\t\tlInfo = lInfoEnforceSort\n\t}\n\n\trInfoEnforceSort, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfoEnforceSort = enforceProperty(rProp, rInfoEnforceSort)\n\n\trInfoNoSorted, err := rChild.convert2PhysicalPlan(rProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif rInfoEnforceSort.cost < rInfoNoSorted.cost {\n\t\trInfo = rInfoEnforceSort\n\t} else {\n\t\trInfo = rInfoNoSorted\n\t}\n\tparentProp = join.tryConsumeOrder(parentProp, eqCond)\n\n\tresultInfo := join.matchProperty(parentProp, lInfo, rInfo)\n\t// TODO: Considering keeping order in join to remove at least\n\t// one ordering property\n\tresultInfo = enforceProperty(parentProp, resultInfo)\n\treturn resultInfo, nil\n}",
"func (p *LogicalApply) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tif p.JoinType == InnerJoin || p.JoinType == LeftOuterJoin {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanLeft(&requiredProperty{}, p.JoinType == InnerJoin)\n\t} else {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanSemi(&requiredProperty{})\n\t}\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tswitch info.p.(type) {\n\tcase *PhysicalHashJoin, *PhysicalHashSemiJoin:\n\t\tap := PhysicalApply{\n\t\t\tPhysicalJoin: info.p,\n\t\t\tOuterSchema: p.corCols,\n\t\t}.init(p.allocator, p.ctx)\n\t\tap.SetChildren(info.p.Children()...)\n\t\tap.SetSchema(info.p.Schema())\n\t\tinfo.p = ap\n\tdefault:\n\t\tinfo.cost = math.MaxFloat64\n\t\tinfo.p = nil\n\t}\n\tinfo = enforceProperty(prop, info)\n\tp.storePlanInfo(prop, info)\n\treturn info, nil\n}",
"func (p *LogicalJoin) convert2PhysicalPlanSemi(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashSemiJoin{\n\t\tWithAux: LeftOuterSemiJoin == p.JoinType,\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tAnti: p.anti,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tif p.JoinType == SemiJoin {\n\t\tlProp = removeLimit(lProp)\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif p.JoinType == SemiJoin {\n\t\tresultInfo.count = lInfo.count * selectionFactor\n\t} else {\n\t\tresultInfo.count = lInfo.count\n\t}\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else if p.JoinType == SemiJoin {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tplanInfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif planInfo != nil {\n\t\treturn planInfo, nil\n\t}\n\tlimit := prop.limit\n\tif len(prop.props) == 0 {\n\t\tplanInfo, err = p.convert2PhysicalPlanHash()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tstreamInfo, err := p.convert2PhysicalPlanStream(removeLimit(prop))\n\tif planInfo == nil || streamInfo.cost < planInfo.cost {\n\t\tplanInfo = streamInfo\n\t}\n\tplanInfo = enforceProperty(limitProperty(limit), planInfo)\n\terr = p.storePlanInfo(prop, planInfo)\n\treturn planInfo, errors.Trace(err)\n}",
"func (r readableTableInterfaceImpl) RIGHT_JOIN(table ReadableTable, onCondition BoolExpression) joinSelectUpdateTable {\n\treturn newJoinTable(r.parent, table, jet.RightJoin, onCondition)\n}",
"func (p *DataSource) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tinfo, err = p.tryToConvert2DummyScan(prop)\n\tif info != nil || err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tclient := p.ctx.GetClient()\n\tmemDB := infoschema.IsMemoryDB(p.DBName.L)\n\tisDistReq := !memDB && client != nil && client.SupportRequestType(kv.ReqTypeSelect, 0)\n\tif !isDistReq {\n\t\tmemTable := PhysicalMemTable{\n\t\t\tDBName: p.DBName,\n\t\t\tTable: p.tableInfo,\n\t\t\tColumns: p.Columns,\n\t\t\tTableAsName: p.TableAsName,\n\t\t}.init(p.allocator, p.ctx)\n\t\tmemTable.SetSchema(p.schema)\n\t\trb := &ranger.Builder{Sc: p.ctx.GetSessionVars().StmtCtx}\n\t\tmemTable.Ranges = rb.BuildTableRanges(ranger.FullRange)\n\t\tinfo = &physicalPlanInfo{p: memTable}\n\t\tinfo = enforceProperty(prop, info)\n\t\tp.storePlanInfo(prop, info)\n\t\treturn info, nil\n\t}\n\tindices, includeTableScan := availableIndices(p.indexHints, p.tableInfo)\n\tif includeTableScan {\n\t\tinfo, err = p.convert2TableScan(prop)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tif !includeTableScan || p.need2ConsiderIndex(prop) {\n\t\tfor _, index := range indices {\n\t\t\tindexInfo, err := p.convert2IndexScan(prop, index)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tif info == nil || indexInfo.cost < info.cost {\n\t\t\t\tinfo = indexInfo\n\t\t\t}\n\t\t}\n\t}\n\treturn info, errors.Trace(p.storePlanInfo(prop, info))\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanHash() (*physicalPlanInfo, error) {\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tdistinct := false\n\tfor _, fun := range p.AggFuncs {\n\t\tif fun.IsDistinct() {\n\t\t\tdistinct = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !distinct {\n\t\tif x, ok := childInfo.p.(physicalDistSQLPlan); ok {\n\t\t\tinfo := p.convert2PhysicalPlanFinalHash(x, childInfo)\n\t\t\tif info != nil {\n\t\t\t\treturn info, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn p.convert2PhysicalPlanCompleteHash(childInfo), nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanFinalHash(x physicalDistSQLPlan, childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: FinalAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.SetSchema(p.schema)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tschema := x.addAggregation(p.ctx, agg)\n\tif schema.Len() == 0 {\n\t\treturn nil\n\t}\n\tx.(PhysicalPlan).SetSchema(schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.count = info.count * aggFactor\n\t// if we build the final aggregation, it must be the best plan.\n\tinfo.cost = 0\n\treturn info\n}",
"func (stmt *SelectStmt) RightLateralJoin(rs *SelectStmt, as string, conds ...WhereCondition) *SelectStmt {\n\treturn stmt.Join(RightLateralJoin, as, rs, conds...)\n}",
"func (p *Selection) convert2PhysicalPlanEnforce(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tchild := p.children[0].(LogicalPlan)\n\tinfo, err := child.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif prop.limit != nil && len(prop.props) > 0 {\n\t\tif t, ok := info.p.(physicalDistSQLPlan); ok {\n\t\t\tt.addTopN(p.ctx, prop)\n\t\t} else if _, ok := info.p.(*Selection); !ok {\n\t\t\tinfo = p.appendSelToInfo(info)\n\t\t}\n\t\tinfo = enforceProperty(prop, info)\n\t} else if len(prop.props) != 0 {\n\t\tinfo = &physicalPlanInfo{cost: math.MaxFloat64}\n\t}\n\treturn info, nil\n}",
"func (d *Div) convertLeftRight(ctx *sql.Context, left interface{}, right interface{}) (interface{}, interface{}) {\n\ttyp := d.internalType()\n\tlIsTimeType := types.IsTime(d.Left.Type())\n\trIsTimeType := types.IsTime(d.Right.Type())\n\n\tif types.IsFloat(typ) {\n\t\tleft = convertValueToType(ctx, typ, left, lIsTimeType)\n\t} else {\n\t\tleft = convertToDecimalValue(left, lIsTimeType)\n\t}\n\n\tif types.IsFloat(typ) {\n\t\tright = convertValueToType(ctx, typ, right, rIsTimeType)\n\t} else {\n\t\tright = convertToDecimalValue(right, rIsTimeType)\n\t}\n\n\treturn left, right\n}",
"func (m *Map) rotateRight(n, p *node) (*node, *node) {\n\t//_ = assertOn && assert(l == n.ln, \"new node is not left child of new parent\")\n\t//_ = assertOn && assert(p == nil || n == p.rn,\n\t//\t\"node is not the right child of parent\")\n\tvar l = n.ln //assume n.ln is already a copy.\n\n\tif p != nil {\n\t\tif n.isLeftChildOf(p) {\n\t\t\tp.ln = l\n\t\t} else {\n\t\t\tp.rn = l\n\t\t}\n\t} /* else {\n\t\tm.root = l\n\t} */\n\n\tn.ln = l.rn //handle anticipated orphaned node\n\tl.rn = n //now orphan it\n\n\treturn n, l\n}",
"func (stmt *SelectStmt) RightJoin(table string, conds ...WhereCondition) *SelectStmt {\n\treturn stmt.Join(RightJoin, table, nil, conds...)\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanCompleteHash(childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: CompleteAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * memoryFactor\n\tinfo.count = info.count * aggFactor\n\treturn info\n}",
"func (b SelectBuilder) RightJoin(join string, rest ...interface{}) SelectBuilder {\n\treturn b.JoinClause(\"RIGHT JOIN \"+join, rest...)\n}",
"func NewRightJoin(table Table, condition Expression) Join {\n\treturn NewJoin(types.RightJoin, table, condition)\n}",
"func (_this *FrameData) RightProjectionMatrix() *javascript.Float32Array {\n\tvar ret *javascript.Float32Array\n\tvalue := _this.Value_JS.Get(\"rightProjectionMatrix\")\n\tret = javascript.Float32ArrayFromJS(value)\n\treturn ret\n}",
"func (q SelectQuery) RightJoin(table Table, predicate Predicate, predicates ...Predicate) SelectQuery {\n\tpredicates = append([]Predicate{predicate}, predicates...)\n\tq.JoinTables = append(q.JoinTables, JoinTable{\n\t\tJoinType: JoinTypeRight,\n\t\tTable: table,\n\t\tOnPredicates: VariadicPredicate{\n\t\t\tPredicates: predicates,\n\t\t},\n\t})\n\treturn q\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
buildSelectionWithConds builds a selection using the join's conditions and converts one side's columns to correlated columns. If the inner side is a selection over a single data source, the inner child should be the data source rather than the selection. This is called when building a nested loop join.
|
func (p *LogicalJoin) buildSelectionWithConds(leftAsOuter bool) (*Selection, []*expression.CorrelatedColumn) {
var (
outerSchema *expression.Schema
innerChild Plan
innerConditions []expression.Expression
)
if leftAsOuter {
outerSchema = p.children[0].Schema()
innerConditions = p.RightConditions
innerChild = p.children[1]
} else {
outerSchema = p.children[1].Schema()
innerConditions = p.LeftConditions
innerChild = p.children[0]
}
if sel, ok := innerChild.(*Selection); ok {
innerConditions = append(innerConditions, sel.Conditions...)
innerChild = sel.children[0]
}
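	// Wrap every outer column in a correlated column so the inner conditions can
	// reference the outer row's values at execution time.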
corCols := make([]*expression.CorrelatedColumn, 0, outerSchema.Len())
for _, col := range outerSchema.Columns {
corCol := &expression.CorrelatedColumn{Column: *col, Data: new(types.Datum)}
corCol.Column.ResolveIndices(outerSchema)
corCols = append(corCols, corCol)
}
selection := Selection{}.init(p.allocator, p.ctx)
selection.SetSchema(innerChild.Schema().Clone())
selection.SetChildren(innerChild)
conds := make([]expression.Expression, 0, len(p.EqualConditions)+len(innerConditions)+len(p.OtherConditions))
for _, cond := range p.EqualConditions {
newCond := expression.ConvertCol2CorCol(cond, corCols, outerSchema)
conds = append(conds, newCond)
}
selection.Conditions = conds
	// Currently only eq conds are considered by checkScanController, and the innerConds from the selection
	// below may contain correlated columns, which would have side effects on that check. So we run the check
	// before appending the other conditions to the selection.
selection.controllerStatus = selection.checkScanController()
if selection.controllerStatus == notController {
return nil, nil
}
	conds = append(conds, innerConditions...)
for _, cond := range p.OtherConditions {
newCond := expression.ConvertCol2CorCol(cond, corCols, outerSchema)
newCond.ResolveIndices(innerChild.Schema())
conds = append(conds, newCond)
}
selection.Conditions = conds
return selection, corCols
}
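A minimal runnable sketch (toy types, not TiDB's expression package) of the correlated-column mechanism the function relies on: each correlated column holds a pointer to a datum owned by the outer side, so rebinding the outer row updates every inner predicate that references it without rebuilding the selection.

package main

import "fmt"

// datum stands in for types.Datum; corCol stands in for expression.CorrelatedColumn.
type datum struct{ v int }

type corCol struct {
	name string
	data *datum // shared with the outer side; rebound for every outer row
}

func (c corCol) eval() int { return c.data.v }

func main() {
	outerVal := &datum{}
	cc := corCol{name: "t1.a", data: outerVal}

	// An inner predicate like "t2.b = t1.a" sees a fresh value on each outer row.
	for _, row := range []int{1, 2, 3} {
		outerVal.v = row // the outer executor binds the next row
		fmt.Printf("inner selection evaluates %s = %d\n", cc.name, cc.eval())
	}
}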
|
[
"func getGroupJoin(inner interface{},\r\n\touterKeySelector OneArgsFunc,\r\n\tinnerKeySelector OneArgsFunc,\r\n\tresultSelector func(interface{}, []interface{}) interface{}, isLeftJoin bool) stepAction {\r\n\r\n\treturn getJoinImpl(inner, outerKeySelector, innerKeySelector,\r\n\t\tfunc(outerkv *hKeyValue, innerList []interface{}, results *[]interface{}) {\r\n\t\t\t*results = appendToSlice1(*results, resultSelector(outerkv.value, innerList))\r\n\t\t}, func(outerkv *hKeyValue, results *[]interface{}) {\r\n\t\t\t*results = appendToSlice1(*results, resultSelector(outerkv.value, []interface{}{}))\r\n\t\t}, isLeftJoin)\r\n}",
"func (m *PlannerDefault) WalkSelect(p *Select) error {\n\n\t// u.Debugf(\"VisitSelect ctx:%p %+v\", p.Ctx, p.Stmt)\n\n\tneedsFinalProject := true\n\n\tif len(p.Stmt.From) == 0 {\n\n\t\treturn m.WalkLiteralQuery(p)\n\n\t} else if len(p.Stmt.From) == 1 {\n\n\t\tp.Stmt.From[0].Source = p.Stmt // TODO: move to a Finalize() in query parser/planner\n\n\t\tvar srcPlan *Source\n\n\t\tif p.Stmt.Where != nil && p.Stmt.Where.Source != nil { // Where subquery\n\t\t\tnegate := false\n\t\t\tvar parentJoin expr.Node\n\t\t\tif n, ok := p.Stmt.Where.Expr.(*expr.BinaryNode); ok {\n\t\t\t\tparentJoin = n.Args[0]\n\t\t\t} else if n2, ok2 := p.Stmt.Where.Expr.(*expr.UnaryNode); ok2 {\n\t\t\t\tparentJoin = n2.Arg\n\t\t\t\tnegate = true\n\t\t\t}\n\t\t\tp.Stmt.From[0].AddJoin(parentJoin)\n\n\t\t\tvar err error\n\t\t\tsrcPlan, err = NewSource(m.Ctx, p.Stmt.From[0], false)\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t//p.From = append(p.From, srcPlan)\n\t\t\tsub := p.Stmt.Where.Source\n\t\t\t// Inject join criteria (JoinNodes, JoinExpr) on source for subquery (back to parent)\n\t\t\tsubSqlSrc := sub.From[0]\n\t\t\terr = m.Planner.WalkSourceSelect(srcPlan)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsubSrc := rel.NewSqlSource(subSqlSrc.Name)\n\t\t\tsubSrc.Rewrite(sub)\n\t\t\tcols := subSrc.UnAliasedColumns()\n\t\t\tvar childJoin expr.Node\n\t\t\tif len(cols) > 1 {\n\t\t\t\treturn fmt.Errorf(\"subquery must contain only 1 select column for join\")\n\t\t\t}\n\t\t\tfor _, v := range cols {\n\t\t\t\tchildJoin = v.Expr\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif childJoin == nil {\n\t\t\t\treturn fmt.Errorf(\"subquery must contain at least 1 select column for join\")\n\t\t\t}\n\t\t\tp.Stmt.From[0].AddJoin(childJoin)\n\t\t\tsubSrc.AddJoin(childJoin)\n\t\t\tsubSrcPlan, err := NewSource(m.Ctx, subSrc, false)\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tsubSrc.AddJoin(childJoin)\n\t\t\tif negate {\n\t\t\t\tsubSrc.JoinExpr = expr.NewBinaryNode(lex.TokenFromOp(\"!=\"), parentJoin, childJoin)\n\t\t\t\tp.Stmt.From[0].JoinExpr = expr.NewBinaryNode(lex.TokenFromOp(\"!=\"), parentJoin, childJoin)\n\t\t\t} else {\n\t\t\t\tsubSrc.JoinExpr = expr.NewBinaryNode(lex.TokenFromOp(\"=\"), parentJoin, childJoin)\n\t\t\t\tp.Stmt.From[0].JoinExpr = expr.NewBinaryNode(lex.TokenFromOp(\"=\"), parentJoin, childJoin)\n\t\t\t}\n\t\t\terr = m.Planner.WalkSourceSelect(subSrcPlan)\n\t\t\tif err != nil {\n\t\t\t\tu.Errorf(\"Could not visitsubselect %v %s\", err, subSrcPlan)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsubQueryTask := NewJoinMerge(srcPlan, subSrcPlan, srcPlan.Stmt, subSrcPlan.Stmt)\n\t\t\tp.Add(subQueryTask)\n\t\t} else {\n\t\t\tvar err error\n\t\t\tsrcPlan, err = NewSource(m.Ctx, p.Stmt.From[0], true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tp.From = append(p.From, srcPlan)\n\t\t\tp.Add(srcPlan)\n\t\t\terr = m.Planner.WalkSourceSelect(srcPlan)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif srcPlan.Complete && !needsFinalProjection(p.Stmt) {\n\t\t\tgoto finalProjection\n\t\t}\n\n\t} else {\n\n\t\tvar prevSource *Source\n\t\tvar prevTask Task\n\n\t\tfor i, from := range p.Stmt.From {\n\n\t\t\t// Need to rewrite the From statement to ensure all fields necessary to support\n\t\t\t// joins, wheres, etc exist but is standalone query\n\t\t\tfrom.Rewrite(p.Stmt)\n\t\t\tsrcPlan, err := NewSource(m.Ctx, from, false)\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\terr = m.Planner.WalkSourceSelect(srcPlan)\n\t\t\tif err != nil {\n\t\t\t\tu.Errorf(\"Could not 
visitsubselect %v %s\", err, from)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// now fold into previous task\n\t\t\tif i != 0 {\n\t\t\t\tfrom.Seekable = true\n\t\t\t\t// fold this source into previous\n\t\t\t\tcurMergeTask := NewJoinMerge(prevTask, srcPlan, prevSource.Stmt, srcPlan.Stmt)\n\t\t\t\tprevTask = curMergeTask\n\t\t\t} else {\n\t\t\t\tprevTask = srcPlan\n\t\t\t}\n\t\t\tprevSource = srcPlan\n\t\t\t//u.Debugf(\"got task: %T\", lastSource)\n\t\t}\n\t\tp.Add(prevTask)\n\n\t}\n\n\tif p.Stmt.Where != nil {\n\t\tswitch {\n\t\tcase p.Stmt.Where.Source != nil:\n\t\t\t// SELECT id from article WHERE id in (select article_id from comments where comment_ct > 50);\n\t\t\tu.Warnf(\"Found un-supported subquery: %#v\", p.Stmt.Where)\n\t\t\treturn ErrNotImplemented\n\t\tcase p.Stmt.Where.Expr != nil:\n\t\t\tp.Add(NewWhere(p.Stmt))\n\t\tdefault:\n\t\t\tu.Warnf(\"Found un-supported where type: %#v\", p.Stmt.Where)\n\t\t\treturn fmt.Errorf(\"Unsupported Where Type\")\n\t\t}\n\t}\n\n\tif p.Stmt.IsAggQuery() {\n\t\t//u.Debugf(\"Adding aggregate/group by? %#v\", m.Planner)\n\t\tp.Add(NewGroupBy(p.Stmt))\n\t\tneedsFinalProject = false\n\t}\n\n\tif p.Stmt.Having != nil {\n\t\tp.Add(NewHaving(p.Stmt))\n\t}\n\n\tif len(p.Stmt.OrderBy) > 0 {\n\t\tp.Add(NewOrder(p.Stmt))\n\t}\n\n\tif needsFinalProject {\n\t\terr := m.WalkProjectionFinal(p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\nfinalProjection:\n\tif m.Ctx.Projection == nil {\n\t\tproj, err := NewProjectionFinal(m.Ctx, p)\n\t\t//u.Infof(\"Projection: %T:%p %T:%p\", proj, proj, proj.Proj, proj.Proj)\n\t\tif err != nil {\n\t\t\tu.Errorf(\"projection error? %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tm.Ctx.Projection = proj\n\t\t//u.Debugf(\"m.Ctx: %p m.Ctx.Projection: %T:%p\", m.Ctx, m.Ctx.Projection, m.Ctx.Projection)\n\t}\n\n\tif p.Stmt.Into != nil{\n\t\tp.Add(NewInto(p.Stmt.Into))\n\t}\n\n\treturn nil\n}",
"func replaceCrossJoins(ctx *sql.Context, a *Analyzer, n sql.Node, scope *plan.Scope, sel RuleSelector) (sql.Node, transform.TreeIdentity, error) {\n\tif !n.Resolved() {\n\t\treturn n, transform.SameTree, nil\n\t}\n\n\treturn transform.Node(n, func(n sql.Node) (sql.Node, transform.TreeIdentity, error) {\n\t\tf, ok := n.(*plan.Filter)\n\t\tif !ok {\n\t\t\treturn n, transform.SameTree, nil\n\t\t}\n\t\tpredicates := expression.SplitConjunction(f.Expression)\n\t\tmovedPredicates := make(map[int]struct{})\n\t\tnewF, _, err := transform.Node(f, func(n sql.Node) (sql.Node, transform.TreeIdentity, error) {\n\t\t\tcj, ok := n.(*plan.JoinNode)\n\t\t\tif !ok || !cj.Op.IsCross() {\n\t\t\t\treturn n, transform.SameTree, nil\n\t\t\t}\n\n\t\t\tjoinConjs := make([]int, 0, len(predicates))\n\t\t\tfor i, c := range predicates {\n\t\t\t\tif expressionCoversJoin(c, cj) {\n\t\t\t\t\tjoinConjs = append(joinConjs, i)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(joinConjs) == 0 {\n\t\t\t\treturn n, transform.SameTree, nil\n\t\t\t}\n\n\t\t\tnewExprs := make([]sql.Expression, len(joinConjs))\n\t\t\tfor i, v := range joinConjs {\n\t\t\t\tmovedPredicates[v] = struct{}{}\n\t\t\t\tnewExprs[i] = predicates[v]\n\t\t\t}\n\t\t\treturn plan.NewInnerJoin(cj.Left(), cj.Right(), expression.JoinAnd(newExprs...)), transform.NewTree, nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn f, transform.SameTree, err\n\t\t}\n\n\t\t// only alter the Filter expression tree if we transferred predicates to an InnerJoin\n\t\tif len(movedPredicates) == 0 {\n\t\t\treturn f, transform.SameTree, nil\n\t\t}\n\n\t\t// remove Filter if all expressions were transferred to joins\n\t\tif len(predicates) == len(movedPredicates) {\n\t\t\treturn newF.(*plan.Filter).Child, transform.NewTree, nil\n\t\t}\n\n\t\tnewFilterExprs := make([]sql.Expression, 0, len(predicates)-len(movedPredicates))\n\t\tfor i, e := range predicates {\n\t\t\tif _, ok := movedPredicates[i]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnewFilterExprs = append(newFilterExprs, e)\n\t\t}\n\t\tnewF, err = newF.(*plan.Filter).WithExpressions(expression.JoinAnd(newFilterExprs...))\n\t\treturn newF, transform.NewTree, err\n\t})\n}",
"func (b SelectBuilder) CrossJoin(join string, rest ...interface{}) SelectBuilder {\n\treturn b.JoinClause(\"CROSS JOIN \"+join, rest...)\n}",
"func rewriteColumnsInSubqueryOpForJoin(\n\tctx *plancontext.PlanningContext,\n\tinnerOp ops.Operator,\n\touterTree *ApplyJoin,\n\tsubQueryInner *SubQueryInner,\n) (ops.Operator, error) {\n\tvar rewriteError error\n\t// go over the entire expression in the subquery\n\tsqlparser.SafeRewrite(subQueryInner.ExtractedSubquery.Original, nil, func(cursor *sqlparser.Cursor) bool {\n\t\tnode, ok := cursor.Node().(*sqlparser.ColName)\n\t\tif !ok {\n\t\t\treturn true\n\t\t}\n\n\t\t// check whether the column name belongs to the other side of the join tree\n\t\tif !ctx.SemTable.RecursiveDeps(node).IsSolvedBy(TableID(innerOp)) {\n\t\t\treturn true\n\t\t}\n\n\t\t// get the bindVariable for that column name and replace it in the subquery\n\t\ttyp, _, _ := ctx.SemTable.TypeForExpr(node)\n\t\tbindVar := ctx.GetArgumentFor(node, func() string {\n\t\t\treturn ctx.ReservedVars.ReserveColName(node)\n\t\t})\n\t\tcursor.Replace(sqlparser.NewTypedArgument(bindVar, typ))\n\t\t// check whether the bindVariable already exists in the joinVars of the other tree\n\t\t_, alreadyExists := outerTree.Vars[bindVar]\n\t\tif alreadyExists {\n\t\t\treturn true\n\t\t}\n\t\t// if it does not exist, then push this as an output column there and add it to the joinVars\n\t\toffsets, err := innerOp.AddColumns(ctx, true, []bool{false}, []*sqlparser.AliasedExpr{aeWrap(node)})\n\t\tif err != nil {\n\t\t\trewriteError = err\n\t\t\treturn false\n\t\t}\n\t\touterTree.Vars[bindVar] = offsets[0]\n\t\treturn true\n\t})\n\n\t// update the dependencies for the subquery by removing the dependencies from the innerOp\n\ttableSet := ctx.SemTable.Direct[subQueryInner.ExtractedSubquery.Subquery]\n\tctx.SemTable.Direct[subQueryInner.ExtractedSubquery.Subquery] = tableSet.Remove(TableID(innerOp))\n\ttableSet = ctx.SemTable.Recursive[subQueryInner.ExtractedSubquery.Subquery]\n\tctx.SemTable.Recursive[subQueryInner.ExtractedSubquery.Subquery] = tableSet.Remove(TableID(innerOp))\n\n\t// return any error while rewriting\n\treturn innerOp, rewriteError\n}",
"func transformJoinApply(ctx *sql.Context, a *Analyzer, n sql.Node, scope *plan.Scope, sel RuleSelector) (sql.Node, transform.TreeIdentity, error) {\n\tswitch n.(type) {\n\tcase *plan.DeleteFrom, *plan.InsertInto:\n\t\treturn n, transform.SameTree, nil\n\t}\n\tvar applyId int\n\n\tret := n\n\tvar err error\n\tsame := transform.NewTree\n\tfor !same {\n\t\t// simplifySubqExpr can merge two scopes, requiring us to either\n\t\t// recurse on the merged scope or perform a fixed-point iteration.\n\t\tret, same, err = transform.Node(ret, func(n sql.Node) (sql.Node, transform.TreeIdentity, error) {\n\t\t\tvar filters []sql.Expression\n\t\t\tvar child sql.Node\n\t\t\tswitch n := n.(type) {\n\t\t\tcase *plan.Filter:\n\t\t\t\tchild = n.Child\n\t\t\t\tfilters = expression.SplitConjunction(n.Expression)\n\t\t\tdefault:\n\t\t\t}\n\n\t\t\tif sel == nil {\n\t\t\t\treturn n, transform.SameTree, nil\n\t\t\t}\n\n\t\t\tsubScope := scope.NewScopeFromSubqueryExpression(n)\n\t\t\tvar matches []applyJoin\n\t\t\tvar newFilters []sql.Expression\n\n\t\t\t// separate decorrelation candidates\n\t\t\tfor _, e := range filters {\n\t\t\t\tif !plan.IsNullRejecting(e) {\n\t\t\t\t\t// TODO: rewrite dual table to permit in-scope joins,\n\t\t\t\t\t// which aren't possible when values are projected\n\t\t\t\t\t// above join filter\n\t\t\t\t\trt := getResolvedTable(n)\n\t\t\t\t\tif rt == nil || plan.IsDualTable(rt.Table) {\n\t\t\t\t\t\tnewFilters = append(newFilters, e)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tcandE := e\n\t\t\t\top := plan.JoinTypeSemi\n\t\t\t\tif n, ok := e.(*expression.Not); ok {\n\t\t\t\t\tcandE = n.Child\n\t\t\t\t\top = plan.JoinTypeAnti\n\t\t\t\t}\n\n\t\t\t\tvar sq *plan.Subquery\n\t\t\t\tvar l sql.Expression\n\t\t\t\tvar joinF sql.Expression\n\t\t\t\tvar max1 bool\n\t\t\t\tswitch e := candE.(type) {\n\t\t\t\tcase *plan.InSubquery:\n\t\t\t\t\tsq, _ = e.Right.(*plan.Subquery)\n\t\t\t\t\tl = e.Left\n\n\t\t\t\t\tjoinF = expression.NewEquals(nil, nil)\n\t\t\t\tcase expression.Comparer:\n\t\t\t\t\tsq, _ = e.Right().(*plan.Subquery)\n\t\t\t\t\tl = e.Left()\n\t\t\t\t\tjoinF = e\n\t\t\t\t\tmax1 = true\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\tif sq != nil {\n\t\t\t\t\tsq.Query, _, err = fixidx.FixFieldIndexesForNode(ctx, a.LogFn(), scope.NewScopeFromSubqueryExpression(n), sq.Query)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, transform.SameTree, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif sq != nil && nodeIsCacheable(sq.Query, len(subScope.Schema())) {\n\t\t\t\t\tmatches = append(matches, applyJoin{l: l, r: sq, op: op, filter: joinF, max1: max1})\n\t\t\t\t} else {\n\t\t\t\t\tnewFilters = append(newFilters, e)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(matches) == 0 {\n\t\t\t\treturn n, transform.SameTree, nil\n\t\t\t}\n\n\t\t\tret := child\n\t\t\tfor _, m := range matches {\n\t\t\t\t// A successful candidate is built with:\n\t\t\t\t// (1) Semi or anti join between the outer scope and (2) conditioned on (3).\n\t\t\t\t// (2) Simplified or unnested subquery (table alias).\n\t\t\t\t// (3) Join condition synthesized from the original correlated expression\n\t\t\t\t// normalized to match changes to (2).\n\t\t\t\tsubq := m.r\n\n\t\t\t\tname := fmt.Sprintf(\"scalarSubq%d\", applyId)\n\t\t\t\tapplyId++\n\n\t\t\t\tsch := subq.Query.Schema()\n\t\t\t\tvar rightF sql.Expression\n\t\t\t\tif len(sch) == 1 {\n\t\t\t\t\tsubqCol := subq.Query.Schema()[0]\n\t\t\t\t\trightF = expression.NewGetFieldWithTable(len(scope.Schema()), subqCol.Type, name, subqCol.Name, subqCol.Nullable)\n\t\t\t\t} else {\n\t\t\t\t\ttup := 
make(expression.Tuple, len(sch))\n\t\t\t\t\tfor i, c := range sch {\n\t\t\t\t\t\ttup[i] = expression.NewGetFieldWithTable(len(scope.Schema())+i, c.Type, name, c.Name, c.Nullable)\n\t\t\t\t\t}\n\t\t\t\t\trightF = tup\n\t\t\t\t}\n\n\t\t\t\tq, _, err := fixidx.FixFieldIndexesForNode(ctx, a.LogFn(), scope, subq.Query)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, transform.SameTree, err\n\t\t\t\t}\n\n\t\t\t\tvar newSubq sql.Node = plan.NewSubqueryAlias(name, subq.QueryString, q)\n\t\t\t\tnewSubq, err = simplifySubqExpr(newSubq)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, transform.SameTree, err\n\t\t\t\t}\n\t\t\t\tif m.max1 {\n\t\t\t\t\tnewSubq = plan.NewMax1Row(newSubq, name)\n\t\t\t\t}\n\n\t\t\t\tcondSch := append(ret.Schema(), newSubq.Schema()...)\n\t\t\t\tfilter, err := m.filter.WithChildren(m.l, rightF)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn n, transform.SameTree, err\n\t\t\t\t}\n\t\t\t\tfilter, _, err = fixidx.FixFieldIndexes(scope, a.LogFn(), condSch, filter)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn n, transform.SameTree, err\n\t\t\t\t}\n\t\t\t\tvar comment string\n\t\t\t\tif c, ok := ret.(sql.CommentedNode); ok {\n\t\t\t\t\tcomment = c.Comment()\n\t\t\t\t}\n\t\t\t\tnewJoin := plan.NewJoin(ret, newSubq, m.op, filter)\n\t\t\t\tret = newJoin.WithComment(comment)\n\t\t\t}\n\n\t\t\tif len(newFilters) == 0 {\n\t\t\t\treturn ret, transform.NewTree, nil\n\t\t\t}\n\t\t\treturn plan.NewFilter(expression.JoinAnd(newFilters...), ret), transform.NewTree, nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn n, transform.SameTree, err\n\t\t}\n\t}\n\treturn ret, transform.TreeIdentity(applyId == 0), nil\n}",
"func (c *CrossingEdgeQuery) computeCellsIntersected(pcell *PaddedCell, edgeBound r2.Rect) {\n\n\tc.iter.seek(pcell.id.RangeMin())\n\tif c.iter.Done() || c.iter.CellID() > pcell.id.RangeMax() {\n\t\t// The index does not contain pcell or any of its descendants.\n\t\treturn\n\t}\n\tif c.iter.CellID() == pcell.id {\n\t\t// The index contains this cell exactly.\n\t\tc.cells = append(c.cells, c.iter.IndexCell())\n\t\treturn\n\t}\n\n\t// Otherwise, split the edge among the four children of pcell.\n\tcenter := pcell.Middle().Lo()\n\n\tif edgeBound.X.Hi < center.X {\n\t\t// Edge is entirely contained in the two left children.\n\t\tc.clipVAxis(edgeBound, center.Y, 0, pcell)\n\t\treturn\n\t} else if edgeBound.X.Lo >= center.X {\n\t\t// Edge is entirely contained in the two right children.\n\t\tc.clipVAxis(edgeBound, center.Y, 1, pcell)\n\t\treturn\n\t}\n\n\tchildBounds := c.splitUBound(edgeBound, center.X)\n\tif edgeBound.Y.Hi < center.Y {\n\t\t// Edge is entirely contained in the two lower children.\n\t\tc.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 0, 0), childBounds[0])\n\t\tc.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 1, 0), childBounds[1])\n\t} else if edgeBound.Y.Lo >= center.Y {\n\t\t// Edge is entirely contained in the two upper children.\n\t\tc.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 0, 1), childBounds[0])\n\t\tc.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 1, 1), childBounds[1])\n\t} else {\n\t\t// The edge bound spans all four children. The edge itself intersects\n\t\t// at most three children (since no padding is being used).\n\t\tc.clipVAxis(childBounds[0], center.Y, 0, pcell)\n\t\tc.clipVAxis(childBounds[1], center.Y, 1, pcell)\n\t}\n}",
"func (JoinOperator) Dependencies(ev *Evaluator, args []*Expr, _ []*tree.Cursor, auto []*tree.Cursor) []*tree.Cursor {\n\tDEBUG(\"Calculating dependencies for (( join ... ))\")\n\tdeps := []*tree.Cursor{}\n\tif len(args) < 2 {\n\t\tDEBUG(\"Not enough arguments to (( join ... ))\")\n\t\treturn []*tree.Cursor{}\n\t}\n\n\t//skip the separator arg\n\tfor _, arg := range args[1:] {\n\t\tif arg.Type == Literal {\n\t\t\tcontinue\n\t\t}\n\t\tif arg.Type != Reference {\n\t\t\tDEBUG(\"(( join ... )) argument not Literal or Reference type\")\n\t\t\treturn []*tree.Cursor{}\n\t\t}\n\t\t//get the real cursor\n\t\tfinalCursor, err := arg.Resolve(ev.Tree)\n\t\tif err != nil {\n\t\t\tDEBUG(\"Could not resolve to a canonical path '%s'\", arg.String())\n\t\t\treturn []*tree.Cursor{}\n\t\t}\n\t\t//get the list at this location\n\t\tlist, err := finalCursor.Reference.Resolve(ev.Tree)\n\t\tif err != nil {\n\t\t\tDEBUG(\"Could not retrieve object at path '%s'\", arg.String())\n\t\t\treturn []*tree.Cursor{}\n\t\t}\n\t\t//must be a list or a string\n\t\tswitch list.(type) {\n\t\tcase []interface{}:\n\t\t\t//add .* to the end of the cursor so we can glob all the elements\n\t\t\tglobCursor, err := tree.ParseCursor(fmt.Sprintf(\"%s.*\", finalCursor.Reference.String()))\n\t\t\tif err != nil {\n\t\t\t\tDEBUG(\"Could not parse cursor with '.*' appended. This is a BUG\")\n\t\t\t\treturn []*tree.Cursor{}\n\t\t\t}\n\t\t\t//have the cursor library get all the subelements for us\n\t\t\tsubElements, err := globCursor.Glob(ev.Tree)\n\t\t\tif err != nil {\n\t\t\t\tDEBUG(\"Could not retrieve subelements at path '%s'. This may be a BUG.\", arg.String())\n\t\t\t\treturn []*tree.Cursor{}\n\t\t\t}\n\t\t\tdeps = append(deps, subElements...)\n\t\tcase string:\n\t\t\tdeps = append(deps, finalCursor.Reference)\n\t\tdefault:\n\t\t\tDEBUG(\"Unsupported type at object location\")\n\t\t\treturn []*tree.Cursor{}\n\t\t}\n\t}\n\n\t//Append on the auto-generated deps (the operator path args)\n\tfor _, dep := range auto {\n\t\tdeps = append(deps, dep)\n\t}\n\n\tDEBUG(\"Dependencies for (( join ... )):\")\n\tfor i, dep := range deps {\n\t\tDEBUG(\"\\t#%d %s\", i, dep.String())\n\t}\n\treturn deps\n}",
"func (stmt *SelectStmt) ToSQL(rebind bool) (asSQL string, bindings []interface{}) { //nolint: gocognit, gocyclo\n\tvar clauses = []string{\"SELECT\"}\n\n\tif stmt.IsDistinct {\n\t\tclauses = append(clauses, \"DISTINCT\")\n\t\tif len(stmt.DistinctColumns) > 0 {\n\t\t\tclauses = append(clauses, \"ON (\"+strings.Join(stmt.DistinctColumns, \", \")+\")\")\n\t\t}\n\t}\n\n\tif len(stmt.Columns) == 0 {\n\t\tclauses = append(clauses, \"*\")\n\t} else {\n\t\tclauses = append(clauses, strings.Join(stmt.Columns, \", \"))\n\t}\n\n\tif len(stmt.Table) > 0 {\n\t\tclauses = append(clauses, fmt.Sprintf(\"FROM %s\", stmt.Table))\n\t}\n\n\tfor _, join := range stmt.Joins {\n\t\tonClause, joinBindings := parseConditions(join.Conditions)\n\n\t\tif join.ResultSet != nil {\n\t\t\trsSQL, rsBindings := join.ResultSet.ToSQL(false)\n\t\t\tclauses = append(clauses, join.Type.String()+\" (\"+rsSQL+\") \"+join.Table+\" ON \"+onClause)\n\t\t\tbindings = append(bindings, rsBindings...)\n\t\t} else {\n\t\t\tclauses = append(clauses, join.Type.String()+\" \"+join.Table+\" ON \"+onClause)\n\t\t}\n\n\t\t// add the join condition bindings (this MUST happen after adding the clause\n\t\t// itself, because if the join is on a result set then the result set's bindings\n\t\t// need to come first\n\t\tbindings = append(bindings, joinBindings...)\n\t}\n\n\tif len(stmt.Conditions) > 0 {\n\t\twhereClause, whereBindings := parseConditions(stmt.Conditions)\n\t\tbindings = append(bindings, whereBindings...)\n\t\tclauses = append(clauses, fmt.Sprintf(\"WHERE %s\", whereClause))\n\t}\n\n\tif len(stmt.Grouping) > 0 {\n\t\tclauses = append(clauses, fmt.Sprintf(\"GROUP BY %s\", strings.Join(stmt.Grouping, \", \")))\n\t}\n\n\tif len(stmt.GroupConditions) > 0 {\n\t\tgroupByClause, groupBindings := parseConditions(stmt.GroupConditions)\n\t\tbindings = append(bindings, groupBindings...)\n\t\tclauses = append(clauses, fmt.Sprintf(\"HAVING %s\", groupByClause))\n\t}\n\n\tif len(stmt.Ordering) > 0 {\n\t\tvar ordering []string\n\n\t\tfor _, order := range stmt.Ordering {\n\t\t\to, _ := order.ToSQL(false)\n\t\t\tordering = append(ordering, o)\n\t\t}\n\n\t\tclauses = append(clauses, fmt.Sprintf(\"ORDER BY %s\", strings.Join(ordering, \", \")))\n\n\t\tif stmt.orderWithNulls.Enabled {\n\t\t\tif stmt.orderWithNulls.First {\n\t\t\t\tclauses = append(clauses, \"NULLS FIRST\")\n\t\t\t} else {\n\t\t\t\tclauses = append(clauses, \"NULLS LAST\")\n\t\t\t}\n\t\t}\n\t}\n\n\tif stmt.LimitTo > 0 {\n\t\tclauses = append(clauses, fmt.Sprintf(\"LIMIT %d\", stmt.LimitTo))\n\t}\n\n\tif stmt.OffsetFrom > 0 {\n\t\toffset := fmt.Sprintf(\"%d\", stmt.OffsetFrom)\n\t\tif stmt.OffsetRows > 0 {\n\t\t\toffset += fmt.Sprintf(\" %d\", stmt.OffsetRows)\n\t\t}\n\n\t\tclauses = append(clauses, \"OFFSET \"+offset)\n\t}\n\n\tfor _, lock := range stmt.Locks {\n\t\tvar lockStrength string\n\n\t\tswitch lock.Strength {\n\t\tcase LockForUpdate:\n\t\t\tlockStrength = \"FOR UPDATE\"\n\t\tcase LockForNoKeyUpdate:\n\t\t\tlockStrength = \"FOR NO KEY UPDATE\"\n\t\tcase LockForShare:\n\t\t\tlockStrength = \"FOR SHARE\"\n\t\tcase LockForKeyShare:\n\t\t\tlockStrength = \"FOR KEY SHARE\"\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\n\t\tlockClause := []string{lockStrength}\n\n\t\tif len(lock.Tables) > 0 {\n\t\t\tlockClause = append(lockClause, \"OF \"+strings.Join(lock.Tables, \", \"))\n\t\t}\n\n\t\tif lock.Wait == LockNoWait {\n\t\t\tlockClause = append(lockClause, \"NOWAIT\")\n\t\t} else if lock.Wait == LockSkipLocked {\n\t\t\tlockClause = append(lockClause, \"SKIP LOCKED\")\n\t\t}\n\n\t\tclauses = 
append(clauses, strings.Join(lockClause, \" \"))\n\t}\n\n\tif len(stmt.Unions) > 0 {\n\t\tcmd := \"UNION\"\n\t\tif stmt.IsUnionAll {\n\t\t\tcmd += \" ALL\"\n\t\t}\n\n\t\tfor _, union := range stmt.Unions {\n\t\t\tu, b := union.ToSQL(false)\n\t\t\tbindings = append(bindings, b...)\n\t\t\tclauses = append(clauses, fmt.Sprintf(\"%s %s\", cmd, u))\n\t\t}\n\t}\n\n\tasSQL = strings.Join(clauses, \" \")\n\n\tif rebind {\n\t\tif db, ok := stmt.queryer.(*sqlx.DB); ok {\n\t\t\tasSQL = db.Rebind(asSQL)\n\t\t} else if tx, ok := stmt.queryer.(*sqlx.Tx); ok {\n\t\t\tasSQL = tx.Rebind(asSQL)\n\t\t}\n\t}\n\n\treturn asSQL, bindings\n}",
"func (self *SelectManager) On(expr interface{}) *SelectManager {\n\tjoins := self.Context.Source.Right\n\n\tif 0 == len(joins) {\n\t\treturn self\n\t}\n\n\tlast := joins[len(joins)-1]\n\n\tswitch last.(type) {\n\tcase *nodes.InnerJoinNode:\n\t\tlast.(*nodes.InnerJoinNode).Right = nodes.On(expr)\n\tcase *nodes.OuterJoinNode:\n\t\tlast.(*nodes.OuterJoinNode).Right = nodes.On(expr)\n\t}\n\n\treturn self\n}",
"func (f *Factory) ConstructJoin(\n\tjoinOp opt.Operator, left, right memo.RelExpr, on memo.FiltersExpr, private *memo.JoinPrivate,\n) memo.RelExpr {\n\tswitch joinOp {\n\tcase opt.InnerJoinOp:\n\t\treturn f.ConstructInnerJoin(left, right, on, private)\n\tcase opt.InnerJoinApplyOp:\n\t\treturn f.ConstructInnerJoinApply(left, right, on, private)\n\tcase opt.LeftJoinOp:\n\t\treturn f.ConstructLeftJoin(left, right, on, private)\n\tcase opt.LeftJoinApplyOp:\n\t\treturn f.ConstructLeftJoinApply(left, right, on, private)\n\tcase opt.RightJoinOp:\n\t\treturn f.ConstructRightJoin(left, right, on, private)\n\tcase opt.FullJoinOp:\n\t\treturn f.ConstructFullJoin(left, right, on, private)\n\tcase opt.SemiJoinOp:\n\t\treturn f.ConstructSemiJoin(left, right, on, private)\n\tcase opt.SemiJoinApplyOp:\n\t\treturn f.ConstructSemiJoinApply(left, right, on, private)\n\tcase opt.AntiJoinOp:\n\t\treturn f.ConstructAntiJoin(left, right, on, private)\n\tcase opt.AntiJoinApplyOp:\n\t\treturn f.ConstructAntiJoinApply(left, right, on, private)\n\t}\n\tpanic(errors.AssertionFailedf(\"unexpected join operator: %v\", log.Safe(joinOp)))\n}",
"func (r Relation) MergeJoin(leftCols []AttrInfo, rightRelation Relationer, rightCols []AttrInfo, joinType JoinType, compType Comparison) Relationer {\n\tright, isRelation := rightRelation.(Relation)\n\n\tif !isRelation {\n\t\tpanic(\"unknown relation type\")\n\t\t// TODO: implement using Relationer.GetRawData()\n\t}\n\n\ttype MergeData struct {\n\t\tLeft *Column\n\t\tRight *Column\n\t\tCompare CompFunc\n\t\tLesser CompFunc\n\t\tEquals CompFunc\n\t}\n\n\tright = right.MergeSort(rightCols, ASC).(Relation)\n\tleft := r.MergeSort(leftCols, ASC).(Relation)\n\toutput := Relation{Columns: []Column{}}\n\n\tleftIndices := []int{}\n\trightIndices := []int{}\n\n\tleftRow, rightRow := 0, 0\n\tmaxLeftRows := left.Columns[0].GetNumRows()\n\tmaxRightRows := right.Columns[0].GetNumRows()\n\tvar mergeData []MergeData\n\n\taddOutputCols := func(base *Relation, tableName string, nullable bool) {\n\t\tif nullable {\n\t\t\tpanic(\"NULL values not implemented\")\n\t\t}\n\t\tfor _, col := range base.Columns {\n\t\t\tsignature := AttrInfo{Name: tableName + \".\" + col.Signature.Name, Enc: col.Signature.Enc, Type: col.Signature.Type}\n\t\t\toutput.Columns = append(output.Columns, NewColumn(signature))\n\t\t}\n\t}\n\n\tgetMergeData := func() []MergeData {\n\t\toutput := []MergeData{}\n\n\t\tfor sigIndex, signature := range leftCols {\n\t\t\tentry := MergeData{}\n\n\t\t\tfor colIndex, col := range left.Columns {\n\t\t\t\tif col.Signature == signature {\n\t\t\t\t\tentry.Left = &left.Columns[colIndex]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor colIndex, col := range right.Columns {\n\t\t\t\tif col.Signature == rightCols[sigIndex] {\n\t\t\t\t\tentry.Right = &right.Columns[colIndex]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif entry.Left == nil || entry.Right == nil {\n\t\t\t\tpanic(\"column not found\")\n\t\t\t}\n\n\t\t\tentry.Equals = compFuncs[signature.Type][EQ]\n\t\t\tentry.Lesser = compFuncs[signature.Type][LT]\n\n\t\t\toutput = append(output, entry)\n\t\t}\n\n\t\treturn output\n\t}\n\n\tisEqual := func(leftIndex int, rightIndex int) bool {\n\t\tfor _, entry := range mergeData {\n\t\t\tleftValue, _ := entry.Left.GetRow(leftIndex)\n\t\t\trightValue, _ := entry.Right.GetRow(rightIndex)\n\t\t\tif !entry.Equals(leftValue, rightValue) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\tisLesser := func(leftIndex, rightIndex int) bool {\n\t\tfor _, entry := range mergeData {\n\t\t\tleftValue, _ := entry.Left.GetRow(leftIndex)\n\t\t\trightValue, _ := entry.Right.GetRow(rightIndex)\n\n\t\t\tif entry.Lesser(leftValue, rightValue) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif !entry.Equals(leftValue, rightValue) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\t// entries are equal\n\t\treturn false\n\t}\n\n\tgetNextRow := func(compare func(int, int) bool) int {\n\t\tnextRow := rightRow + 1\n\n\t\tfor nextRow < maxRightRows && compare(leftRow, nextRow) {\n\t\t\tnextRow++\n\t\t}\n\n\t\treturn nextRow\n\t}\n\n\tinnerJoin := func() ([]int, []int) {\n\t\tmergeData = getMergeData()\n\n\t\tfor leftRow < maxLeftRows && rightRow < maxRightRows {\n\t\t\tif isEqual(leftRow, rightRow) {\n\t\t\t\t// leftValue == rightValue\n\t\t\t\tnextRow := getNextRow(isEqual)\n\n\t\t\t\tswitch compType {\n\t\t\t\tcase GT:\n\t\t\t\t\tfor i := 0; i < rightRow; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase GEQ:\n\t\t\t\t\tfor i := 0; i < nextRow; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = 
append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase LT:\n\t\t\t\t\tfor i := nextRow; i < maxRightRows; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase LEQ:\n\t\t\t\t\tfor i := rightRow; i < maxRightRows; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase EQ:\n\t\t\t\t\tfor i := rightRow; i < nextRow; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase NEQ:\n\t\t\t\t\tfor i := 0; i < rightRow; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\t\tfor i := nextRow; i < maxRightRows; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tleftRow++\n\t\t\t} else if isLesser(leftRow, rightRow) {\n\t\t\t\t// leftValue < rightValue\n\t\t\t\tswitch compType {\n\t\t\t\tcase GT, GEQ:\n\t\t\t\t\tfor i := 0; i < rightRow; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase LT, LEQ:\n\t\t\t\t\tfor i := rightRow; i < maxRightRows; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase NEQ:\n\t\t\t\t\tfor i := 0; i < maxRightRows; i++ {\n\t\t\t\t\t\tif !isEqual(leftRow, i) {\n\t\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tleftRow++\n\t\t\t} else {\n\t\t\t\t// leftValue > rightValue\n\t\t\t\tnextRow := getNextRow(func(l, r int) bool { return !isEqual(l, r) && !isLesser(l, r) })\n\n\t\t\t\tswitch compType {\n\t\t\t\tcase NEQ:\n\t\t\t\t\t// do something?\n\t\t\t\t}\n\n\t\t\t\trightRow = nextRow\n\t\t\t}\n\t\t}\n\n\t\treturn leftIndices, rightIndices\n\t}\n\n\tsemiJoin := func() {\n\t\tmergeData = getMergeData()\n\n\t\tif compType != EQ {\n\t\t\tpanic(\"semi join only supports equality comparison\")\n\t\t}\n\n\t\tfor leftRow < maxLeftRows && rightRow < maxRightRows {\n\t\t\tif isEqual(leftRow, rightRow) {\n\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\n\t\t\t\tleftRow++\n\t\t\t} else if isLesser(leftRow, rightRow) {\n\t\t\t\tleftRow++\n\t\t\t} else {\n\t\t\t\tnextRow := getNextRow(func(l, r int) bool { return !isEqual(l, r) && !isLesser(l, r) })\n\t\t\t\trightRow = nextRow\n\t\t\t}\n\t\t}\n\t}\n\n\tcopyColumn := func(source *Column, dest *Column, indices []int) {\n\t\tfor _, row := range indices {\n\t\t\tvalue, _ := source.GetRow(row)\n\t\t\tdest.AddRow(source.Signature.Type, value)\n\t\t}\n\t}\n\n\tcopyLeftValues := func(indices []int) {\n\t\tfor colIndex := range left.Columns {\n\t\t\tcopyColumn(&left.Columns[colIndex], &output.Columns[colIndex], indices)\n\t\t}\n\t}\n\n\tcopyRightValues := func(indices []int) {\n\t\tnumLeftCols := len(left.Columns)\n\t\tfor colIndex := range right.Columns {\n\t\t\tcopyColumn(&right.Columns[colIndex], &output.Columns[numLeftCols+colIndex], indices)\n\t\t}\n\t}\n\n\tswitch joinType {\n\tcase INNER:\n\t\toutput.Name = r.Name + \" x \" + rightRelation.(Relation).Name\n\t\taddOutputCols(&left, r.Name, false)\n\t\taddOutputCols(&right, rightRelation.(Relation).Name, 
false)\n\t\tinnerJoin()\n\t\tcopyLeftValues(leftIndices)\n\t\tcopyRightValues(rightIndices)\n\t\tbreak\n\tcase SEMI:\n\t\toutput.Name = r.Name + \" (x \" + rightRelation.(Relation).Name + \")\"\n\t\taddOutputCols(&left, r.Name, false)\n\t\tsemiJoin()\n\t\tcopyLeftValues(leftIndices)\n\t\tbreak\n\tcase LEFTOUTER:\n\t\t// handle null values on left\n\t\tpanic(\"NULL values not implemented\")\n\tcase RIGHTOUTER:\n\t\t// handle null values on right\n\t\tpanic(\"NULL values not implemented\")\n\tdefault:\n\t\tpanic(\"unknown JoinType\")\n\t}\n\n\treturn output\n}",
"func (sg *SQLiteGrammars) CompileSelect() { sg.Builder.PSql = sg.compileSelect() }",
"func Join(g1 Grouping, col1 string, g2 Grouping, col2 string) Grouping {\n\tvar ng GroupingBuilder\n\tfor _, gid := range g1.Tables() {\n\t\tt1, t2 := g1.Table(gid), g2.Table(gid)\n\t\tif t2 == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// TODO: Optimize for cases where col1 and/or col2 are\n\t\t// constant.\n\n\t\t// Index col2 in t2.\n\t\tridx := make(map[interface{}][]int)\n\t\trv := reflect.ValueOf(t2.MustColumn(col2))\n\t\tfor i, l := 0, rv.Len(); i < l; i++ {\n\t\t\tv := rv.Index(i).Interface()\n\t\t\tridx[v] = append(ridx[v], i)\n\t\t}\n\n\t\t// For each row in t1, find the matching rows in col2\n\t\t// and build up the row indexes for t1 and t2.\n\t\tidx1, idx2 := []int{}, []int{}\n\t\tlv := reflect.ValueOf(t1.MustColumn(col1))\n\t\tfor i, l := 0, lv.Len(); i < l; i++ {\n\t\t\tr := ridx[lv.Index(i).Interface()]\n\t\t\tfor range r {\n\t\t\t\tidx1 = append(idx1, i)\n\t\t\t}\n\t\t\tidx2 = append(idx2, r...)\n\t\t}\n\n\t\t// Build the joined table.\n\t\tvar nt Builder\n\t\tfor _, col := range t1.Columns() {\n\t\t\tif cv, ok := t1.Const(col); ok {\n\t\t\t\tnt.Add(col, cv)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnt.Add(col, slice.Select(t1.Column(col), idx1))\n\t\t}\n\t\tfor _, col := range t2.Columns() {\n\t\t\t// Often the join column is the same in both\n\t\t\t// and we can skip it because we added it from\n\t\t\t// the first table.\n\t\t\tif col == col1 && col == col2 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif cv, ok := t2.Const(col); ok {\n\t\t\t\tnt.Add(col, cv)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnt.Add(col, slice.Select(t2.Column(col), idx2))\n\t\t}\n\n\t\tng.Add(gid, nt.Done())\n\t}\n\treturn ng.Done()\n}",
"func (m *PlannerDefault) WalkSourceSelect(p *Source) error {\n\n\tif p.Stmt.Source != nil {\n\t\t//u.Debugf(\"%p VisitSubselect from.source = %q\", p, p.Stmt.Source)\n\t} else {\n\t\t//u.Debugf(\"%p VisitSubselect from=%q\", p, p)\n\t}\n\n\t// All of this is plan info, ie needs JoinKey\n\tneedsJoinKey := false\n\tif p.Stmt.Source != nil && len(p.Stmt.JoinNodes()) > 0 {\n\t\tneedsJoinKey = true\n\t}\n\n\t// We need to build a ColIndex of source column/select/projection column\n\t//u.Debugf(\"datasource? %#v\", p.Conn)\n\tif p.Conn == nil {\n\t\terr := p.LoadConn()\n\t\tif err != nil {\n\t\t\tu.Errorf(\"no conn? %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tif p.Conn == nil {\n\t\t\tif p.Stmt != nil {\n\t\t\t\tif p.Stmt.IsLiteral() {\n\t\t\t\t\t// this is fine\n\t\t\t\t} else {\n\t\t\t\t\tu.Warnf(\"No DataSource found, and not literal query? Source Required for %s\", p.Stmt.String())\n\t\t\t\t\treturn ErrNoDataSource\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tu.Warnf(\"hm no conn, no stmt?....\")\n\t\t\t\treturn ErrNoDataSource\n\t\t\t}\n\t\t}\n\t}\n\n\tif sourcePlanner, hasSourcePlanner := p.Conn.(SourcePlanner); hasSourcePlanner {\n\t\t// Can do our own planning\n\t\tt, err := sourcePlanner.WalkSourceSelect(m.Planner, p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif t != nil {\n\t\t\tp.Add(t)\n\t\t}\n\n\t} else {\n\n\t\tif schemaCols, ok := p.Conn.(schema.ConnColumns); ok {\n\t\t\tif err := buildColIndex(schemaCols, p); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"%q Didn't implement schema.ConnColumns: %T\", p.Stmt.SourceName(), p.Conn)\n\t\t}\n\n\t\tif p.Stmt.Source != nil && p.Stmt.Source.Where != nil {\n\t\t\tswitch {\n\t\t\tcase p.Stmt.Source.Where.Expr != nil:\n\t\t\t\tp.Add(NewWhere(p.Stmt.Source))\n\t\t\tdefault:\n\t\t\t\tu.Warnf(\"Found un-supported where type: %#v\", p.Stmt.Source)\n\t\t\t\treturn fmt.Errorf(\"Unsupported Where clause: %q\", p.Stmt)\n\t\t\t}\n\t\t}\n\n\t\t// Add a Non-Final Projection to choose the columns for results\n\t\tif !p.Final {\n\t\t\terr := m.WalkProjectionSource(p)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif needsJoinKey {\n\t\tjoinKey := NewJoinKey(p)\n\t\tp.Add(joinKey)\n\t}\n\n\treturn nil\n}",
"func canMergeSubqueryOnColumnSelection(ctx *plancontext.PlanningContext, a, b *Route, predicate *sqlparser.ExtractedSubquery) bool {\n\tleft := predicate.OtherSide\n\topCode := predicate.OpCode\n\tif opCode != int(popcode.PulloutValue) && opCode != int(popcode.PulloutIn) {\n\t\treturn false\n\t}\n\n\tlVindex := findColumnVindex(ctx, a, left)\n\tif lVindex == nil || !lVindex.IsUnique() {\n\t\treturn false\n\t}\n\n\trightSelection := extractSingleColumnSubquerySelection(predicate.Subquery)\n\tif rightSelection == nil {\n\t\treturn false\n\t}\n\n\trVindex := findColumnVindex(ctx, b, rightSelection)\n\tif rVindex == nil {\n\t\treturn false\n\t}\n\treturn rVindex == lVindex\n}",
"func constructPropertyByJoin(join *LogicalJoin) ([][]*requiredProperty, []int, error) {\n\tvar result [][]*requiredProperty\n\tvar condIndex []int\n\n\tif join.EqualConditions == nil {\n\t\treturn nil, nil, nil\n\t}\n\tfor i, cond := range join.EqualConditions {\n\t\tif len(cond.GetArgs()) != 2 {\n\t\t\treturn nil, nil, errors.New(\"unexpected argument count for equal expression\")\n\t\t}\n\t\tlExpr, rExpr := cond.GetArgs()[0], cond.GetArgs()[1]\n\t\t// Only consider raw column reference and cowardly ignore calculations\n\t\t// since we don't know if the function call preserve order\n\t\tlColumn, lOK := lExpr.(*expression.Column)\n\t\trColumn, rOK := rExpr.(*expression.Column)\n\t\tif lOK && rOK && compareTypeForOrder(lColumn.RetType, rColumn.RetType) {\n\t\t\tresult = append(result, []*requiredProperty{generateJoinProp(lColumn), generateJoinProp(rColumn)})\n\t\t\tcondIndex = append(condIndex, i)\n\t\t} else {\n\t\t\tcontinue\n\t\t}\n\t}\n\tif len(result) == 0 {\n\t\treturn nil, nil, nil\n\t}\n\treturn result, condIndex, nil\n}",
"func (pb *primitiveBuilder) processJoin(ajoin *sqlparser.JoinTableExpr, reservedVars *sqlparser.ReservedVars, where sqlparser.Expr) error {\n\tswitch ajoin.Join {\n\tcase sqlparser.NormalJoinType, sqlparser.StraightJoinType, sqlparser.LeftJoinType:\n\tcase sqlparser.RightJoinType:\n\t\tconvertToLeftJoin(ajoin)\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported: %s\", ajoin.Join.ToString())\n\t}\n\tif err := pb.processTableExpr(ajoin.LeftExpr, reservedVars, where); err != nil {\n\t\treturn err\n\t}\n\trpb := newPrimitiveBuilder(pb.vschema, pb.jt)\n\tif err := rpb.processTableExpr(ajoin.RightExpr, reservedVars, where); err != nil {\n\t\treturn err\n\t}\n\treturn pb.join(rpb, ajoin, reservedVars, where)\n}",
"func queryMulti(node *mnode, from, to []int, result *map[int]Interval, tw *twalker, back bool) {\n\thitsFrom := make([]int, 0, 2)\n\thitsTo := make([]int, 0, 2)\n\tfor i, fromvalue := range from {\n\t\tif !node.segment.Disjoint(fromvalue, to[i]) {\n\t\t\tfor _, pintrvl := range node.overlap {\n\t\t\t\t(*result)[pintrvl.Id] = *pintrvl\n\t\t\t}\n\t\t\thitsFrom = append(hitsFrom, fromvalue)\n\t\t\thitsTo = append(hitsTo, to[i])\n\t\t}\n\t}\n\t// search in children only with overlapping intervals of parent\n\tif len(hitsFrom) != 0 {\n\t\tif node.right != nil {\n\t\t\t// buffered channel tw.queue is a safe counter to limit number of started goroutines\n\t\t\tselect {\n\t\t\tcase tw.queue <- 1:\n\t\t\t\t// create new map for result\n\t\t\t\tnewMap := make(map[int]Interval)\n\t\t\t\t// increment counter of wait group\n\t\t\t\ttw.wait.Add(1)\n\t\t\t\t// start new query in goroutine\n\t\t\t\tgo queryMulti(node.right, from, to, &newMap, tw, true)\n\t\t\tdefault:\n\t\t\t\t// pass-through result map of parent\n\t\t\t\tqueryMulti(node.right, from, to, result, tw, false)\n\t\t\t}\n\t\t}\n\t\tif node.left != nil {\n\t\t\tselect {\n\t\t\tcase tw.queue <- 1:\n\t\t\t\tnewMap := make(map[int]Interval)\n\t\t\t\ttw.wait.Add(1)\n\t\t\t\tgo queryMulti(node.left, from, to, &newMap, tw, true)\n\t\t\tdefault:\n\t\t\t\tqueryMulti(node.left, from, to, result, tw, false)\n\t\t\t}\n\t\t}\n\t}\n\t// if back is true then this method was called with go\n\tif back {\n\t\t// pass the result in the channel\n\t\ttw.result <- result\n\t\t// let wait group know that we are done\n\t\ttw.wait.Done()\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
outerTableCouldINLJ checks whether an index nested loop join is forced by hint, or whether the outer side's info is reliable and its row count satisfies the threshold.
|
func (p *LogicalJoin) outerTableCouldINLJ(outerInfo *physicalPlanInfo, leftAsOuter bool) bool {
var forced bool
if leftAsOuter {
forced = (p.preferINLJ&preferLeftAsOuter) > 0 && p.hasEqualConds()
} else {
forced = (p.preferINLJ&preferRightAsOuter) > 0 && p.hasEqualConds()
}
return forced || (outerInfo.reliable && outerInfo.count <= float64(p.ctx.GetSessionVars().MaxRowCountForINLJ))
}
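A small self-contained sketch (illustrative names, not TiDB's API) of the same decision rule: index nested loop join is allowed when a hint forces this side as outer and equal conditions exist, or when the outer estimate is trustworthy and below the row threshold.

package main

import "fmt"

const (
	preferLeftAsOuter = 1 << iota
	preferRightAsOuter
)

type planInfo struct {
	count    float64
	reliable bool
}

func couldINLJ(prefer int, hasEqualConds bool, outer planInfo, leftAsOuter bool, maxRows float64) bool {
	mask := preferRightAsOuter
	if leftAsOuter {
		mask = preferLeftAsOuter
	}
	forced := prefer&mask > 0 && hasEqualConds
	return forced || (outer.reliable && outer.count <= maxRows)
}

func main() {
	small := planInfo{count: 100, reliable: true}
	big := planInfo{count: 1e7, reliable: true}
	fmt.Println(couldINLJ(0, true, small, true, 10000))               // true: small, reliable outer
	fmt.Println(couldINLJ(0, true, big, true, 10000))                 // false: too many outer rows
	fmt.Println(couldINLJ(preferLeftAsOuter, true, big, true, 10000)) // true: hint forces it
}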
|
[
"func HasINFOROOMWith(preds ...predicate.Room) predicate.RoomInfo {\n\treturn predicate.RoomInfo(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(INFOROOMInverseTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.O2M, false, INFOROOMTable, INFOROOMColumn),\n\t\t)\n\t\tsqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {\n\t\t\tfor _, p := range preds {\n\t\t\t\tp(s)\n\t\t\t}\n\t\t})\n\t})\n}",
"func (e *HashJoinExec) filterOuters(outerBuffer *execResult, outerFilterResult []bool) (int, error) {\n\tif e.outerFilter == nil {\n\t\treturn len(outerBuffer.rows), nil\n\t}\n\n\touterFilterResult = outerFilterResult[:0]\n\tfor _, outerRow := range outerBuffer.rows {\n\t\tmatched, err := expression.EvalBool(e.ctx, e.outerFilter, outerRow)\n\t\tif err != nil {\n\t\t\treturn 0, errors.Trace(err)\n\t\t}\n\t\touterFilterResult = append(outerFilterResult, matched)\n\t}\n\n\ti, j := 0, len(outerBuffer.rows)-1\n\tfor i <= j {\n\t\tfor i <= j && outerFilterResult[i] {\n\t\t\ti++\n\t\t}\n\t\tfor i <= j && !outerFilterResult[j] {\n\t\t\tj--\n\t\t}\n\t\tif i <= j {\n\t\t\touterFilterResult[i], outerFilterResult[j] = outerFilterResult[j], outerFilterResult[i]\n\t\t\touterBuffer.rows[i], outerBuffer.rows[j] = outerBuffer.rows[j], outerBuffer.rows[i]\n\t\t}\n\t}\n\treturn i, nil\n}",
"func (iw *indexHashJoinInnerWorker) doJoinInOrder(ctx context.Context, task *indexHashJoinTask, joinResult *indexHashJoinResult, h hash.Hash64, resultCh chan *indexHashJoinResult) (err error) {\n\tdefer func() {\n\t\tif err == nil && joinResult.chk != nil {\n\t\t\tif joinResult.chk.NumRows() > 0 {\n\t\t\t\tselect {\n\t\t\t\tcase resultCh <- joinResult:\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tjoinResult.src <- joinResult.chk\n\t\t\t}\n\t\t}\n\t}()\n\tfor i, numChunks := 0, task.innerResult.NumChunks(); i < numChunks; i++ {\n\t\tfor j, chk := 0, task.innerResult.GetChunk(i); j < chk.NumRows(); j++ {\n\t\t\trow := chk.GetRow(j)\n\t\t\tptr := chunk.RowPtr{ChkIdx: uint32(i), RowIdx: uint32(j)}\n\t\t\terr = iw.collectMatchedInnerPtrs4OuterRows(row, ptr, task, h, iw.joinKeyBuf)\n\t\t\tfailpoint.Inject(\"TestIssue31129\", func() {\n\t\t\t\terr = errors.New(\"TestIssue31129\")\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\t// TODO: matchedInnerRowPtrs and matchedInnerRows can be moved to inner worker.\n\tmatchedInnerRows := make([]chunk.Row, 0, len(task.matchedInnerRowPtrs))\n\tvar hasMatched, hasNull, ok bool\n\tfor chkIdx, innerRowPtrs4Chk := range task.matchedInnerRowPtrs {\n\t\tfor outerRowIdx, innerRowPtrs := range innerRowPtrs4Chk {\n\t\t\tmatchedInnerRows, hasMatched, hasNull = matchedInnerRows[:0], false, false\n\t\t\touterRow := task.outerResult.GetChunk(chkIdx).GetRow(outerRowIdx)\n\t\t\tfor _, ptr := range innerRowPtrs {\n\t\t\t\tmatchedInnerRows = append(matchedInnerRows, task.innerResult.GetRow(ptr))\n\t\t\t}\n\t\t\tiw.rowIter.Reset(matchedInnerRows)\n\t\t\titer := iw.rowIter\n\t\t\tfor iter.Begin(); iter.Current() != iter.End(); {\n\t\t\t\tmatched, isNull, err := iw.joiner.tryToMatchInners(outerRow, iter, joinResult.chk)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\thasMatched, hasNull = matched || hasMatched, isNull || hasNull\n\t\t\t\tif joinResult.chk.IsFull() {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase resultCh <- joinResult:\n\t\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\t\treturn ctx.Err()\n\t\t\t\t\t}\n\t\t\t\t\tjoinResult, ok = iw.getNewJoinResult(ctx)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn errors.New(\"indexHashJoinInnerWorker.doJoinInOrder failed\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !hasMatched {\n\t\t\t\tiw.joiner.onMissMatch(hasNull, outerRow, joinResult.chk)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func HasINFOROOM() predicate.RoomInfo {\n\treturn predicate.RoomInfo(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(INFOROOMTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.O2M, false, INFOROOMTable, INFOROOMColumn),\n\t\t)\n\t\tsqlgraph.HasNeighbors(s, step)\n\t})\n}",
"func (e *HashJoinExec) fetchOuterRows(ctx context.Context) {\n\tdefer func() {\n\t\tfor _, outerBufferCh := range e.outerBufferChs {\n\t\t\tclose(outerBufferCh)\n\t\t}\n\t\te.workerWaitGroup.Done()\n\t}()\n\n\tbufferCapacity, maxBufferCapacity := 1, 128\n\tfor i, noMoreData := uint(0), false; !noMoreData; i = (i + 1) % e.concurrency {\n\t\touterBuffer := &execResult{rows: make([]Row, 0, bufferCapacity)}\n\n\t\tfor !noMoreData && len(outerBuffer.rows) < bufferCapacity {\n\t\t\tif e.finished.Load().(bool) {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\touterRow, err := e.outerExec.Next(ctx)\n\t\t\tif err != nil || outerRow == nil {\n\t\t\t\touterBuffer.err = errors.Trace(err)\n\t\t\t\tnoMoreData = true\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\touterBuffer.rows = append(outerBuffer.rows, outerRow)\n\t\t}\n\n\t\tif noMoreData && len(outerBuffer.rows) == 0 && outerBuffer.err == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tselect {\n\t\t// TODO: Recover the code.\n\t\t// case <-e.ctx.GoCtx().Done():\n\t\t// \treturn\n\t\tcase e.outerBufferChs[i] <- outerBuffer:\n\t\t\tif !noMoreData && bufferCapacity < maxBufferCapacity {\n\t\t\t\tbufferCapacity <<= 1\n\t\t\t}\n\t\t}\n\t}\n}",
"func (v RoomVersionImpl) CheckRestrictedJoinsAllowed() error {\n\treturn v.checkRestrictedJoinAllowedFunc()\n}",
"func (e *HashJoinExec) joinOuterRow(workerID uint, outerRow Row, resultBuffer *execResult) bool {\n\tbuffer := e.hashJoinBuffers[workerID]\n\thasNull, joinKey, err := getJoinKey(e.ctx.GetSessionVars().StmtCtx, e.outerKeys, outerRow, buffer.data, buffer.bytes[:0:cap(buffer.bytes)])\n\tif err != nil {\n\t\tresultBuffer.err = errors.Trace(err)\n\t\treturn false\n\t}\n\n\tif hasNull {\n\t\tresultBuffer.rows, resultBuffer.err = e.resultGenerators[0].emit(outerRow, nil, resultBuffer.rows)\n\t\tresultBuffer.err = errors.Trace(resultBuffer.err)\n\t\treturn true\n\t}\n\n\te.hashTableValBufs[workerID] = e.hashTable.Get(joinKey, e.hashTableValBufs[workerID][:0])\n\tvalues := e.hashTableValBufs[workerID]\n\tif len(values) == 0 {\n\t\tresultBuffer.rows, resultBuffer.err = e.resultGenerators[0].emit(outerRow, nil, resultBuffer.rows)\n\t\tresultBuffer.err = errors.Trace(resultBuffer.err)\n\t\treturn true\n\t}\n\n\tinnerRows := make([]Row, 0, len(values))\n\tfor _, value := range values {\n\t\tinnerRow, err1 := e.decodeRow(value)\n\t\tif err1 != nil {\n\t\t\tresultBuffer.rows = nil\n\t\t\tresultBuffer.err = errors.Trace(err1)\n\t\t\treturn false\n\t\t}\n\t\tinnerRows = append(innerRows, innerRow)\n\t}\n\n\tresultBuffer.rows, resultBuffer.err = e.resultGenerators[0].emit(outerRow, innerRows, resultBuffer.rows)\n\tif resultBuffer.err != nil {\n\t\tresultBuffer.err = errors.Trace(resultBuffer.err)\n\t\treturn false\n\t}\n\treturn true\n}",
"func (r Result) CountOfTablesWithConstraintViolations() int {\n\tcount := 0\n\tfor _, mergeStats := range r.Stats {\n\t\tif mergeStats.HasConstraintViolations() {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}",
"func (p *NumpyParser) checkRowCount(fieldsData BlockData) (int, error) {\n\trowCount := 0\n\trowCounter := make(map[string]int)\n\tfor i := 0; i < len(p.collectionInfo.Schema.Fields); i++ {\n\t\tschema := p.collectionInfo.Schema.Fields[i]\n\t\tif !schema.GetAutoID() {\n\t\t\tv, ok := fieldsData[schema.GetFieldID()]\n\t\t\tif !ok {\n\t\t\t\tif schema.GetIsDynamic() {\n\t\t\t\t\t// user might not provide numpy file for dynamic field, skip it, will auto-generate later\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tlog.Warn(\"Numpy parser: field not provided\", zap.String(\"fieldName\", schema.GetName()))\n\t\t\t\treturn 0, fmt.Errorf(\"field '%s' not provided\", schema.GetName())\n\t\t\t}\n\t\t\trowCounter[schema.GetName()] = v.RowNum()\n\t\t\tif v.RowNum() > rowCount {\n\t\t\t\trowCount = v.RowNum()\n\t\t\t}\n\t\t}\n\t}\n\n\tfor name, count := range rowCounter {\n\t\tif count != rowCount {\n\t\t\tlog.Warn(\"Numpy parser: field row count is not equal to other fields row count\", zap.String(\"fieldName\", name),\n\t\t\t\tzap.Int(\"rowCount\", count), zap.Int(\"otherRowCount\", rowCount))\n\t\t\treturn 0, fmt.Errorf(\"field '%s' row count %d is not equal to other fields row count: %d\", name, count, rowCount)\n\t\t}\n\t}\n\n\treturn rowCount, nil\n}",
"func (h *fkCheckHelper) buildOtherTableScan() (outScope *scope, tabMeta *opt.TableMeta) {\n\totherTabMeta := h.mb.b.addTable(h.otherTab, tree.NewUnqualifiedTableName(h.otherTab.Name()))\n\treturn h.mb.b.buildScan(\n\t\totherTabMeta,\n\t\th.otherTabOrdinals,\n\t\t&tree.IndexFlags{IgnoreForeignKeys: true},\n\t\tnoRowLocking,\n\t\th.mb.b.allocScope(),\n\t), otherTabMeta\n}",
"func (outputer *rightOuterJoinResultGenerator) emit(outer chunk.Row, inners chunk.Iterator, chk *chunk.Chunk) error {\n\t// outer row can not be joined with any inner row.\n\tif inners == nil || inners.Len() == 0 {\n\t\tchk.AppendPartialRow(0, outputer.defaultInner)\n\t\tchk.AppendPartialRow(outputer.defaultInner.Len(), outer)\n\t\treturn nil\n\t}\n\toutputer.chk.Reset()\n\tchkForJoin := outputer.chk\n\tif len(outputer.conditions) == 0 {\n\t\tchkForJoin = chk\n\t}\n\tnumToAppend := outputer.maxChunkSize - chk.NumRows()\n\tfor ; inners.Current() != inners.End() && numToAppend > 0; numToAppend-- {\n\t\toutputer.makeJoinRowToChunk(chkForJoin, inners.Current(), outer)\n\t\tinners.Next()\n\t}\n\tif len(outputer.conditions) == 0 {\n\t\treturn nil\n\t}\n\t// reach here, chkForJoin is outputer.chk\n\tmatched, err := outputer.filter(chkForJoin, chk)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tchkForJoin.Reset()\n\t// outer row can not be joined with any inner row.\n\tif !matched {\n\t\tchk.AppendPartialRow(0, outputer.defaultInner)\n\t\tchk.AppendPartialRow(outputer.defaultInner.Len(), outer)\n\t}\n\treturn nil\n}",
"func (e *HashJoinExec) fetchInnerRows(ctx context.Context) (err error) {\n\te.innerResult = chunk.NewList(e.innerExec.retTypes(), e.maxChunkSize)\n\te.innerResult.GetMemTracker().AttachTo(e.memTracker)\n\te.innerResult.GetMemTracker().SetLabel(\"innerResult\")\n\tfor {\n\t\tchk := e.children[e.innerIdx].newChunk()\n\t\terr = e.innerExec.NextChunk(ctx, chk)\n\t\tif err != nil || chk.NumRows() == 0 {\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t\te.innerResult.Add(chk)\n\t}\n}",
"func expressionCoversJoin(c sql.Expression, j *plan.JoinNode) (found bool) {\n\treturn transform.InspectExpr(c, func(expr sql.Expression) bool {\n\t\tswitch e := expr.(type) {\n\t\tcase expression.Comparer:\n\t\t\treturn comparisonSatisfiesJoinCondition(e, j)\n\t\t}\n\t\treturn false\n\t})\n}",
"func (q *Query) Outer(associations ...*Association) *Query {\n\tif q.err != nil {\n\t\treturn q\n\t}\n\n\tq.DmlBase.inner(false, associations...)\n\tq.lastToken = nil\n\n\treturn q\n}",
"func (cu *CellUnion) LeafCellsCovered() int64 {\n\tvar numLeaves int64\n\tfor _, c := range *cu {\n\t\tnumLeaves += 1 << uint64((MaxLevel-int64(c.Level()))<<1)\n\t}\n\treturn numLeaves\n}",
"func (root *Root) getTableCount() (count uint) {\n\tcount = 1 // we include the root in the count\n\tif root.slots != nil {\n\t\tfor i := uint(0); i < root.slotCount; i++ {\n\t\t\tif root.slots[i] != nil {\n\t\t\t\tnode := root.slots[i]\n\t\t\t\tif node != nil && !node.IsLeaf() {\n\t\t\t\t\ttDeeper := node.(*Table)\n\t\t\t\t\tcount += tDeeper.getTableCount()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}",
"func (table ObjectMainTable) AllocatedCount() int {\n\tcapacity := table.Capacity()\n\tif capacity == 0 {\n\t\treturn 0\n\t}\n\tindex := int(table[0].CrossReferenceTableIndex)\n\tcount := 0\n\tfor (index != 0) && (count < capacity) {\n\t\tcount++\n\t\tindex = int(table[index].Next)\n\t}\n\treturn count\n}",
"func (c *FeatureContext) iShouldHaveCountRowsInTable(expectedCount int, tableName string) error {\n\tcount, err := queries.CountRows(c.qb, tableName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif count != expectedCount {\n\t\treturn fmt.Errorf(\"expected to find %d rows in table %q, got %d\", expectedCount, tableName, count)\n\t}\n\n\treturn nil\n}",
"func (*PhysicalTopN) canPushToIndexPlan(indexPlan PhysicalPlan, byItemCols []*expression.Column) bool {\n\t// If we call canPushToIndexPlan and there's no index plan, we should go into the index merge case.\n\t// Index merge case is specially handled for now. So we directly return false here.\n\t// So we directly return false.\n\tif indexPlan == nil {\n\t\treturn false\n\t}\n\tschema := indexPlan.Schema()\n\tfor _, col := range byItemCols {\n\t\tpos := schema.ColumnIndex(col)\n\t\tif pos == -1 {\n\t\t\treturn false\n\t\t}\n\t\tif schema.Columns[pos].IsPrefix {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate all possible combinations from join conditions for cost evaluation. It will try all keys in join conditions.
|
func constructPropertyByJoin(join *LogicalJoin) ([][]*requiredProperty, []int, error) {
var result [][]*requiredProperty
var condIndex []int
if join.EqualConditions == nil {
return nil, nil, nil
}
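	// For every eligible equality condition l.col = r.col, record the pair of
	// required properties (ordered by l.col, ordered by r.col) together with the
	// condition's index, so the caller can cost a merge join keyed on it.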
for i, cond := range join.EqualConditions {
if len(cond.GetArgs()) != 2 {
return nil, nil, errors.New("unexpected argument count for equal expression")
}
lExpr, rExpr := cond.GetArgs()[0], cond.GetArgs()[1]
		// Only consider raw column references and conservatively ignore computed
		// expressions, since we don't know whether a function call preserves order.
lColumn, lOK := lExpr.(*expression.Column)
rColumn, rOK := rExpr.(*expression.Column)
if lOK && rOK && compareTypeForOrder(lColumn.RetType, rColumn.RetType) {
result = append(result, []*requiredProperty{generateJoinProp(lColumn), generateJoinProp(rColumn)})
condIndex = append(condIndex, i)
		}
}
if len(result) == 0 {
return nil, nil, nil
}
return result, condIndex, nil
}
|
[
"func AllCombinations(input map[string][]string) (output []Row) {\n\tdeleteDups := func(result []Row) []Row {\n\t\thashes := map[string]bool{}\n\t\tfieldNames := []string{}\n\t\tfor fieldName := range input {\n\t\t\tfieldNames = append(fieldNames, fieldName)\n\t\t}\n\t\tcleanedResult := []Row{}\n\t\tfor _, row := range result {\n\t\t\thash := \"\"\n\t\t\tfor _, fieldName := range fieldNames {\n\t\t\t\thash += fieldName + \"|||\" + row[fieldName] + \"|||\"\n\t\t\t}\n\t\t\tif _, found := hashes[hash]; !found {\n\t\t\t\tcleanedResult = append(cleanedResult, row)\n\t\t\t\thashes[hash] = true\n\t\t\t}\n\t\t}\n\t\treturn cleanedResult\n\t}\n\n\taddRow := func(resRow Row, fieldName string, val string) Row {\n\t\tif _, ok := resRow[fieldName]; !ok {\n\t\t\tresRow[fieldName] = val\n\t\t\treturn resRow\n\t\t}\n\t\tif rVal, ok := resRow[fieldName]; ok && rVal == val {\n\t\t\treturn nil\n\t\t}\n\t\tif rVal, ok := resRow[fieldName]; ok && rVal != val {\n\t\t\tnewRow := Row{}\n\t\t\tfor resRowFieldName, resRowFieldVal := range resRow {\n\t\t\t\tnewRow[resRowFieldName] = resRowFieldVal\n\t\t\t}\n\t\t\tnewRow[fieldName] = val\n\t\t\treturn newRow\n\t\t}\n\t\treturn nil\n\t}\n\n\tresult := []Row{}\n\tfor fieldName, fieldVals := range input {\n\t\tfor _, val := range fieldVals {\n\t\t\tif len(result) == 0 {\n\t\t\t\tnewRow := Row{}\n\t\t\t\tnewRow[fieldName] = val\n\t\t\t\tresult = append(result, newRow)\n\t\t\t}\n\t\t\tfor _, resRow := range result {\n\t\t\t\tif newRow := addRow(resRow, fieldName, val); newRow != nil {\n\t\t\t\t\tresult = append(result, newRow)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tresult = deleteDups(result)\n\t\t}\n\t}\n\n\treturn deleteDups(result)\n}",
"func (b *SQLBuilder) Join(j ...join) {\n\tb.w.AddIndent()\n\tdefer b.w.SubIndent()\n\n\tfor _, v := range j {\n\t\tb.w.WriteString(string(v.Type) + ` JOIN ` + b.SourceToSQL(v.New))\n\t\tif len(v.Conditions) > 0 {\n\t\t\tb.w.WriteString(` ON (`)\n\t\t\tb.Conditions(v.Conditions, false)\n\t\t\tb.w.WriteString(`)`)\n\t\t}\n\t\tb.w.WriteLine(``)\n\t}\n}",
"func joinConstraints(cs []*pod.Constraint, op joinOp) *pod.Constraint {\n\tif len(cs) == 0 {\n\t\treturn nil\n\t}\n\tif len(cs) == 1 {\n\t\treturn cs[0]\n\t}\n\n\tswitch op {\n\tcase _andOp:\n\t\treturn &pod.Constraint{\n\t\t\tType: pod.Constraint_CONSTRAINT_TYPE_AND,\n\t\t\tAndConstraint: &pod.AndConstraint{\n\t\t\t\tConstraints: cs,\n\t\t\t},\n\t\t}\n\tcase _orOp:\n\t\treturn &pod.Constraint{\n\t\t\tType: pod.Constraint_CONSTRAINT_TYPE_OR,\n\t\t\tOrConstraint: &pod.OrConstraint{\n\t\t\t\tConstraints: cs,\n\t\t\t},\n\t\t}\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown join op: %v\", op))\n\t}\n}",
"func Join(this context.Context, that context.Context) context.Context {\n\tfor bkey := range knownKeys {\n\t\tval := that.Value(bkey)\n\t\tif val != nil {\n\t\t\tthis = withBaggage(this, bkey, val.(string))\n\t\t}\n\t}\n\treturn this\n}",
"func Join(g1 Grouping, col1 string, g2 Grouping, col2 string) Grouping {\n\tvar ng GroupingBuilder\n\tfor _, gid := range g1.Tables() {\n\t\tt1, t2 := g1.Table(gid), g2.Table(gid)\n\t\tif t2 == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// TODO: Optimize for cases where col1 and/or col2 are\n\t\t// constant.\n\n\t\t// Index col2 in t2.\n\t\tridx := make(map[interface{}][]int)\n\t\trv := reflect.ValueOf(t2.MustColumn(col2))\n\t\tfor i, l := 0, rv.Len(); i < l; i++ {\n\t\t\tv := rv.Index(i).Interface()\n\t\t\tridx[v] = append(ridx[v], i)\n\t\t}\n\n\t\t// For each row in t1, find the matching rows in col2\n\t\t// and build up the row indexes for t1 and t2.\n\t\tidx1, idx2 := []int{}, []int{}\n\t\tlv := reflect.ValueOf(t1.MustColumn(col1))\n\t\tfor i, l := 0, lv.Len(); i < l; i++ {\n\t\t\tr := ridx[lv.Index(i).Interface()]\n\t\t\tfor range r {\n\t\t\t\tidx1 = append(idx1, i)\n\t\t\t}\n\t\t\tidx2 = append(idx2, r...)\n\t\t}\n\n\t\t// Build the joined table.\n\t\tvar nt Builder\n\t\tfor _, col := range t1.Columns() {\n\t\t\tif cv, ok := t1.Const(col); ok {\n\t\t\t\tnt.Add(col, cv)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnt.Add(col, slice.Select(t1.Column(col), idx1))\n\t\t}\n\t\tfor _, col := range t2.Columns() {\n\t\t\t// Often the join column is the same in both\n\t\t\t// and we can skip it because we added it from\n\t\t\t// the first table.\n\t\t\tif col == col1 && col == col2 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif cv, ok := t2.Const(col); ok {\n\t\t\t\tnt.Add(col, cv)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnt.Add(col, slice.Select(t2.Column(col), idx2))\n\t\t}\n\n\t\tng.Add(gid, nt.Done())\n\t}\n\treturn ng.Done()\n}",
"func (hj *hashJoiner) runEqJoin(result func(identityKey string, offset uint32, fs FactSet, rowValues []Value)) {\n\trightJoinColumnIndexes := hj.rightColumns.MustIndexesOf(hj.joinVars)\n\tfor chunk := range hj.rightResCh {\n\t\tfor i := range chunk.offsets {\n\t\t\tjoinIdentityKey := chunk.identityKeysOf(i, rightJoinColumnIndexes)\n\t\t\tleftRows := hj.leftJoinValues[string(joinIdentityKey)]\n\t\t\t// create new factsSets for left[0] + right[i], left[1] + right[i], etc.\n\t\t\tfor _, left := range leftRows {\n\t\t\t\tresult(joinIdentityKey,\n\t\t\t\t\tleft.offset,\n\t\t\t\t\tjoinFactSets(left.fact, chunk.Facts[i]),\n\t\t\t\t\thj.joiner(left.vals, chunk.Row(i)))\n\t\t\t}\n\t\t}\n\t}\n}",
"func (f *Factory) ConstructJoin(\n\tjoinOp opt.Operator, left, right memo.RelExpr, on memo.FiltersExpr, private *memo.JoinPrivate,\n) memo.RelExpr {\n\tswitch joinOp {\n\tcase opt.InnerJoinOp:\n\t\treturn f.ConstructInnerJoin(left, right, on, private)\n\tcase opt.InnerJoinApplyOp:\n\t\treturn f.ConstructInnerJoinApply(left, right, on, private)\n\tcase opt.LeftJoinOp:\n\t\treturn f.ConstructLeftJoin(left, right, on, private)\n\tcase opt.LeftJoinApplyOp:\n\t\treturn f.ConstructLeftJoinApply(left, right, on, private)\n\tcase opt.RightJoinOp:\n\t\treturn f.ConstructRightJoin(left, right, on, private)\n\tcase opt.FullJoinOp:\n\t\treturn f.ConstructFullJoin(left, right, on, private)\n\tcase opt.SemiJoinOp:\n\t\treturn f.ConstructSemiJoin(left, right, on, private)\n\tcase opt.SemiJoinApplyOp:\n\t\treturn f.ConstructSemiJoinApply(left, right, on, private)\n\tcase opt.AntiJoinOp:\n\t\treturn f.ConstructAntiJoin(left, right, on, private)\n\tcase opt.AntiJoinApplyOp:\n\t\treturn f.ConstructAntiJoinApply(left, right, on, private)\n\t}\n\tpanic(errors.AssertionFailedf(\"unexpected join operator: %v\", log.Safe(joinOp)))\n}",
"func (hj *hashJoiner) eqJoin(ctx context.Context) {\n\thj.runEqJoin(func(_ string, offset uint32, fs FactSet, rowValues []Value) {\n\t\thj.outputTo.add(ctx, offset, fs, rowValues)\n\t})\n}",
"func (ht *HashTable) buildNextChains(\n\tctx context.Context, first, next []uint64, offset, batchSize uint64,\n) {\n\t// The loop direction here is reversed to ensure that when we are building the\n\t// next chain for the probe table, the keyID in each equality chain inside\n\t// `next` is strictly in ascending order. This is crucial to ensure that when\n\t// built in distinct mode, hash table will emit the first distinct tuple it\n\t// encounters. When the next chain is built for build side, this invariant no\n\t// longer holds for the equality chains inside `next`. This is ok however for\n\t// the build side since all tuple that buffered on build side are already\n\t// distinct, therefore we can be sure that when we emit a tuple, there cannot\n\t// potentially be other tuples with the same key.\n\tfor id := offset + batchSize - 1; id >= offset; id-- {\n\t\t// keyID is stored into corresponding hash bucket at the front of the next\n\t\t// chain.\n\t\thash := next[id]\n\t\tfirstKeyID := first[hash]\n\t\t// This is to ensure that `first` always points to the tuple with smallest\n\t\t// keyID in each equality chain. firstKeyID==0 means it is the first tuple\n\t\t// that we have encountered with the given hash value.\n\t\tif firstKeyID == 0 || id < firstKeyID {\n\t\t\tnext[id] = first[hash]\n\t\t\tfirst[hash] = id\n\t\t} else {\n\t\t\tnext[id] = next[firstKeyID]\n\t\t\tnext[firstKeyID] = id\n\t\t}\n\t}\n\tht.cancelChecker.CheckEveryCall(ctx)\n}",
"func (ds *DataSource) generateIndexMergeAndPaths(normalPathCnt int) *util.AccessPath {\n\t// For now, we only consider intersection type IndexMerge when the index names are specified in the hints.\n\tif !ds.indexMergeHintsHasSpecifiedIdx() {\n\t\treturn nil\n\t}\n\n\t// 1. Collect partial paths from normal paths.\n\tvar partialPaths []*util.AccessPath\n\tfor i := 0; i < normalPathCnt; i++ {\n\t\toriginalPath := ds.possibleAccessPaths[i]\n\t\t// No need to consider table path as a partial path.\n\t\tif ds.possibleAccessPaths[i].IsTablePath() {\n\t\t\tcontinue\n\t\t}\n\t\tif !ds.isSpecifiedInIndexMergeHints(originalPath.Index.Name.L) {\n\t\t\tcontinue\n\t\t}\n\t\t// If the path contains a full range, ignore it.\n\t\tif ranger.HasFullRange(originalPath.Ranges, false) {\n\t\t\tcontinue\n\t\t}\n\t\tnewPath := originalPath.Clone()\n\t\tpartialPaths = append(partialPaths, newPath)\n\t}\n\tif len(partialPaths) < 2 {\n\t\treturn nil\n\t}\n\n\t// 2. Collect filters that can't be covered by the partial paths and deduplicate them.\n\tfinalFilters := make([]expression.Expression, 0)\n\tpartialFilters := make([]expression.Expression, 0, len(partialPaths))\n\thashCodeSet := make(map[string]struct{})\n\tfor _, path := range partialPaths {\n\t\t// Classify filters into coveredConds and notCoveredConds.\n\t\tcoveredConds := make([]expression.Expression, 0, len(path.AccessConds)+len(path.IndexFilters))\n\t\tnotCoveredConds := make([]expression.Expression, 0, len(path.IndexFilters)+len(path.TableFilters))\n\t\t// AccessConds can be covered by partial path.\n\t\tcoveredConds = append(coveredConds, path.AccessConds...)\n\t\tfor i, cond := range path.IndexFilters {\n\t\t\t// IndexFilters can be covered by partial path if it can be pushed down to TiKV.\n\t\t\tif !expression.CanExprsPushDown(ds.SCtx().GetSessionVars().StmtCtx, []expression.Expression{cond}, ds.SCtx().GetClient(), kv.TiKV) {\n\t\t\t\tpath.IndexFilters = append(path.IndexFilters[:i], path.IndexFilters[i+1:]...)\n\t\t\t\tnotCoveredConds = append(notCoveredConds, cond)\n\t\t\t} else {\n\t\t\t\tcoveredConds = append(coveredConds, cond)\n\t\t\t}\n\t\t}\n\t\t// TableFilters can't be covered by partial path.\n\t\tnotCoveredConds = append(notCoveredConds, path.TableFilters...)\n\n\t\t// Record covered filters in hashCodeSet.\n\t\t// Note that we only record filters that not appear in the notCoveredConds. It's possible that a filter appear\n\t\t// in both coveredConds and notCoveredConds (e.g. because of prefix index). 
So we need this extra check to\n\t\t// avoid wrong deduplication.\n\t\tnotCoveredHashCodeSet := make(map[string]struct{})\n\t\tfor _, cond := range notCoveredConds {\n\t\t\thashCode := string(cond.HashCode(ds.SCtx().GetSessionVars().StmtCtx))\n\t\t\tnotCoveredHashCodeSet[hashCode] = struct{}{}\n\t\t}\n\t\tfor _, cond := range coveredConds {\n\t\t\thashCode := string(cond.HashCode(ds.SCtx().GetSessionVars().StmtCtx))\n\t\t\tif _, ok := notCoveredHashCodeSet[hashCode]; !ok {\n\t\t\t\thashCodeSet[hashCode] = struct{}{}\n\t\t\t}\n\t\t}\n\n\t\tfinalFilters = append(finalFilters, notCoveredConds...)\n\t\tpartialFilters = append(partialFilters, coveredConds...)\n\t}\n\n\t// Remove covered filters from finalFilters and deduplicate finalFilters.\n\tdedupedFinalFilters := make([]expression.Expression, 0, len(finalFilters))\n\tfor _, cond := range finalFilters {\n\t\thashCode := string(cond.HashCode(ds.SCtx().GetSessionVars().StmtCtx))\n\t\tif _, ok := hashCodeSet[hashCode]; !ok {\n\t\t\tdedupedFinalFilters = append(dedupedFinalFilters, cond)\n\t\t\thashCodeSet[hashCode] = struct{}{}\n\t\t}\n\t}\n\n\t// Keep these partial filters as a part of table filters for safety if there is any parameter.\n\tif expression.MaybeOverOptimized4PlanCache(ds.SCtx(), partialFilters) {\n\t\tdedupedFinalFilters = append(dedupedFinalFilters, partialFilters...)\n\t}\n\n\t// 3. Estimate the row count after partial paths.\n\tsel, _, err := cardinality.Selectivity(ds.SCtx(), ds.tableStats.HistColl, partialFilters, nil)\n\tif err != nil {\n\t\tlogutil.BgLogger().Debug(\"something wrong happened, use the default selectivity\", zap.Error(err))\n\t\tsel = SelectionFactor\n\t}\n\n\tindexMergePath := &util.AccessPath{\n\t\tPartialIndexPaths: partialPaths,\n\t\tIndexMergeIsIntersection: true,\n\t\tTableFilters: dedupedFinalFilters,\n\t\tCountAfterAccess: sel * ds.tableStats.RowCount,\n\t}\n\treturn indexMergePath\n}",
"func (r Relation) MergeJoin(leftCols []AttrInfo, rightRelation Relationer, rightCols []AttrInfo, joinType JoinType, compType Comparison) Relationer {\n\tright, isRelation := rightRelation.(Relation)\n\n\tif !isRelation {\n\t\tpanic(\"unknown relation type\")\n\t\t// TODO: implement using Relationer.GetRawData()\n\t}\n\n\ttype MergeData struct {\n\t\tLeft *Column\n\t\tRight *Column\n\t\tCompare CompFunc\n\t\tLesser CompFunc\n\t\tEquals CompFunc\n\t}\n\n\tright = right.MergeSort(rightCols, ASC).(Relation)\n\tleft := r.MergeSort(leftCols, ASC).(Relation)\n\toutput := Relation{Columns: []Column{}}\n\n\tleftIndices := []int{}\n\trightIndices := []int{}\n\n\tleftRow, rightRow := 0, 0\n\tmaxLeftRows := left.Columns[0].GetNumRows()\n\tmaxRightRows := right.Columns[0].GetNumRows()\n\tvar mergeData []MergeData\n\n\taddOutputCols := func(base *Relation, tableName string, nullable bool) {\n\t\tif nullable {\n\t\t\tpanic(\"NULL values not implemented\")\n\t\t}\n\t\tfor _, col := range base.Columns {\n\t\t\tsignature := AttrInfo{Name: tableName + \".\" + col.Signature.Name, Enc: col.Signature.Enc, Type: col.Signature.Type}\n\t\t\toutput.Columns = append(output.Columns, NewColumn(signature))\n\t\t}\n\t}\n\n\tgetMergeData := func() []MergeData {\n\t\toutput := []MergeData{}\n\n\t\tfor sigIndex, signature := range leftCols {\n\t\t\tentry := MergeData{}\n\n\t\t\tfor colIndex, col := range left.Columns {\n\t\t\t\tif col.Signature == signature {\n\t\t\t\t\tentry.Left = &left.Columns[colIndex]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor colIndex, col := range right.Columns {\n\t\t\t\tif col.Signature == rightCols[sigIndex] {\n\t\t\t\t\tentry.Right = &right.Columns[colIndex]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif entry.Left == nil || entry.Right == nil {\n\t\t\t\tpanic(\"column not found\")\n\t\t\t}\n\n\t\t\tentry.Equals = compFuncs[signature.Type][EQ]\n\t\t\tentry.Lesser = compFuncs[signature.Type][LT]\n\n\t\t\toutput = append(output, entry)\n\t\t}\n\n\t\treturn output\n\t}\n\n\tisEqual := func(leftIndex int, rightIndex int) bool {\n\t\tfor _, entry := range mergeData {\n\t\t\tleftValue, _ := entry.Left.GetRow(leftIndex)\n\t\t\trightValue, _ := entry.Right.GetRow(rightIndex)\n\t\t\tif !entry.Equals(leftValue, rightValue) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\tisLesser := func(leftIndex, rightIndex int) bool {\n\t\tfor _, entry := range mergeData {\n\t\t\tleftValue, _ := entry.Left.GetRow(leftIndex)\n\t\t\trightValue, _ := entry.Right.GetRow(rightIndex)\n\n\t\t\tif entry.Lesser(leftValue, rightValue) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif !entry.Equals(leftValue, rightValue) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\t// entries are equal\n\t\treturn false\n\t}\n\n\tgetNextRow := func(compare func(int, int) bool) int {\n\t\tnextRow := rightRow + 1\n\n\t\tfor nextRow < maxRightRows && compare(leftRow, nextRow) {\n\t\t\tnextRow++\n\t\t}\n\n\t\treturn nextRow\n\t}\n\n\tinnerJoin := func() ([]int, []int) {\n\t\tmergeData = getMergeData()\n\n\t\tfor leftRow < maxLeftRows && rightRow < maxRightRows {\n\t\t\tif isEqual(leftRow, rightRow) {\n\t\t\t\t// leftValue == rightValue\n\t\t\t\tnextRow := getNextRow(isEqual)\n\n\t\t\t\tswitch compType {\n\t\t\t\tcase GT:\n\t\t\t\t\tfor i := 0; i < rightRow; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase GEQ:\n\t\t\t\t\tfor i := 0; i < nextRow; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = 
append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase LT:\n\t\t\t\t\tfor i := nextRow; i < maxRightRows; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase LEQ:\n\t\t\t\t\tfor i := rightRow; i < maxRightRows; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase EQ:\n\t\t\t\t\tfor i := rightRow; i < nextRow; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase NEQ:\n\t\t\t\t\tfor i := 0; i < rightRow; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\t\tfor i := nextRow; i < maxRightRows; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tleftRow++\n\t\t\t} else if isLesser(leftRow, rightRow) {\n\t\t\t\t// leftValue < rightValue\n\t\t\t\tswitch compType {\n\t\t\t\tcase GT, GEQ:\n\t\t\t\t\tfor i := 0; i < rightRow; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase LT, LEQ:\n\t\t\t\t\tfor i := rightRow; i < maxRightRows; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase NEQ:\n\t\t\t\t\tfor i := 0; i < maxRightRows; i++ {\n\t\t\t\t\t\tif !isEqual(leftRow, i) {\n\t\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tleftRow++\n\t\t\t} else {\n\t\t\t\t// leftValue > rightValue\n\t\t\t\tnextRow := getNextRow(func(l, r int) bool { return !isEqual(l, r) && !isLesser(l, r) })\n\n\t\t\t\tswitch compType {\n\t\t\t\tcase NEQ:\n\t\t\t\t\t// do something?\n\t\t\t\t}\n\n\t\t\t\trightRow = nextRow\n\t\t\t}\n\t\t}\n\n\t\treturn leftIndices, rightIndices\n\t}\n\n\tsemiJoin := func() {\n\t\tmergeData = getMergeData()\n\n\t\tif compType != EQ {\n\t\t\tpanic(\"semi join only supports equality comparison\")\n\t\t}\n\n\t\tfor leftRow < maxLeftRows && rightRow < maxRightRows {\n\t\t\tif isEqual(leftRow, rightRow) {\n\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\n\t\t\t\tleftRow++\n\t\t\t} else if isLesser(leftRow, rightRow) {\n\t\t\t\tleftRow++\n\t\t\t} else {\n\t\t\t\tnextRow := getNextRow(func(l, r int) bool { return !isEqual(l, r) && !isLesser(l, r) })\n\t\t\t\trightRow = nextRow\n\t\t\t}\n\t\t}\n\t}\n\n\tcopyColumn := func(source *Column, dest *Column, indices []int) {\n\t\tfor _, row := range indices {\n\t\t\tvalue, _ := source.GetRow(row)\n\t\t\tdest.AddRow(source.Signature.Type, value)\n\t\t}\n\t}\n\n\tcopyLeftValues := func(indices []int) {\n\t\tfor colIndex := range left.Columns {\n\t\t\tcopyColumn(&left.Columns[colIndex], &output.Columns[colIndex], indices)\n\t\t}\n\t}\n\n\tcopyRightValues := func(indices []int) {\n\t\tnumLeftCols := len(left.Columns)\n\t\tfor colIndex := range right.Columns {\n\t\t\tcopyColumn(&right.Columns[colIndex], &output.Columns[numLeftCols+colIndex], indices)\n\t\t}\n\t}\n\n\tswitch joinType {\n\tcase INNER:\n\t\toutput.Name = r.Name + \" x \" + rightRelation.(Relation).Name\n\t\taddOutputCols(&left, r.Name, false)\n\t\taddOutputCols(&right, rightRelation.(Relation).Name, 
false)\n\t\tinnerJoin()\n\t\tcopyLeftValues(leftIndices)\n\t\tcopyRightValues(rightIndices)\n\t\tbreak\n\tcase SEMI:\n\t\toutput.Name = r.Name + \" (x \" + rightRelation.(Relation).Name + \")\"\n\t\taddOutputCols(&left, r.Name, false)\n\t\tsemiJoin()\n\t\tcopyLeftValues(leftIndices)\n\t\tbreak\n\tcase LEFTOUTER:\n\t\t// handle null values on left\n\t\tpanic(\"NULL values not implemented\")\n\tcase RIGHTOUTER:\n\t\t// handle null values on right\n\t\tpanic(\"NULL values not implemented\")\n\tdefault:\n\t\tpanic(\"unknown JoinType\")\n\t}\n\n\treturn output\n}",
"func (e *HashJoinExec) buildHashTableForList() error {\n\te.hashTable = mvmap.NewMVMap()\n\te.innerKeyColIdx = make([]int, len(e.innerKeys))\n\tfor i := range e.innerKeys {\n\t\te.innerKeyColIdx[i] = e.innerKeys[i].Index\n\t}\n\tvar (\n\t\thasNull bool\n\t\terr error\n\t\tkeyBuf = make([]byte, 0, 64)\n\t\tvalBuf = make([]byte, 8)\n\t)\n\tfor i := 0; i < e.innerResult.NumChunks(); i++ {\n\t\tchk := e.innerResult.GetChunk(i)\n\t\tfor j := 0; j < chk.NumRows(); j++ {\n\t\t\thasNull, keyBuf, err = e.getJoinKeyFromChkRow(false, chk.GetRow(j), keyBuf)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Trace(err)\n\t\t\t}\n\t\t\tif hasNull {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trowPtr := chunk.RowPtr{ChkIdx: uint32(i), RowIdx: uint32(j)}\n\t\t\t*(*chunk.RowPtr)(unsafe.Pointer(&valBuf[0])) = rowPtr\n\t\t\te.hashTable.Put(keyBuf, valBuf)\n\t\t}\n\t}\n\treturn nil\n}",
"func combinations(items []Item, ch chan []Item) {\t\n\tdefer close(ch)\t\n\n\tp := int(math.Pow(2., float64(len(items))))\t\n\n\tfor i := 0; i < p; i++ {\t\n\t\tset := []Item{}\t\n\t\tfor j := 0; j < len(items); j++ {\t\n\t\t\tif (i>>uint(j))&1 == 1 {\t\n\t\t\t\tset = append(set, items[j])\t\n\t\t\t}\t\n\t\t}\t\n\t\tch <- set\t\n\t}\t\n}",
"func getGroupJoin(inner interface{},\r\n\touterKeySelector OneArgsFunc,\r\n\tinnerKeySelector OneArgsFunc,\r\n\tresultSelector func(interface{}, []interface{}) interface{}, isLeftJoin bool) stepAction {\r\n\r\n\treturn getJoinImpl(inner, outerKeySelector, innerKeySelector,\r\n\t\tfunc(outerkv *hKeyValue, innerList []interface{}, results *[]interface{}) {\r\n\t\t\t*results = appendToSlice1(*results, resultSelector(outerkv.value, innerList))\r\n\t\t}, func(outerkv *hKeyValue, results *[]interface{}) {\r\n\t\t\t*results = appendToSlice1(*results, resultSelector(outerkv.value, []interface{}{}))\r\n\t\t}, isLeftJoin)\r\n}",
"func (and AndSet) Join(other AndSet) AndSet {\n\treturn append(and, other...)\n}",
"func (iw *indexHashJoinInnerWorker) doJoinInOrder(ctx context.Context, task *indexHashJoinTask, joinResult *indexHashJoinResult, h hash.Hash64, resultCh chan *indexHashJoinResult) (err error) {\n\tdefer func() {\n\t\tif err == nil && joinResult.chk != nil {\n\t\t\tif joinResult.chk.NumRows() > 0 {\n\t\t\t\tselect {\n\t\t\t\tcase resultCh <- joinResult:\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tjoinResult.src <- joinResult.chk\n\t\t\t}\n\t\t}\n\t}()\n\tfor i, numChunks := 0, task.innerResult.NumChunks(); i < numChunks; i++ {\n\t\tfor j, chk := 0, task.innerResult.GetChunk(i); j < chk.NumRows(); j++ {\n\t\t\trow := chk.GetRow(j)\n\t\t\tptr := chunk.RowPtr{ChkIdx: uint32(i), RowIdx: uint32(j)}\n\t\t\terr = iw.collectMatchedInnerPtrs4OuterRows(row, ptr, task, h, iw.joinKeyBuf)\n\t\t\tfailpoint.Inject(\"TestIssue31129\", func() {\n\t\t\t\terr = errors.New(\"TestIssue31129\")\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\t// TODO: matchedInnerRowPtrs and matchedInnerRows can be moved to inner worker.\n\tmatchedInnerRows := make([]chunk.Row, 0, len(task.matchedInnerRowPtrs))\n\tvar hasMatched, hasNull, ok bool\n\tfor chkIdx, innerRowPtrs4Chk := range task.matchedInnerRowPtrs {\n\t\tfor outerRowIdx, innerRowPtrs := range innerRowPtrs4Chk {\n\t\t\tmatchedInnerRows, hasMatched, hasNull = matchedInnerRows[:0], false, false\n\t\t\touterRow := task.outerResult.GetChunk(chkIdx).GetRow(outerRowIdx)\n\t\t\tfor _, ptr := range innerRowPtrs {\n\t\t\t\tmatchedInnerRows = append(matchedInnerRows, task.innerResult.GetRow(ptr))\n\t\t\t}\n\t\t\tiw.rowIter.Reset(matchedInnerRows)\n\t\t\titer := iw.rowIter\n\t\t\tfor iter.Begin(); iter.Current() != iter.End(); {\n\t\t\t\tmatched, isNull, err := iw.joiner.tryToMatchInners(outerRow, iter, joinResult.chk)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\thasMatched, hasNull = matched || hasMatched, isNull || hasNull\n\t\t\t\tif joinResult.chk.IsFull() {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase resultCh <- joinResult:\n\t\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\t\treturn ctx.Err()\n\t\t\t\t\t}\n\t\t\t\t\tjoinResult, ok = iw.getNewJoinResult(ctx)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn errors.New(\"indexHashJoinInnerWorker.doJoinInOrder failed\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !hasMatched {\n\t\t\t\tiw.joiner.onMissMatch(hasNull, outerRow, joinResult.chk)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (b SelectBuilder) CrossJoin(join string, rest ...interface{}) SelectBuilder {\n\treturn b.JoinClause(\"CROSS JOIN \"+join, rest...)\n}",
"func (ht *HashTable) buildFromBufferedTuples(ctx context.Context) {\n\tfor ht.shouldResize(ht.Vals.Length()) {\n\t\tht.numBuckets *= 2\n\t}\n\tht.BuildScratch.First = colexecutils.MaybeAllocateUint64Array(ht.BuildScratch.First, int(ht.numBuckets))\n\tif ht.ProbeScratch.First != nil {\n\t\tht.ProbeScratch.First = colexecutils.MaybeAllocateUint64Array(ht.ProbeScratch.First, int(ht.numBuckets))\n\t}\n\tfor i, keyCol := range ht.keyCols {\n\t\tht.Keys[i] = ht.Vals.ColVec(int(keyCol))\n\t}\n\t// ht.BuildScratch.Next is used to store the computed hash value of each key.\n\tht.BuildScratch.Next = colexecutils.MaybeAllocateUint64Array(ht.BuildScratch.Next, ht.Vals.Length()+1)\n\tht.ComputeBuckets(ctx, ht.BuildScratch.Next[1:], ht.Keys, ht.Vals.Length(), nil /* sel */)\n\tht.buildNextChains(ctx, ht.BuildScratch.First, ht.BuildScratch.Next, 1, uint64(ht.Vals.Length()))\n\t// Account for memory used by the internal auxiliary slices that are\n\t// limited in size.\n\tht.ProbeScratch.accountForLimitedSlices(ht.allocator)\n\t// Note that if ht.ProbeScratch.first is nil, it'll have zero capacity.\n\tnewUint64Count := int64(cap(ht.BuildScratch.First) + cap(ht.ProbeScratch.First) + cap(ht.BuildScratch.Next))\n\tht.allocator.AdjustMemoryUsage(sizeOfUint64 * (newUint64Count - ht.unlimitedSlicesNumUint64AccountedFor))\n\tht.unlimitedSlicesNumUint64AccountedFor = newUint64Count\n}",
"func mergeConditionKeyMap(condKeyMap1, condKeyMap2 ConditionKeyMap) ConditionKeyMap {\n\tout := CopyConditionKeyMap(condKeyMap1)\n\n\tfor k, v := range condKeyMap2 {\n\t\tif ev, ok := out[k]; ok {\n\t\t\tout[k] = ev.Union(v)\n\t\t} else {\n\t\t\tout[k] = set.CopyStringSet(v)\n\t\t}\n\t}\n\n\treturn out\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
convert2PhysicalMergeJoin converts the merge join to physicalPlanInfo. TODO: Refactor and merge with hash join
|
func (p *LogicalJoin) convert2PhysicalMergeJoin(parentProp *requiredProperty, lProp *requiredProperty, rProp *requiredProperty, condIndex int, joinType JoinType) (*physicalPlanInfo, error) {
lChild := p.children[0].(LogicalPlan)
rChild := p.children[1].(LogicalPlan)
newEQConds := make([]*expression.ScalarFunction, 0, len(p.EqualConditions)-1)
for i, cond := range p.EqualConditions {
if i == condIndex {
continue
}
		// Clone before resolving indices so the original condition isn't mutated.
newCond := cond.Clone()
newCond.ResolveIndices(p.schema)
newEQConds = append(newEQConds, newCond.(*expression.ScalarFunction))
}
eqCond := p.EqualConditions[condIndex]
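	// The chosen equality condition drives the merge ordering; the remaining
	// equality conditions are evaluated as ordinary filter conditions.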
otherFilter := append(expression.ScalarFuncs2Exprs(newEQConds), p.OtherConditions...)
join := PhysicalMergeJoin{
EqualConditions: []*expression.ScalarFunction{eqCond},
LeftConditions: p.LeftConditions,
RightConditions: p.RightConditions,
OtherConditions: otherFilter,
DefaultValues: p.DefaultValues,
		// Assume the sort direction is the same on both sides.
Desc: lProp.props[0].desc,
}.init(p.allocator, p.ctx)
join.SetSchema(p.schema)
join.JoinType = joinType
var lInfo *physicalPlanInfo
var rInfo *physicalPlanInfo
	// Cost two alternatives for the left child: build an unordered plan and
	// enforce the sort on top, or require the child to deliver sorted output;
	// keep the cheaper one.
lInfoEnforceSort, err := lChild.convert2PhysicalPlan(&requiredProperty{})
if err != nil {
return nil, errors.Trace(err)
}
lInfoEnforceSort = enforceProperty(lProp, lInfoEnforceSort)
lInfoNoSorted, err := lChild.convert2PhysicalPlan(lProp)
if err != nil {
return nil, errors.Trace(err)
}
if lInfoNoSorted.cost < lInfoEnforceSort.cost {
lInfo = lInfoNoSorted
} else {
lInfo = lInfoEnforceSort
}
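	// Cost the same two alternatives for the right child.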
rInfoEnforceSort, err := rChild.convert2PhysicalPlan(&requiredProperty{})
if err != nil {
return nil, errors.Trace(err)
}
rInfoEnforceSort = enforceProperty(rProp, rInfoEnforceSort)
rInfoNoSorted, err := rChild.convert2PhysicalPlan(rProp)
if err != nil {
return nil, errors.Trace(err)
}
if rInfoEnforceSort.cost < rInfoNoSorted.cost {
rInfo = rInfoEnforceSort
} else {
rInfo = rInfoNoSorted
}
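	// Let the join try to satisfy (consume) the parent's required order via the
	// merge key, so no redundant sort is enforced on top.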
parentProp = join.tryConsumeOrder(parentProp, eqCond)
resultInfo := join.matchProperty(parentProp, lInfo, rInfo)
	// TODO: Consider keeping the order produced by the join so that at least
	// one ordering property can be removed.
resultInfo = enforceProperty(parentProp, resultInfo)
return resultInfo, nil
}
|
[
"func (p *LogicalJoin) convert2PhysicalPlanRight(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallRight := true\n\tfor _, col := range prop.props {\n\t\tif !rChild.Schema().Contains(col.col) {\n\t\t\tallRight = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = RightOuterJoin\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trProp := prop\n\tif !allRight {\n\t\trProp = &requiredProperty{}\n\t} else {\n\t\trProp = replaceColsInPropBySchema(rProp, rChild.Schema())\n\t}\n\tvar rInfo *physicalPlanInfo\n\tif innerJoin {\n\t\trInfo, err = rChild.convert2PhysicalPlan(removeLimit(rProp))\n\t} else {\n\t\trInfo, err = rChild.convert2PhysicalPlan(convertLimitOffsetToCount(rProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allRight {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalJoin) convert2PhysicalPlanLeft(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tSmallTable: 1,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = LeftOuterJoin\n\t}\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tvar lInfo *physicalPlanInfo\n\tvar err error\n\tif innerJoin {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(removeLimit(lProp))\n\t} else {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(convertLimitOffsetToCount(lProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalApply) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tif p.JoinType == InnerJoin || p.JoinType == LeftOuterJoin {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanLeft(&requiredProperty{}, p.JoinType == InnerJoin)\n\t} else {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanSemi(&requiredProperty{})\n\t}\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tswitch info.p.(type) {\n\tcase *PhysicalHashJoin, *PhysicalHashSemiJoin:\n\t\tap := PhysicalApply{\n\t\t\tPhysicalJoin: info.p,\n\t\t\tOuterSchema: p.corCols,\n\t\t}.init(p.allocator, p.ctx)\n\t\tap.SetChildren(info.p.Children()...)\n\t\tap.SetSchema(info.p.Schema())\n\t\tinfo.p = ap\n\tdefault:\n\t\tinfo.cost = math.MaxFloat64\n\t\tinfo.p = nil\n\t}\n\tinfo = enforceProperty(prop, info)\n\tp.storePlanInfo(prop, info)\n\treturn info, nil\n}",
"func (p *LogicalJoin) convert2PhysicalPlanSemi(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashSemiJoin{\n\t\tWithAux: LeftOuterSemiJoin == p.JoinType,\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tAnti: p.anti,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tif p.JoinType == SemiJoin {\n\t\tlProp = removeLimit(lProp)\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif p.JoinType == SemiJoin {\n\t\tresultInfo.count = lInfo.count * selectionFactor\n\t} else {\n\t\tresultInfo.count = lInfo.count\n\t}\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else if p.JoinType == SemiJoin {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanHash() (*physicalPlanInfo, error) {\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tdistinct := false\n\tfor _, fun := range p.AggFuncs {\n\t\tif fun.IsDistinct() {\n\t\t\tdistinct = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !distinct {\n\t\tif x, ok := childInfo.p.(physicalDistSQLPlan); ok {\n\t\t\tinfo := p.convert2PhysicalPlanFinalHash(x, childInfo)\n\t\t\tif info != nil {\n\t\t\t\treturn info, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn p.convert2PhysicalPlanCompleteHash(childInfo), nil\n}",
"func (pb *primitiveBuilder) processJoin(ajoin *sqlparser.JoinTableExpr, reservedVars *sqlparser.ReservedVars, where sqlparser.Expr) error {\n\tswitch ajoin.Join {\n\tcase sqlparser.NormalJoinType, sqlparser.StraightJoinType, sqlparser.LeftJoinType:\n\tcase sqlparser.RightJoinType:\n\t\tconvertToLeftJoin(ajoin)\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported: %s\", ajoin.Join.ToString())\n\t}\n\tif err := pb.processTableExpr(ajoin.LeftExpr, reservedVars, where); err != nil {\n\t\treturn err\n\t}\n\trpb := newPrimitiveBuilder(pb.vschema, pb.jt)\n\tif err := rpb.processTableExpr(ajoin.RightExpr, reservedVars, where); err != nil {\n\t\treturn err\n\t}\n\treturn pb.join(rpb, ajoin, reservedVars, where)\n}",
"func (p *LogicalAggregation) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tplanInfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif planInfo != nil {\n\t\treturn planInfo, nil\n\t}\n\tlimit := prop.limit\n\tif len(prop.props) == 0 {\n\t\tplanInfo, err = p.convert2PhysicalPlanHash()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tstreamInfo, err := p.convert2PhysicalPlanStream(removeLimit(prop))\n\tif planInfo == nil || streamInfo.cost < planInfo.cost {\n\t\tplanInfo = streamInfo\n\t}\n\tplanInfo = enforceProperty(limitProperty(limit), planInfo)\n\terr = p.storePlanInfo(prop, planInfo)\n\treturn planInfo, errors.Trace(err)\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanCompleteHash(childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: CompleteAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * memoryFactor\n\tinfo.count = info.count * aggFactor\n\treturn info\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanFinalHash(x physicalDistSQLPlan, childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: FinalAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.SetSchema(p.schema)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tschema := x.addAggregation(p.ctx, agg)\n\tif schema.Len() == 0 {\n\t\treturn nil\n\t}\n\tx.(PhysicalPlan).SetSchema(schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.count = info.count * aggFactor\n\t// if we build the final aggregation, it must be the best plan.\n\tinfo.cost = 0\n\treturn info\n}",
"func (p *DataSource) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tinfo, err = p.tryToConvert2DummyScan(prop)\n\tif info != nil || err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tclient := p.ctx.GetClient()\n\tmemDB := infoschema.IsMemoryDB(p.DBName.L)\n\tisDistReq := !memDB && client != nil && client.SupportRequestType(kv.ReqTypeSelect, 0)\n\tif !isDistReq {\n\t\tmemTable := PhysicalMemTable{\n\t\t\tDBName: p.DBName,\n\t\t\tTable: p.tableInfo,\n\t\t\tColumns: p.Columns,\n\t\t\tTableAsName: p.TableAsName,\n\t\t}.init(p.allocator, p.ctx)\n\t\tmemTable.SetSchema(p.schema)\n\t\trb := &ranger.Builder{Sc: p.ctx.GetSessionVars().StmtCtx}\n\t\tmemTable.Ranges = rb.BuildTableRanges(ranger.FullRange)\n\t\tinfo = &physicalPlanInfo{p: memTable}\n\t\tinfo = enforceProperty(prop, info)\n\t\tp.storePlanInfo(prop, info)\n\t\treturn info, nil\n\t}\n\tindices, includeTableScan := availableIndices(p.indexHints, p.tableInfo)\n\tif includeTableScan {\n\t\tinfo, err = p.convert2TableScan(prop)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tif !includeTableScan || p.need2ConsiderIndex(prop) {\n\t\tfor _, index := range indices {\n\t\t\tindexInfo, err := p.convert2IndexScan(prop, index)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tif info == nil || indexInfo.cost < info.cost {\n\t\t\t\tinfo = indexInfo\n\t\t\t}\n\t\t}\n\t}\n\treturn info, errors.Trace(p.storePlanInfo(prop, info))\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanStream(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tfor _, aggFunc := range p.AggFuncs {\n\t\tif aggFunc.GetMode() == expression.FinalMode {\n\t\t\treturn &physicalPlanInfo{cost: math.MaxFloat64}, nil\n\t\t}\n\t}\n\tagg := PhysicalAggregation{\n\t\tAggType: StreamedAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\t// TODO: Consider distinct key.\n\tinfo := &physicalPlanInfo{cost: math.MaxFloat64}\n\tgbyCols := p.groupByCols\n\tif len(gbyCols) != len(p.GroupByItems) {\n\t\t// group by a + b is not interested in any order.\n\t\treturn info, nil\n\t}\n\tisSortKey := make([]bool, len(gbyCols))\n\tnewProp := &requiredProperty{\n\t\tprops: make([]*columnProp, 0, len(gbyCols)),\n\t}\n\tfor _, pro := range prop.props {\n\t\tidx := p.getGbyColIndex(pro.col)\n\t\tif idx == -1 {\n\t\t\treturn info, nil\n\t\t}\n\t\tisSortKey[idx] = true\n\t\t// We should add columns in aggregation in order to keep index right.\n\t\tnewProp.props = append(newProp.props, &columnProp{col: gbyCols[idx], desc: pro.desc})\n\t}\n\tnewProp.sortKeyLen = len(newProp.props)\n\tfor i, col := range gbyCols {\n\t\tif !isSortKey[i] {\n\t\t\tnewProp.props = append(newProp.props, &columnProp{col: col})\n\t\t}\n\t}\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(newProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tinfo = addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * cpuFactor\n\tinfo.count = info.count * aggFactor\n\treturn info, nil\n}",
"func (r Relation) MergeJoin(leftCols []AttrInfo, rightRelation Relationer, rightCols []AttrInfo, joinType JoinType, compType Comparison) Relationer {\n\tright, isRelation := rightRelation.(Relation)\n\n\tif !isRelation {\n\t\tpanic(\"unknown relation type\")\n\t\t// TODO: implement using Relationer.GetRawData()\n\t}\n\n\ttype MergeData struct {\n\t\tLeft *Column\n\t\tRight *Column\n\t\tCompare CompFunc\n\t\tLesser CompFunc\n\t\tEquals CompFunc\n\t}\n\n\tright = right.MergeSort(rightCols, ASC).(Relation)\n\tleft := r.MergeSort(leftCols, ASC).(Relation)\n\toutput := Relation{Columns: []Column{}}\n\n\tleftIndices := []int{}\n\trightIndices := []int{}\n\n\tleftRow, rightRow := 0, 0\n\tmaxLeftRows := left.Columns[0].GetNumRows()\n\tmaxRightRows := right.Columns[0].GetNumRows()\n\tvar mergeData []MergeData\n\n\taddOutputCols := func(base *Relation, tableName string, nullable bool) {\n\t\tif nullable {\n\t\t\tpanic(\"NULL values not implemented\")\n\t\t}\n\t\tfor _, col := range base.Columns {\n\t\t\tsignature := AttrInfo{Name: tableName + \".\" + col.Signature.Name, Enc: col.Signature.Enc, Type: col.Signature.Type}\n\t\t\toutput.Columns = append(output.Columns, NewColumn(signature))\n\t\t}\n\t}\n\n\tgetMergeData := func() []MergeData {\n\t\toutput := []MergeData{}\n\n\t\tfor sigIndex, signature := range leftCols {\n\t\t\tentry := MergeData{}\n\n\t\t\tfor colIndex, col := range left.Columns {\n\t\t\t\tif col.Signature == signature {\n\t\t\t\t\tentry.Left = &left.Columns[colIndex]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor colIndex, col := range right.Columns {\n\t\t\t\tif col.Signature == rightCols[sigIndex] {\n\t\t\t\t\tentry.Right = &right.Columns[colIndex]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif entry.Left == nil || entry.Right == nil {\n\t\t\t\tpanic(\"column not found\")\n\t\t\t}\n\n\t\t\tentry.Equals = compFuncs[signature.Type][EQ]\n\t\t\tentry.Lesser = compFuncs[signature.Type][LT]\n\n\t\t\toutput = append(output, entry)\n\t\t}\n\n\t\treturn output\n\t}\n\n\tisEqual := func(leftIndex int, rightIndex int) bool {\n\t\tfor _, entry := range mergeData {\n\t\t\tleftValue, _ := entry.Left.GetRow(leftIndex)\n\t\t\trightValue, _ := entry.Right.GetRow(rightIndex)\n\t\t\tif !entry.Equals(leftValue, rightValue) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\tisLesser := func(leftIndex, rightIndex int) bool {\n\t\tfor _, entry := range mergeData {\n\t\t\tleftValue, _ := entry.Left.GetRow(leftIndex)\n\t\t\trightValue, _ := entry.Right.GetRow(rightIndex)\n\n\t\t\tif entry.Lesser(leftValue, rightValue) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif !entry.Equals(leftValue, rightValue) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\t// entries are equal\n\t\treturn false\n\t}\n\n\tgetNextRow := func(compare func(int, int) bool) int {\n\t\tnextRow := rightRow + 1\n\n\t\tfor nextRow < maxRightRows && compare(leftRow, nextRow) {\n\t\t\tnextRow++\n\t\t}\n\n\t\treturn nextRow\n\t}\n\n\tinnerJoin := func() ([]int, []int) {\n\t\tmergeData = getMergeData()\n\n\t\tfor leftRow < maxLeftRows && rightRow < maxRightRows {\n\t\t\tif isEqual(leftRow, rightRow) {\n\t\t\t\t// leftValue == rightValue\n\t\t\t\tnextRow := getNextRow(isEqual)\n\n\t\t\t\tswitch compType {\n\t\t\t\tcase GT:\n\t\t\t\t\tfor i := 0; i < rightRow; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase GEQ:\n\t\t\t\t\tfor i := 0; i < nextRow; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = 
append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase LT:\n\t\t\t\t\tfor i := nextRow; i < maxRightRows; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase LEQ:\n\t\t\t\t\tfor i := rightRow; i < maxRightRows; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase EQ:\n\t\t\t\t\tfor i := rightRow; i < nextRow; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase NEQ:\n\t\t\t\t\tfor i := 0; i < rightRow; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\t\tfor i := nextRow; i < maxRightRows; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tleftRow++\n\t\t\t} else if isLesser(leftRow, rightRow) {\n\t\t\t\t// leftValue < rightValue\n\t\t\t\tswitch compType {\n\t\t\t\tcase GT, GEQ:\n\t\t\t\t\tfor i := 0; i < rightRow; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase LT, LEQ:\n\t\t\t\t\tfor i := rightRow; i < maxRightRows; i++ {\n\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t}\n\t\t\t\tcase NEQ:\n\t\t\t\t\tfor i := 0; i < maxRightRows; i++ {\n\t\t\t\t\t\tif !isEqual(leftRow, i) {\n\t\t\t\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\t\t\t\t\t\t\trightIndices = append(rightIndices, i)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tleftRow++\n\t\t\t} else {\n\t\t\t\t// leftValue > rightValue\n\t\t\t\tnextRow := getNextRow(func(l, r int) bool { return !isEqual(l, r) && !isLesser(l, r) })\n\n\t\t\t\tswitch compType {\n\t\t\t\tcase NEQ:\n\t\t\t\t\t// do something?\n\t\t\t\t}\n\n\t\t\t\trightRow = nextRow\n\t\t\t}\n\t\t}\n\n\t\treturn leftIndices, rightIndices\n\t}\n\n\tsemiJoin := func() {\n\t\tmergeData = getMergeData()\n\n\t\tif compType != EQ {\n\t\t\tpanic(\"semi join only supports equality comparison\")\n\t\t}\n\n\t\tfor leftRow < maxLeftRows && rightRow < maxRightRows {\n\t\t\tif isEqual(leftRow, rightRow) {\n\t\t\t\tleftIndices = append(leftIndices, leftRow)\n\n\t\t\t\tleftRow++\n\t\t\t} else if isLesser(leftRow, rightRow) {\n\t\t\t\tleftRow++\n\t\t\t} else {\n\t\t\t\tnextRow := getNextRow(func(l, r int) bool { return !isEqual(l, r) && !isLesser(l, r) })\n\t\t\t\trightRow = nextRow\n\t\t\t}\n\t\t}\n\t}\n\n\tcopyColumn := func(source *Column, dest *Column, indices []int) {\n\t\tfor _, row := range indices {\n\t\t\tvalue, _ := source.GetRow(row)\n\t\t\tdest.AddRow(source.Signature.Type, value)\n\t\t}\n\t}\n\n\tcopyLeftValues := func(indices []int) {\n\t\tfor colIndex := range left.Columns {\n\t\t\tcopyColumn(&left.Columns[colIndex], &output.Columns[colIndex], indices)\n\t\t}\n\t}\n\n\tcopyRightValues := func(indices []int) {\n\t\tnumLeftCols := len(left.Columns)\n\t\tfor colIndex := range right.Columns {\n\t\t\tcopyColumn(&right.Columns[colIndex], &output.Columns[numLeftCols+colIndex], indices)\n\t\t}\n\t}\n\n\tswitch joinType {\n\tcase INNER:\n\t\toutput.Name = r.Name + \" x \" + rightRelation.(Relation).Name\n\t\taddOutputCols(&left, r.Name, false)\n\t\taddOutputCols(&right, rightRelation.(Relation).Name, 
false)\n\t\tinnerJoin()\n\t\tcopyLeftValues(leftIndices)\n\t\tcopyRightValues(rightIndices)\n\t\tbreak\n\tcase SEMI:\n\t\toutput.Name = r.Name + \" (x \" + rightRelation.(Relation).Name + \")\"\n\t\taddOutputCols(&left, r.Name, false)\n\t\tsemiJoin()\n\t\tcopyLeftValues(leftIndices)\n\t\tbreak\n\tcase LEFTOUTER:\n\t\t// handle null values on left\n\t\tpanic(\"NULL values not implemented\")\n\tcase RIGHTOUTER:\n\t\t// handle null values on right\n\t\tpanic(\"NULL values not implemented\")\n\tdefault:\n\t\tpanic(\"unknown JoinType\")\n\t}\n\n\treturn output\n}",
"func (p *PhysicalPlan) AddJoinStage(\n\tnodes []roachpb.NodeID,\n\tcore execinfrapb.ProcessorCoreUnion,\n\tpost execinfrapb.PostProcessSpec,\n\tleftEqCols, rightEqCols []uint32,\n\tleftTypes, rightTypes []*types.T,\n\tleftMergeOrd, rightMergeOrd execinfrapb.Ordering,\n\tleftRouters, rightRouters []ProcessorIdx,\n\tresultTypes []*types.T,\n) {\n\tpIdxStart := ProcessorIdx(len(p.Processors))\n\tstageID := p.NewStageOnNodes(nodes)\n\n\tfor _, n := range nodes {\n\t\tinputs := make([]execinfrapb.InputSyncSpec, 0, 2)\n\t\tinputs = append(inputs, execinfrapb.InputSyncSpec{ColumnTypes: leftTypes})\n\t\tinputs = append(inputs, execinfrapb.InputSyncSpec{ColumnTypes: rightTypes})\n\n\t\tproc := Processor{\n\t\t\tNode: n,\n\t\t\tSpec: execinfrapb.ProcessorSpec{\n\t\t\t\tInput: inputs,\n\t\t\t\tCore: core,\n\t\t\t\tPost: post,\n\t\t\t\tOutput: []execinfrapb.OutputRouterSpec{{Type: execinfrapb.OutputRouterSpec_PASS_THROUGH}},\n\t\t\t\tStageID: stageID,\n\t\t\t\tResultTypes: resultTypes,\n\t\t\t},\n\t\t}\n\t\tp.Processors = append(p.Processors, proc)\n\t}\n\n\tif len(nodes) > 1 {\n\t\t// Parallel hash or merge join: we distribute rows (by hash of\n\t\t// equality columns) to len(nodes) join processors.\n\n\t\t// Set up the left routers.\n\t\tfor _, resultProc := range leftRouters {\n\t\t\tp.Processors[resultProc].Spec.Output[0] = execinfrapb.OutputRouterSpec{\n\t\t\t\tType: execinfrapb.OutputRouterSpec_BY_HASH,\n\t\t\t\tHashColumns: leftEqCols,\n\t\t\t}\n\t\t}\n\t\t// Set up the right routers.\n\t\tfor _, resultProc := range rightRouters {\n\t\t\tp.Processors[resultProc].Spec.Output[0] = execinfrapb.OutputRouterSpec{\n\t\t\t\tType: execinfrapb.OutputRouterSpec_BY_HASH,\n\t\t\t\tHashColumns: rightEqCols,\n\t\t\t}\n\t\t}\n\t}\n\tp.ResultRouters = p.ResultRouters[:0]\n\n\t// Connect the left and right routers to the output joiners. Each joiner\n\t// corresponds to a hash bucket.\n\tfor bucket := 0; bucket < len(nodes); bucket++ {\n\t\tpIdx := pIdxStart + ProcessorIdx(bucket)\n\n\t\t// Connect left routers to the processor's first input. Currently the join\n\t\t// node doesn't care about the orderings of the left and right results.\n\t\tp.MergeResultStreams(leftRouters, bucket, leftMergeOrd, pIdx, 0, false /* forceSerialization */)\n\t\t// Connect right routers to the processor's second input if it has one.\n\t\tp.MergeResultStreams(rightRouters, bucket, rightMergeOrd, pIdx, 1, false /* forceSerialization */)\n\n\t\tp.ResultRouters = append(p.ResultRouters, pIdx)\n\t}\n}",
"func (p *PhysicalHashJoin) convertPartitionKeysIfNeed(lTask, rTask *mppTask) (*mppTask, *mppTask) {\n\tlp := lTask.p\n\tif _, ok := lp.(*PhysicalExchangeReceiver); ok {\n\t\tlp = lp.Children()[0].Children()[0]\n\t}\n\trp := rTask.p\n\tif _, ok := rp.(*PhysicalExchangeReceiver); ok {\n\t\trp = rp.Children()[0].Children()[0]\n\t}\n\t// to mark if any partition key needs to convert\n\tlMask := make([]bool, len(lTask.hashCols))\n\trMask := make([]bool, len(rTask.hashCols))\n\tcTypes := make([]*types.FieldType, len(lTask.hashCols))\n\tlChanged := false\n\trChanged := false\n\tfor i := range lTask.hashCols {\n\t\tlKey := lTask.hashCols[i]\n\t\trKey := rTask.hashCols[i]\n\t\tcType, lConvert, rConvert := negotiateCommonType(lKey.Col.RetType, rKey.Col.RetType)\n\t\tif lConvert {\n\t\t\tlMask[i] = true\n\t\t\tcTypes[i] = cType\n\t\t\tlChanged = true\n\t\t}\n\t\tif rConvert {\n\t\t\trMask[i] = true\n\t\t\tcTypes[i] = cType\n\t\t\trChanged = true\n\t\t}\n\t}\n\tif !lChanged && !rChanged {\n\t\treturn lTask, rTask\n\t}\n\tvar lProj, rProj *PhysicalProjection\n\tif lChanged {\n\t\tlProj = getProj(p.SCtx(), lp)\n\t\tlp = lProj\n\t}\n\tif rChanged {\n\t\trProj = getProj(p.SCtx(), rp)\n\t\trp = rProj\n\t}\n\n\tlPartKeys := make([]*property.MPPPartitionColumn, 0, len(rTask.hashCols))\n\trPartKeys := make([]*property.MPPPartitionColumn, 0, len(lTask.hashCols))\n\tfor i := range lTask.hashCols {\n\t\tlKey := lTask.hashCols[i]\n\t\trKey := rTask.hashCols[i]\n\t\tif lMask[i] {\n\t\t\tcType := cTypes[i].Clone()\n\t\t\tcType.SetFlag(lKey.Col.RetType.GetFlag())\n\t\t\tlCast := expression.BuildCastFunction(p.SCtx(), lKey.Col, cType)\n\t\t\tlKey = &property.MPPPartitionColumn{Col: appendExpr(lProj, lCast), CollateID: lKey.CollateID}\n\t\t}\n\t\tif rMask[i] {\n\t\t\tcType := cTypes[i].Clone()\n\t\t\tcType.SetFlag(rKey.Col.RetType.GetFlag())\n\t\t\trCast := expression.BuildCastFunction(p.SCtx(), rKey.Col, cType)\n\t\t\trKey = &property.MPPPartitionColumn{Col: appendExpr(rProj, rCast), CollateID: rKey.CollateID}\n\t\t}\n\t\tlPartKeys = append(lPartKeys, lKey)\n\t\trPartKeys = append(rPartKeys, rKey)\n\t}\n\t// if left or right child changes, we need to add enforcer.\n\tif lChanged {\n\t\tnlTask := lTask.copy().(*mppTask)\n\t\tnlTask.p = lProj\n\t\tnlTask = nlTask.enforceExchanger(&property.PhysicalProperty{\n\t\t\tTaskTp: property.MppTaskType,\n\t\t\tMPPPartitionTp: property.HashType,\n\t\t\tMPPPartitionCols: lPartKeys,\n\t\t})\n\t\tlTask = nlTask\n\t}\n\tif rChanged {\n\t\tnrTask := rTask.copy().(*mppTask)\n\t\tnrTask.p = rProj\n\t\tnrTask = nrTask.enforceExchanger(&property.PhysicalProperty{\n\t\t\tTaskTp: property.MppTaskType,\n\t\t\tMPPPartitionTp: property.HashType,\n\t\t\tMPPPartitionCols: rPartKeys,\n\t\t})\n\t\trTask = nrTask\n\t}\n\treturn lTask, rTask\n}",
"func newHashJoin(op *plandef.HashJoin, inputNodes []queryOperator) operator {\n\tif len(inputNodes) != 2 {\n\t\tpanic(fmt.Sprintf(\"hashJoin operation with unexpected inputs: %v\", len(inputNodes)))\n\t}\n\tpanicOnInvalidSpecificity(op.Specificity)\n\tcolumns, joiner := joinedColumns(inputNodes[0].columns(), inputNodes[1].columns())\n\n\treturn &bulkWrapper{singleRowOp: &hashJoin{\n\t\tdef: op,\n\t\tleft: inputNodes[0],\n\t\tright: inputNodes[1],\n\t\toutput: columns,\n\t\tjoiner: joiner,\n\t}}\n}",
"func (p *Selection) convert2PhysicalPlanEnforce(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tchild := p.children[0].(LogicalPlan)\n\tinfo, err := child.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif prop.limit != nil && len(prop.props) > 0 {\n\t\tif t, ok := info.p.(physicalDistSQLPlan); ok {\n\t\t\tt.addTopN(p.ctx, prop)\n\t\t} else if _, ok := info.p.(*Selection); !ok {\n\t\t\tinfo = p.appendSelToInfo(info)\n\t\t}\n\t\tinfo = enforceProperty(prop, info)\n\t} else if len(prop.props) != 0 {\n\t\tinfo = &physicalPlanInfo{cost: math.MaxFloat64}\n\t}\n\treturn info, nil\n}",
"func constructPropertyByJoin(join *LogicalJoin) ([][]*requiredProperty, []int, error) {\n\tvar result [][]*requiredProperty\n\tvar condIndex []int\n\n\tif join.EqualConditions == nil {\n\t\treturn nil, nil, nil\n\t}\n\tfor i, cond := range join.EqualConditions {\n\t\tif len(cond.GetArgs()) != 2 {\n\t\t\treturn nil, nil, errors.New(\"unexpected argument count for equal expression\")\n\t\t}\n\t\tlExpr, rExpr := cond.GetArgs()[0], cond.GetArgs()[1]\n\t\t// Only consider raw column reference and cowardly ignore calculations\n\t\t// since we don't know if the function call preserve order\n\t\tlColumn, lOK := lExpr.(*expression.Column)\n\t\trColumn, rOK := rExpr.(*expression.Column)\n\t\tif lOK && rOK && compareTypeForOrder(lColumn.RetType, rColumn.RetType) {\n\t\t\tresult = append(result, []*requiredProperty{generateJoinProp(lColumn), generateJoinProp(rColumn)})\n\t\t\tcondIndex = append(condIndex, i)\n\t\t} else {\n\t\t\tcontinue\n\t\t}\n\t}\n\tif len(result) == 0 {\n\t\treturn nil, nil, nil\n\t}\n\treturn result, condIndex, nil\n}",
"func transformJoinApply(ctx *sql.Context, a *Analyzer, n sql.Node, scope *plan.Scope, sel RuleSelector) (sql.Node, transform.TreeIdentity, error) {\n\tswitch n.(type) {\n\tcase *plan.DeleteFrom, *plan.InsertInto:\n\t\treturn n, transform.SameTree, nil\n\t}\n\tvar applyId int\n\n\tret := n\n\tvar err error\n\tsame := transform.NewTree\n\tfor !same {\n\t\t// simplifySubqExpr can merge two scopes, requiring us to either\n\t\t// recurse on the merged scope or perform a fixed-point iteration.\n\t\tret, same, err = transform.Node(ret, func(n sql.Node) (sql.Node, transform.TreeIdentity, error) {\n\t\t\tvar filters []sql.Expression\n\t\t\tvar child sql.Node\n\t\t\tswitch n := n.(type) {\n\t\t\tcase *plan.Filter:\n\t\t\t\tchild = n.Child\n\t\t\t\tfilters = expression.SplitConjunction(n.Expression)\n\t\t\tdefault:\n\t\t\t}\n\n\t\t\tif sel == nil {\n\t\t\t\treturn n, transform.SameTree, nil\n\t\t\t}\n\n\t\t\tsubScope := scope.NewScopeFromSubqueryExpression(n)\n\t\t\tvar matches []applyJoin\n\t\t\tvar newFilters []sql.Expression\n\n\t\t\t// separate decorrelation candidates\n\t\t\tfor _, e := range filters {\n\t\t\t\tif !plan.IsNullRejecting(e) {\n\t\t\t\t\t// TODO: rewrite dual table to permit in-scope joins,\n\t\t\t\t\t// which aren't possible when values are projected\n\t\t\t\t\t// above join filter\n\t\t\t\t\trt := getResolvedTable(n)\n\t\t\t\t\tif rt == nil || plan.IsDualTable(rt.Table) {\n\t\t\t\t\t\tnewFilters = append(newFilters, e)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tcandE := e\n\t\t\t\top := plan.JoinTypeSemi\n\t\t\t\tif n, ok := e.(*expression.Not); ok {\n\t\t\t\t\tcandE = n.Child\n\t\t\t\t\top = plan.JoinTypeAnti\n\t\t\t\t}\n\n\t\t\t\tvar sq *plan.Subquery\n\t\t\t\tvar l sql.Expression\n\t\t\t\tvar joinF sql.Expression\n\t\t\t\tvar max1 bool\n\t\t\t\tswitch e := candE.(type) {\n\t\t\t\tcase *plan.InSubquery:\n\t\t\t\t\tsq, _ = e.Right.(*plan.Subquery)\n\t\t\t\t\tl = e.Left\n\n\t\t\t\t\tjoinF = expression.NewEquals(nil, nil)\n\t\t\t\tcase expression.Comparer:\n\t\t\t\t\tsq, _ = e.Right().(*plan.Subquery)\n\t\t\t\t\tl = e.Left()\n\t\t\t\t\tjoinF = e\n\t\t\t\t\tmax1 = true\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\tif sq != nil {\n\t\t\t\t\tsq.Query, _, err = fixidx.FixFieldIndexesForNode(ctx, a.LogFn(), scope.NewScopeFromSubqueryExpression(n), sq.Query)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, transform.SameTree, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif sq != nil && nodeIsCacheable(sq.Query, len(subScope.Schema())) {\n\t\t\t\t\tmatches = append(matches, applyJoin{l: l, r: sq, op: op, filter: joinF, max1: max1})\n\t\t\t\t} else {\n\t\t\t\t\tnewFilters = append(newFilters, e)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(matches) == 0 {\n\t\t\t\treturn n, transform.SameTree, nil\n\t\t\t}\n\n\t\t\tret := child\n\t\t\tfor _, m := range matches {\n\t\t\t\t// A successful candidate is built with:\n\t\t\t\t// (1) Semi or anti join between the outer scope and (2) conditioned on (3).\n\t\t\t\t// (2) Simplified or unnested subquery (table alias).\n\t\t\t\t// (3) Join condition synthesized from the original correlated expression\n\t\t\t\t// normalized to match changes to (2).\n\t\t\t\tsubq := m.r\n\n\t\t\t\tname := fmt.Sprintf(\"scalarSubq%d\", applyId)\n\t\t\t\tapplyId++\n\n\t\t\t\tsch := subq.Query.Schema()\n\t\t\t\tvar rightF sql.Expression\n\t\t\t\tif len(sch) == 1 {\n\t\t\t\t\tsubqCol := subq.Query.Schema()[0]\n\t\t\t\t\trightF = expression.NewGetFieldWithTable(len(scope.Schema()), subqCol.Type, name, subqCol.Name, subqCol.Nullable)\n\t\t\t\t} else {\n\t\t\t\t\ttup := 
make(expression.Tuple, len(sch))\n\t\t\t\t\tfor i, c := range sch {\n\t\t\t\t\t\ttup[i] = expression.NewGetFieldWithTable(len(scope.Schema())+i, c.Type, name, c.Name, c.Nullable)\n\t\t\t\t\t}\n\t\t\t\t\trightF = tup\n\t\t\t\t}\n\n\t\t\t\tq, _, err := fixidx.FixFieldIndexesForNode(ctx, a.LogFn(), scope, subq.Query)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, transform.SameTree, err\n\t\t\t\t}\n\n\t\t\t\tvar newSubq sql.Node = plan.NewSubqueryAlias(name, subq.QueryString, q)\n\t\t\t\tnewSubq, err = simplifySubqExpr(newSubq)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, transform.SameTree, err\n\t\t\t\t}\n\t\t\t\tif m.max1 {\n\t\t\t\t\tnewSubq = plan.NewMax1Row(newSubq, name)\n\t\t\t\t}\n\n\t\t\t\tcondSch := append(ret.Schema(), newSubq.Schema()...)\n\t\t\t\tfilter, err := m.filter.WithChildren(m.l, rightF)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn n, transform.SameTree, err\n\t\t\t\t}\n\t\t\t\tfilter, _, err = fixidx.FixFieldIndexes(scope, a.LogFn(), condSch, filter)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn n, transform.SameTree, err\n\t\t\t\t}\n\t\t\t\tvar comment string\n\t\t\t\tif c, ok := ret.(sql.CommentedNode); ok {\n\t\t\t\t\tcomment = c.Comment()\n\t\t\t\t}\n\t\t\t\tnewJoin := plan.NewJoin(ret, newSubq, m.op, filter)\n\t\t\t\tret = newJoin.WithComment(comment)\n\t\t\t}\n\n\t\t\tif len(newFilters) == 0 {\n\t\t\t\treturn ret, transform.NewTree, nil\n\t\t\t}\n\t\t\treturn plan.NewFilter(expression.JoinAnd(newFilters...), ret), transform.NewTree, nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn n, transform.SameTree, err\n\t\t}\n\t}\n\treturn ret, transform.TreeIdentity(applyId == 0), nil\n}",
"func (p *BaseRouter) join(challenge []byte, hash *common.Address, joinKey *ecdsa.PrivateKey, peer *PttPeer) error {\n\tjoin := &Join{\n\t\tHash: hash[:],\n\t\tChallenge: challenge,\n\t}\n\n\tdata, err := json.Marshal(join)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkeyInfo := joinKeyToKeyInfo(joinKey)\n\n\tencData, err := p.EncryptData(JoinMsg, data, keyInfo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpttData, err := p.MarshalData(CodeTypeJoin, hash, encData)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpttData.Node = peer.GetID()[:]\n\tlog.Debug(\"join: to SendData\")\n\terr = peer.SendData(pttData)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
convert2PhysicalPlanStream converts the logical aggregation to the stream aggregation physicalPlanInfo.
|
func (p *LogicalAggregation) convert2PhysicalPlanStream(prop *requiredProperty) (*physicalPlanInfo, error) {
for _, aggFunc := range p.AggFuncs {
if aggFunc.GetMode() == expression.FinalMode {
return &physicalPlanInfo{cost: math.MaxFloat64}, nil
}
}
agg := PhysicalAggregation{
AggType: StreamedAgg,
AggFuncs: p.AggFuncs,
GroupByItems: p.GroupByItems,
}.init(p.allocator, p.ctx)
agg.HasGby = len(p.GroupByItems) > 0
agg.SetSchema(p.schema)
// TODO: Consider distinct key.
info := &physicalPlanInfo{cost: math.MaxFloat64}
gbyCols := p.groupByCols
if len(gbyCols) != len(p.GroupByItems) {
        // A group-by over an expression such as a + b cannot benefit from any input order.
return info, nil
}
isSortKey := make([]bool, len(gbyCols))
newProp := &requiredProperty{
props: make([]*columnProp, 0, len(gbyCols)),
}
for _, pro := range prop.props {
idx := p.getGbyColIndex(pro.col)
if idx == -1 {
return info, nil
}
isSortKey[idx] = true
        // Add the columns to the aggregation in this order so that the column indices stay correct.
newProp.props = append(newProp.props, &columnProp{col: gbyCols[idx], desc: pro.desc})
}
newProp.sortKeyLen = len(newProp.props)
for i, col := range gbyCols {
if !isSortKey[i] {
newProp.props = append(newProp.props, &columnProp{col: col})
}
}
childInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(newProp)
if err != nil {
return nil, errors.Trace(err)
}
info = addPlanToResponse(agg, childInfo)
info.cost += info.count * cpuFactor
info.count = info.count * aggFactor
return info, nil
}
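
The heart of the streamed variant is how it rebuilds the required property: the child must arrive sorted on the group-by columns, with whatever ordering the parent requested placed first (those become the sort keys) and the remaining group-by columns appended after them in their original order. Below is a minimal, self-contained sketch of that reordering; columnProp and the string-named columns are simplified stand-ins for illustration, not the planner's real structs.

package main

import "fmt"

type columnProp struct {
    col  string
    desc bool
}

// buildStreamProp mimics the property construction above: columns demanded
// by the parent come first and count as sort keys; the remaining group-by
// columns follow afterwards so the child's sort order still groups rows.
func buildStreamProp(gbyCols []string, parent []columnProp) ([]columnProp, int, bool) {
    idx := func(name string) int {
        for i, c := range gbyCols {
            if c == name {
                return i
            }
        }
        return -1
    }
    isSortKey := make([]bool, len(gbyCols))
    props := make([]columnProp, 0, len(gbyCols))
    for _, p := range parent {
        i := idx(p.col)
        if i == -1 {
            // The parent wants an order over a non-group-by column: give up.
            return nil, 0, false
        }
        isSortKey[i] = true
        props = append(props, columnProp{col: gbyCols[i], desc: p.desc})
    }
    sortKeyLen := len(props)
    for i, c := range gbyCols {
        if !isSortKey[i] {
            props = append(props, columnProp{col: c})
        }
    }
    return props, sortKeyLen, true
}

func main() {
    props, keyLen, ok := buildStreamProp(
        []string{"a", "b", "c"},
        []columnProp{{col: "b", desc: true}},
    )
    fmt.Println(ok, keyLen, props) // true 1 [{b true} {a false} {c false}]
}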
|
[
"func (p *LogicalAggregation) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tplanInfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif planInfo != nil {\n\t\treturn planInfo, nil\n\t}\n\tlimit := prop.limit\n\tif len(prop.props) == 0 {\n\t\tplanInfo, err = p.convert2PhysicalPlanHash()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tstreamInfo, err := p.convert2PhysicalPlanStream(removeLimit(prop))\n\tif planInfo == nil || streamInfo.cost < planInfo.cost {\n\t\tplanInfo = streamInfo\n\t}\n\tplanInfo = enforceProperty(limitProperty(limit), planInfo)\n\terr = p.storePlanInfo(prop, planInfo)\n\treturn planInfo, errors.Trace(err)\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanHash() (*physicalPlanInfo, error) {\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tdistinct := false\n\tfor _, fun := range p.AggFuncs {\n\t\tif fun.IsDistinct() {\n\t\t\tdistinct = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !distinct {\n\t\tif x, ok := childInfo.p.(physicalDistSQLPlan); ok {\n\t\t\tinfo := p.convert2PhysicalPlanFinalHash(x, childInfo)\n\t\t\tif info != nil {\n\t\t\t\treturn info, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn p.convert2PhysicalPlanCompleteHash(childInfo), nil\n}",
"func (p *LogicalApply) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tif p.JoinType == InnerJoin || p.JoinType == LeftOuterJoin {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanLeft(&requiredProperty{}, p.JoinType == InnerJoin)\n\t} else {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanSemi(&requiredProperty{})\n\t}\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tswitch info.p.(type) {\n\tcase *PhysicalHashJoin, *PhysicalHashSemiJoin:\n\t\tap := PhysicalApply{\n\t\t\tPhysicalJoin: info.p,\n\t\t\tOuterSchema: p.corCols,\n\t\t}.init(p.allocator, p.ctx)\n\t\tap.SetChildren(info.p.Children()...)\n\t\tap.SetSchema(info.p.Schema())\n\t\tinfo.p = ap\n\tdefault:\n\t\tinfo.cost = math.MaxFloat64\n\t\tinfo.p = nil\n\t}\n\tinfo = enforceProperty(prop, info)\n\tp.storePlanInfo(prop, info)\n\treturn info, nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanFinalHash(x physicalDistSQLPlan, childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: FinalAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.SetSchema(p.schema)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tschema := x.addAggregation(p.ctx, agg)\n\tif schema.Len() == 0 {\n\t\treturn nil\n\t}\n\tx.(PhysicalPlan).SetSchema(schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.count = info.count * aggFactor\n\t// if we build the final aggregation, it must be the best plan.\n\tinfo.cost = 0\n\treturn info\n}",
"func (p *DataSource) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tinfo, err = p.tryToConvert2DummyScan(prop)\n\tif info != nil || err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tclient := p.ctx.GetClient()\n\tmemDB := infoschema.IsMemoryDB(p.DBName.L)\n\tisDistReq := !memDB && client != nil && client.SupportRequestType(kv.ReqTypeSelect, 0)\n\tif !isDistReq {\n\t\tmemTable := PhysicalMemTable{\n\t\t\tDBName: p.DBName,\n\t\t\tTable: p.tableInfo,\n\t\t\tColumns: p.Columns,\n\t\t\tTableAsName: p.TableAsName,\n\t\t}.init(p.allocator, p.ctx)\n\t\tmemTable.SetSchema(p.schema)\n\t\trb := &ranger.Builder{Sc: p.ctx.GetSessionVars().StmtCtx}\n\t\tmemTable.Ranges = rb.BuildTableRanges(ranger.FullRange)\n\t\tinfo = &physicalPlanInfo{p: memTable}\n\t\tinfo = enforceProperty(prop, info)\n\t\tp.storePlanInfo(prop, info)\n\t\treturn info, nil\n\t}\n\tindices, includeTableScan := availableIndices(p.indexHints, p.tableInfo)\n\tif includeTableScan {\n\t\tinfo, err = p.convert2TableScan(prop)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tif !includeTableScan || p.need2ConsiderIndex(prop) {\n\t\tfor _, index := range indices {\n\t\t\tindexInfo, err := p.convert2IndexScan(prop, index)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tif info == nil || indexInfo.cost < info.cost {\n\t\t\t\tinfo = indexInfo\n\t\t\t}\n\t\t}\n\t}\n\treturn info, errors.Trace(p.storePlanInfo(prop, info))\n}",
"func (qe *QueryEngine) GetStreamPlan(sql string) (*TabletPlan, error) {\n\tqe.mu.RLock()\n\tdefer qe.mu.RUnlock()\n\tsplan, err := planbuilder.BuildStreaming(sql, qe.tables)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tplan := &TabletPlan{Plan: splan, Original: sql}\n\tplan.Rules = qe.queryRuleSources.FilterByPlan(sql, plan.PlanID, plan.TableName().String())\n\tplan.buildAuthorized()\n\treturn plan, nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanCompleteHash(childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: CompleteAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * memoryFactor\n\tinfo.count = info.count * aggFactor\n\treturn info\n}",
"func createStream(req *encoder.CreateStreamRequest, brokerAddr string) (*postgres.Stream, error) {\n\toperations := []*postgres.Operation{}\n\n\tfor _, o := range req.Operations {\n\t\toperation, err := createOperation(o)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\toperations = append(operations, operation)\n\t}\n\n\treturn &postgres.Stream{\n\t\tCommunityID: req.CommunityId,\n\t\tPublicKey: req.RecipientPublicKey,\n\t\tOperations: operations,\n\t\tDevice: &postgres.Device{\n\t\t\tDeviceToken: req.DeviceToken,\n\t\t\tLabel: req.DeviceLabel,\n\t\t\tLongitude: req.Location.Longitude,\n\t\t\tLatitude: req.Location.Latitude,\n\t\t\tExposure: strings.ToLower(req.Exposure.String()),\n\t\t},\n\t}, nil\n}",
"func (p *Selection) convert2PhysicalPlanEnforce(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tchild := p.children[0].(LogicalPlan)\n\tinfo, err := child.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif prop.limit != nil && len(prop.props) > 0 {\n\t\tif t, ok := info.p.(physicalDistSQLPlan); ok {\n\t\t\tt.addTopN(p.ctx, prop)\n\t\t} else if _, ok := info.p.(*Selection); !ok {\n\t\t\tinfo = p.appendSelToInfo(info)\n\t\t}\n\t\tinfo = enforceProperty(prop, info)\n\t} else if len(prop.props) != 0 {\n\t\tinfo = &physicalPlanInfo{cost: math.MaxFloat64}\n\t}\n\treturn info, nil\n}",
"func (p *LogicalJoin) convert2PhysicalPlanRight(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallRight := true\n\tfor _, col := range prop.props {\n\t\tif !rChild.Schema().Contains(col.col) {\n\t\t\tallRight = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = RightOuterJoin\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trProp := prop\n\tif !allRight {\n\t\trProp = &requiredProperty{}\n\t} else {\n\t\trProp = replaceColsInPropBySchema(rProp, rChild.Schema())\n\t}\n\tvar rInfo *physicalPlanInfo\n\tif innerJoin {\n\t\trInfo, err = rChild.convert2PhysicalPlan(removeLimit(rProp))\n\t} else {\n\t\trInfo, err = rChild.convert2PhysicalPlan(convertLimitOffsetToCount(rProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allRight {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (qe *QueryEngine) GetMessageStreamPlan(name string) (*TabletPlan, error) {\n\tqe.mu.RLock()\n\tdefer qe.mu.RUnlock()\n\tsplan, err := planbuilder.BuildMessageStreaming(name, qe.tables)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tplan := &TabletPlan{Plan: splan}\n\tplan.Rules = qe.queryRuleSources.FilterByPlan(\"stream from \"+name, plan.PlanID, plan.TableName().String())\n\tplan.buildAuthorized()\n\treturn plan, nil\n}",
"func (p *LogicalJoin) convert2PhysicalPlanSemi(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashSemiJoin{\n\t\tWithAux: LeftOuterSemiJoin == p.JoinType,\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tAnti: p.anti,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tif p.JoinType == SemiJoin {\n\t\tlProp = removeLimit(lProp)\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif p.JoinType == SemiJoin {\n\t\tresultInfo.count = lInfo.count * selectionFactor\n\t} else {\n\t\tresultInfo.count = lInfo.count\n\t}\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else if p.JoinType == SemiJoin {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalJoin) convert2PhysicalMergeJoin(parentProp *requiredProperty, lProp *requiredProperty, rProp *requiredProperty, condIndex int, joinType JoinType) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\n\tnewEQConds := make([]*expression.ScalarFunction, 0, len(p.EqualConditions)-1)\n\tfor i, cond := range p.EqualConditions {\n\t\tif i == condIndex {\n\t\t\tcontinue\n\t\t}\n\t\t// prevent further index contamination\n\t\tnewCond := cond.Clone()\n\t\tnewCond.ResolveIndices(p.schema)\n\t\tnewEQConds = append(newEQConds, newCond.(*expression.ScalarFunction))\n\t}\n\teqCond := p.EqualConditions[condIndex]\n\n\totherFilter := append(expression.ScalarFuncs2Exprs(newEQConds), p.OtherConditions...)\n\n\tjoin := PhysicalMergeJoin{\n\t\tEqualConditions: []*expression.ScalarFunction{eqCond},\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: otherFilter,\n\t\tDefaultValues: p.DefaultValues,\n\t\t// Assume order for both side are the same\n\t\tDesc: lProp.props[0].desc,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tjoin.JoinType = joinType\n\n\tvar lInfo *physicalPlanInfo\n\tvar rInfo *physicalPlanInfo\n\n\t// Try no sort first\n\tlInfoEnforceSort, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tlInfoEnforceSort = enforceProperty(lProp, lInfoEnforceSort)\n\n\tlInfoNoSorted, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif lInfoNoSorted.cost < lInfoEnforceSort.cost {\n\t\tlInfo = lInfoNoSorted\n\t} else {\n\t\tlInfo = lInfoEnforceSort\n\t}\n\n\trInfoEnforceSort, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfoEnforceSort = enforceProperty(rProp, rInfoEnforceSort)\n\n\trInfoNoSorted, err := rChild.convert2PhysicalPlan(rProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif rInfoEnforceSort.cost < rInfoNoSorted.cost {\n\t\trInfo = rInfoEnforceSort\n\t} else {\n\t\trInfo = rInfoNoSorted\n\t}\n\tparentProp = join.tryConsumeOrder(parentProp, eqCond)\n\n\tresultInfo := join.matchProperty(parentProp, lInfo, rInfo)\n\t// TODO: Considering keeping order in join to remove at least\n\t// one ordering property\n\tresultInfo = enforceProperty(parentProp, resultInfo)\n\treturn resultInfo, nil\n}",
"func (p *LogicalJoin) convert2PhysicalPlanLeft(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tSmallTable: 1,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = LeftOuterJoin\n\t}\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tvar lInfo *physicalPlanInfo\n\tvar err error\n\tif innerJoin {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(removeLimit(lProp))\n\t} else {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(convertLimitOffsetToCount(lProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (c *ApiService) StreamVideoRoomSummary(params *ListVideoRoomSummaryParams) (chan InsightsV1VideoRoomSummary, chan error) {\n\tif params == nil {\n\t\tparams = &ListVideoRoomSummaryParams{}\n\t}\n\tparams.SetPageSize(client.ReadLimits(params.PageSize, params.Limit))\n\n\trecordChannel := make(chan InsightsV1VideoRoomSummary, 1)\n\terrorChannel := make(chan error, 1)\n\n\tresponse, err := c.PageVideoRoomSummary(params, \"\", \"\")\n\tif err != nil {\n\t\terrorChannel <- err\n\t\tclose(recordChannel)\n\t\tclose(errorChannel)\n\t} else {\n\t\tgo c.streamVideoRoomSummary(response, params, recordChannel, errorChannel)\n\t}\n\n\treturn recordChannel, errorChannel\n}",
"func (m *Metadata) toBoltMetadata() *bolt.ComputationMetadata {\n\tvar inputs []*bolt.StreamMetadata\n\n\tfor _, input := range m.Inputs {\n\t\tmd := bolt.NewStreamMetadata()\n\t\tmd.Name = input.Name\n\t\tmd.Grouping = groupingMap[input.Grouping]\n\t\tinputs = append(inputs, md)\n\t}\n\n\treturn &bolt.ComputationMetadata{\n\t\tName: m.Name,\n\t\tIstreams: inputs,\n\t\tOstreams: m.Outputs,\n\t}\n}",
"func (mgr *singleTypeChannelsMgr) createMsgStream(collectionID UniqueID) (msgstream.MsgStream, error) {\n\tmgr.mu.RLock()\n\tinfos, ok := mgr.infos[collectionID]\n\tif ok && infos.stream != nil {\n\t\t// already exist.\n\t\tmgr.mu.RUnlock()\n\t\treturn infos.stream, nil\n\t}\n\tmgr.mu.RUnlock()\n\n\tchannelInfos, err := mgr.getChannelsFunc(collectionID)\n\tif err != nil {\n\t\t// What if stream created by other goroutines?\n\t\tlog.Error(\"failed to get channels\", zap.Error(err), zap.Int64(\"collection\", collectionID))\n\t\treturn nil, err\n\t}\n\n\tstream, err := createStream(mgr.msgStreamFactory, channelInfos.pchans, mgr.repackFunc)\n\tif err != nil {\n\t\t// What if stream created by other goroutines?\n\t\tlog.Error(\"failed to create message stream\", zap.Error(err), zap.Int64(\"collection\", collectionID))\n\t\treturn nil, err\n\t}\n\n\tmgr.mu.Lock()\n\tdefer mgr.mu.Unlock()\n\tif !mgr.streamExistPrivate(collectionID) {\n\t\tlog.Info(\"create message stream\", zap.Int64(\"collection\", collectionID),\n\t\t\tzap.Strings(\"virtual_channels\", channelInfos.vchans),\n\t\t\tzap.Strings(\"physical_channels\", channelInfos.pchans))\n\t\tmgr.infos[collectionID] = streamInfos{channelInfos: channelInfos, stream: stream}\n\t\tincPChansMetrics(channelInfos.pchans)\n\t}\n\n\treturn mgr.infos[collectionID].stream, nil\n}",
"func DummyStream() *graylog.Stream {\n\treturn &graylog.Stream{\n\t\tID: \"000000000000000000000001\",\n\t\tCreatorUserID: \"local:admin\",\n\t\tOutputs: []graylog.Output{},\n\t\tMatchingType: \"AND\",\n\t\tDescription: \"Stream containing all messages\",\n\t\tCreatedAt: \"2018-02-20T11:37:19.371Z\",\n\t\tRules: []graylog.StreamRule{},\n\t\tAlertConditions: []graylog.AlertCondition{},\n\t\tAlertReceivers: &graylog.AlertReceivers{\n\t\t\tEmails: []string{},\n\t\t\tUsers: []string{},\n\t\t},\n\t\tTitle: \"All messages\",\n\t\tIndexSetID: \"5a8c086fc006c600013ca6f5\",\n\t\t// \"content_pack\": null,\n\t}\n}",
"func (p *PhysicalPlan) EnsureSingleStreamPerNode(\n\tforceSerialization bool, post execinfrapb.PostProcessSpec,\n) {\n\t// Fast path - check if we need to do anything.\n\tvar nodes util.FastIntSet\n\tvar foundDuplicates bool\n\tfor _, pIdx := range p.ResultRouters {\n\t\tproc := &p.Processors[pIdx]\n\t\tif nodes.Contains(int(proc.Node)) {\n\t\t\tfoundDuplicates = true\n\t\t\tbreak\n\t\t}\n\t\tnodes.Add(int(proc.Node))\n\t}\n\tif !foundDuplicates {\n\t\treturn\n\t}\n\tstreams := make([]ProcessorIdx, 0, 2)\n\n\tfor i := 0; i < len(p.ResultRouters); i++ {\n\t\tpIdx := p.ResultRouters[i]\n\t\tnode := p.Processors[p.ResultRouters[i]].Node\n\t\tstreams = append(streams[:0], pIdx)\n\t\t// Find all streams on the same node.\n\t\tfor j := i + 1; j < len(p.ResultRouters); {\n\t\t\tif p.Processors[p.ResultRouters[j]].Node == node {\n\t\t\t\tstreams = append(streams, p.ResultRouters[j])\n\t\t\t\t// Remove the stream.\n\t\t\t\tcopy(p.ResultRouters[j:], p.ResultRouters[j+1:])\n\t\t\t\tp.ResultRouters = p.ResultRouters[:len(p.ResultRouters)-1]\n\t\t\t} else {\n\t\t\t\tj++\n\t\t\t}\n\t\t}\n\t\tif len(streams) == 1 {\n\t\t\t// Nothing to do for this node.\n\t\t\tcontinue\n\t\t}\n\n\t\t// Merge the streams into a no-op processor.\n\t\tproc := Processor{\n\t\t\tNode: node,\n\t\t\tSpec: execinfrapb.ProcessorSpec{\n\t\t\t\tInput: []execinfrapb.InputSyncSpec{{\n\t\t\t\t\t// The other fields will be filled in by MergeResultStreams.\n\t\t\t\t\tColumnTypes: p.GetResultTypes(),\n\t\t\t\t}},\n\t\t\t\tPost: post,\n\t\t\t\tCore: execinfrapb.ProcessorCoreUnion{Noop: &execinfrapb.NoopCoreSpec{}},\n\t\t\t\tOutput: []execinfrapb.OutputRouterSpec{{Type: execinfrapb.OutputRouterSpec_PASS_THROUGH}},\n\t\t\t\tResultTypes: p.GetResultTypes(),\n\t\t\t},\n\t\t}\n\t\tmergedProcIdx := p.AddProcessor(proc)\n\t\tp.MergeResultStreams(streams, 0 /* sourceRouterSlot */, p.MergeOrdering, mergedProcIdx, 0 /* destInput */, forceSerialization)\n\t\tp.ResultRouters[i] = mergedProcIdx\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
convert2PhysicalPlanFinalHash converts the logical aggregation to the final hash aggregation physicalPlanInfo.
|
func (p *LogicalAggregation) convert2PhysicalPlanFinalHash(x physicalDistSQLPlan, childInfo *physicalPlanInfo) *physicalPlanInfo {
agg := PhysicalAggregation{
AggType: FinalAgg,
AggFuncs: p.AggFuncs,
GroupByItems: p.GroupByItems,
}.init(p.allocator, p.ctx)
agg.SetSchema(p.schema)
agg.HasGby = len(p.GroupByItems) > 0
schema := x.addAggregation(p.ctx, agg)
if schema.Len() == 0 {
return nil
}
x.(PhysicalPlan).SetSchema(schema)
info := addPlanToResponse(agg, childInfo)
info.count = info.count * aggFactor
    // If the final aggregation can be built here, the partial work has been pushed into the distSQL plan, so this is always the best choice; give it zero cost.
info.cost = 0
return info
}
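
Intuitively, the zero cost is justified because the expensive part of the aggregation was pushed into the distSQL child: each storage region pre-aggregates, and the FinalAgg node only merges small partial states. A toy illustration of that merge step under this assumption (partialCount and finalMerge are invented names for the sketch, not planner APIs):

package main

import "fmt"

// partialCount is what a coprocessor-side partial aggregation might return
// per region for COUNT(*) GROUP BY key.
type partialCount map[string]int64

// finalMerge plays the role of the final aggregation: it only sums the
// already-aggregated partial counts, so its input is tiny.
func finalMerge(parts []partialCount) map[string]int64 {
    out := make(map[string]int64)
    for _, p := range parts {
        for k, c := range p {
            out[k] += c
        }
    }
    return out
}

func main() {
    region1 := partialCount{"x": 100, "y": 3}
    region2 := partialCount{"x": 50, "z": 7}
    fmt.Println(finalMerge([]partialCount{region1, region2})) // map[x:150 y:3 z:7]
}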
|
[
"func (p *LogicalAggregation) convert2PhysicalPlanHash() (*physicalPlanInfo, error) {\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tdistinct := false\n\tfor _, fun := range p.AggFuncs {\n\t\tif fun.IsDistinct() {\n\t\t\tdistinct = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !distinct {\n\t\tif x, ok := childInfo.p.(physicalDistSQLPlan); ok {\n\t\t\tinfo := p.convert2PhysicalPlanFinalHash(x, childInfo)\n\t\t\tif info != nil {\n\t\t\t\treturn info, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn p.convert2PhysicalPlanCompleteHash(childInfo), nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanCompleteHash(childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: CompleteAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * memoryFactor\n\tinfo.count = info.count * aggFactor\n\treturn info\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanStream(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tfor _, aggFunc := range p.AggFuncs {\n\t\tif aggFunc.GetMode() == expression.FinalMode {\n\t\t\treturn &physicalPlanInfo{cost: math.MaxFloat64}, nil\n\t\t}\n\t}\n\tagg := PhysicalAggregation{\n\t\tAggType: StreamedAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\t// TODO: Consider distinct key.\n\tinfo := &physicalPlanInfo{cost: math.MaxFloat64}\n\tgbyCols := p.groupByCols\n\tif len(gbyCols) != len(p.GroupByItems) {\n\t\t// group by a + b is not interested in any order.\n\t\treturn info, nil\n\t}\n\tisSortKey := make([]bool, len(gbyCols))\n\tnewProp := &requiredProperty{\n\t\tprops: make([]*columnProp, 0, len(gbyCols)),\n\t}\n\tfor _, pro := range prop.props {\n\t\tidx := p.getGbyColIndex(pro.col)\n\t\tif idx == -1 {\n\t\t\treturn info, nil\n\t\t}\n\t\tisSortKey[idx] = true\n\t\t// We should add columns in aggregation in order to keep index right.\n\t\tnewProp.props = append(newProp.props, &columnProp{col: gbyCols[idx], desc: pro.desc})\n\t}\n\tnewProp.sortKeyLen = len(newProp.props)\n\tfor i, col := range gbyCols {\n\t\tif !isSortKey[i] {\n\t\t\tnewProp.props = append(newProp.props, &columnProp{col: col})\n\t\t}\n\t}\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(newProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tinfo = addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * cpuFactor\n\tinfo.count = info.count * aggFactor\n\treturn info, nil\n}",
"func ConvertToHash(digestAlgorithm asn1.ObjectIdentifier) (crypto.Hash, bool) {\r\n\tvar hash crypto.Hash\r\n\tswitch {\r\n\tcase OIDDigestAlgorithmSHA1.Equal(digestAlgorithm):\r\n\t\thash = crypto.SHA1\r\n\tcase OIDDigestAlgorithmSHA256.Equal(digestAlgorithm):\r\n\t\thash = crypto.SHA256\r\n\tcase OIDDigestAlgorithmSHA384.Equal(digestAlgorithm):\r\n\t\thash = crypto.SHA384\r\n\tcase OIDDigestAlgorithmSHA512.Equal(digestAlgorithm):\r\n\t\thash = crypto.SHA512\r\n\tdefault:\r\n\t\treturn hash, false\r\n\t}\r\n\treturn hash, hash.Available()\r\n}",
"func (dag *BlockDAG) LastFinalityPointHash() *daghash.Hash {\n\tif dag.lastFinalityPoint == nil {\n\t\treturn nil\n\t}\n\treturn dag.lastFinalityPoint.hash\n}",
"func (p *LogicalAggregation) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tplanInfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif planInfo != nil {\n\t\treturn planInfo, nil\n\t}\n\tlimit := prop.limit\n\tif len(prop.props) == 0 {\n\t\tplanInfo, err = p.convert2PhysicalPlanHash()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tstreamInfo, err := p.convert2PhysicalPlanStream(removeLimit(prop))\n\tif planInfo == nil || streamInfo.cost < planInfo.cost {\n\t\tplanInfo = streamInfo\n\t}\n\tplanInfo = enforceProperty(limitProperty(limit), planInfo)\n\terr = p.storePlanInfo(prop, planInfo)\n\treturn planInfo, errors.Trace(err)\n}",
"func generateHash(currentMemberships []*models.ProjectMemberEntity, desiredMemberships []*goharborv1.HarborProjectMember) (string, error) {\n\ttype membershipComp struct {\n\t\tCurrentMemberships []*models.ProjectMemberEntity\n\t\tDesiredMemberships []*goharborv1.HarborProjectMember\n\t}\n\n\tmembershipByteArr, err := json.Marshal(membershipComp{CurrentMemberships: currentMemberships, DesiredMemberships: desiredMemberships})\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"error marshaling memberships for comparison\")\n\n\t\treturn \"\", err\n\t}\n\n\tcurrentHashArr := sha256.Sum256(membershipByteArr)\n\n\treturn hex.EncodeToString(currentHashArr[:]), nil\n}",
"func (o ArtifactOutput) Hash() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Artifact) pulumi.StringOutput { return v.Hash }).(pulumi.StringOutput)\n}",
"func ComputeHash(cmap cmap.ConcurrentMap) (md5Hash [16]byte, items map[string]interface{}) {\n\tvar b bytes.Buffer\n\titems = cmap.Items()\n\n\tfor k, v := range items {\n\t\tfmt.Fprintf(&b, \"%s=%+v\", k, v)\n\t}\n\tmd5Hash = md5.Sum(b.Bytes())\n\n\treturn\n}",
"func PerceptionHashExtend(img image.Image, hashSize int) (string, error) {\n\tif img == nil {\n\t\treturn \"\", errors.New(\"Image object can not be nil.\")\n\t}\n\thighfreqFactor := 4\n\timgSize := hashSize * highfreqFactor\n\n\tresized := resize.Resize(uint(imgSize), uint(imgSize), img, resize.Bilinear)\n\tpixels := transforms.Rgb2Gray(resized)\n\tdct := transforms.DCT2D(pixels, imgSize, imgSize)\n\tflattens := transforms.FlattenPixels(dct, hashSize, hashSize)\n\t// calculate median\n\tmedian := etcs.MedianOfPixels(flattens)\n\n\tlenOfByte := 8\n\tlenOfPhash := hashSize * hashSize\n\tphash := make([]byte, lenOfPhash/lenOfByte)\n\tfor idx, p := range flattens {\n\t\tindexOfByteArray := (lenOfPhash - 1 - idx) / lenOfByte\n\t\tindexOfBit := idx % lenOfByte\n\t\tif p > median {\n\t\t\tphash[indexOfByteArray] |= 1 << uint(indexOfBit)\n\t\t}\n\t}\n\treturn hex.EncodeToString(phash), nil\n}",
"func (batch *BatchPPD) calculateEntryHash() string {\n\thash := 0\n\tfor _, entry := range batch.entries {\n\t\thash = hash + entry.RDFIIdentification\n\t}\n\treturn batch.numericField(hash, 10)\n}",
"func deriveHash(object struct {\n\tParam0 string\n\tParam1 *int\n}) uint64 {\n\th := uint64(17)\n\th = 31*h + deriveHash_(object.Param0)\n\th = 31*h + deriveHash_1(object.Param1)\n\treturn h\n}",
"func CalculateHash(block Block) string { // TODO: simply hash concatenated fields\n\tblock.Hash = \"\"\n\tblock.Tx.Signature = \"\"\n\treturn Shasum([]byte(ToJSON(block)))\n}",
"func BuildFinalModeAggregation(\n\tsctx sessionctx.Context, original *AggInfo, partialIsCop bool, isMPPTask bool) (partial, final *AggInfo, firstRowFuncMap map[*aggregation.AggFuncDesc]*aggregation.AggFuncDesc) {\n\tfirstRowFuncMap = make(map[*aggregation.AggFuncDesc]*aggregation.AggFuncDesc, len(original.AggFuncs))\n\tpartial = &AggInfo{\n\t\tAggFuncs: make([]*aggregation.AggFuncDesc, 0, len(original.AggFuncs)),\n\t\tGroupByItems: original.GroupByItems,\n\t\tSchema: expression.NewSchema(),\n\t}\n\tpartialCursor := 0\n\tfinal = &AggInfo{\n\t\tAggFuncs: make([]*aggregation.AggFuncDesc, len(original.AggFuncs)),\n\t\tGroupByItems: make([]expression.Expression, 0, len(original.GroupByItems)),\n\t\tSchema: original.Schema,\n\t}\n\n\tpartialGbySchema := expression.NewSchema()\n\t// add group by columns\n\tfor _, gbyExpr := range partial.GroupByItems {\n\t\tvar gbyCol *expression.Column\n\t\tif col, ok := gbyExpr.(*expression.Column); ok {\n\t\t\tgbyCol = col\n\t\t} else {\n\t\t\tgbyCol = &expression.Column{\n\t\t\t\tUniqueID: sctx.GetSessionVars().AllocPlanColumnID(),\n\t\t\t\tRetType: gbyExpr.GetType(),\n\t\t\t}\n\t\t}\n\t\tpartialGbySchema.Append(gbyCol)\n\t\tfinal.GroupByItems = append(final.GroupByItems, gbyCol)\n\t}\n\n\t// TODO: Refactor the way of constructing aggregation functions.\n\t// This for loop is ugly, but I do not find a proper way to reconstruct\n\t// it right away.\n\n\t// group_concat is special when pushing down, it cannot take the two phase execution if no distinct but with orderBy, and other cases are also different:\n\t// for example: group_concat([distinct] expr0, expr1[, order by expr2] separator ‘,’)\n\t// no distinct, no orderBy: can two phase\n\t// \t\t[final agg] group_concat(col#1,’,’)\n\t// \t\t[part agg] group_concat(expr0, expr1,’,’) -> col#1\n\t// no distinct, orderBy: only one phase\n\t// distinct, no orderBy: can two phase\n\t// \t\t[final agg] group_concat(distinct col#0, col#1,’,’)\n\t// \t\t[part agg] group by expr0 ->col#0, expr1 -> col#1\n\t// distinct, orderBy: can two phase\n\t// \t\t[final agg] group_concat(distinct col#0, col#1, order by col#2,’,’)\n\t// \t\t[part agg] group by expr0 ->col#0, expr1 -> col#1; agg function: firstrow(expr2)-> col#2\n\n\tfor i, aggFunc := range original.AggFuncs {\n\t\tfinalAggFunc := &aggregation.AggFuncDesc{HasDistinct: false}\n\t\tfinalAggFunc.Name = aggFunc.Name\n\t\tfinalAggFunc.OrderByItems = aggFunc.OrderByItems\n\t\targs := make([]expression.Expression, 0, len(aggFunc.Args))\n\t\tif aggFunc.HasDistinct {\n\t\t\t/*\n\t\t\t\teg: SELECT COUNT(DISTINCT a), SUM(b) FROM t GROUP BY c\n\n\t\t\t\tchange from\n\t\t\t\t\t[root] group by: c, funcs:count(distinct a), funcs:sum(b)\n\t\t\t\tto\n\t\t\t\t\t[root] group by: c, funcs:count(distinct a), funcs:sum(b)\n\t\t\t\t\t\t[cop]: group by: c, a\n\t\t\t*/\n\t\t\t// onlyAddFirstRow means if the distinctArg does not occur in group by items,\n\t\t\t// it should be replaced with a firstrow() agg function, needed for the order by items of group_concat()\n\t\t\tgetDistinctExpr := func(distinctArg expression.Expression, onlyAddFirstRow bool) (ret expression.Expression) {\n\t\t\t\t// 1. 
add all args to partial.GroupByItems\n\t\t\t\tfoundInGroupBy := false\n\t\t\t\tfor j, gbyExpr := range partial.GroupByItems {\n\t\t\t\t\tif gbyExpr.Equal(sctx, distinctArg) && gbyExpr.GetType().Equal(distinctArg.GetType()) {\n\t\t\t\t\t\t// if the two expressions exactly the same in terms of data types and collation, then can avoid it.\n\t\t\t\t\t\tfoundInGroupBy = true\n\t\t\t\t\t\tret = partialGbySchema.Columns[j]\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !foundInGroupBy {\n\t\t\t\t\tvar gbyCol *expression.Column\n\t\t\t\t\tif col, ok := distinctArg.(*expression.Column); ok {\n\t\t\t\t\t\tgbyCol = col\n\t\t\t\t\t} else {\n\t\t\t\t\t\tgbyCol = &expression.Column{\n\t\t\t\t\t\t\tUniqueID: sctx.GetSessionVars().AllocPlanColumnID(),\n\t\t\t\t\t\t\tRetType: distinctArg.GetType(),\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t// 2. add group by items if needed\n\t\t\t\t\tif !onlyAddFirstRow {\n\t\t\t\t\t\tpartial.GroupByItems = append(partial.GroupByItems, distinctArg)\n\t\t\t\t\t\tpartialGbySchema.Append(gbyCol)\n\t\t\t\t\t\tret = gbyCol\n\t\t\t\t\t}\n\t\t\t\t\t// 3. add firstrow() if needed\n\t\t\t\t\tif !partialIsCop || onlyAddFirstRow {\n\t\t\t\t\t\t// if partial is a cop task, firstrow function is redundant since group by items are outputted\n\t\t\t\t\t\t// by group by schema, and final functions use group by schema as their arguments.\n\t\t\t\t\t\t// if partial agg is not cop, we must append firstrow function & schema, to output the group by\n\t\t\t\t\t\t// items.\n\t\t\t\t\t\t// maybe we can unify them sometime.\n\t\t\t\t\t\t// only add firstrow for order by items of group_concat()\n\t\t\t\t\t\tfirstRow, err := aggregation.NewAggFuncDesc(sctx, ast.AggFuncFirstRow, []expression.Expression{distinctArg}, false)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tpanic(\"NewAggFuncDesc FirstRow meets error: \" + err.Error())\n\t\t\t\t\t\t}\n\t\t\t\t\t\tpartial.AggFuncs = append(partial.AggFuncs, firstRow)\n\t\t\t\t\t\tnewCol, _ := gbyCol.Clone().(*expression.Column)\n\t\t\t\t\t\tnewCol.RetType = firstRow.RetTp\n\t\t\t\t\t\tpartial.Schema.Append(newCol)\n\t\t\t\t\t\tif onlyAddFirstRow {\n\t\t\t\t\t\t\tret = newCol\n\t\t\t\t\t\t}\n\t\t\t\t\t\tpartialCursor++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn ret\n\t\t\t}\n\n\t\t\tfor j, distinctArg := range aggFunc.Args {\n\t\t\t\t// the last arg of ast.AggFuncGroupConcat is the separator, so just put it into the final agg\n\t\t\t\tif aggFunc.Name == ast.AggFuncGroupConcat && j+1 == len(aggFunc.Args) {\n\t\t\t\t\targs = append(args, distinctArg)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\targs = append(args, getDistinctExpr(distinctArg, false))\n\t\t\t}\n\n\t\t\tbyItems := make([]*util.ByItems, 0, len(aggFunc.OrderByItems))\n\t\t\tfor _, byItem := range aggFunc.OrderByItems {\n\t\t\t\tbyItems = append(byItems, &util.ByItems{Expr: getDistinctExpr(byItem.Expr, true), Desc: byItem.Desc})\n\t\t\t}\n\n\t\t\tif aggFunc.HasDistinct && isMPPTask && aggFunc.GroupingID > 0 {\n\t\t\t\t// keep the groupingID as it was, otherwise the new split final aggregate's ganna lost its groupingID info.\n\t\t\t\tfinalAggFunc.GroupingID = aggFunc.GroupingID\n\t\t\t}\n\n\t\t\tfinalAggFunc.OrderByItems = byItems\n\t\t\tfinalAggFunc.HasDistinct = aggFunc.HasDistinct\n\t\t\t// In logical optimize phase, the Agg->PartitionUnion->TableReader may become\n\t\t\t// Agg1->PartitionUnion->Agg2->TableReader, and the Agg2 is a partial aggregation.\n\t\t\t// So in the push down here, we need to add a new if-condition check:\n\t\t\t// If the original agg mode is partial already, the finalAggFunc's 
mode become Partial2.\n\t\t\tif aggFunc.Mode == aggregation.CompleteMode {\n\t\t\t\tfinalAggFunc.Mode = aggregation.CompleteMode\n\t\t\t} else if aggFunc.Mode == aggregation.Partial1Mode || aggFunc.Mode == aggregation.Partial2Mode {\n\t\t\t\tfinalAggFunc.Mode = aggregation.Partial2Mode\n\t\t\t}\n\t\t} else {\n\t\t\tif aggFunc.Name == ast.AggFuncGroupConcat && len(aggFunc.OrderByItems) > 0 {\n\t\t\t\t// group_concat can only run in one phase if it has order by items but without distinct property\n\t\t\t\tpartial = nil\n\t\t\t\tfinal = original\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif aggregation.NeedCount(finalAggFunc.Name) {\n\t\t\t\t// only Avg and Count need count\n\t\t\t\tif isMPPTask && finalAggFunc.Name == ast.AggFuncCount {\n\t\t\t\t\t// For MPP Task, the final count() is changed to sum().\n\t\t\t\t\t// Note: MPP mode does not run avg() directly, instead, avg() -> sum()/(case when count() = 0 then 1 else count() end),\n\t\t\t\t\t// so we do not process it here.\n\t\t\t\t\tfinalAggFunc.Name = ast.AggFuncSum\n\t\t\t\t} else {\n\t\t\t\t\t// avg branch\n\t\t\t\t\tft := types.NewFieldType(mysql.TypeLonglong)\n\t\t\t\t\tft.SetFlen(21)\n\t\t\t\t\tft.SetCharset(charset.CharsetBin)\n\t\t\t\t\tft.SetCollate(charset.CollationBin)\n\t\t\t\t\tpartial.Schema.Append(&expression.Column{\n\t\t\t\t\t\tUniqueID: sctx.GetSessionVars().AllocPlanColumnID(),\n\t\t\t\t\t\tRetType: ft,\n\t\t\t\t\t})\n\t\t\t\t\targs = append(args, partial.Schema.Columns[partialCursor])\n\t\t\t\t\tpartialCursor++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif finalAggFunc.Name == ast.AggFuncApproxCountDistinct {\n\t\t\t\tft := types.NewFieldType(mysql.TypeString)\n\t\t\t\tft.SetCharset(charset.CharsetBin)\n\t\t\t\tft.SetCollate(charset.CollationBin)\n\t\t\t\tft.AddFlag(mysql.NotNullFlag)\n\t\t\t\tpartial.Schema.Append(&expression.Column{\n\t\t\t\t\tUniqueID: sctx.GetSessionVars().AllocPlanColumnID(),\n\t\t\t\t\tRetType: ft,\n\t\t\t\t})\n\t\t\t\targs = append(args, partial.Schema.Columns[partialCursor])\n\t\t\t\tpartialCursor++\n\t\t\t}\n\t\t\tif aggregation.NeedValue(finalAggFunc.Name) {\n\t\t\t\tpartial.Schema.Append(&expression.Column{\n\t\t\t\t\tUniqueID: sctx.GetSessionVars().AllocPlanColumnID(),\n\t\t\t\t\tRetType: original.Schema.Columns[i].GetType(),\n\t\t\t\t})\n\t\t\t\targs = append(args, partial.Schema.Columns[partialCursor])\n\t\t\t\tpartialCursor++\n\t\t\t}\n\t\t\tif aggFunc.Name == ast.AggFuncAvg {\n\t\t\t\tcntAgg := aggFunc.Clone()\n\t\t\t\tcntAgg.Name = ast.AggFuncCount\n\t\t\t\terr := cntAgg.TypeInfer(sctx)\n\t\t\t\tif err != nil { // must not happen\n\t\t\t\t\tpartial = nil\n\t\t\t\t\tfinal = original\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tpartial.Schema.Columns[partialCursor-2].RetType = cntAgg.RetTp\n\t\t\t\t// we must call deep clone in this case, to avoid sharing the arguments.\n\t\t\t\tsumAgg := aggFunc.Clone()\n\t\t\t\tsumAgg.Name = ast.AggFuncSum\n\t\t\t\tsumAgg.TypeInfer4AvgSum(sumAgg.RetTp)\n\t\t\t\tpartial.Schema.Columns[partialCursor-1].RetType = sumAgg.RetTp\n\t\t\t\tpartial.AggFuncs = append(partial.AggFuncs, cntAgg, sumAgg)\n\t\t\t} else if aggFunc.Name == ast.AggFuncApproxCountDistinct || aggFunc.Name == ast.AggFuncGroupConcat {\n\t\t\t\tnewAggFunc := aggFunc.Clone()\n\t\t\t\tnewAggFunc.Name = aggFunc.Name\n\t\t\t\tnewAggFunc.RetTp = partial.Schema.Columns[partialCursor-1].GetType()\n\t\t\t\tpartial.AggFuncs = append(partial.AggFuncs, newAggFunc)\n\t\t\t\tif aggFunc.Name == ast.AggFuncGroupConcat {\n\t\t\t\t\t// append the last separator arg\n\t\t\t\t\targs = append(args, 
aggFunc.Args[len(aggFunc.Args)-1])\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// other agg desc just split into two parts\n\t\t\t\tpartialFuncDesc := aggFunc.Clone()\n\t\t\t\tpartial.AggFuncs = append(partial.AggFuncs, partialFuncDesc)\n\t\t\t\tif aggFunc.Name == ast.AggFuncFirstRow {\n\t\t\t\t\tfirstRowFuncMap[partialFuncDesc] = finalAggFunc\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// In logical optimize phase, the Agg->PartitionUnion->TableReader may become\n\t\t\t// Agg1->PartitionUnion->Agg2->TableReader, and the Agg2 is a partial aggregation.\n\t\t\t// So in the push down here, we need to add a new if-condition check:\n\t\t\t// If the original agg mode is partial already, the finalAggFunc's mode become Partial2.\n\t\t\tif aggFunc.Mode == aggregation.CompleteMode {\n\t\t\t\tfinalAggFunc.Mode = aggregation.FinalMode\n\t\t\t} else if aggFunc.Mode == aggregation.Partial1Mode || aggFunc.Mode == aggregation.Partial2Mode {\n\t\t\t\tfinalAggFunc.Mode = aggregation.Partial2Mode\n\t\t\t}\n\t\t}\n\n\t\tfinalAggFunc.Args = args\n\t\tfinalAggFunc.RetTp = aggFunc.RetTp\n\t\tfinal.AggFuncs[i] = finalAggFunc\n\t}\n\tpartial.Schema.Append(partialGbySchema.Columns...)\n\tif partialIsCop {\n\t\tfor _, f := range partial.AggFuncs {\n\t\t\tf.Mode = aggregation.Partial1Mode\n\t\t}\n\t}\n\treturn\n}",
"func (p *LogicalJoin) convert2PhysicalPlanRight(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallRight := true\n\tfor _, col := range prop.props {\n\t\tif !rChild.Schema().Contains(col.col) {\n\t\t\tallRight = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = RightOuterJoin\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trProp := prop\n\tif !allRight {\n\t\trProp = &requiredProperty{}\n\t} else {\n\t\trProp = replaceColsInPropBySchema(rProp, rChild.Schema())\n\t}\n\tvar rInfo *physicalPlanInfo\n\tif innerJoin {\n\t\trInfo, err = rChild.convert2PhysicalPlan(removeLimit(rProp))\n\t} else {\n\t\trInfo, err = rChild.convert2PhysicalPlan(convertLimitOffsetToCount(rProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allRight {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (a TPMIAlgHash) Hash() (crypto.Hash, error) {\n\tswitch TPMAlgID(a) {\n\tcase TPMAlgSHA1:\n\t\treturn crypto.SHA1, nil\n\tcase TPMAlgSHA256:\n\t\treturn crypto.SHA256, nil\n\tcase TPMAlgSHA384:\n\t\treturn crypto.SHA384, nil\n\tcase TPMAlgSHA512:\n\t\treturn crypto.SHA512, nil\n\t}\n\treturn crypto.SHA256, fmt.Errorf(\"unsupported hash algorithm: %v\", a)\n}",
"func (p *LogicalApply) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tif p.JoinType == InnerJoin || p.JoinType == LeftOuterJoin {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanLeft(&requiredProperty{}, p.JoinType == InnerJoin)\n\t} else {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanSemi(&requiredProperty{})\n\t}\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tswitch info.p.(type) {\n\tcase *PhysicalHashJoin, *PhysicalHashSemiJoin:\n\t\tap := PhysicalApply{\n\t\t\tPhysicalJoin: info.p,\n\t\t\tOuterSchema: p.corCols,\n\t\t}.init(p.allocator, p.ctx)\n\t\tap.SetChildren(info.p.Children()...)\n\t\tap.SetSchema(info.p.Schema())\n\t\tinfo.p = ap\n\tdefault:\n\t\tinfo.cost = math.MaxFloat64\n\t\tinfo.p = nil\n\t}\n\tinfo = enforceProperty(prop, info)\n\tp.storePlanInfo(prop, info)\n\treturn info, nil\n}",
"func newHash(algo HashAlgo) (h hash.Hash) {\n\tswitch algo {\n\tcase HashSha256:\n\t\t// sha256 checksum specially on ARM64 platforms or whenever\n\t\t// requested as dictated by `xl.json` entry.\n\t\th = sha256.New()\n\tcase HashBlake2b:\n\t\t// ignore the error, because New512 without a key never fails\n\t\t// New512 only returns a non-nil error, if the length of the passed\n\t\t// key > 64 bytes - but we use blake2b as hash function (no key)\n\t\th, _ = blake2b.New512(nil)\n\t// Add new hashes here.\n\tdefault:\n\t\t// Default to blake2b.\n\t\t// ignore the error, because New512 without a key never fails\n\t\t// New512 only returns a non-nil error, if the length of the passed\n\t\t// key > 64 bytes - but we use blake2b as hash function (no key)\n\t\th, _ = blake2b.New512(nil)\n\t}\n\treturn h\n}",
"func (h *EntityHash) Sum() string {\n\thash := h.Hash.Sum(nil)\n\tif len(hash) > 12 {\n\t\thash = hash[:12]\n\t}\n\treturn base64.RawStdEncoding.EncodeToString(hash)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
convert2PhysicalPlanCompleteHash converts the logical aggregation to the complete hash aggregation physicalPlanInfo.
|
func (p *LogicalAggregation) convert2PhysicalPlanCompleteHash(childInfo *physicalPlanInfo) *physicalPlanInfo {
agg := PhysicalAggregation{
AggType: CompleteAgg,
AggFuncs: p.AggFuncs,
GroupByItems: p.GroupByItems,
}.init(p.allocator, p.ctx)
agg.HasGby = len(p.GroupByItems) > 0
agg.SetSchema(p.schema)
info := addPlanToResponse(agg, childInfo)
info.cost += info.count * memoryFactor
info.count = info.count * aggFactor
return info
}
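
The two updates at the end encode the cost model: a complete hash aggregation pays a memory charge for every input row it materializes in its hash table, and its output row count shrinks by aggFactor; the streamed variant pays a per-row CPU charge instead. A back-of-the-envelope sketch with made-up factor values (the real cpuFactor, memoryFactor, and aggFactor are planner constants not shown in this excerpt):

package main

import "fmt"

const (
    memoryFactor = 5.0 // assumed: per-row cost of hash-table residency
    cpuFactor    = 0.8 // assumed: per-row cost of streamed comparison
    aggFactor    = 0.1 // assumed: expected output/input row ratio
)

type planInfo struct {
    cost  float64
    count float64
}

// completeHash mirrors the accounting above: memory per input row,
// then shrink the estimated output.
func completeHash(child planInfo) planInfo {
    child.cost += child.count * memoryFactor
    child.count *= aggFactor
    return child
}

// streamed mirrors the streamed variant: CPU per input row instead.
func streamed(child planInfo) planInfo {
    child.cost += child.count * cpuFactor
    child.count *= aggFactor
    return child
}

func main() {
    child := planInfo{cost: 1000, count: 10000}
    fmt.Printf("hash:   %+v\n", completeHash(child)) // {cost:51000 count:1000}
    fmt.Printf("stream: %+v\n", streamed(child))     // {cost:9000 count:1000}
}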
|
[
"func (p *LogicalAggregation) convert2PhysicalPlanFinalHash(x physicalDistSQLPlan, childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: FinalAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.SetSchema(p.schema)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tschema := x.addAggregation(p.ctx, agg)\n\tif schema.Len() == 0 {\n\t\treturn nil\n\t}\n\tx.(PhysicalPlan).SetSchema(schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.count = info.count * aggFactor\n\t// if we build the final aggregation, it must be the best plan.\n\tinfo.cost = 0\n\treturn info\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanHash() (*physicalPlanInfo, error) {\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tdistinct := false\n\tfor _, fun := range p.AggFuncs {\n\t\tif fun.IsDistinct() {\n\t\t\tdistinct = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !distinct {\n\t\tif x, ok := childInfo.p.(physicalDistSQLPlan); ok {\n\t\t\tinfo := p.convert2PhysicalPlanFinalHash(x, childInfo)\n\t\t\tif info != nil {\n\t\t\t\treturn info, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn p.convert2PhysicalPlanCompleteHash(childInfo), nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanStream(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tfor _, aggFunc := range p.AggFuncs {\n\t\tif aggFunc.GetMode() == expression.FinalMode {\n\t\t\treturn &physicalPlanInfo{cost: math.MaxFloat64}, nil\n\t\t}\n\t}\n\tagg := PhysicalAggregation{\n\t\tAggType: StreamedAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\t// TODO: Consider distinct key.\n\tinfo := &physicalPlanInfo{cost: math.MaxFloat64}\n\tgbyCols := p.groupByCols\n\tif len(gbyCols) != len(p.GroupByItems) {\n\t\t// group by a + b is not interested in any order.\n\t\treturn info, nil\n\t}\n\tisSortKey := make([]bool, len(gbyCols))\n\tnewProp := &requiredProperty{\n\t\tprops: make([]*columnProp, 0, len(gbyCols)),\n\t}\n\tfor _, pro := range prop.props {\n\t\tidx := p.getGbyColIndex(pro.col)\n\t\tif idx == -1 {\n\t\t\treturn info, nil\n\t\t}\n\t\tisSortKey[idx] = true\n\t\t// We should add columns in aggregation in order to keep index right.\n\t\tnewProp.props = append(newProp.props, &columnProp{col: gbyCols[idx], desc: pro.desc})\n\t}\n\tnewProp.sortKeyLen = len(newProp.props)\n\tfor i, col := range gbyCols {\n\t\tif !isSortKey[i] {\n\t\t\tnewProp.props = append(newProp.props, &columnProp{col: col})\n\t\t}\n\t}\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(newProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tinfo = addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * cpuFactor\n\tinfo.count = info.count * aggFactor\n\treturn info, nil\n}",
"func ConvertToHash(digestAlgorithm asn1.ObjectIdentifier) (crypto.Hash, bool) {\r\n\tvar hash crypto.Hash\r\n\tswitch {\r\n\tcase OIDDigestAlgorithmSHA1.Equal(digestAlgorithm):\r\n\t\thash = crypto.SHA1\r\n\tcase OIDDigestAlgorithmSHA256.Equal(digestAlgorithm):\r\n\t\thash = crypto.SHA256\r\n\tcase OIDDigestAlgorithmSHA384.Equal(digestAlgorithm):\r\n\t\thash = crypto.SHA384\r\n\tcase OIDDigestAlgorithmSHA512.Equal(digestAlgorithm):\r\n\t\thash = crypto.SHA512\r\n\tdefault:\r\n\t\treturn hash, false\r\n\t}\r\n\treturn hash, hash.Available()\r\n}",
"func (p *LogicalAggregation) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tplanInfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif planInfo != nil {\n\t\treturn planInfo, nil\n\t}\n\tlimit := prop.limit\n\tif len(prop.props) == 0 {\n\t\tplanInfo, err = p.convert2PhysicalPlanHash()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tstreamInfo, err := p.convert2PhysicalPlanStream(removeLimit(prop))\n\tif planInfo == nil || streamInfo.cost < planInfo.cost {\n\t\tplanInfo = streamInfo\n\t}\n\tplanInfo = enforceProperty(limitProperty(limit), planInfo)\n\terr = p.storePlanInfo(prop, planInfo)\n\treturn planInfo, errors.Trace(err)\n}",
"func deriveHash(object struct {\n\tParam0 string\n\tParam1 *int\n}) uint64 {\n\th := uint64(17)\n\th = 31*h + deriveHash_(object.Param0)\n\th = 31*h + deriveHash_1(object.Param1)\n\treturn h\n}",
"func (o *IamApiKeyAllOf) GetHashAlgorithmOk() (*string, bool) {\n\tif o == nil || o.HashAlgorithm == nil {\n\t\treturn nil, false\n\t}\n\treturn o.HashAlgorithm, true\n}",
"func (o ArtifactOutput) Hash() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Artifact) pulumi.StringOutput { return v.Hash }).(pulumi.StringOutput)\n}",
"func (_CairoProver *CairoProverCaller) RegistriesProgramHash(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _CairoProver.contract.Call(opts, &out, \"registriesProgramHash\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}",
"func (g *GenericHash) Sum(b []byte) []byte {\n\tif g.sum != nil {\n\t\treturn append(b, g.sum...)\n\t}\n\tg.sum = make([]byte, g.size)\n\tif int(C.crypto_generichash_final(\n\t\t&g.state,\n\t\t(*C.uchar)(&g.sum[0]),\n\t\t(C.size_t)(g.size))) != 0 {\n\t\tpanic(\"see libsodium\")\n\t}\n\tg.state = C.struct_crypto_generichash_blake2b_state{}\n\treturn append(b, g.sum...)\n}",
"func (h *EntityHash) Sum() string {\n\thash := h.Hash.Sum(nil)\n\tif len(hash) > 12 {\n\t\thash = hash[:12]\n\t}\n\treturn base64.RawStdEncoding.EncodeToString(hash)\n}",
"func CalculateHash(block Block) string { // TODO: simply hash concatenated fields\n\tblock.Hash = \"\"\n\tblock.Tx.Signature = \"\"\n\treturn Shasum([]byte(ToJSON(block)))\n}",
"func newHash(algo HashAlgo) (h hash.Hash) {\n\tswitch algo {\n\tcase HashSha256:\n\t\t// sha256 checksum specially on ARM64 platforms or whenever\n\t\t// requested as dictated by `xl.json` entry.\n\t\th = sha256.New()\n\tcase HashBlake2b:\n\t\t// ignore the error, because New512 without a key never fails\n\t\t// New512 only returns a non-nil error, if the length of the passed\n\t\t// key > 64 bytes - but we use blake2b as hash function (no key)\n\t\th, _ = blake2b.New512(nil)\n\t// Add new hashes here.\n\tdefault:\n\t\t// Default to blake2b.\n\t\t// ignore the error, because New512 without a key never fails\n\t\t// New512 only returns a non-nil error, if the length of the passed\n\t\t// key > 64 bytes - but we use blake2b as hash function (no key)\n\t\th, _ = blake2b.New512(nil)\n\t}\n\treturn h\n}",
"func NewTiDBHashAggImpl(agg *plannercore.PhysicalHashAgg) *TiDBHashAggImpl {\n\treturn &TiDBHashAggImpl{baseImpl{plan: agg}}\n}",
"func PerceptionHashExtend(img image.Image, hashSize int) (string, error) {\n\tif img == nil {\n\t\treturn \"\", errors.New(\"Image object can not be nil.\")\n\t}\n\thighfreqFactor := 4\n\timgSize := hashSize * highfreqFactor\n\n\tresized := resize.Resize(uint(imgSize), uint(imgSize), img, resize.Bilinear)\n\tpixels := transforms.Rgb2Gray(resized)\n\tdct := transforms.DCT2D(pixels, imgSize, imgSize)\n\tflattens := transforms.FlattenPixels(dct, hashSize, hashSize)\n\t// calculate median\n\tmedian := etcs.MedianOfPixels(flattens)\n\n\tlenOfByte := 8\n\tlenOfPhash := hashSize * hashSize\n\tphash := make([]byte, lenOfPhash/lenOfByte)\n\tfor idx, p := range flattens {\n\t\tindexOfByteArray := (lenOfPhash - 1 - idx) / lenOfByte\n\t\tindexOfBit := idx % lenOfByte\n\t\tif p > median {\n\t\t\tphash[indexOfByteArray] |= 1 << uint(indexOfBit)\n\t\t}\n\t}\n\treturn hex.EncodeToString(phash), nil\n}",
"func (g *GitInfo) FullHash(ctx context.Context, ref string) (string, error) {\n\toutput, err := g.dir.Git(ctx, \"rev-parse\", fmt.Sprintf(\"%s^{commit}\", ref))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to obtain full hash: %s\", err)\n\t}\n\treturn strings.Trim(output, \"\\n\"), nil\n}",
"func ComputeHash(cmap cmap.ConcurrentMap) (md5Hash [16]byte, items map[string]interface{}) {\n\tvar b bytes.Buffer\n\titems = cmap.Items()\n\n\tfor k, v := range items {\n\t\tfmt.Fprintf(&b, \"%s=%+v\", k, v)\n\t}\n\tmd5Hash = md5.Sum(b.Bytes())\n\n\treturn\n}",
"func (a TPMIAlgHash) Hash() (crypto.Hash, error) {\n\tswitch TPMAlgID(a) {\n\tcase TPMAlgSHA1:\n\t\treturn crypto.SHA1, nil\n\tcase TPMAlgSHA256:\n\t\treturn crypto.SHA256, nil\n\tcase TPMAlgSHA384:\n\t\treturn crypto.SHA384, nil\n\tcase TPMAlgSHA512:\n\t\treturn crypto.SHA512, nil\n\t}\n\treturn crypto.SHA256, fmt.Errorf(\"unsupported hash algorithm: %v\", a)\n}",
"func (p *LogicalApply) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tif p.JoinType == InnerJoin || p.JoinType == LeftOuterJoin {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanLeft(&requiredProperty{}, p.JoinType == InnerJoin)\n\t} else {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanSemi(&requiredProperty{})\n\t}\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tswitch info.p.(type) {\n\tcase *PhysicalHashJoin, *PhysicalHashSemiJoin:\n\t\tap := PhysicalApply{\n\t\t\tPhysicalJoin: info.p,\n\t\t\tOuterSchema: p.corCols,\n\t\t}.init(p.allocator, p.ctx)\n\t\tap.SetChildren(info.p.Children()...)\n\t\tap.SetSchema(info.p.Schema())\n\t\tinfo.p = ap\n\tdefault:\n\t\tinfo.cost = math.MaxFloat64\n\t\tinfo.p = nil\n\t}\n\tinfo = enforceProperty(prop, info)\n\tp.storePlanInfo(prop, info)\n\treturn info, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
convert2PhysicalPlanHash converts the logical aggregation to the physical hash aggregation.
|
func (p *LogicalAggregation) convert2PhysicalPlanHash() (*physicalPlanInfo, error) {
childInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(&requiredProperty{})
if err != nil {
return nil, errors.Trace(err)
}
distinct := false
for _, fun := range p.AggFuncs {
if fun.IsDistinct() {
distinct = true
break
}
}
if !distinct {
if x, ok := childInfo.p.(physicalDistSQLPlan); ok {
info := p.convert2PhysicalPlanFinalHash(x, childInfo)
if info != nil {
return info, nil
}
}
}
return p.convert2PhysicalPlanCompleteHash(childInfo), nil
}
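// A minimal sketch of the guard above, assuming a hypothetical aggFunc
// interface in place of the planner's aggregate-function type: the final
// (two-phase) hash aggregation is only attempted when no aggregate function
// is DISTINCT and the child already runs as a distributed request, since a
// partial aggregate cannot deduplicate values across storage regions.
type aggFunc interface{ IsDistinct() bool }

func canTryFinalHashAgg(funcs []aggFunc, childIsDistSQL bool) bool {
	for _, f := range funcs {
		if f.IsDistinct() {
			return false
		}
	}
	return childIsDistSQL
}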
|
[
"func (p *LogicalAggregation) convert2PhysicalPlanFinalHash(x physicalDistSQLPlan, childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: FinalAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.SetSchema(p.schema)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tschema := x.addAggregation(p.ctx, agg)\n\tif schema.Len() == 0 {\n\t\treturn nil\n\t}\n\tx.(PhysicalPlan).SetSchema(schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.count = info.count * aggFactor\n\t// if we build the final aggregation, it must be the best plan.\n\tinfo.cost = 0\n\treturn info\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanCompleteHash(childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: CompleteAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * memoryFactor\n\tinfo.count = info.count * aggFactor\n\treturn info\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanStream(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tfor _, aggFunc := range p.AggFuncs {\n\t\tif aggFunc.GetMode() == expression.FinalMode {\n\t\t\treturn &physicalPlanInfo{cost: math.MaxFloat64}, nil\n\t\t}\n\t}\n\tagg := PhysicalAggregation{\n\t\tAggType: StreamedAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\t// TODO: Consider distinct key.\n\tinfo := &physicalPlanInfo{cost: math.MaxFloat64}\n\tgbyCols := p.groupByCols\n\tif len(gbyCols) != len(p.GroupByItems) {\n\t\t// group by a + b is not interested in any order.\n\t\treturn info, nil\n\t}\n\tisSortKey := make([]bool, len(gbyCols))\n\tnewProp := &requiredProperty{\n\t\tprops: make([]*columnProp, 0, len(gbyCols)),\n\t}\n\tfor _, pro := range prop.props {\n\t\tidx := p.getGbyColIndex(pro.col)\n\t\tif idx == -1 {\n\t\t\treturn info, nil\n\t\t}\n\t\tisSortKey[idx] = true\n\t\t// We should add columns in aggregation in order to keep index right.\n\t\tnewProp.props = append(newProp.props, &columnProp{col: gbyCols[idx], desc: pro.desc})\n\t}\n\tnewProp.sortKeyLen = len(newProp.props)\n\tfor i, col := range gbyCols {\n\t\tif !isSortKey[i] {\n\t\t\tnewProp.props = append(newProp.props, &columnProp{col: col})\n\t\t}\n\t}\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(newProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tinfo = addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * cpuFactor\n\tinfo.count = info.count * aggFactor\n\treturn info, nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tplanInfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif planInfo != nil {\n\t\treturn planInfo, nil\n\t}\n\tlimit := prop.limit\n\tif len(prop.props) == 0 {\n\t\tplanInfo, err = p.convert2PhysicalPlanHash()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tstreamInfo, err := p.convert2PhysicalPlanStream(removeLimit(prop))\n\tif planInfo == nil || streamInfo.cost < planInfo.cost {\n\t\tplanInfo = streamInfo\n\t}\n\tplanInfo = enforceProperty(limitProperty(limit), planInfo)\n\terr = p.storePlanInfo(prop, planInfo)\n\treturn planInfo, errors.Trace(err)\n}",
"func ConvertToHash(digestAlgorithm asn1.ObjectIdentifier) (crypto.Hash, bool) {\r\n\tvar hash crypto.Hash\r\n\tswitch {\r\n\tcase OIDDigestAlgorithmSHA1.Equal(digestAlgorithm):\r\n\t\thash = crypto.SHA1\r\n\tcase OIDDigestAlgorithmSHA256.Equal(digestAlgorithm):\r\n\t\thash = crypto.SHA256\r\n\tcase OIDDigestAlgorithmSHA384.Equal(digestAlgorithm):\r\n\t\thash = crypto.SHA384\r\n\tcase OIDDigestAlgorithmSHA512.Equal(digestAlgorithm):\r\n\t\thash = crypto.SHA512\r\n\tdefault:\r\n\t\treturn hash, false\r\n\t}\r\n\treturn hash, hash.Available()\r\n}",
"func (p *LogicalJoin) convert2PhysicalPlanLeft(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tSmallTable: 1,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = LeftOuterJoin\n\t}\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tvar lInfo *physicalPlanInfo\n\tvar err error\n\tif innerJoin {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(removeLimit(lProp))\n\t} else {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(convertLimitOffsetToCount(lProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalApply) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tif p.JoinType == InnerJoin || p.JoinType == LeftOuterJoin {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanLeft(&requiredProperty{}, p.JoinType == InnerJoin)\n\t} else {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanSemi(&requiredProperty{})\n\t}\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tswitch info.p.(type) {\n\tcase *PhysicalHashJoin, *PhysicalHashSemiJoin:\n\t\tap := PhysicalApply{\n\t\t\tPhysicalJoin: info.p,\n\t\t\tOuterSchema: p.corCols,\n\t\t}.init(p.allocator, p.ctx)\n\t\tap.SetChildren(info.p.Children()...)\n\t\tap.SetSchema(info.p.Schema())\n\t\tinfo.p = ap\n\tdefault:\n\t\tinfo.cost = math.MaxFloat64\n\t\tinfo.p = nil\n\t}\n\tinfo = enforceProperty(prop, info)\n\tp.storePlanInfo(prop, info)\n\treturn info, nil\n}",
"func (p *LogicalJoin) convert2PhysicalPlanRight(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallRight := true\n\tfor _, col := range prop.props {\n\t\tif !rChild.Schema().Contains(col.col) {\n\t\t\tallRight = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = RightOuterJoin\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trProp := prop\n\tif !allRight {\n\t\trProp = &requiredProperty{}\n\t} else {\n\t\trProp = replaceColsInPropBySchema(rProp, rChild.Schema())\n\t}\n\tvar rInfo *physicalPlanInfo\n\tif innerJoin {\n\t\trInfo, err = rChild.convert2PhysicalPlan(removeLimit(rProp))\n\t} else {\n\t\trInfo, err = rChild.convert2PhysicalPlan(convertLimitOffsetToCount(rProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allRight {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func deriveHash(object struct {\n\tParam0 string\n\tParam1 *int\n}) uint64 {\n\th := uint64(17)\n\th = 31*h + deriveHash_(object.Param0)\n\th = 31*h + deriveHash_1(object.Param1)\n\treturn h\n}",
"func PerceptionHashExtend(img image.Image, hashSize int) (string, error) {\n\tif img == nil {\n\t\treturn \"\", errors.New(\"Image object can not be nil.\")\n\t}\n\thighfreqFactor := 4\n\timgSize := hashSize * highfreqFactor\n\n\tresized := resize.Resize(uint(imgSize), uint(imgSize), img, resize.Bilinear)\n\tpixels := transforms.Rgb2Gray(resized)\n\tdct := transforms.DCT2D(pixels, imgSize, imgSize)\n\tflattens := transforms.FlattenPixels(dct, hashSize, hashSize)\n\t// calculate median\n\tmedian := etcs.MedianOfPixels(flattens)\n\n\tlenOfByte := 8\n\tlenOfPhash := hashSize * hashSize\n\tphash := make([]byte, lenOfPhash/lenOfByte)\n\tfor idx, p := range flattens {\n\t\tindexOfByteArray := (lenOfPhash - 1 - idx) / lenOfByte\n\t\tindexOfBit := idx % lenOfByte\n\t\tif p > median {\n\t\t\tphash[indexOfByteArray] |= 1 << uint(indexOfBit)\n\t\t}\n\t}\n\treturn hex.EncodeToString(phash), nil\n}",
"func (p *LogicalJoin) convert2PhysicalMergeJoin(parentProp *requiredProperty, lProp *requiredProperty, rProp *requiredProperty, condIndex int, joinType JoinType) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\n\tnewEQConds := make([]*expression.ScalarFunction, 0, len(p.EqualConditions)-1)\n\tfor i, cond := range p.EqualConditions {\n\t\tif i == condIndex {\n\t\t\tcontinue\n\t\t}\n\t\t// prevent further index contamination\n\t\tnewCond := cond.Clone()\n\t\tnewCond.ResolveIndices(p.schema)\n\t\tnewEQConds = append(newEQConds, newCond.(*expression.ScalarFunction))\n\t}\n\teqCond := p.EqualConditions[condIndex]\n\n\totherFilter := append(expression.ScalarFuncs2Exprs(newEQConds), p.OtherConditions...)\n\n\tjoin := PhysicalMergeJoin{\n\t\tEqualConditions: []*expression.ScalarFunction{eqCond},\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: otherFilter,\n\t\tDefaultValues: p.DefaultValues,\n\t\t// Assume order for both side are the same\n\t\tDesc: lProp.props[0].desc,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tjoin.JoinType = joinType\n\n\tvar lInfo *physicalPlanInfo\n\tvar rInfo *physicalPlanInfo\n\n\t// Try no sort first\n\tlInfoEnforceSort, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tlInfoEnforceSort = enforceProperty(lProp, lInfoEnforceSort)\n\n\tlInfoNoSorted, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif lInfoNoSorted.cost < lInfoEnforceSort.cost {\n\t\tlInfo = lInfoNoSorted\n\t} else {\n\t\tlInfo = lInfoEnforceSort\n\t}\n\n\trInfoEnforceSort, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfoEnforceSort = enforceProperty(rProp, rInfoEnforceSort)\n\n\trInfoNoSorted, err := rChild.convert2PhysicalPlan(rProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif rInfoEnforceSort.cost < rInfoNoSorted.cost {\n\t\trInfo = rInfoEnforceSort\n\t} else {\n\t\trInfo = rInfoNoSorted\n\t}\n\tparentProp = join.tryConsumeOrder(parentProp, eqCond)\n\n\tresultInfo := join.matchProperty(parentProp, lInfo, rInfo)\n\t// TODO: Considering keeping order in join to remove at least\n\t// one ordering property\n\tresultInfo = enforceProperty(parentProp, resultInfo)\n\treturn resultInfo, nil\n}",
"func CommitToHash(commit flow.StateCommitment) []byte {\n\thash := make([]byte, 32)\n\tcopy(hash, commit[:])\n\n\treturn hash\n}",
"func newHash(algo HashAlgo) (h hash.Hash) {\n\tswitch algo {\n\tcase HashSha256:\n\t\t// sha256 checksum specially on ARM64 platforms or whenever\n\t\t// requested as dictated by `xl.json` entry.\n\t\th = sha256.New()\n\tcase HashBlake2b:\n\t\t// ignore the error, because New512 without a key never fails\n\t\t// New512 only returns a non-nil error, if the length of the passed\n\t\t// key > 64 bytes - but we use blake2b as hash function (no key)\n\t\th, _ = blake2b.New512(nil)\n\t// Add new hashes here.\n\tdefault:\n\t\t// Default to blake2b.\n\t\t// ignore the error, because New512 without a key never fails\n\t\t// New512 only returns a non-nil error, if the length of the passed\n\t\t// key > 64 bytes - but we use blake2b as hash function (no key)\n\t\th, _ = blake2b.New512(nil)\n\t}\n\treturn h\n}",
"func MakeHash(ruleName string, resource *output.ResourceConfig) string {\n\tsep := []byte(\"^\")\n\n\th := sha1.New()\n\n\th.Write([]byte(ruleName))\n\th.Write(sep)\n\th.Write([]byte(resource.Locator))\n\th.Write(sep)\n\th.Write([]byte(resource.Type))\n\n\thash := h.Sum(nil)\n\n\treturn fmt.Sprintf(\"%x\", hash)\n}",
"func hashRule(r *rule) string {\n\thash := sha1.New() // #nosec G401: not used for security purposes\n\tb, _ := json.Marshal(r)\n\thash.Write(b)\n\thashValue := hex.EncodeToString(hash.Sum(nil))\n\treturn hashValue[:RuleIDLength]\n}",
"func ComputeHash(cmap cmap.ConcurrentMap) (md5Hash [16]byte, items map[string]interface{}) {\n\tvar b bytes.Buffer\n\titems = cmap.Items()\n\n\tfor k, v := range items {\n\t\tfmt.Fprintf(&b, \"%s=%+v\", k, v)\n\t}\n\tmd5Hash = md5.Sum(b.Bytes())\n\n\treturn\n}",
"func (p *LogicalJoin) convert2PhysicalPlanSemi(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashSemiJoin{\n\t\tWithAux: LeftOuterSemiJoin == p.JoinType,\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tAnti: p.anti,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tif p.JoinType == SemiJoin {\n\t\tlProp = removeLimit(lProp)\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif p.JoinType == SemiJoin {\n\t\tresultInfo.count = lInfo.count * selectionFactor\n\t} else {\n\t\tresultInfo.count = lInfo.count\n\t}\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else if p.JoinType == SemiJoin {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func ComputeTemplateHash(template interface{}) string {\n\thasher := fnv.New32a()\n\n\thasher.Reset()\n\n\tprinter := spew.ConfigState{\n\t\tIndent: \" \",\n\t\tSortKeys: true,\n\t\tDisableMethods: true,\n\t\tSpewKeys: true,\n\t}\n\tprinter.Fprintf(hasher, \"%#v\", template)\n\n\treturn rand.SafeEncodeString(fmt.Sprint(hasher.Sum32()))\n}",
"func (p *NumpyParser) hashToPartition(fieldsData BlockData, rowNumber int) (int64, error) {\n\tif p.collectionInfo.PartitionKey == nil {\n\t\t// no partition key, directly return the target partition id\n\t\tif len(p.collectionInfo.PartitionIDs) != 1 {\n\t\t\treturn 0, fmt.Errorf(\"collection '%s' partition list is empty\", p.collectionInfo.Schema.Name)\n\t\t}\n\t\treturn p.collectionInfo.PartitionIDs[0], nil\n\t}\n\n\tpartitionKeyID := p.collectionInfo.PartitionKey.GetFieldID()\n\tfieldData := fieldsData[partitionKeyID]\n\tvalue := fieldData.GetRow(rowNumber)\n\tindex, err := pkToShard(value, uint32(len(p.collectionInfo.PartitionIDs)))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn p.collectionInfo.PartitionIDs[index], nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
convert2PhysicalPlan implements the LogicalPlan convert2PhysicalPlan interface.
|
func (p *LogicalAggregation) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {
planInfo, err := p.getPlanInfo(prop)
if err != nil {
return nil, errors.Trace(err)
}
if planInfo != nil {
return planInfo, nil
}
limit := prop.limit
if len(prop.props) == 0 {
planInfo, err = p.convert2PhysicalPlanHash()
if err != nil {
return nil, errors.Trace(err)
}
}
	streamInfo, err := p.convert2PhysicalPlanStream(removeLimit(prop))
	if err != nil {
		return nil, errors.Trace(err)
	}
if planInfo == nil || streamInfo.cost < planInfo.cost {
planInfo = streamInfo
}
planInfo = enforceProperty(limitProperty(limit), planInfo)
err = p.storePlanInfo(prop, planInfo)
return planInfo, errors.Trace(err)
}
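// A self-contained sketch of the cost comparison above, with illustrative
// names (planCandidate is not a planner type): candidates compete purely on
// estimated cost, and an absent candidate (nil) loses to any concrete plan,
// which is how a nil hash plan falls through to the streamed plan.
type planCandidate struct{ cost float64 }

func cheaperPlan(a, b *planCandidate) *planCandidate {
	if b == nil {
		return a
	}
	if a == nil || b.cost < a.cost {
		return b
	}
	return a
}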
|
[
"func (p *LogicalApply) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tif p.JoinType == InnerJoin || p.JoinType == LeftOuterJoin {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanLeft(&requiredProperty{}, p.JoinType == InnerJoin)\n\t} else {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanSemi(&requiredProperty{})\n\t}\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tswitch info.p.(type) {\n\tcase *PhysicalHashJoin, *PhysicalHashSemiJoin:\n\t\tap := PhysicalApply{\n\t\t\tPhysicalJoin: info.p,\n\t\t\tOuterSchema: p.corCols,\n\t\t}.init(p.allocator, p.ctx)\n\t\tap.SetChildren(info.p.Children()...)\n\t\tap.SetSchema(info.p.Schema())\n\t\tinfo.p = ap\n\tdefault:\n\t\tinfo.cost = math.MaxFloat64\n\t\tinfo.p = nil\n\t}\n\tinfo = enforceProperty(prop, info)\n\tp.storePlanInfo(prop, info)\n\treturn info, nil\n}",
"func (p *DataSource) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tinfo, err = p.tryToConvert2DummyScan(prop)\n\tif info != nil || err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tclient := p.ctx.GetClient()\n\tmemDB := infoschema.IsMemoryDB(p.DBName.L)\n\tisDistReq := !memDB && client != nil && client.SupportRequestType(kv.ReqTypeSelect, 0)\n\tif !isDistReq {\n\t\tmemTable := PhysicalMemTable{\n\t\t\tDBName: p.DBName,\n\t\t\tTable: p.tableInfo,\n\t\t\tColumns: p.Columns,\n\t\t\tTableAsName: p.TableAsName,\n\t\t}.init(p.allocator, p.ctx)\n\t\tmemTable.SetSchema(p.schema)\n\t\trb := &ranger.Builder{Sc: p.ctx.GetSessionVars().StmtCtx}\n\t\tmemTable.Ranges = rb.BuildTableRanges(ranger.FullRange)\n\t\tinfo = &physicalPlanInfo{p: memTable}\n\t\tinfo = enforceProperty(prop, info)\n\t\tp.storePlanInfo(prop, info)\n\t\treturn info, nil\n\t}\n\tindices, includeTableScan := availableIndices(p.indexHints, p.tableInfo)\n\tif includeTableScan {\n\t\tinfo, err = p.convert2TableScan(prop)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tif !includeTableScan || p.need2ConsiderIndex(prop) {\n\t\tfor _, index := range indices {\n\t\t\tindexInfo, err := p.convert2IndexScan(prop, index)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tif info == nil || indexInfo.cost < info.cost {\n\t\t\t\tinfo = indexInfo\n\t\t\t}\n\t\t}\n\t}\n\treturn info, errors.Trace(p.storePlanInfo(prop, info))\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanHash() (*physicalPlanInfo, error) {\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tdistinct := false\n\tfor _, fun := range p.AggFuncs {\n\t\tif fun.IsDistinct() {\n\t\t\tdistinct = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !distinct {\n\t\tif x, ok := childInfo.p.(physicalDistSQLPlan); ok {\n\t\t\tinfo := p.convert2PhysicalPlanFinalHash(x, childInfo)\n\t\t\tif info != nil {\n\t\t\t\treturn info, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn p.convert2PhysicalPlanCompleteHash(childInfo), nil\n}",
"func (p *LogicalJoin) convert2PhysicalPlanLeft(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tSmallTable: 1,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = LeftOuterJoin\n\t}\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tvar lInfo *physicalPlanInfo\n\tvar err error\n\tif innerJoin {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(removeLimit(lProp))\n\t} else {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(convertLimitOffsetToCount(lProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalJoin) convert2PhysicalPlanRight(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallRight := true\n\tfor _, col := range prop.props {\n\t\tif !rChild.Schema().Contains(col.col) {\n\t\t\tallRight = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = RightOuterJoin\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trProp := prop\n\tif !allRight {\n\t\trProp = &requiredProperty{}\n\t} else {\n\t\trProp = replaceColsInPropBySchema(rProp, rChild.Schema())\n\t}\n\tvar rInfo *physicalPlanInfo\n\tif innerJoin {\n\t\trInfo, err = rChild.convert2PhysicalPlan(removeLimit(rProp))\n\t} else {\n\t\trInfo, err = rChild.convert2PhysicalPlan(convertLimitOffsetToCount(rProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allRight {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *Selection) convert2PhysicalPlanEnforce(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tchild := p.children[0].(LogicalPlan)\n\tinfo, err := child.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif prop.limit != nil && len(prop.props) > 0 {\n\t\tif t, ok := info.p.(physicalDistSQLPlan); ok {\n\t\t\tt.addTopN(p.ctx, prop)\n\t\t} else if _, ok := info.p.(*Selection); !ok {\n\t\t\tinfo = p.appendSelToInfo(info)\n\t\t}\n\t\tinfo = enforceProperty(prop, info)\n\t} else if len(prop.props) != 0 {\n\t\tinfo = &physicalPlanInfo{cost: math.MaxFloat64}\n\t}\n\treturn info, nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanStream(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tfor _, aggFunc := range p.AggFuncs {\n\t\tif aggFunc.GetMode() == expression.FinalMode {\n\t\t\treturn &physicalPlanInfo{cost: math.MaxFloat64}, nil\n\t\t}\n\t}\n\tagg := PhysicalAggregation{\n\t\tAggType: StreamedAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\t// TODO: Consider distinct key.\n\tinfo := &physicalPlanInfo{cost: math.MaxFloat64}\n\tgbyCols := p.groupByCols\n\tif len(gbyCols) != len(p.GroupByItems) {\n\t\t// group by a + b is not interested in any order.\n\t\treturn info, nil\n\t}\n\tisSortKey := make([]bool, len(gbyCols))\n\tnewProp := &requiredProperty{\n\t\tprops: make([]*columnProp, 0, len(gbyCols)),\n\t}\n\tfor _, pro := range prop.props {\n\t\tidx := p.getGbyColIndex(pro.col)\n\t\tif idx == -1 {\n\t\t\treturn info, nil\n\t\t}\n\t\tisSortKey[idx] = true\n\t\t// We should add columns in aggregation in order to keep index right.\n\t\tnewProp.props = append(newProp.props, &columnProp{col: gbyCols[idx], desc: pro.desc})\n\t}\n\tnewProp.sortKeyLen = len(newProp.props)\n\tfor i, col := range gbyCols {\n\t\tif !isSortKey[i] {\n\t\t\tnewProp.props = append(newProp.props, &columnProp{col: col})\n\t\t}\n\t}\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(newProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tinfo = addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * cpuFactor\n\tinfo.count = info.count * aggFactor\n\treturn info, nil\n}",
"func (p *LogicalJoin) convert2PhysicalPlanSemi(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashSemiJoin{\n\t\tWithAux: LeftOuterSemiJoin == p.JoinType,\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tAnti: p.anti,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tif p.JoinType == SemiJoin {\n\t\tlProp = removeLimit(lProp)\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif p.JoinType == SemiJoin {\n\t\tresultInfo.count = lInfo.count * selectionFactor\n\t} else {\n\t\tresultInfo.count = lInfo.count\n\t}\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else if p.JoinType == SemiJoin {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalJoin) convert2PhysicalMergeJoin(parentProp *requiredProperty, lProp *requiredProperty, rProp *requiredProperty, condIndex int, joinType JoinType) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\n\tnewEQConds := make([]*expression.ScalarFunction, 0, len(p.EqualConditions)-1)\n\tfor i, cond := range p.EqualConditions {\n\t\tif i == condIndex {\n\t\t\tcontinue\n\t\t}\n\t\t// prevent further index contamination\n\t\tnewCond := cond.Clone()\n\t\tnewCond.ResolveIndices(p.schema)\n\t\tnewEQConds = append(newEQConds, newCond.(*expression.ScalarFunction))\n\t}\n\teqCond := p.EqualConditions[condIndex]\n\n\totherFilter := append(expression.ScalarFuncs2Exprs(newEQConds), p.OtherConditions...)\n\n\tjoin := PhysicalMergeJoin{\n\t\tEqualConditions: []*expression.ScalarFunction{eqCond},\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: otherFilter,\n\t\tDefaultValues: p.DefaultValues,\n\t\t// Assume order for both side are the same\n\t\tDesc: lProp.props[0].desc,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tjoin.JoinType = joinType\n\n\tvar lInfo *physicalPlanInfo\n\tvar rInfo *physicalPlanInfo\n\n\t// Try no sort first\n\tlInfoEnforceSort, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tlInfoEnforceSort = enforceProperty(lProp, lInfoEnforceSort)\n\n\tlInfoNoSorted, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif lInfoNoSorted.cost < lInfoEnforceSort.cost {\n\t\tlInfo = lInfoNoSorted\n\t} else {\n\t\tlInfo = lInfoEnforceSort\n\t}\n\n\trInfoEnforceSort, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfoEnforceSort = enforceProperty(rProp, rInfoEnforceSort)\n\n\trInfoNoSorted, err := rChild.convert2PhysicalPlan(rProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif rInfoEnforceSort.cost < rInfoNoSorted.cost {\n\t\trInfo = rInfoEnforceSort\n\t} else {\n\t\trInfo = rInfoNoSorted\n\t}\n\tparentProp = join.tryConsumeOrder(parentProp, eqCond)\n\n\tresultInfo := join.matchProperty(parentProp, lInfo, rInfo)\n\t// TODO: Considering keeping order in join to remove at least\n\t// one ordering property\n\tresultInfo = enforceProperty(parentProp, resultInfo)\n\treturn resultInfo, nil\n}",
"func MakePhysicalPlan(infra *PhysicalInfrastructure) PhysicalPlan {\n\treturn PhysicalPlan{\n\t\tPhysicalInfrastructure: infra,\n\t}\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanFinalHash(x physicalDistSQLPlan, childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: FinalAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.SetSchema(p.schema)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tschema := x.addAggregation(p.ctx, agg)\n\tif schema.Len() == 0 {\n\t\treturn nil\n\t}\n\tx.(PhysicalPlan).SetSchema(schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.count = info.count * aggFactor\n\t// if we build the final aggregation, it must be the best plan.\n\tinfo.cost = 0\n\treturn info\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanCompleteHash(childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: CompleteAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * memoryFactor\n\tinfo.count = info.count * aggFactor\n\treturn info\n}",
"func (mq *metadataQuery) makePlan() (*models.PhysicalPlan, error) {\n\t//FIXME need using storage's replica state ???\n\tstorageNodes, err := mq.runtime.stateMgr.GetQueryableReplicas(mq.database)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstorageNodesLen := len(storageNodes)\n\tif storageNodesLen == 0 {\n\t\treturn nil, constants.ErrReplicaNotFound\n\t}\n\tcurBroker := mq.runtime.stateMgr.GetCurrentNode()\n\tcurBrokerIndicator := curBroker.Indicator()\n\tphysicalPlan := &models.PhysicalPlan{\n\t\tDatabase: mq.database,\n\t\tRoot: models.Root{\n\t\t\tIndicator: curBrokerIndicator,\n\t\t\tNumOfTask: int32(storageNodesLen),\n\t\t},\n\t}\n\treceivers := []models.StatelessNode{curBroker}\n\tfor storageNode, shardIDs := range storageNodes {\n\t\tphysicalPlan.AddLeaf(models.Leaf{\n\t\t\tBaseNode: models.BaseNode{\n\t\t\t\tParent: curBrokerIndicator,\n\t\t\t\tIndicator: storageNode,\n\t\t\t},\n\t\t\tShardIDs: shardIDs,\n\t\t\tReceivers: receivers,\n\t\t})\n\t}\n\treturn physicalPlan, nil\n}",
"func NewLogicalPlan(transforms parser.Nodes, edges parser.Edges) (LogicalPlan, error) {\n\tlp := LogicalPlan{\n\t\tSteps: make(map[parser.NodeID]LogicalStep),\n\t\tPipeline: make([]parser.NodeID, 0, len(transforms)),\n\t}\n\n\t// Create all steps\n\tfor _, transform := range transforms {\n\t\tlp.Steps[transform.ID] = LogicalStep{\n\t\t\tTransform: transform,\n\t\t\tParents: make([]parser.NodeID, 0, 1),\n\t\t\tChildren: make([]parser.NodeID, 0, 1),\n\t\t}\n\t\tlp.Pipeline = append(lp.Pipeline, transform.ID)\n\t}\n\n\t// Link all parent/children\n\tfor _, edge := range edges {\n\t\tparent, ok := lp.Steps[edge.ParentID]\n\t\tif !ok {\n\t\t\treturn LogicalPlan{}, fmt.Errorf(\"invalid DAG found, parent %s not found for child %s\", edge.ParentID, edge.ChildID)\n\t\t}\n\n\t\tchild, ok := lp.Steps[edge.ChildID]\n\t\tif !ok {\n\t\t\treturn LogicalPlan{}, fmt.Errorf(\"invalid DAG found, child %s not found for parent %s\", edge.ChildID, edge.ParentID)\n\t\t}\n\n\t\tparent.Children = append(parent.Children, child.ID())\n\t\tchild.Parents = append(child.Parents, parent.ID())\n\t\t// Write back since we are doing copy instead reference\n\t\tlp.Steps[edge.ParentID] = parent\n\t\tlp.Steps[edge.ChildID] = child\n\t}\n\n\treturn lp, nil\n}",
"func NewPhysicalPlanner(options ...PhysicalOption) PhysicalPlanner {\n\tpp := &physicalPlanner{\n\t\theuristicPlannerPhysical: newHeuristicPlanner(),\n\t\theuristicPlannerParallel: newHeuristicPlanner(),\n\t\tdefaultMemoryLimit: math.MaxInt64,\n\t}\n\n\trulesPhysical := make([]Rule, len(ruleNameToPhysicalRule))\n\ti := 0\n\tfor _, v := range ruleNameToPhysicalRule {\n\t\trulesPhysical[i] = v\n\t\ti++\n\t}\n\n\trulesParallel := make([]Rule, len(ruleNameToParallelizeRules))\n\ti = 0\n\tfor _, v := range ruleNameToParallelizeRules {\n\t\trulesParallel[i] = v\n\t\ti++\n\t}\n\n\tpp.heuristicPlannerPhysical.addRules(rulesPhysical...)\n\n\tpp.heuristicPlannerPhysical.addRules(physicalConverterRule{})\n\n\tpp.heuristicPlannerParallel.addRules(rulesParallel...)\n\n\t// Options may add or remove rules, so process them after we've\n\t// added registered rules.\n\tfor _, opt := range options {\n\t\topt.apply(pp)\n\t}\n\n\treturn pp\n}",
"func CreatePlan(ctx context.Context, inputPath, outputPath string, customizationsPath, prjName string) plantypes.Plan {\n\tlogrus.Debugf(\"Temp Dir : %s\", common.TempPath)\n\tp := plantypes.NewPlan()\n\tp.Name = prjName\n\tp.Spec.RootDir = inputPath\n\tp.Spec.CustomizationsDir = customizationsPath\n\tif customizationsPath != \"\" {\n\t\tcommon.CheckAndCopyCustomizations(customizationsPath)\n\t}\n\tlogrus.Infoln(\"Loading Configuration\")\n\tconfigurationLoaders := configuration.GetLoaders()\n\tfor _, l := range configurationLoaders {\n\t\tlogrus.Infof(\"[%T] Loading configuration\", l)\n\t\terr := l.UpdatePlan(&p)\n\t\tif err != nil {\n\t\t\tlogrus.Warnf(\"[%T] Failed : %s\", l, err)\n\t\t} else {\n\t\t\tlogrus.Infof(\"[%T] Done\", l)\n\t\t}\n\t}\n\ttc, err := (&configuration.ClusterMDLoader{}).GetTargetClusterMetadataForPlan(p)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Unable to load cluster metadata : %s\", err)\n\t}\n\ttransformer.Init(common.AssetsPath, inputPath, tc, outputPath, p.Name)\n\tts := transformer.GetTransformers()\n\tfor tn, t := range ts {\n\t\tconfig, _ := t.GetConfig()\n\t\tp.Spec.Configuration.Transformers[tn] = config.Spec.FilePath\n\t}\n\tlogrus.Infoln(\"Configuration loading done\")\n\n\tp.Spec.Services, err = transformer.GetServices(p.Name, inputPath)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Unable to create plan : %s\", err)\n\t}\n\tlogrus.Infof(\"No of services identified : %d\", len(p.Spec.Services))\n\treturn p\n}",
"func addPlanToResponse(parent PhysicalPlan, info *physicalPlanInfo) *physicalPlanInfo {\n\tnp := parent.Copy()\n\tnp.SetChildren(info.p)\n\tret := &physicalPlanInfo{p: np, cost: info.cost, count: info.count, reliable: info.reliable}\n\tif _, ok := parent.(*MaxOneRow); ok {\n\t\tret.count = 1\n\t\tret.reliable = true\n\t}\n\treturn ret\n}",
"func MakePhysicalInfrastructure(\n\tflowID uuid.UUID, gatewayNodeID roachpb.NodeID,\n) PhysicalInfrastructure {\n\treturn PhysicalInfrastructure{\n\t\tFlowID: flowID,\n\t\tGatewayNodeID: gatewayNodeID,\n\t}\n}",
"func convertToPipelineLimitation(lim process.Limitation) *pipeline.ProcessLimitation {\n\treturn &pipeline.ProcessLimitation{\n\t\tSize: lim.Size,\n\t\tBatchRows: lim.BatchRows,\n\t\tBatchSize: lim.BatchSize,\n\t\tPartitionRows: lim.PartitionRows,\n\t\tReaderSize: lim.ReaderSize,\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
makeScanController tries to build a selection that controls the filter condition of the scan below it, and returns the resulting physicalPlanInfo.
|
func (p *Selection) makeScanController() *physicalPlanInfo {
var (
child PhysicalPlan
corColConds []expression.Expression
)
ds := p.children[0].(*DataSource)
indices, _ := availableIndices(ds.indexHints, ds.tableInfo)
for _, expr := range p.Conditions {
if !expr.IsCorrelated() {
continue
}
cond := expression.PushDownNot(expr, false, nil)
corCols := extractCorColumns(cond)
for _, col := range corCols {
*col.Data = expression.One.Value
}
newCond, _ := expression.SubstituteCorCol2Constant(cond)
corColConds = append(corColConds, newCond)
}
if p.controllerStatus == controlTableScan {
ts := PhysicalTableScan{
Table: ds.tableInfo,
Columns: ds.Columns,
TableAsName: ds.TableAsName,
DBName: ds.DBName,
physicalTableSource: physicalTableSource{client: ds.ctx.GetClient()},
}.init(p.allocator, p.ctx)
ts.SetSchema(ds.schema)
if ds.ctx.Txn() != nil {
			ts.readOnly = ds.ctx.Txn().IsReadOnly()
} else {
ts.readOnly = true
}
child = ts
} else if p.controllerStatus == controlIndexScan {
var (
chosenPlan *PhysicalIndexScan
bestEqualCount int
)
for _, idx := range indices {
condsBackUp := make([]expression.Expression, 0, len(corColConds))
for _, cond := range corColConds {
condsBackUp = append(condsBackUp, cond.Clone())
}
_, _, accessEqualCount, _ := ranger.DetachIndexScanConditions(condsBackUp, idx)
if chosenPlan == nil || bestEqualCount < accessEqualCount {
is := PhysicalIndexScan{
Table: ds.tableInfo,
Index: idx,
Columns: ds.Columns,
TableAsName: ds.TableAsName,
OutOfOrder: true,
DBName: ds.DBName,
physicalTableSource: physicalTableSource{client: ds.ctx.GetClient()},
}.init(p.allocator, p.ctx)
is.SetSchema(ds.schema)
if is.ctx.Txn() != nil {
					is.readOnly = is.ctx.Txn().IsReadOnly()
} else {
is.readOnly = true
}
is.DoubleRead = !isCoveringIndex(is.Columns, is.Index.Columns, is.Table.PKIsHandle)
chosenPlan, bestEqualCount = is, accessEqualCount
}
}
child = chosenPlan
}
newSel := p.Copy().(*Selection)
newSel.ScanController = true
newSel.SetChildren(child)
info := &physicalPlanInfo{
p: newSel,
count: float64(ds.statisticTable.Count),
}
info.cost = info.count * selectionFactor
return info
}
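// An illustrative sketch of the index-selection heuristic above; indexMeta
// and the countEqualAccess callback are hypothetical stand-ins for
// model.IndexInfo and ranger.DetachIndexScanConditions. The loop keeps the
// index whose prefix matches the most equality conditions, mirroring the
// bestEqualCount bookkeeping in makeScanController.
type indexMeta struct{ name string }

func pickBestIndex(indices []indexMeta, countEqualAccess func(indexMeta) int) (*indexMeta, int) {
	var best *indexMeta
	bestCount := 0
	for i := range indices {
		if c := countEqualAccess(indices[i]); best == nil || c > bestCount {
			best, bestCount = &indices[i], c
		}
	}
	return best, bestCount
}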
|
[
"func (p *DataSource) tryToConvert2DummyScan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tsel, isSel := p.parents[0].(*Selection)\n\tif !isSel {\n\t\treturn nil, nil\n\t}\n\n\tfor _, cond := range sel.Conditions {\n\t\tif con, ok := cond.(*expression.Constant); ok {\n\t\t\tresult, err := expression.EvalBool([]expression.Expression{con}, nil, p.ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tif !result {\n\t\t\t\tdual := TableDual{}.init(p.allocator, p.ctx)\n\t\t\t\tdual.SetSchema(p.schema)\n\t\t\t\tinfo := &physicalPlanInfo{p: dual}\n\t\t\t\tp.storePlanInfo(prop, info)\n\t\t\t\treturn info, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, nil\n}",
"func MakePhysicalPlan(infra *PhysicalInfrastructure) PhysicalPlan {\n\treturn PhysicalPlan{\n\t\tPhysicalInfrastructure: infra,\n\t}\n}",
"func planFilter(pl plan, allowed map[step.Type]struct{}) plan {\n\tif len(allowed) == 0 {\n\t\treturn pl\n\t}\n\n\tr := make(plan, 0, len(pl))\n\tfor _, v := range pl {\n\t\tif _, ok := allowed[v.GetID().Type]; ok {\n\t\t\tr = append(r, v)\n\t\t}\n\t}\n\n\treturn r\n}",
"func (p *Planner) buildPlan(nsn types.NamespacedName, src Sourcer, destroy bool, ispec v1.InfraSpec, cspec []v1.ClusterSpec) (plan, bool) {\n\tvar pl plan\n\tvar ok bool\n\tswitch {\n\tcase destroy:\n\t\tpl, ok = p.buildDestroyPlan(nsn, src, ispec, cspec)\n\n\tdefault:\n\t\tpl, ok = p.buildCreatePlan(nsn, src, ispec, cspec, p.Client)\n\t}\n\tif !ok {\n\t\treturn nil, false\n\t}\n\n\treturn planFilter(pl, p.AllowedStepTypes), true\n}",
"func selectStrategy(s *database.Store, f *filter.Filter) strategy {\n\tf = filter.Optimize(f)\n\n\tif f == nil {\n\t\t// if there's no filter, scan everything\n\t\treturn &scanRecords{s, nil}\n\t} else if len(f.Conditions) == 0 {\n\t\t// or if the filter matches nothing, perform a noop\n\t\treturn &noop{}\n\t}\n\n\tconds := &conditions{}\n\tif _, err := f.Accept(conds); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// find which condition type is the MOST constrained\n\t//\n\t// TODO(jmalloc): the initial cost should probably be derived from the number\n\t// of documents in the store, likewise the number of keys for key-related\n\t// strategies, however I don't think this should get any more complex until\n\t// there are benchmarks in place.\n\tcheapest := math.MaxUint32\n\tvar qs strategy = &scanRecords{s, f}\n\n\tif conds.IsOneOfCondition != nil {\n\t\tcheapest = len(conds.IsOneOfCondition.Values)\n\t\tqs = &useIDFirst{s, conds}\n\t}\n\n\tif conds.HasUniqueKeyInCondition != nil {\n\t\tcost := len(conds.HasUniqueKeyInCondition.Values)\n\t\tif cost < cheapest {\n\t\t\tcheapest = cost\n\t\t\tqs = &useUniqueKeyFirst{s, conds}\n\t\t}\n\t}\n\n\tif conds.HasKeysCondition != nil {\n\t\tcost := len(conds.HasKeysCondition.Values)\n\t\tif cost < cheapest {\n\t\t\tqs = &useKeysFirst{s, conds}\n\t\t}\n\t}\n\n\treturn qs\n}",
"func Build(statement sqlparser.Statement, tables map[string]*schema.Table) (*Plan, error) {\n\tvar plan *Plan\n\tvar err error\n\n\terr = checkForPoolingUnsafeConstructs(statement)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch stmt := statement.(type) {\n\tcase *sqlparser.Union:\n\t\tplan, err = &Plan{\n\t\t\tPlanID: PlanPassSelect,\n\t\t\tFieldQuery: GenerateFieldQuery(stmt),\n\t\t\tFullQuery: GenerateLimitQuery(stmt),\n\t\t}, nil\n\tcase *sqlparser.Select:\n\t\tplan, err = analyzeSelect(stmt, tables)\n\tcase *sqlparser.Insert:\n\t\tplan, err = analyzeInsert(stmt, tables)\n\tcase *sqlparser.Update:\n\t\tplan, err = analyzeUpdate(stmt, tables)\n\tcase *sqlparser.Delete:\n\t\tplan, err = analyzeDelete(stmt, tables)\n\tcase *sqlparser.Set:\n\t\tplan, err = analyzeSet(stmt), nil\n\tcase *sqlparser.DDL:\n\t\tplan, err = analyzeDDL(stmt, tables), nil\n\tcase *sqlparser.Show:\n\t\tplan, err = &Plan{PlanID: PlanOtherRead}, nil\n\tcase *sqlparser.OtherRead:\n\t\tplan, err = &Plan{PlanID: PlanOtherRead}, nil\n\tcase *sqlparser.OtherAdmin:\n\t\tplan, err = &Plan{PlanID: PlanOtherAdmin}, nil\n\tdefault:\n\t\treturn nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, \"invalid SQL\")\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tplan.Permissions = BuildPermissions(statement)\n\treturn plan, nil\n}",
"func MakeFilterMakerMode(exchangeShim api.ExchangeShim, sdex *SDEX, tradingPair *model.TradingPair) SubmitFilter {\n\treturn &makerModeFilter{\n\t\tname: \"makeModeFilter\",\n\t\ttradingPair: tradingPair,\n\t\texchangeShim: exchangeShim,\n\t\tsdex: sdex,\n\t}\n}",
"func (_Comptroller *ComptrollerFilterer) WatchNewSupplyCapGuardian(opts *bind.WatchOpts, sink chan<- *ComptrollerNewSupplyCapGuardian) (event.Subscription, error) {\n\n\tlogs, sub, err := _Comptroller.contract.WatchLogs(opts, \"NewSupplyCapGuardian\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(ComptrollerNewSupplyCapGuardian)\n\t\t\t\tif err := _Comptroller.contract.UnpackLog(event, \"NewSupplyCapGuardian\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (mq *metadataQuery) makePlan() (*models.PhysicalPlan, error) {\n\t//FIXME need using storage's replica state ???\n\tstorageNodes, err := mq.runtime.stateMgr.GetQueryableReplicas(mq.database)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstorageNodesLen := len(storageNodes)\n\tif storageNodesLen == 0 {\n\t\treturn nil, constants.ErrReplicaNotFound\n\t}\n\tcurBroker := mq.runtime.stateMgr.GetCurrentNode()\n\tcurBrokerIndicator := curBroker.Indicator()\n\tphysicalPlan := &models.PhysicalPlan{\n\t\tDatabase: mq.database,\n\t\tRoot: models.Root{\n\t\t\tIndicator: curBrokerIndicator,\n\t\t\tNumOfTask: int32(storageNodesLen),\n\t\t},\n\t}\n\treceivers := []models.StatelessNode{curBroker}\n\tfor storageNode, shardIDs := range storageNodes {\n\t\tphysicalPlan.AddLeaf(models.Leaf{\n\t\t\tBaseNode: models.BaseNode{\n\t\t\t\tParent: curBrokerIndicator,\n\t\t\t\tIndicator: storageNode,\n\t\t\t},\n\t\t\tShardIDs: shardIDs,\n\t\t\tReceivers: receivers,\n\t\t})\n\t}\n\treturn physicalPlan, nil\n}",
"func NewCommandScan(name string) *cobra.Command {\n\toptions := newDefaultClamScanOptions()\n\n\tcmd := &cobra.Command{\n\t\tUse: \"scan\",\n\t\tShort: \"Scans files using clamd\",\n\t\tLong: `Scan files for viruses using Clamav by traversing the provided path and\nsubmitting file descriptors via a Unix domain socket to a clamd process.`,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tif err := options.Complete(); err != nil {\n\t\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif err := options.Validate(); err != nil {\n\t\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif err := options.Run(); err != nil {\n\t\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t},\n\t}\n\n\toptions.Bind(cmd.Flags())\n\n\treturn cmd\n}",
"func createPaginatedControl(query elastic.Query, filters map[string][]string) (*elastic.NestedQuery, error) {\n\tfrom, size, err := paginatedParams(filters)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnestedQuery := elastic.NewNestedQuery(\"profiles.controls\", query)\n\tnestedQuery = nestedQuery.InnerHit(elastic.NewInnerHit().From(from).Size(size))\n\treturn nestedQuery, nil\n}",
"func (_Comptroller *ComptrollerFilterer) FilterNewSupplyCapGuardian(opts *bind.FilterOpts) (*ComptrollerNewSupplyCapGuardianIterator, error) {\n\n\tlogs, sub, err := _Comptroller.contract.FilterLogs(opts, \"NewSupplyCapGuardian\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ComptrollerNewSupplyCapGuardianIterator{contract: _Comptroller.contract, event: \"NewSupplyCapGuardian\", logs: logs, sub: sub}, nil\n}",
"func (_Comptroller *ComptrollerFilterer) FilterNewPauseGuardian(opts *bind.FilterOpts) (*ComptrollerNewPauseGuardianIterator, error) {\n\n\tlogs, sub, err := _Comptroller.contract.FilterLogs(opts, \"NewPauseGuardian\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ComptrollerNewPauseGuardianIterator{contract: _Comptroller.contract, event: \"NewPauseGuardian\", logs: logs, sub: sub}, nil\n}",
"func (_Comptroller *ComptrollerFilterer) WatchNewPauseGuardian(opts *bind.WatchOpts, sink chan<- *ComptrollerNewPauseGuardian) (event.Subscription, error) {\n\n\tlogs, sub, err := _Comptroller.contract.WatchLogs(opts, \"NewPauseGuardian\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(ComptrollerNewPauseGuardian)\n\t\t\t\tif err := _Comptroller.contract.UnpackLog(event, \"NewPauseGuardian\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (p *Planner) Plan() (*ir.Policy, error) {\n\n\tfor _, q := range p.queries {\n\t\tp.curr = &ir.Block{}\n\t\tdefined := false\n\n\t\tif err := p.planQuery(q, 0, func() error {\n\t\t\tp.appendStmt(ir.ReturnStmt{\n\t\t\t\tCode: ir.Defined,\n\t\t\t})\n\t\t\tdefined = true\n\t\t\treturn nil\n\t\t}); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif defined {\n\t\t\tp.blocks = append(p.blocks, *p.curr)\n\t\t}\n\t}\n\n\tp.blocks = append(p.blocks, ir.Block{\n\t\tStmts: []ir.Stmt{\n\t\t\tir.ReturnStmt{\n\t\t\t\tCode: ir.Undefined,\n\t\t\t},\n\t\t},\n\t})\n\n\tpolicy := ir.Policy{\n\t\tStatic: ir.Static{\n\t\t\tStrings: p.strings,\n\t\t},\n\t\tPlan: ir.Plan{\n\t\t\tBlocks: p.blocks,\n\t\t},\n\t}\n\n\treturn &policy, nil\n}",
"func (m *stateManager) Choose(database string, numOfNodes int) ([]*models.PhysicalPlan, error) {\n\t// FIXME: need using storage's replica state ???\n\treplicas, err := m.GetQueryableReplicas(database)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnodesLen := len(replicas)\n\tif nodesLen == 0 {\n\t\treturn nil, constants.ErrReplicaNotFound\n\t}\n\tif numOfNodes > 1 && nodesLen > 1 {\n\t\t// build compute target nodes.\n\t\treturn []*models.PhysicalPlan{flow.BuildPhysicalPlan(database, m.GetLiveNodes(), numOfNodes)}, nil\n\t}\n\t// build leaf storage nodes.\n\tphysicalPlan := &models.PhysicalPlan{\n\t\tDatabase: database,\n\t}\n\tfor storageNode, shardIDs := range replicas {\n\t\tphysicalPlan.AddTarget(&models.Target{\n\t\t\tIndicator: storageNode,\n\t\t\tShardIDs: shardIDs,\n\t\t})\n\t}\n\treturn []*models.PhysicalPlan{physicalPlan}, nil\n}",
"func NewScan(target Target) (Result, error) {\n\tnewOptions := Initialize(target, target.Options)\n\tmodelResult, err := scanning.Scan(target.URL, newOptions, \"Single\")\n\tresult := Result{\n\t\tLogs: modelResult.Logs,\n\t\tPoCs: modelResult.PoCs,\n\t\tDuration: modelResult.Duration,\n\t\tStartTime: modelResult.StartTime,\n\t\tEndTime: modelResult.EndTime,\n\t}\n\treturn result, err\n}",
"func (c *Config) buildFilter(tbl *ast.Table) (models.Filter, error) {\n\tf := models.Filter{}\n\n\tc.getFieldStringSlice(tbl, \"namepass\", &f.NamePass)\n\tc.getFieldStringSlice(tbl, \"namedrop\", &f.NameDrop)\n\n\tc.getFieldStringSlice(tbl, \"pass\", &f.FieldPass)\n\tc.getFieldStringSlice(tbl, \"fieldpass\", &f.FieldPass)\n\n\tc.getFieldStringSlice(tbl, \"drop\", &f.FieldDrop)\n\tc.getFieldStringSlice(tbl, \"fielddrop\", &f.FieldDrop)\n\n\tc.getFieldTagFilter(tbl, \"tagpass\", &f.TagPassFilters)\n\tc.getFieldTagFilter(tbl, \"tagdrop\", &f.TagDropFilters)\n\n\tc.getFieldStringSlice(tbl, \"tagexclude\", &f.TagExclude)\n\tc.getFieldStringSlice(tbl, \"taginclude\", &f.TagInclude)\n\n\tc.getFieldString(tbl, \"metricpass\", &f.MetricPass)\n\n\tif c.hasErrs() {\n\t\treturn f, c.firstErr()\n\t}\n\n\tif err := f.Compile(); err != nil {\n\t\treturn f, err\n\t}\n\n\treturn f, nil\n}",
"func MakePlan(config *Config) (*Plan, error) {\n\tp := &Plan{\n\t\tOpCounts: make(map[OpType]int),\n\t\tdirRewriteMap: make(map[string]string),\n\t\tskipMap: make(map[string]bool),\n\t\toverwriteMap: make(map[string]bool),\n\t}\n\n\terr := addCleanRelPathsToFileMap(p.skipMap, config.SkipFiles)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = addCleanRelPathsToFileMap(p.overwriteMap, config.OverwriteFiles)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = p.makeTemplateValues(config, config.Skeleton)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = p.makeOperations(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn p, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
convert2PhysicalPlanEnforce converts a selection to a physicalPlanInfo that does not push the required property down to the child, but enforces the property on top instead.
|
func (p *Selection) convert2PhysicalPlanEnforce(prop *requiredProperty) (*physicalPlanInfo, error) {
child := p.children[0].(LogicalPlan)
info, err := child.convert2PhysicalPlan(&requiredProperty{})
if err != nil {
return nil, errors.Trace(err)
}
if prop.limit != nil && len(prop.props) > 0 {
if t, ok := info.p.(physicalDistSQLPlan); ok {
t.addTopN(p.ctx, prop)
} else if _, ok := info.p.(*Selection); !ok {
info = p.appendSelToInfo(info)
}
info = enforceProperty(prop, info)
} else if len(prop.props) != 0 {
info = &physicalPlanInfo{cost: math.MaxFloat64}
}
return info, nil
}
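// A sketch of the pruning convention in the final branch above, using the
// standard math package and an illustrative prunedInfo type: a branch that
// cannot satisfy the required sort order is priced at math.MaxFloat64, so
// any feasible alternative wins every later cost comparison.
type prunedInfo struct{ cost float64 }

func pruneBranch() prunedInfo {
	return prunedInfo{cost: math.MaxFloat64}
}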
|
[
"func (p *LogicalApply) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tif p.JoinType == InnerJoin || p.JoinType == LeftOuterJoin {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanLeft(&requiredProperty{}, p.JoinType == InnerJoin)\n\t} else {\n\t\tinfo, err = p.LogicalJoin.convert2PhysicalPlanSemi(&requiredProperty{})\n\t}\n\tif err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tswitch info.p.(type) {\n\tcase *PhysicalHashJoin, *PhysicalHashSemiJoin:\n\t\tap := PhysicalApply{\n\t\t\tPhysicalJoin: info.p,\n\t\t\tOuterSchema: p.corCols,\n\t\t}.init(p.allocator, p.ctx)\n\t\tap.SetChildren(info.p.Children()...)\n\t\tap.SetSchema(info.p.Schema())\n\t\tinfo.p = ap\n\tdefault:\n\t\tinfo.cost = math.MaxFloat64\n\t\tinfo.p = nil\n\t}\n\tinfo = enforceProperty(prop, info)\n\tp.storePlanInfo(prop, info)\n\treturn info, nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tplanInfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif planInfo != nil {\n\t\treturn planInfo, nil\n\t}\n\tlimit := prop.limit\n\tif len(prop.props) == 0 {\n\t\tplanInfo, err = p.convert2PhysicalPlanHash()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tstreamInfo, err := p.convert2PhysicalPlanStream(removeLimit(prop))\n\tif planInfo == nil || streamInfo.cost < planInfo.cost {\n\t\tplanInfo = streamInfo\n\t}\n\tplanInfo = enforceProperty(limitProperty(limit), planInfo)\n\terr = p.storePlanInfo(prop, planInfo)\n\treturn planInfo, errors.Trace(err)\n}",
"func (p *LogicalJoin) convert2PhysicalPlanSemi(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashSemiJoin{\n\t\tWithAux: LeftOuterSemiJoin == p.JoinType,\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tAnti: p.anti,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tif p.JoinType == SemiJoin {\n\t\tlProp = removeLimit(lProp)\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif p.JoinType == SemiJoin {\n\t\tresultInfo.count = lInfo.count * selectionFactor\n\t} else {\n\t\tresultInfo.count = lInfo.count\n\t}\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else if p.JoinType == SemiJoin {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *DataSource) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tinfo, err = p.tryToConvert2DummyScan(prop)\n\tif info != nil || err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tclient := p.ctx.GetClient()\n\tmemDB := infoschema.IsMemoryDB(p.DBName.L)\n\tisDistReq := !memDB && client != nil && client.SupportRequestType(kv.ReqTypeSelect, 0)\n\tif !isDistReq {\n\t\tmemTable := PhysicalMemTable{\n\t\t\tDBName: p.DBName,\n\t\t\tTable: p.tableInfo,\n\t\t\tColumns: p.Columns,\n\t\t\tTableAsName: p.TableAsName,\n\t\t}.init(p.allocator, p.ctx)\n\t\tmemTable.SetSchema(p.schema)\n\t\trb := &ranger.Builder{Sc: p.ctx.GetSessionVars().StmtCtx}\n\t\tmemTable.Ranges = rb.BuildTableRanges(ranger.FullRange)\n\t\tinfo = &physicalPlanInfo{p: memTable}\n\t\tinfo = enforceProperty(prop, info)\n\t\tp.storePlanInfo(prop, info)\n\t\treturn info, nil\n\t}\n\tindices, includeTableScan := availableIndices(p.indexHints, p.tableInfo)\n\tif includeTableScan {\n\t\tinfo, err = p.convert2TableScan(prop)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tif !includeTableScan || p.need2ConsiderIndex(prop) {\n\t\tfor _, index := range indices {\n\t\t\tindexInfo, err := p.convert2IndexScan(prop, index)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tif info == nil || indexInfo.cost < info.cost {\n\t\t\t\tinfo = indexInfo\n\t\t\t}\n\t\t}\n\t}\n\treturn info, errors.Trace(p.storePlanInfo(prop, info))\n}",
"func (p *LogicalJoin) convert2PhysicalPlanLeft(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tSmallTable: 1,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = LeftOuterJoin\n\t}\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tvar lInfo *physicalPlanInfo\n\tvar err error\n\tif innerJoin {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(removeLimit(lProp))\n\t} else {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(convertLimitOffsetToCount(lProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func enforceProperty(prop *requiredProperty, info *physicalPlanInfo) *physicalPlanInfo {\n\tif info.p == nil {\n\t\treturn info\n\t}\n\tif len(prop.props) != 0 {\n\t\titems := make([]*ByItems, 0, len(prop.props))\n\t\tfor _, col := range prop.props {\n\t\t\titems = append(items, &ByItems{Expr: col.col, Desc: col.desc})\n\t\t}\n\t\tsort := Sort{\n\t\t\tByItems: items,\n\t\t\tExecLimit: prop.limit,\n\t\t}.init(info.p.Allocator(), info.p.context())\n\t\tsort.SetSchema(info.p.Schema())\n\t\tinfo = addPlanToResponse(sort, info)\n\n\t\tcount := info.count\n\t\tif prop.limit != nil {\n\t\t\tcount = float64(prop.limit.Offset + prop.limit.Count)\n\t\t\tinfo.reliable = true\n\t\t}\n\t\tinfo.cost += sortCost(count)\n\t} else if prop.limit != nil {\n\t\tlimit := Limit{Offset: prop.limit.Offset, Count: prop.limit.Count}.init(info.p.Allocator(), info.p.context())\n\t\tlimit.SetSchema(info.p.Schema())\n\t\tinfo = addPlanToResponse(limit, info)\n\t\tinfo.reliable = true\n\t}\n\tif prop.limit != nil && float64(prop.limit.Count) < info.count {\n\t\tinfo.count = float64(prop.limit.Count)\n\t}\n\treturn info\n}",
"func (p *LogicalJoin) convert2PhysicalPlanRight(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallRight := true\n\tfor _, col := range prop.props {\n\t\tif !rChild.Schema().Contains(col.col) {\n\t\t\tallRight = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = RightOuterJoin\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trProp := prop\n\tif !allRight {\n\t\trProp = &requiredProperty{}\n\t} else {\n\t\trProp = replaceColsInPropBySchema(rProp, rChild.Schema())\n\t}\n\tvar rInfo *physicalPlanInfo\n\tif innerJoin {\n\t\trInfo, err = rChild.convert2PhysicalPlan(removeLimit(rProp))\n\t} else {\n\t\trInfo, err = rChild.convert2PhysicalPlan(convertLimitOffsetToCount(rProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allRight {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanHash() (*physicalPlanInfo, error) {\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tdistinct := false\n\tfor _, fun := range p.AggFuncs {\n\t\tif fun.IsDistinct() {\n\t\t\tdistinct = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !distinct {\n\t\tif x, ok := childInfo.p.(physicalDistSQLPlan); ok {\n\t\t\tinfo := p.convert2PhysicalPlanFinalHash(x, childInfo)\n\t\t\tif info != nil {\n\t\t\t\treturn info, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn p.convert2PhysicalPlanCompleteHash(childInfo), nil\n}",
"func convertToPolicyCheck(in *CompCheck) *PolicyCheck {\n\tvar out PolicyCheck\n\n\tout.NodeId = in.NodeId\n\tout.NodeArch = in.NodeArch\n\tout.NodeType = in.NodeType\n\tout.NodeClusterNS = in.NodeClusterNS\n\tout.NodeNamespaceScoped = in.NodeNamespaceScoped\n\tout.NodePolicy = in.NodePolicy\n\tout.BusinessPolId = in.BusinessPolId\n\tout.BusinessPolicy = in.BusinessPolicy\n\tout.ServicePolicy = in.ServicePolicy\n\tout.Service = in.Service\n\n\treturn &out\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanStream(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tfor _, aggFunc := range p.AggFuncs {\n\t\tif aggFunc.GetMode() == expression.FinalMode {\n\t\t\treturn &physicalPlanInfo{cost: math.MaxFloat64}, nil\n\t\t}\n\t}\n\tagg := PhysicalAggregation{\n\t\tAggType: StreamedAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\t// TODO: Consider distinct key.\n\tinfo := &physicalPlanInfo{cost: math.MaxFloat64}\n\tgbyCols := p.groupByCols\n\tif len(gbyCols) != len(p.GroupByItems) {\n\t\t// group by a + b is not interested in any order.\n\t\treturn info, nil\n\t}\n\tisSortKey := make([]bool, len(gbyCols))\n\tnewProp := &requiredProperty{\n\t\tprops: make([]*columnProp, 0, len(gbyCols)),\n\t}\n\tfor _, pro := range prop.props {\n\t\tidx := p.getGbyColIndex(pro.col)\n\t\tif idx == -1 {\n\t\t\treturn info, nil\n\t\t}\n\t\tisSortKey[idx] = true\n\t\t// We should add columns in aggregation in order to keep index right.\n\t\tnewProp.props = append(newProp.props, &columnProp{col: gbyCols[idx], desc: pro.desc})\n\t}\n\tnewProp.sortKeyLen = len(newProp.props)\n\tfor i, col := range gbyCols {\n\t\tif !isSortKey[i] {\n\t\t\tnewProp.props = append(newProp.props, &columnProp{col: col})\n\t\t}\n\t}\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(newProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tinfo = addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * cpuFactor\n\tinfo.count = info.count * aggFactor\n\treturn info, nil\n}",
"func addPlanToResponse(parent PhysicalPlan, info *physicalPlanInfo) *physicalPlanInfo {\n\tnp := parent.Copy()\n\tnp.SetChildren(info.p)\n\tret := &physicalPlanInfo{p: np, cost: info.cost, count: info.count, reliable: info.reliable}\n\tif _, ok := parent.(*MaxOneRow); ok {\n\t\tret.count = 1\n\t\tret.reliable = true\n\t}\n\treturn ret\n}",
"func convertToPipelineLimitation(lim process.Limitation) *pipeline.ProcessLimitation {\n\treturn &pipeline.ProcessLimitation{\n\t\tSize: lim.Size,\n\t\tBatchRows: lim.BatchRows,\n\t\tBatchSize: lim.BatchSize,\n\t\tPartitionRows: lim.PartitionRows,\n\t\tReaderSize: lim.ReaderSize,\n\t}\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanFinalHash(x physicalDistSQLPlan, childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: FinalAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.SetSchema(p.schema)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tschema := x.addAggregation(p.ctx, agg)\n\tif schema.Len() == 0 {\n\t\treturn nil\n\t}\n\tx.(PhysicalPlan).SetSchema(schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.count = info.count * aggFactor\n\t// if we build the final aggregation, it must be the best plan.\n\tinfo.cost = 0\n\treturn info\n}",
"func (p *LogicalJoin) convert2PhysicalMergeJoin(parentProp *requiredProperty, lProp *requiredProperty, rProp *requiredProperty, condIndex int, joinType JoinType) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\n\tnewEQConds := make([]*expression.ScalarFunction, 0, len(p.EqualConditions)-1)\n\tfor i, cond := range p.EqualConditions {\n\t\tif i == condIndex {\n\t\t\tcontinue\n\t\t}\n\t\t// prevent further index contamination\n\t\tnewCond := cond.Clone()\n\t\tnewCond.ResolveIndices(p.schema)\n\t\tnewEQConds = append(newEQConds, newCond.(*expression.ScalarFunction))\n\t}\n\teqCond := p.EqualConditions[condIndex]\n\n\totherFilter := append(expression.ScalarFuncs2Exprs(newEQConds), p.OtherConditions...)\n\n\tjoin := PhysicalMergeJoin{\n\t\tEqualConditions: []*expression.ScalarFunction{eqCond},\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: otherFilter,\n\t\tDefaultValues: p.DefaultValues,\n\t\t// Assume order for both side are the same\n\t\tDesc: lProp.props[0].desc,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tjoin.JoinType = joinType\n\n\tvar lInfo *physicalPlanInfo\n\tvar rInfo *physicalPlanInfo\n\n\t// Try no sort first\n\tlInfoEnforceSort, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tlInfoEnforceSort = enforceProperty(lProp, lInfoEnforceSort)\n\n\tlInfoNoSorted, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif lInfoNoSorted.cost < lInfoEnforceSort.cost {\n\t\tlInfo = lInfoNoSorted\n\t} else {\n\t\tlInfo = lInfoEnforceSort\n\t}\n\n\trInfoEnforceSort, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfoEnforceSort = enforceProperty(rProp, rInfoEnforceSort)\n\n\trInfoNoSorted, err := rChild.convert2PhysicalPlan(rProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif rInfoEnforceSort.cost < rInfoNoSorted.cost {\n\t\trInfo = rInfoEnforceSort\n\t} else {\n\t\trInfo = rInfoNoSorted\n\t}\n\tparentProp = join.tryConsumeOrder(parentProp, eqCond)\n\n\tresultInfo := join.matchProperty(parentProp, lInfo, rInfo)\n\t// TODO: Considering keeping order in join to remove at least\n\t// one ordering property\n\tresultInfo = enforceProperty(parentProp, resultInfo)\n\treturn resultInfo, nil\n}",
"func TurnNominalSortIntoProj(p PhysicalPlan, onlyColumn bool, orderByItems []*util.ByItems) PhysicalPlan {\n\tif onlyColumn {\n\t\treturn p.Children()[0]\n\t}\n\n\tnumOrderByItems := len(orderByItems)\n\tchildPlan := p.Children()[0]\n\n\tbottomProjSchemaCols := make([]*expression.Column, 0, len(childPlan.Schema().Columns)+numOrderByItems)\n\tbottomProjExprs := make([]expression.Expression, 0, len(childPlan.Schema().Columns)+numOrderByItems)\n\tfor _, col := range childPlan.Schema().Columns {\n\t\tnewCol := col.Clone().(*expression.Column)\n\t\tnewCol.Index = childPlan.Schema().ColumnIndex(newCol)\n\t\tbottomProjSchemaCols = append(bottomProjSchemaCols, newCol)\n\t\tbottomProjExprs = append(bottomProjExprs, newCol)\n\t}\n\n\tfor _, item := range orderByItems {\n\t\titemExpr := item.Expr\n\t\tif _, isScalarFunc := itemExpr.(*expression.ScalarFunction); !isScalarFunc {\n\t\t\tcontinue\n\t\t}\n\t\tbottomProjExprs = append(bottomProjExprs, itemExpr)\n\t\tnewArg := &expression.Column{\n\t\t\tUniqueID: p.SCtx().GetSessionVars().AllocPlanColumnID(),\n\t\t\tRetType: itemExpr.GetType(),\n\t\t\tIndex: len(bottomProjSchemaCols),\n\t\t}\n\t\tbottomProjSchemaCols = append(bottomProjSchemaCols, newArg)\n\t}\n\n\tchildProp := p.GetChildReqProps(0).CloneEssentialFields()\n\tbottomProj := PhysicalProjection{\n\t\tExprs: bottomProjExprs,\n\t\tAvoidColumnEvaluator: false,\n\t}.Init(p.SCtx(), childPlan.StatsInfo().ScaleByExpectCnt(childProp.ExpectedCnt), p.SelectBlockOffset(), childProp)\n\tbottomProj.SetSchema(expression.NewSchema(bottomProjSchemaCols...))\n\tbottomProj.SetChildren(childPlan)\n\n\ttopProjExprs := make([]expression.Expression, 0, childPlan.Schema().Len())\n\tfor i := range childPlan.Schema().Columns {\n\t\tcol := childPlan.Schema().Columns[i].Clone().(*expression.Column)\n\t\tcol.Index = i\n\t\ttopProjExprs = append(topProjExprs, col)\n\t}\n\ttopProj := PhysicalProjection{\n\t\tExprs: topProjExprs,\n\t\tAvoidColumnEvaluator: false,\n\t}.Init(p.SCtx(), childPlan.StatsInfo().ScaleByExpectCnt(childProp.ExpectedCnt), p.SelectBlockOffset(), childProp)\n\ttopProj.SetSchema(childPlan.Schema().Clone())\n\ttopProj.SetChildren(bottomProj)\n\n\tif origChildProj, isChildProj := childPlan.(*PhysicalProjection); isChildProj {\n\t\trefine4NeighbourProj(bottomProj, origChildProj)\n\t}\n\n\treturn topProj\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanCompleteHash(childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: CompleteAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * memoryFactor\n\tinfo.count = info.count * aggFactor\n\treturn info\n}",
"func (p *DataSource) tryToConvert2DummyScan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tsel, isSel := p.parents[0].(*Selection)\n\tif !isSel {\n\t\treturn nil, nil\n\t}\n\n\tfor _, cond := range sel.Conditions {\n\t\tif con, ok := cond.(*expression.Constant); ok {\n\t\t\tresult, err := expression.EvalBool([]expression.Expression{con}, nil, p.ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tif !result {\n\t\t\t\tdual := TableDual{}.init(p.allocator, p.ctx)\n\t\t\t\tdual.SetSchema(p.schema)\n\t\t\t\tinfo := &physicalPlanInfo{p: dual}\n\t\t\t\tp.storePlanInfo(prop, info)\n\t\t\t\treturn info, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, nil\n}",
"func convertToProcessLimitation(lim *pipeline.ProcessLimitation) process.Limitation {\n\treturn process.Limitation{\n\t\tSize: lim.Size,\n\t\tBatchRows: lim.BatchRows,\n\t\tBatchSize: lim.BatchSize,\n\t\tPartitionRows: lim.PartitionRows,\n\t\tReaderSize: lim.ReaderSize,\n\t}\n}",
"func MakePhysicalPlan(infra *PhysicalInfrastructure) PhysicalPlan {\n\treturn PhysicalPlan{\n\t\tPhysicalInfrastructure: infra,\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
convert2PhysicalPlan implements the LogicalPlan convert2PhysicalPlan interface.
|
func (p *LogicalApply) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {
info, err := p.getPlanInfo(prop)
if err != nil {
return info, errors.Trace(err)
}
if info != nil {
return info, nil
}
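	// Reuse the hash join builders: a left hash join for inner/left-outer apply, a hash semi join otherwise.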
if p.JoinType == InnerJoin || p.JoinType == LeftOuterJoin {
info, err = p.LogicalJoin.convert2PhysicalPlanLeft(&requiredProperty{}, p.JoinType == InnerJoin)
} else {
info, err = p.LogicalJoin.convert2PhysicalPlanSemi(&requiredProperty{})
}
if err != nil {
return info, errors.Trace(err)
}
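	// Only a hash join or hash semi join can be wrapped by PhysicalApply; any other plan invalidates this choice.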
switch info.p.(type) {
case *PhysicalHashJoin, *PhysicalHashSemiJoin:
ap := PhysicalApply{
PhysicalJoin: info.p,
OuterSchema: p.corCols,
}.init(p.allocator, p.ctx)
ap.SetChildren(info.p.Children()...)
ap.SetSchema(info.p.Schema())
info.p = ap
default:
info.cost = math.MaxFloat64
info.p = nil
}
info = enforceProperty(prop, info)
p.storePlanInfo(prop, info)
return info, nil
}
|
[
"func (p *LogicalAggregation) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tplanInfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif planInfo != nil {\n\t\treturn planInfo, nil\n\t}\n\tlimit := prop.limit\n\tif len(prop.props) == 0 {\n\t\tplanInfo, err = p.convert2PhysicalPlanHash()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tstreamInfo, err := p.convert2PhysicalPlanStream(removeLimit(prop))\n\tif planInfo == nil || streamInfo.cost < planInfo.cost {\n\t\tplanInfo = streamInfo\n\t}\n\tplanInfo = enforceProperty(limitProperty(limit), planInfo)\n\terr = p.storePlanInfo(prop, planInfo)\n\treturn planInfo, errors.Trace(err)\n}",
"func (p *DataSource) convert2PhysicalPlan(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tinfo, err := p.getPlanInfo(prop)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif info != nil {\n\t\treturn info, nil\n\t}\n\tinfo, err = p.tryToConvert2DummyScan(prop)\n\tif info != nil || err != nil {\n\t\treturn info, errors.Trace(err)\n\t}\n\tclient := p.ctx.GetClient()\n\tmemDB := infoschema.IsMemoryDB(p.DBName.L)\n\tisDistReq := !memDB && client != nil && client.SupportRequestType(kv.ReqTypeSelect, 0)\n\tif !isDistReq {\n\t\tmemTable := PhysicalMemTable{\n\t\t\tDBName: p.DBName,\n\t\t\tTable: p.tableInfo,\n\t\t\tColumns: p.Columns,\n\t\t\tTableAsName: p.TableAsName,\n\t\t}.init(p.allocator, p.ctx)\n\t\tmemTable.SetSchema(p.schema)\n\t\trb := &ranger.Builder{Sc: p.ctx.GetSessionVars().StmtCtx}\n\t\tmemTable.Ranges = rb.BuildTableRanges(ranger.FullRange)\n\t\tinfo = &physicalPlanInfo{p: memTable}\n\t\tinfo = enforceProperty(prop, info)\n\t\tp.storePlanInfo(prop, info)\n\t\treturn info, nil\n\t}\n\tindices, includeTableScan := availableIndices(p.indexHints, p.tableInfo)\n\tif includeTableScan {\n\t\tinfo, err = p.convert2TableScan(prop)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\tif !includeTableScan || p.need2ConsiderIndex(prop) {\n\t\tfor _, index := range indices {\n\t\t\tindexInfo, err := p.convert2IndexScan(prop, index)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tif info == nil || indexInfo.cost < info.cost {\n\t\t\t\tinfo = indexInfo\n\t\t\t}\n\t\t}\n\t}\n\treturn info, errors.Trace(p.storePlanInfo(prop, info))\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanHash() (*physicalPlanInfo, error) {\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tdistinct := false\n\tfor _, fun := range p.AggFuncs {\n\t\tif fun.IsDistinct() {\n\t\t\tdistinct = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !distinct {\n\t\tif x, ok := childInfo.p.(physicalDistSQLPlan); ok {\n\t\t\tinfo := p.convert2PhysicalPlanFinalHash(x, childInfo)\n\t\t\tif info != nil {\n\t\t\t\treturn info, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn p.convert2PhysicalPlanCompleteHash(childInfo), nil\n}",
"func (p *LogicalJoin) convert2PhysicalPlanLeft(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tSmallTable: 1,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = LeftOuterJoin\n\t}\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tvar lInfo *physicalPlanInfo\n\tvar err error\n\tif innerJoin {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(removeLimit(lProp))\n\t} else {\n\t\tlInfo, err = lChild.convert2PhysicalPlan(convertLimitOffsetToCount(lProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalJoin) convert2PhysicalPlanRight(prop *requiredProperty, innerJoin bool) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallRight := true\n\tfor _, col := range prop.props {\n\t\tif !rChild.Schema().Contains(col.col) {\n\t\t\tallRight = false\n\t\t}\n\t}\n\tjoin := PhysicalHashJoin{\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\t// TODO: decide concurrency by data size.\n\t\tConcurrency: JoinConcurrency,\n\t\tDefaultValues: p.DefaultValues,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tif innerJoin {\n\t\tjoin.JoinType = InnerJoin\n\t} else {\n\t\tjoin.JoinType = RightOuterJoin\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trProp := prop\n\tif !allRight {\n\t\trProp = &requiredProperty{}\n\t} else {\n\t\trProp = replaceColsInPropBySchema(rProp, rChild.Schema())\n\t}\n\tvar rInfo *physicalPlanInfo\n\tif innerJoin {\n\t\trInfo, err = rChild.convert2PhysicalPlan(removeLimit(rProp))\n\t} else {\n\t\trInfo, err = rChild.convert2PhysicalPlan(convertLimitOffsetToCount(rProp))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif !allRight {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *Selection) convert2PhysicalPlanEnforce(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tchild := p.children[0].(LogicalPlan)\n\tinfo, err := child.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif prop.limit != nil && len(prop.props) > 0 {\n\t\tif t, ok := info.p.(physicalDistSQLPlan); ok {\n\t\t\tt.addTopN(p.ctx, prop)\n\t\t} else if _, ok := info.p.(*Selection); !ok {\n\t\t\tinfo = p.appendSelToInfo(info)\n\t\t}\n\t\tinfo = enforceProperty(prop, info)\n\t} else if len(prop.props) != 0 {\n\t\tinfo = &physicalPlanInfo{cost: math.MaxFloat64}\n\t}\n\treturn info, nil\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanStream(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tfor _, aggFunc := range p.AggFuncs {\n\t\tif aggFunc.GetMode() == expression.FinalMode {\n\t\t\treturn &physicalPlanInfo{cost: math.MaxFloat64}, nil\n\t\t}\n\t}\n\tagg := PhysicalAggregation{\n\t\tAggType: StreamedAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\t// TODO: Consider distinct key.\n\tinfo := &physicalPlanInfo{cost: math.MaxFloat64}\n\tgbyCols := p.groupByCols\n\tif len(gbyCols) != len(p.GroupByItems) {\n\t\t// group by a + b is not interested in any order.\n\t\treturn info, nil\n\t}\n\tisSortKey := make([]bool, len(gbyCols))\n\tnewProp := &requiredProperty{\n\t\tprops: make([]*columnProp, 0, len(gbyCols)),\n\t}\n\tfor _, pro := range prop.props {\n\t\tidx := p.getGbyColIndex(pro.col)\n\t\tif idx == -1 {\n\t\t\treturn info, nil\n\t\t}\n\t\tisSortKey[idx] = true\n\t\t// We should add columns in aggregation in order to keep index right.\n\t\tnewProp.props = append(newProp.props, &columnProp{col: gbyCols[idx], desc: pro.desc})\n\t}\n\tnewProp.sortKeyLen = len(newProp.props)\n\tfor i, col := range gbyCols {\n\t\tif !isSortKey[i] {\n\t\t\tnewProp.props = append(newProp.props, &columnProp{col: col})\n\t\t}\n\t}\n\tchildInfo, err := p.children[0].(LogicalPlan).convert2PhysicalPlan(newProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tinfo = addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * cpuFactor\n\tinfo.count = info.count * aggFactor\n\treturn info, nil\n}",
"func (p *LogicalJoin) convert2PhysicalPlanSemi(prop *requiredProperty) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\tallLeft := true\n\tfor _, col := range prop.props {\n\t\tif !lChild.Schema().Contains(col.col) {\n\t\t\tallLeft = false\n\t\t}\n\t}\n\tjoin := PhysicalHashSemiJoin{\n\t\tWithAux: LeftOuterSemiJoin == p.JoinType,\n\t\tEqualConditions: p.EqualConditions,\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: p.OtherConditions,\n\t\tAnti: p.anti,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tlProp := prop\n\tif !allLeft {\n\t\tlProp = &requiredProperty{}\n\t}\n\tif p.JoinType == SemiJoin {\n\t\tlProp = removeLimit(lProp)\n\t}\n\tlInfo, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfo, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresultInfo := join.matchProperty(prop, lInfo, rInfo)\n\tif p.JoinType == SemiJoin {\n\t\tresultInfo.count = lInfo.count * selectionFactor\n\t} else {\n\t\tresultInfo.count = lInfo.count\n\t}\n\tif !allLeft {\n\t\tresultInfo = enforceProperty(prop, resultInfo)\n\t} else if p.JoinType == SemiJoin {\n\t\tresultInfo = enforceProperty(limitProperty(prop.limit), resultInfo)\n\t}\n\treturn resultInfo, nil\n}",
"func (p *LogicalJoin) convert2PhysicalMergeJoin(parentProp *requiredProperty, lProp *requiredProperty, rProp *requiredProperty, condIndex int, joinType JoinType) (*physicalPlanInfo, error) {\n\tlChild := p.children[0].(LogicalPlan)\n\trChild := p.children[1].(LogicalPlan)\n\n\tnewEQConds := make([]*expression.ScalarFunction, 0, len(p.EqualConditions)-1)\n\tfor i, cond := range p.EqualConditions {\n\t\tif i == condIndex {\n\t\t\tcontinue\n\t\t}\n\t\t// prevent further index contamination\n\t\tnewCond := cond.Clone()\n\t\tnewCond.ResolveIndices(p.schema)\n\t\tnewEQConds = append(newEQConds, newCond.(*expression.ScalarFunction))\n\t}\n\teqCond := p.EqualConditions[condIndex]\n\n\totherFilter := append(expression.ScalarFuncs2Exprs(newEQConds), p.OtherConditions...)\n\n\tjoin := PhysicalMergeJoin{\n\t\tEqualConditions: []*expression.ScalarFunction{eqCond},\n\t\tLeftConditions: p.LeftConditions,\n\t\tRightConditions: p.RightConditions,\n\t\tOtherConditions: otherFilter,\n\t\tDefaultValues: p.DefaultValues,\n\t\t// Assume order for both side are the same\n\t\tDesc: lProp.props[0].desc,\n\t}.init(p.allocator, p.ctx)\n\tjoin.SetSchema(p.schema)\n\tjoin.JoinType = joinType\n\n\tvar lInfo *physicalPlanInfo\n\tvar rInfo *physicalPlanInfo\n\n\t// Try no sort first\n\tlInfoEnforceSort, err := lChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tlInfoEnforceSort = enforceProperty(lProp, lInfoEnforceSort)\n\n\tlInfoNoSorted, err := lChild.convert2PhysicalPlan(lProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif lInfoNoSorted.cost < lInfoEnforceSort.cost {\n\t\tlInfo = lInfoNoSorted\n\t} else {\n\t\tlInfo = lInfoEnforceSort\n\t}\n\n\trInfoEnforceSort, err := rChild.convert2PhysicalPlan(&requiredProperty{})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\trInfoEnforceSort = enforceProperty(rProp, rInfoEnforceSort)\n\n\trInfoNoSorted, err := rChild.convert2PhysicalPlan(rProp)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif rInfoEnforceSort.cost < rInfoNoSorted.cost {\n\t\trInfo = rInfoEnforceSort\n\t} else {\n\t\trInfo = rInfoNoSorted\n\t}\n\tparentProp = join.tryConsumeOrder(parentProp, eqCond)\n\n\tresultInfo := join.matchProperty(parentProp, lInfo, rInfo)\n\t// TODO: Considering keeping order in join to remove at least\n\t// one ordering property\n\tresultInfo = enforceProperty(parentProp, resultInfo)\n\treturn resultInfo, nil\n}",
"func MakePhysicalPlan(infra *PhysicalInfrastructure) PhysicalPlan {\n\treturn PhysicalPlan{\n\t\tPhysicalInfrastructure: infra,\n\t}\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanFinalHash(x physicalDistSQLPlan, childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: FinalAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.SetSchema(p.schema)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tschema := x.addAggregation(p.ctx, agg)\n\tif schema.Len() == 0 {\n\t\treturn nil\n\t}\n\tx.(PhysicalPlan).SetSchema(schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.count = info.count * aggFactor\n\t// if we build the final aggregation, it must be the best plan.\n\tinfo.cost = 0\n\treturn info\n}",
"func (p *LogicalAggregation) convert2PhysicalPlanCompleteHash(childInfo *physicalPlanInfo) *physicalPlanInfo {\n\tagg := PhysicalAggregation{\n\t\tAggType: CompleteAgg,\n\t\tAggFuncs: p.AggFuncs,\n\t\tGroupByItems: p.GroupByItems,\n\t}.init(p.allocator, p.ctx)\n\tagg.HasGby = len(p.GroupByItems) > 0\n\tagg.SetSchema(p.schema)\n\tinfo := addPlanToResponse(agg, childInfo)\n\tinfo.cost += info.count * memoryFactor\n\tinfo.count = info.count * aggFactor\n\treturn info\n}",
"func (mq *metadataQuery) makePlan() (*models.PhysicalPlan, error) {\n\t//FIXME need using storage's replica state ???\n\tstorageNodes, err := mq.runtime.stateMgr.GetQueryableReplicas(mq.database)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstorageNodesLen := len(storageNodes)\n\tif storageNodesLen == 0 {\n\t\treturn nil, constants.ErrReplicaNotFound\n\t}\n\tcurBroker := mq.runtime.stateMgr.GetCurrentNode()\n\tcurBrokerIndicator := curBroker.Indicator()\n\tphysicalPlan := &models.PhysicalPlan{\n\t\tDatabase: mq.database,\n\t\tRoot: models.Root{\n\t\t\tIndicator: curBrokerIndicator,\n\t\t\tNumOfTask: int32(storageNodesLen),\n\t\t},\n\t}\n\treceivers := []models.StatelessNode{curBroker}\n\tfor storageNode, shardIDs := range storageNodes {\n\t\tphysicalPlan.AddLeaf(models.Leaf{\n\t\t\tBaseNode: models.BaseNode{\n\t\t\t\tParent: curBrokerIndicator,\n\t\t\t\tIndicator: storageNode,\n\t\t\t},\n\t\t\tShardIDs: shardIDs,\n\t\t\tReceivers: receivers,\n\t\t})\n\t}\n\treturn physicalPlan, nil\n}",
"func NewLogicalPlan(transforms parser.Nodes, edges parser.Edges) (LogicalPlan, error) {\n\tlp := LogicalPlan{\n\t\tSteps: make(map[parser.NodeID]LogicalStep),\n\t\tPipeline: make([]parser.NodeID, 0, len(transforms)),\n\t}\n\n\t// Create all steps\n\tfor _, transform := range transforms {\n\t\tlp.Steps[transform.ID] = LogicalStep{\n\t\t\tTransform: transform,\n\t\t\tParents: make([]parser.NodeID, 0, 1),\n\t\t\tChildren: make([]parser.NodeID, 0, 1),\n\t\t}\n\t\tlp.Pipeline = append(lp.Pipeline, transform.ID)\n\t}\n\n\t// Link all parent/children\n\tfor _, edge := range edges {\n\t\tparent, ok := lp.Steps[edge.ParentID]\n\t\tif !ok {\n\t\t\treturn LogicalPlan{}, fmt.Errorf(\"invalid DAG found, parent %s not found for child %s\", edge.ParentID, edge.ChildID)\n\t\t}\n\n\t\tchild, ok := lp.Steps[edge.ChildID]\n\t\tif !ok {\n\t\t\treturn LogicalPlan{}, fmt.Errorf(\"invalid DAG found, child %s not found for parent %s\", edge.ChildID, edge.ParentID)\n\t\t}\n\n\t\tparent.Children = append(parent.Children, child.ID())\n\t\tchild.Parents = append(child.Parents, parent.ID())\n\t\t// Write back since we are doing copy instead reference\n\t\tlp.Steps[edge.ParentID] = parent\n\t\tlp.Steps[edge.ChildID] = child\n\t}\n\n\treturn lp, nil\n}",
"func NewPhysicalPlanner(options ...PhysicalOption) PhysicalPlanner {\n\tpp := &physicalPlanner{\n\t\theuristicPlannerPhysical: newHeuristicPlanner(),\n\t\theuristicPlannerParallel: newHeuristicPlanner(),\n\t\tdefaultMemoryLimit: math.MaxInt64,\n\t}\n\n\trulesPhysical := make([]Rule, len(ruleNameToPhysicalRule))\n\ti := 0\n\tfor _, v := range ruleNameToPhysicalRule {\n\t\trulesPhysical[i] = v\n\t\ti++\n\t}\n\n\trulesParallel := make([]Rule, len(ruleNameToParallelizeRules))\n\ti = 0\n\tfor _, v := range ruleNameToParallelizeRules {\n\t\trulesParallel[i] = v\n\t\ti++\n\t}\n\n\tpp.heuristicPlannerPhysical.addRules(rulesPhysical...)\n\n\tpp.heuristicPlannerPhysical.addRules(physicalConverterRule{})\n\n\tpp.heuristicPlannerParallel.addRules(rulesParallel...)\n\n\t// Options may add or remove rules, so process them after we've\n\t// added registered rules.\n\tfor _, opt := range options {\n\t\topt.apply(pp)\n\t}\n\n\treturn pp\n}",
"func CreatePlan(ctx context.Context, inputPath, outputPath string, customizationsPath, prjName string) plantypes.Plan {\n\tlogrus.Debugf(\"Temp Dir : %s\", common.TempPath)\n\tp := plantypes.NewPlan()\n\tp.Name = prjName\n\tp.Spec.RootDir = inputPath\n\tp.Spec.CustomizationsDir = customizationsPath\n\tif customizationsPath != \"\" {\n\t\tcommon.CheckAndCopyCustomizations(customizationsPath)\n\t}\n\tlogrus.Infoln(\"Loading Configuration\")\n\tconfigurationLoaders := configuration.GetLoaders()\n\tfor _, l := range configurationLoaders {\n\t\tlogrus.Infof(\"[%T] Loading configuration\", l)\n\t\terr := l.UpdatePlan(&p)\n\t\tif err != nil {\n\t\t\tlogrus.Warnf(\"[%T] Failed : %s\", l, err)\n\t\t} else {\n\t\t\tlogrus.Infof(\"[%T] Done\", l)\n\t\t}\n\t}\n\ttc, err := (&configuration.ClusterMDLoader{}).GetTargetClusterMetadataForPlan(p)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Unable to load cluster metadata : %s\", err)\n\t}\n\ttransformer.Init(common.AssetsPath, inputPath, tc, outputPath, p.Name)\n\tts := transformer.GetTransformers()\n\tfor tn, t := range ts {\n\t\tconfig, _ := t.GetConfig()\n\t\tp.Spec.Configuration.Transformers[tn] = config.Spec.FilePath\n\t}\n\tlogrus.Infoln(\"Configuration loading done\")\n\n\tp.Spec.Services, err = transformer.GetServices(p.Name, inputPath)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Unable to create plan : %s\", err)\n\t}\n\tlogrus.Infof(\"No of services identified : %d\", len(p.Spec.Services))\n\treturn p\n}",
"func addPlanToResponse(parent PhysicalPlan, info *physicalPlanInfo) *physicalPlanInfo {\n\tnp := parent.Copy()\n\tnp.SetChildren(info.p)\n\tret := &physicalPlanInfo{p: np, cost: info.cost, count: info.count, reliable: info.reliable}\n\tif _, ok := parent.(*MaxOneRow); ok {\n\t\tret.count = 1\n\t\tret.reliable = true\n\t}\n\treturn ret\n}",
"func MakePhysicalInfrastructure(\n\tflowID uuid.UUID, gatewayNodeID roachpb.NodeID,\n) PhysicalInfrastructure {\n\treturn PhysicalInfrastructure{\n\t\tFlowID: flowID,\n\t\tGatewayNodeID: gatewayNodeID,\n\t}\n}",
"func convertToPipelineLimitation(lim process.Limitation) *pipeline.ProcessLimitation {\n\treturn &pipeline.ProcessLimitation{\n\t\tSize: lim.Size,\n\t\tBatchRows: lim.BatchRows,\n\t\tBatchSize: lim.BatchSize,\n\t\tPartitionRows: lim.PartitionRows,\n\t\tReaderSize: lim.ReaderSize,\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
addCachePlan will add a Cache plan above the plan whose parent's IsCorrelated() is true but whose own IsCorrelated() is false.
|
func addCachePlan(p PhysicalPlan, allocator *idAllocator) []*expression.CorrelatedColumn {
if len(p.Children()) == 0 {
return nil
}
selfCorCols := p.extractCorrelatedCols()
newChildren := make([]Plan, 0, len(p.Children()))
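	// Recurse into the children first so that each child reports its own correlated columns.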
for _, child := range p.Children() {
childCorCols := addCachePlan(child.(PhysicalPlan), allocator)
		// If p is a Selection that controls the access conditions of the scan plan below, there shouldn't be a cache plan.
if sel, ok := p.(*Selection); len(selfCorCols) > 0 && len(childCorCols) == 0 && (!ok || !sel.ScanController) {
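			// p is correlated but this child is not, so the child's result can be cached and reused across outer rows.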
newChild := Cache{}.init(p.Allocator(), p.context())
newChild.SetSchema(child.Schema())
addChild(newChild, child)
newChild.SetParents(p)
newChildren = append(newChildren, newChild)
} else {
newChildren = append(newChildren, child)
}
}
p.SetChildren(newChildren...)
return selfCorCols
}
|
[
"func (d *Dao) AddCache(f func()) {\n\tselect {\n\tcase d.missch <- f:\n\tdefault:\n\t\tlog.Warn(\"cacheproc chan full\")\n\t}\n}",
"func (d *Dao) AddArcCache(c context.Context, pid int64, arc *model.ArcSort) (err error) {\n\tvar (\n\t\tplakey = keyPlArc(pid)\n\t\tpladKey = keyPlArcDesc(pid)\n\t\tconn = d.redis.Get(c)\n\t\tcount int\n\t)\n\tdefer conn.Close()\n\tif _, err = redis.Bool(conn.Do(\"EXPIRE\", plakey, d.plExpire)); err != nil {\n\t\tlog.Error(\"conn.Do(EXPIRE %s) error(%v)\", plakey, err)\n\t\treturn\n\t}\n\tif _, err = redis.Bool(conn.Do(\"EXPIRE\", pladKey, d.plExpire)); err != nil {\n\t\tlog.Error(\"conn.Do(EXPIRE %s) error(%v)\", pladKey, err)\n\t\treturn\n\t}\n\targs1 := redis.Args{}.Add(plakey)\n\targs1 = args1.Add(arc.Sort).Add(arc.Aid)\n\tif err = conn.Send(\"ZADD\", args1...); err != nil {\n\t\tlog.Error(\"conn.Send(ZADD, %s, %v) error(%v)\", plakey, args1, err)\n\t\treturn\n\t}\n\tcount++\n\tif arc.Desc != \"\" {\n\t\targs2 := redis.Args{}.Add(pladKey).Add(arc.Aid).Add(arc.Desc)\n\t\tif err = conn.Send(\"HSET\", args2...); err != nil {\n\t\t\tlog.Error(\"conn.Send(ZADD, %s, %v) error(%v)\", plakey, args2, err)\n\t\t\treturn\n\t\t}\n\t\tcount++\n\t\tif err = conn.Send(\"EXPIRE\", pladKey, d.plExpire); err != nil {\n\t\t\tlog.Error(\"conn.Send(Expire, %s) error(%v)\", pladKey, err)\n\t\t\treturn\n\t\t}\n\t\tcount++\n\t}\n\tif err = conn.Send(\"EXPIRE\", plakey, d.plExpire); err != nil {\n\t\tlog.Error(\"conn.Send(Expire, %s) error(%v)\", pladKey, err)\n\t\treturn\n\t}\n\tif err = conn.Flush(); err != nil {\n\t\tlog.Error(\"conn.Flush error(%v)\", err)\n\t\treturn\n\t}\n\tfor i := 0; i < count; i++ {\n\t\tif _, err = conn.Receive(); err != nil {\n\t\t\tlog.Error(\"conn.Receive() error(%v)\", err)\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}",
"func (c *cache) add(qname string, rr RR) {\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\tc._add(qname, rr)\n}",
"func (d *Dao) AddRelationCache(c context.Context, m *favmdl.Favorite) (err error) {\n\tkey := relationKey(m.Mid, m.Fid)\n\tconn := d.redis.Get(c)\n\tdefer conn.Close()\n\tif err = conn.Send(\"ZADD\", key, m.MTime, m.Oid); err != nil {\n\t\tlog.Error(\"conn.Send(ZADD %s,%d) error(%v)\", key, m.Oid, err)\n\t\treturn\n\t}\n\tif err = conn.Send(\"EXPIRE\", key, d.redisExpire); err != nil {\n\t\tlog.Error(\"conn.Send(EXPIRE) error(%v)\", err)\n\t\treturn\n\t}\n\tif err = conn.Flush(); err != nil {\n\t\tlog.Error(\"conn.Flush() error(%v)\", err)\n\t\treturn\n\t}\n\tfor i := 0; i < 2; i++ {\n\t\tif _, err = conn.Receive(); err != nil {\n\t\t\tlog.Error(\"conn.Receive() error(%v)\", err)\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}",
"func (plugin *LinuxInterfaceConfigurator) addToCache(iface *interfaces.LinuxInterfaces_Interface, peerIface *LinuxInterfaceConfig) *LinuxInterfaceConfig {\n\tconfig := &LinuxInterfaceConfig{config: iface, peer: peerIface}\n\tplugin.ifByName[iface.Name] = config\n\tif peerIface != nil {\n\t\tpeerIface.peer = config\n\t}\n\tif iface.Namespace != nil && iface.Namespace.Type == interfaces.LinuxInterfaces_Interface_Namespace_MICROSERVICE_REF_NS {\n\t\tif _, ok := plugin.ifsByMs[iface.Namespace.Microservice]; ok {\n\t\t\tplugin.ifsByMs[iface.Namespace.Microservice] = append(plugin.ifsByMs[iface.Namespace.Microservice], config)\n\t\t} else {\n\t\t\tplugin.ifsByMs[iface.Namespace.Microservice] = []*LinuxInterfaceConfig{config}\n\t\t}\n\t}\n\tplugin.log.Debugf(\"Linux interface with name %v added to cache (peer: %v)\",\n\t\tiface.Name, peerIface)\n\treturn config\n}",
"func addZoneToCache(zone *rainslib.ZoneSection, isAuthoritative bool) {\n\tnegAssertionCache.AddZone(zone, zone.ValidUntil(), isAuthoritative)\n\tlog.Debug(\"Added zone to cache\", \"zone\", *zone)\n\tfor _, section := range zone.Content {\n\t\tswitch section := section.(type) {\n\t\tcase *rainslib.AssertionSection:\n\t\t\tif shouldAssertionBeCached(section) {\n\t\t\t\ta := section.Copy(zone.Context, zone.SubjectZone)\n\t\t\t\taddAssertionToCache(a, isAuthoritative)\n\t\t\t}\n\t\tcase *rainslib.ShardSection:\n\t\t\tif shouldShardBeCached(section) {\n\t\t\t\ts := section.Copy(zone.Context, zone.SubjectZone)\n\t\t\t\taddShardToCache(s, isAuthoritative)\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.Warn(fmt.Sprintf(\"Not supported type. Expected *ShardSection or *AssertionSection. Got=%T\", section))\n\t\t}\n\t}\n}",
"func addAssertionToCache(a *rainslib.AssertionSection, isAuthoritative bool) {\n\tassertionsCache.Add(a, a.ValidUntil(), isAuthoritative)\n\tlog.Debug(\"Added assertion to cache\", \"assertion\", *a)\n\tfor _, obj := range a.Content {\n\t\tif obj.Type == rainslib.OTDelegation {\n\t\t\tif publicKey, ok := obj.Value.(rainslib.PublicKey); ok {\n\t\t\t\tpublicKey.ValidSince = a.ValidSince()\n\t\t\t\tpublicKey.ValidUntil = a.ValidUntil()\n\t\t\t\tok := zoneKeyCache.Add(a, publicKey, isAuthoritative)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Warn(\"number of entries in the zoneKeyCache reached a critical amount\")\n\t\t\t\t}\n\t\t\t\tlog.Debug(\"Added publicKey to cache\", \"publicKey\", publicKey)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"Object type and value type mismatch. This case must be prevented beforehand\")\n\t\t\t}\n\t\t}\n\t}\n}",
"func (d *Dao) AddRelationsCache(c context.Context, tp int8, mid, fid int64, fs []*favmdl.Favorite) (err error) {\n\tkey := relationKey(mid, fid)\n\tconn := d.redis.Get(c)\n\tdefer conn.Close()\n\tfor _, fav := range fs {\n\t\tif err = conn.Send(\"ZADD\", key, fav.MTime, fav.Oid); err != nil {\n\t\t\tlog.Error(\"conn.Send(ZADD %s,%d) error(%v)\", key, fav.Oid, err)\n\t\t\treturn\n\t\t}\n\t}\n\tif err = conn.Send(\"EXPIRE\", key, d.redisExpire); err != nil {\n\t\tlog.Error(\"conn.Send(EXPIRE) error(%v)\", err)\n\t\treturn\n\t}\n\tif err = conn.Flush(); err != nil {\n\t\tlog.Error(\"conn.Flush() error(%v)\", err)\n\t\treturn\n\t}\n\tfor i := 0; i < len(fs)+1; i++ {\n\t\tif _, err = conn.Receive(); err != nil {\n\t\t\tlog.Error(\"conn.Receive() error(%v)\", err)\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}",
"func addPlanToResponse(parent PhysicalPlan, info *physicalPlanInfo) *physicalPlanInfo {\n\tnp := parent.Copy()\n\tnp.SetChildren(info.p)\n\tret := &physicalPlanInfo{p: np, cost: info.cost, count: info.count, reliable: info.reliable}\n\tif _, ok := parent.(*MaxOneRow); ok {\n\t\tret.count = 1\n\t\tret.reliable = true\n\t}\n\treturn ret\n}",
"func (qr *Rule) AddPlanCond(planType planbuilder.PlanType) {\n\tqr.plans = append(qr.plans, planType)\n}",
"func addAddressAssertionToCache(a *rainslib.AddressAssertionSection, isAuthoritative bool) {\n\tif err := getAddressCache(a.SubjectAddr, a.Context).AddAddressAssertion(a); err != nil {\n\t\tlog.Warn(\"Was not able to add addressAssertion to cache\", \"addressAssertion\", a)\n\t}\n}",
"func (nr *NodeResInfo) AddPodToCache(pod *v1.Pod, resIDMap map[v1.ResourceName]int) {\n\t// AddPod does not check node's avilable resources, assue the pod can be binded to this node\n\tpodKey := pod.UID\n\n\tnr.Pods[podKey] = NewPodInfo(pod, resIDMap, cap(nr.Capa))\n\tklog.V(3).Infof(\"Add pod %s on node %s\", podKey, nr.Name)\n\tklog.V(4).Infof(\"Available: %v\", nr.GetAvailable())\n}",
"func AddCacheHook(hookPoint boil.HookPoint, cacheHook CacheHook) {\n\tswitch hookPoint {\n\tcase boil.BeforeInsertHook:\n\t\tcacheBeforeInsertHooks = append(cacheBeforeInsertHooks, cacheHook)\n\tcase boil.BeforeUpdateHook:\n\t\tcacheBeforeUpdateHooks = append(cacheBeforeUpdateHooks, cacheHook)\n\tcase boil.BeforeDeleteHook:\n\t\tcacheBeforeDeleteHooks = append(cacheBeforeDeleteHooks, cacheHook)\n\tcase boil.BeforeUpsertHook:\n\t\tcacheBeforeUpsertHooks = append(cacheBeforeUpsertHooks, cacheHook)\n\tcase boil.AfterInsertHook:\n\t\tcacheAfterInsertHooks = append(cacheAfterInsertHooks, cacheHook)\n\tcase boil.AfterSelectHook:\n\t\tcacheAfterSelectHooks = append(cacheAfterSelectHooks, cacheHook)\n\tcase boil.AfterUpdateHook:\n\t\tcacheAfterUpdateHooks = append(cacheAfterUpdateHooks, cacheHook)\n\tcase boil.AfterDeleteHook:\n\t\tcacheAfterDeleteHooks = append(cacheAfterDeleteHooks, cacheHook)\n\tcase boil.AfterUpsertHook:\n\t\tcacheAfterUpsertHooks = append(cacheAfterUpsertHooks, cacheHook)\n\t}\n}",
"func AddDataToCache(IP net.IP, Port layers.TCPPort, c *Cache) bool {\n\tIPHash := GetIPHash(IP.String())\n\tdataInfo := NetFace{IP, Port}\n\n\terr := c.AddItem(IPHash, dataInfo, 5*time.Minute)\n\treturn err\n}",
"func addAddressZoneToCache(zone *rainslib.AddressZoneSection, isAuthoritative bool) {\n\tif err := getAddressCache(zone.SubjectAddr, zone.Context).AddAddressZone(zone); err != nil {\n\t\tlog.Warn(\"Was not able to add addressZone to cache\", \"addressZone\", zone)\n\t}\n\tfor _, a := range zone.Content {\n\t\taddAddressAssertionToCache(a, isAuthoritative)\n\t}\n}",
"func addTemplateToCache(cacheKey string, tpl *template.Template, cache map[string]*template.Template) {\n\tcache[cacheKey] = tpl\n}",
"func (m *MockUpClient) AddUpPassedCache(ctx context.Context, in *UpCacheReq, opts ...grpc.CallOption) (*NoReply, error) {\n\tvarargs := []interface{}{ctx, in}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"AddUpPassedCache\", varargs...)\n\tret0, _ := ret[0].(*NoReply)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (d *Dao) AddChannelCardCache(c context.Context, buvid, md5 string, channelID int64, now time.Time) (err error) {\n\tvar (\n\t\tkey = channelCardKey(buvid, channelID)\n\t\tconn = d.mc.Get(c)\n\t\tcurrenttimeSec = int32((now.Hour() * 60 * 60) + (now.Minute() * 60) + now.Second())\n\t\tovertime int32\n\t)\n\tif overtime = d.expire - currenttimeSec; overtime < 1 {\n\t\tovertime = d.expire\n\t}\n\tif err = conn.Set(&memcache.Item{Key: key, Object: md5, Flags: memcache.FlagJSON, Expiration: overtime}); err != nil {\n\t\tlog.Error(\"AddChannelCardCache d.mc.Set(%s,%v) error(%v)\", key, channelID, err)\n\t}\n\tconn.Close()\n\treturn\n}",
"func (s *StateDB) addCache(obj *StateObject) {\n\ts.stateObjects[obj.key.String()] = obj\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
thread is a dumb function for calling threads
|
func thread(source string) {
fmt.Println("called from:", source)
fmt.Println("--------------")
}
|
[
"func TestThreads() {\n\tthread(\"direct\")\n\n\tgo thread(\"goroutine\")\n\n\t//anonymous function\n\tgo func(msg string) {\n\t\tfmt.Println(msg)\n\t}(\"go routine\")\n\n\tfmt.Scanln() // take input\n\tfmt.Println(\"Scanln() done...\")\n}",
"func (h heap) runThread(p Program) {\n\tdefer h.wg.Done()\n\th.run(p)\n}",
"func x_cgo_thread_start(arg *ThreadStart) {\n\tvar ts *ThreadStart\n\t// Make our own copy that can persist after we return.\n\t//\t_cgo_tsan_acquire();\n\tts = (*ThreadStart)(malloc(unsafe.Sizeof(*ts)))\n\t//\t_cgo_tsan_release();\n\tif ts == nil {\n\t\tprintln(\"fakecgo: out of memory in thread_start\")\n\t\tabort()\n\t}\n\t// *ts = *arg would cause a writebarrier so use memmove instead\n\tmemmove(unsafe.Pointer(ts), unsafe.Pointer(arg), unsafe.Sizeof(*ts))\n\t_cgo_sys_thread_start(ts) // OS-dependent half\n}",
"func joinThread(ctx context.Context, cli *client.Client, shared *SharedInfo) (*threadWithKeys, error) {\n\taddr, err := multiaddr.NewMultiaddr(shared.Addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkey, err := thread.KeyFromString(shared.ThreadKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogSk, logPk, err := crypto.GenerateEd25519Key(crand.Reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsk, _, err := crypto.GenerateEd25519Key(crand.Reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tidentity := thread.NewLibp2pIdentity(sk)\n\ttok, err := cli.GetToken(ctx, identity)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinfo, err := cli.AddThread(ctx, addr, corenet.WithThreadKey(key), corenet.WithLogKey(logSk), corenet.WithNewThreadToken(tok))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tch, err := cli.Subscribe(context.Background(), corenet.WithSubFilter(info.ID))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &threadWithKeys{info, identity, logSk, logPk, cid.Undef, cli, ch, make(map[cid.Cid]bool)}, nil\n}",
"func (c *Cmd) SetThread(thread uint64) { c.T = thread }",
"func crawlThread(crwlurl <-chan UrlInfo, appurl chan<- UrlInfo, updurl chan<- LogInfo, i int) {\n\t //fmt.Printf(\"Started CRAWL Thread %d\\n\", i)\n\t var prevHst string = \"start\"\n\t for {\n\t\tselect {\n\t\t case crawlUrl := <-crwlurl:\n\t\t\t//Delay the http request if the thread had \n\t\t\t//requested to the same Host in the previous iteration\n\t\t\tcrawlUrli,_ := url.Parse(crawlUrl.UrlN)\n\t\t\tcrawlHst := crawlUrli.Host\n\t\t\tif (prevHst != \"start\" && prevHst == crawlHst) {\n\t\t\t //fmt.Printf(\"%s-%s Delaying\\n\",prevHst,crawlHst)\n\t\t\t time.Sleep(crawlDelay * time.Millisecond)\n\t\t\t} else {\n\t\t\t //fmt.Printf(\"%s-%s No Delay\\n\",prevHst,crawlHst)\n\t\t\t}\n\t\t\tprevHst = crawlHst\n\t\t\t//fmt.Printf(\"Thread %d : %s\\n\",i,crawlUrl.UrlN)\n\t\t\tgetUrlLinks(crawlUrl,appurl,updurl)\n\t\t}\n\t }\n}",
"func runSyscallThread() {\n\truntime.LockOSThread()\n\tmy := Mythread()\n\tsyscalltask = (threadptr)(unsafe.Pointer(my))\n\tlog.Infof(\"[syscall] tid:%d\", my.id)\n\tfor {\n\t\tcallptr, _, err := syscall.Syscall(SYS_WAIT_SYSCALL, 0, 0, 0)\n\t\tif err != 0 {\n\t\t\tthrow(\"bad SYS_WAIT_SYSCALL return\")\n\t\t}\n\t\tcall := (*isyscall.Request)(unsafe.Pointer(callptr))\n\n\t\tno := call.NO()\n\t\thandler := isyscall.GetHandler(no)\n\t\tif handler == nil {\n\t\t\tlog.Errorf(\"[syscall] unhandled %s(%d)(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\",\n\t\t\t\tsyscallName(int(no)), no,\n\t\t\t\tcall.Arg(0), call.Arg(1), call.Arg(2), call.Arg(3),\n\t\t\t\tcall.Arg(4), call.Arg(5))\n\t\t\tcall.SetErrorNO(syscall.ENOSYS)\n\t\t\tcall.Done()\n\t\t\tcontinue\n\t\t}\n\t\tgo func() {\n\t\t\thandler(call)\n\t\t\tvar iret interface{}\n\t\t\tret := call.Ret()\n\t\t\tif hasErrno(ret) {\n\t\t\t\tiret = syscall.Errno(-ret)\n\t\t\t} else {\n\t\t\t\tiret = ret\n\t\t\t}\n\t\t\tlog.Debugf(\"[syscall] %s(%d)(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x) = %v\",\n\t\t\t\tsyscallName(int(no)), no,\n\t\t\t\tcall.Arg(0), call.Arg(1), call.Arg(2), call.Arg(3),\n\t\t\t\tcall.Arg(4), call.Arg(5), iret,\n\t\t\t)\n\t\t\tcall.Done()\n\t\t}()\n\t}\n}",
"func racecall(*byte, uintptr, uintptr, uintptr, uintptr)",
"func (d Delta) Thread() (Thread, error) {\n\tvar thread Thread\n\treturn thread, json.Unmarshal(d.Attributes, &thread)\n}",
"func runOnRenderThread(f func(), sync bool) {\n\tif sync {\n\t\ttheRenderThread.Call(f)\n\t\treturn\n\t}\n\n\t// As the current thread doesn't have a capacity in a channel,\n\t// CallAsync should block when the previously-queued task is not executed yet.\n\t// This blocking is expected as double-buffering is used.\n\ttheRenderThread.CallAsync(f)\n}",
"func luaNewThread(ls *LuaState) *LuaState {\n\tt := &LuaState{registry: ls.registry}\n\tt.pushLuaStack(newLuaStack(LUA_MINSTACK, t))\n\tls.stack.push(t)\n\treturn t\n}",
"func readThread(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tvals := r.URL.Query()\n\tuuid := vals.Get(\"id\")\n\n\tthread, err := data.ThreadByUUID(uuid)\n\tif err != nil {\n\t\terror_message(w, r, ps, \"Cannot read thread\")\n\t} else {\n\t\ts, err := session(w, r)\n\t\tloginUser, _ := s.User()\n\t\tif err != nil {\n\t\t\tgenerateHTML(w, &thread, \"layout\", \"public.navbar\", \"public.thread\")\n\t\t} else {\n\t\t\tfetch_thread := data.DisplayData{\n\t\t\t\tUser: loginUser,\n\t\t\t\tSingleThread: thread,\n\t\t\t}\n\t\t\tgenerateHTML(w, &fetch_thread, \"layout\", \"private.navbar\", \"private.thread\")\n\n\t\t}\n\t}\n}",
"func (ext *ExtensionTwitterThread) commandTThread(bot *papaBot.Bot, sourceEvent *events.EventMessage, params []string) {\n\ttext := strings.Join(params, \" \")\n\tif text != \"\" {\n\t\text.lastTweet = ext.extractTweetId(text)\n\t}\n\tif ext.lastTweet == \"\" {\n\t\treturn\n\t}\n\tnotice := fmt.Sprintf(\"%s: https://threadreaderapp.com/thread/%s\", sourceEvent.Nick, ext.lastTweet)\n\tbot.SendMessage(sourceEvent, notice)\n\text.lastTweet = \"\"\n}",
"func newTestThread(startError, stopError error) *testThread {\n\treturn &testThread{\n\t\tdone: make(chan struct{}),\n\t\tstartError: startError,\n\t\tstopError: stopError,\n\t}\n}",
"func RunThread(clientset *kubeclient.Clientset, csvFile *os.File, namefmt, runID, createValue, deltaFmt string, tbase time.Time, hackCreate bool, updates, n, lag, thd, stride uint64, opPeriod float64) {\n\tklog.V(3).Infof(\"Thread %d creating %d objects with lag %d, stride %d, clientset %p\\n\", thd, n, lag, stride, clientset)\n\tvar iByPhase []uint64 = make([]uint64, 2+updates)\n\tvar iSum uint64\n\tlastPhase := 1 + updates\n\tfor iByPhase[lastPhase] < n {\n\t\tdt := float64(iSum*stride+thd) * opPeriod * float64(time.Second)\n\t\ttargt := tbase.Add(time.Duration(dt))\n\t\tnow := time.Now()\n\t\tif targt.After(now) {\n\t\t\tgap := targt.Sub(now)\n\t\t\tklog.V(4).Infof(\"For %#v in thread %d, target time is %s, now is %s; sleeping %s\\n\", iByPhase, thd, targt, now, gap)\n\t\t\ttime.Sleep(gap)\n\t\t} else {\n\t\t\tklog.V(4).Infof(\"For %#v in thread %d, target time is %s, now is %s; no sleep\\n\", iByPhase, thd, targt, now)\n\t\t}\n\t\tphase := lastPhase\n\t\tfor ; phase > 0 && iByPhase[phase-1] < iByPhase[phase]+lag && iByPhase[phase-1] < n; phase-- {\n\t\t}\n\t\tif iByPhase[phase] == 0 {\n\t\t\tklog.V(3).Infof(\"Thread %d doing first at phase %d\\n\", thd, phase)\n\t\t}\n\t\ti := iByPhase[phase]*stride + thd\n\t\tiByPhase[phase] += 1\n\t\tiSum += 1\n\t\tobjname := fmt.Sprintf(namefmt, runID, i)\n\t\tti0 := time.Now()\n\t\tif phase == 0 {\n\t\t\tobj := &kubecorev1.ConfigMap{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: objname,\n\t\t\t\t\tNamespace: namespace,\n\t\t\t\t\tLabels: map[string]string{\"purpose\": \"scaletest\"},\n\t\t\t\t\tAnnotations: map[string]string{CreateTimestampAnnotation: ti0.Format(CreateTimestampLayout)},\n\t\t\t\t},\n\t\t\t\tData: map[string]string{\"foo\": createValue},\n\t\t\t}\n\t\t\tvar err error\n\t\t\tvar retObj *kubecorev1.ConfigMap\n\t\t\tif hackCreate {\n\t\t\t\tvar result kubecorev1.ConfigMap\n\t\t\t\tretObj = &result\n\t\t\t\tcv1 := clientset.CoreV1().(*corev1.CoreV1Client)\n\t\t\t\trc := cv1.RESTClient()\n\t\t\t\terr = rc.Post().\n\t\t\t\t\tNamespace(namespace).\n\t\t\t\t\tResource(\"configmaps\").\n\t\t\t\t\tParam(\"r\", runID).\n\t\t\t\t\tParam(\"i\", strconv.FormatUint(i, 10)).\n\t\t\t\t\t//\tVersionedParams(\n\t\t\t\t\t//\t\t&metav1.ListOptions{TimeoutSeconds: &toSecs},\n\t\t\t\t\t//\t\tscheme.ParameterCodec).\n\t\t\t\t\tBody(obj).\n\t\t\t\t\tDo(context.Background()).\n\t\t\t\t\tInto(retObj)\n\t\t\t} else {\n\t\t\t\tretObj, err = clientset.CoreV1().ConfigMaps(namespace).Create(context.Background(), obj, metav1.CreateOptions{FieldManager: \"cmdriverclosed\"})\n\t\t\t}\n\t\t\ttif := time.Now()\n\t\t\twritelog(\"create\", obj.Name, ti0, tif, csvFile, err)\n\t\t\tif err != nil {\n\t\t\t\tatomic.AddInt64(&createErrors, 1)\n\t\t\t\tklog.V(6).Infof(\"Create failed: %#+v\\n\", err)\n\t\t\t} else if i == 1 {\n\t\t\t\tvar buf []byte\n\t\t\t\tvar err error\n\t\t\t\tbuf, err = retObj.Marshal()\n\t\t\t\tif err != nil {\n\t\t\t\t\tklog.Warningf(\"Marshaling returned object %#+v threw %#+v\\n\", retObj, err)\n\t\t\t\t} else {\n\t\t\t\t\tcreatedObjLen = len(buf)\n\t\t\t\t}\n\t\t\t}\n\t\t} else if phase < lastPhase {\n\t\t\tti0s := ti0.Format(CreateTimestampLayout)\n\t\t\tdelta := fmt.Sprintf(deltaFmt, phase, ti0s)\n\t\t\tretObj, err := clientset.CoreV1().ConfigMaps(namespace).Patch(context.Background(), objname, types.StrategicMergePatchType, []byte(delta), metav1.PatchOptions{FieldManager: \"cmdriverclosed\"})\n\t\t\ttif := time.Now()\n\t\t\twritelog(\"update\", objname, ti0, tif, csvFile, err)\n\t\t\tif err != nil 
{\n\t\t\t\tatomic.AddInt64(&updateErrors, 1)\n\t\t\t} else if i == 1 && phase == 1 {\n\t\t\t\tvar buf []byte\n\t\t\t\tvar err error\n\t\t\t\tbuf, err = retObj.Marshal()\n\t\t\t\tif err != nil {\n\t\t\t\t\tklog.Warningf(\"Marshaling returned object %#+v threw %#+v\\n\", retObj, err)\n\t\t\t\t} else {\n\t\t\t\t\tupdatedObjLen = len(buf)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tdelopts := metav1.DeleteOptions{}\n\t\t\terr := clientset.CoreV1().ConfigMaps(namespace).Delete(context.Background(), objname, delopts)\n\t\t\ttif := time.Now()\n\t\t\twritelog(\"delete\", objname, ti0, tif, csvFile, err)\n\t\t\tif err != nil {\n\t\t\t\tatomic.AddInt64(&deleteErrors, 1)\n\t\t\t}\n\t\t}\n\t}\n}",
"func postThread(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\n\tsess, err := session(w, r)\n\tif err != nil {\n\t\thttp.Redirect(w, r, \"/login\", http.StatusFound)\n\t} else {\n\t\terr = r.ParseForm()\n\t\tif err != nil {\n\t\t\tdanger(err, \"Cannot parse form\")\n\t\t}\n\t\tuser, err := sess.User()\n\t\tif err != nil {\n\t\t\tdanger(err, \"Cannot get user from session\")\n\t\t}\n\t\tbody := r.PostFormValue(\"body\")\n\t\tuuid := r.PostFormValue(\"uuid\")\n\n\t\tthread, err := data.ThreadByUUID(uuid)\n\t\tif err != nil {\n\t\t\terror_message(w, r, ps, \"Cannot read thread\")\n\t\t}\n\t\tif _, err := user.CreatePost(thread, body); err != nil {\n\t\t\tdanger(err, \"Cannot created post\")\n\t\t}\n\t\turl := fmt.Sprint(\"/thread/read?id=\", uuid)\n\t\thttp.Redirect(w, r, url, http.StatusFound)\n\t}\n}",
"func main() {\n\tlambdaFunc := func() { fmt.Println(\"Lambda Function!\") }\n\tgo lambdaFunc()\n\tgo func() { fmt.Println(\"Threaded Lambda without a pointer!\") }()\n\tgo func(m string) { fmt.Println(m) }(\"Threaded lambda with an argument!\")\n\ttime.Sleep(250)\n}",
"func syscall_runtime_doAllThreadsSyscall(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {\n\tif iscgo {\n\t\t// In cgo, we are not aware of threads created in C, so this approach will not work.\n\t\tpanic(\"doAllThreadsSyscall not supported with cgo enabled\")\n\t}\n\n\t// STW to guarantee that user goroutines see an atomic change to thread\n\t// state. Without STW, goroutines could migrate Ms while change is in\n\t// progress and e.g., see state old -> new -> old -> new.\n\t//\n\t// N.B. Internally, this function does not depend on STW to\n\t// successfully change every thread. It is only needed for user\n\t// expectations, per above.\n\tstopTheWorld(\"doAllThreadsSyscall\")\n\n\t// This function depends on several properties:\n\t//\n\t// 1. All OS threads that already exist are associated with an M in\n\t// allm. i.e., we won't miss any pre-existing threads.\n\t// 2. All Ms listed in allm will eventually have an OS thread exist.\n\t// i.e., they will set procid and be able to receive signals.\n\t// 3. OS threads created after we read allm will clone from a thread\n\t// that has executed the system call. i.e., they inherit the\n\t// modified state.\n\t//\n\t// We achieve these through different mechanisms:\n\t//\n\t// 1. Addition of new Ms to allm in allocm happens before clone of its\n\t// OS thread later in newm.\n\t// 2. newm does acquirem to avoid being preempted, ensuring that new Ms\n\t// created in allocm will eventually reach OS thread clone later in\n\t// newm.\n\t// 3. We take allocmLock for write here to prevent allocation of new Ms\n\t// while this function runs. Per (1), this prevents clone of OS\n\t// threads that are not yet in allm.\n\tallocmLock.lock()\n\n\t// Disable preemption, preventing us from changing Ms, as we handle\n\t// this M specially.\n\t//\n\t// N.B. STW and lock() above do this as well, this is added for extra\n\t// clarity.\n\tacquirem()\n\n\t// N.B. allocmLock also prevents concurrent execution of this function,\n\t// serializing use of perThreadSyscall, mp.needPerThreadSyscall, and\n\t// ensuring all threads execute system calls from multiple calls in the\n\t// same order.\n\n\tr1, r2, errno := syscall.Syscall6(trap, a1, a2, a3, a4, a5, a6)\n\tif GOARCH == \"ppc64\" || GOARCH == \"ppc64le\" {\n\t\t// TODO(https://go.dev/issue/51192 ): ppc64 doesn't use r2.\n\t\tr2 = 0\n\t}\n\tif errno != 0 {\n\t\treleasem(getg().m)\n\t\tallocmLock.unlock()\n\t\tstartTheWorld()\n\t\treturn r1, r2, errno\n\t}\n\n\tperThreadSyscall = perThreadSyscallArgs{\n\t\ttrap: trap,\n\t\ta1: a1,\n\t\ta2: a2,\n\t\ta3: a3,\n\t\ta4: a4,\n\t\ta5: a5,\n\t\ta6: a6,\n\t\tr1: r1,\n\t\tr2: r2,\n\t}\n\n\t// Wait for all threads to start.\n\t//\n\t// As described above, some Ms have been added to allm prior to\n\t// allocmLock, but not yet completed OS clone and set procid.\n\t//\n\t// At minimum we must wait for a thread to set procid before we can\n\t// send it a signal.\n\t//\n\t// We take this one step further and wait for all threads to start\n\t// before sending any signals. 
This prevents system calls from getting\n\t// applied twice: once in the parent and once in the child, like so:\n\t//\n\t// A B C\n\t// add C to allm\n\t// doAllThreadsSyscall\n\t// allocmLock.lock()\n\t// signal B\n\t// <receive signal>\n\t// execute syscall\n\t// <signal return>\n\t// clone C\n\t// <thread start>\n\t// set procid\n\t// signal C\n\t// <receive signal>\n\t// execute syscall\n\t// <signal return>\n\t//\n\t// In this case, thread C inherited the syscall-modified state from\n\t// thread B and did not need to execute the syscall, but did anyway\n\t// because doAllThreadsSyscall could not be sure whether it was\n\t// required.\n\t//\n\t// Some system calls may not be idempotent, so we ensure each thread\n\t// executes the system call exactly once.\n\tfor mp := allm; mp != nil; mp = mp.alllink {\n\t\tfor atomic.Load64(&mp.procid) == 0 {\n\t\t\t// Thread is starting.\n\t\t\tosyield()\n\t\t}\n\t}\n\n\t// Signal every other thread, where they will execute perThreadSyscall\n\t// from the signal handler.\n\tgp := getg()\n\ttid := gp.m.procid\n\tfor mp := allm; mp != nil; mp = mp.alllink {\n\t\tif atomic.Load64(&mp.procid) == tid {\n\t\t\t// Our thread already performed the syscall.\n\t\t\tcontinue\n\t\t}\n\t\tmp.needPerThreadSyscall.Store(1)\n\t\tsignalM(mp, sigPerThreadSyscall)\n\t}\n\n\t// Wait for all threads to complete.\n\tfor mp := allm; mp != nil; mp = mp.alllink {\n\t\tif mp.procid == tid {\n\t\t\tcontinue\n\t\t}\n\t\tfor mp.needPerThreadSyscall.Load() != 0 {\n\t\t\tosyield()\n\t\t}\n\t}\n\n\tperThreadSyscall = perThreadSyscallArgs{}\n\n\treleasem(getg().m)\n\tallocmLock.unlock()\n\tstartTheWorld()\n\n\treturn r1, r2, errno\n}",
"func NewSimple(f func()) *Thread {\n\treturn makeThread(\n\t\tcontext.Background(),\n\t\tSimple,\n\t\tf,\n\t)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TestThreads is where we do this; there is no need to export the functions above.
|
func TestThreads() {
thread("direct")
go thread("goroutine")
//anonymous function
go func(msg string) {
fmt.Println(msg)
}("go routine")
fmt.Scanln() // take input
fmt.Println("Scanln() done...")
}
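The fmt.Scanln call above only works interactively. A common alternative, sketched below under the hypothetical name TestThreadsWithWaitGroup and assuming the same thread helper plus the fmt and sync imports, is to wait on a sync.WaitGroup so the goroutines are also given time to finish in non-interactive runs.

func TestThreadsWithWaitGroup() {
    var wg sync.WaitGroup
    wg.Add(2)
    go func() {
        defer wg.Done() // mark this goroutine finished
        thread("goroutine")
    }()
    go func(msg string) {
        defer wg.Done()
        fmt.Println(msg)
    }("go routine")
    wg.Wait() // returns once both goroutines have called Done
}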
|
[
"func TestGetThreads(t *testing.T) {\n\towner, _ := createTestUser(t)\n\tmember1, _ := createTestUser(t)\n\tmember2, _ := createTestUser(t)\n\tnonmember, _ := createTestUser(t)\n\tthread := createTestThread(t, &owner, []*models.User{&member1, &member2})\n\n\ttype test struct {\n\t\tAuthHeader map[string]string\n\t\tOutCode int\n\t\tIsThreadInRes bool\n\t}\n\n\ttests := []test{\n\t\t{AuthHeader: getAuthHeader(owner.Token), OutCode: http.StatusOK, IsThreadInRes: true},\n\t\t{AuthHeader: getAuthHeader(member1.Token), OutCode: http.StatusOK, IsThreadInRes: true},\n\t\t{AuthHeader: getAuthHeader(member2.Token), OutCode: http.StatusOK, IsThreadInRes: true},\n\t\t{AuthHeader: getAuthHeader(nonmember.Token), OutCode: http.StatusOK, IsThreadInRes: false},\n\t\t{AuthHeader: map[string]string{\"boop\": \"beep\"}, OutCode: http.StatusUnauthorized, IsThreadInRes: false},\n\t}\n\n\tfor _, testCase := range tests {\n\t\t_, rr, respData := thelpers.TestEndpoint(t, tc, th, \"GET\", \"/threads\", nil, testCase.AuthHeader)\n\n\t\tthelpers.AssertStatusCodeEqual(t, rr, testCase.OutCode)\n\n\t\tif testCase.OutCode >= 400 {\n\t\t\tcontinue\n\t\t}\n\n\t\tgotThreads := respData[\"threads\"].([]interface{})\n\n\t\tif testCase.IsThreadInRes {\n\t\t\tthelpers.AssetObjectsContainKeys(t, \"id\", []string{thread.ID}, gotThreads)\n\t\t\tthelpers.AssetObjectsContainKeys(t, \"subject\", []string{thread.Subject}, gotThreads)\n\n\t\t\tgotThread := gotThreads[0].(map[string]interface{})\n\n\t\t\tgotThreadUsers := gotThread[\"users\"].([]interface{})\n\t\t\tthelpers.AssetObjectsContainKeys(t, \"id\", []string{owner.ID, member1.ID, member2.ID}, gotThreadUsers)\n\t\t\tthelpers.AssetObjectsContainKeys(t, \"fullName\", []string{owner.FullName, member1.FullName, member2.FullName}, gotThreadUsers)\n\n\t\t\tgotThreadOwner := gotThread[\"owner\"].(map[string]interface{})\n\t\t\tthelpers.AssertEqual(t, gotThreadOwner[\"id\"], thread.Owner.ID)\n\t\t\tthelpers.AssertEqual(t, gotThreadOwner[\"fullName\"], thread.Owner.FullName)\n\t\t} else {\n\t\t\tthelpers.AssetObjectsContainKeys(t, \"id\", []string{}, gotThreads)\n\t\t}\n\t}\n}",
"func newTestThread(startError, stopError error) *testThread {\n\treturn &testThread{\n\t\tdone: make(chan struct{}),\n\t\tstartError: startError,\n\t\tstopError: stopError,\n\t}\n}",
"func (s *LrpcTestSuite) TestConcurrent() {\n\n\tt := s.T()\n\n\tvar wg sync.WaitGroup\n\n\tstartTime := time.Now()\n\n\t// send 3 different sleep requests of 1, 2, 3 seconds, this function should\n\t// complete a little bit over 3 seconds\n\twg.Add(3)\n\tgo s.doSleep(&wg, uint32(1))\n\tgo s.doSleep(&wg, uint32(2))\n\tgo s.doSleep(&wg, uint32(3))\n\twg.Wait()\n\n\telapsed := time.Since(startTime)\n\tt.Logf(\"Elapsed time: %v\\n\", elapsed)\n\ts.Assert().Greater(elapsed, 3*time.Second, \"Should be longer than the longest parallel tests\")\n\ts.Assert().Less(elapsed, 4*time.Second, \"concurrent requests are not served in parallel\")\n\n}",
"func TestThreadStats(t *testing.T) {\n\tc, err := NewCollection(schedtestcommon.TestTrace1(t), NormalizeTimestamps(false))\n\tif err != nil {\n\t\tt.Fatalf(\"Broken collection, can't proceed: `%s'\", err)\n\t}\n\ttests := []struct {\n\t\tdescription string\n\t\tPIDs []PID\n\t\tCPUs []CPUID\n\t\tstartTimestamp trace.Timestamp\n\t\tendTimestamp trace.Timestamp\n\t\twantThreadStats *ThreadStatistics\n\t}{{\n\t\tdescription: \"everything\",\n\t\tstartTimestamp: trace.UnknownTimestamp,\n\t\tendTimestamp: trace.UnknownTimestamp,\n\t\twantThreadStats: &ThreadStatistics{\n\t\t\tWaitTime: 80, // 10 (PID 100) + 60 (PID 200) + 10 (PID 300)\n\t\t\tPostWakeupWaitTime: 80, // All waits\n\t\t\tRunTime: 200, // All the time on CPUs 1 and 2\n\t\t\tSleepTime: 120, // 40 (PID 200) + 80 (PID 300)\n\t\t\tWakeups: 5, // PID 100 at start and end, PID 200 at 1040, PID 300 at 1090, PID 400 at end\n\t\t\tMigrations: 1, // PID 200\n\t\t},\n\t}, {\n\t\tdescription: \"CPU 2\",\n\t\tCPUs: []CPUID{2},\n\t\tstartTimestamp: trace.UnknownTimestamp,\n\t\tendTimestamp: trace.UnknownTimestamp,\n\t\twantThreadStats: &ThreadStatistics{\n\t\t\tWaitTime: 20, // PID 200\n\t\t\tPostWakeupWaitTime: 20,\n\t\t\tRunTime: 100,\n\t\t\tSleepTime: 0,\n\t\t\tWakeups: 2, // PID 400 at end, PID 200 at 1080 when it arrived\n\t\t\tMigrations: 0, // No migrations among only 1 CPU\n\t\t},\n\t}, {\n\t\tdescription: \"PID 200\",\n\t\tPIDs: []PID{200},\n\t\tstartTimestamp: trace.UnknownTimestamp,\n\t\tendTimestamp: trace.UnknownTimestamp,\n\t\twantThreadStats: &ThreadStatistics{\n\t\t\tWaitTime: 60,\n\t\t\tPostWakeupWaitTime: 60,\n\t\t\tRunTime: 0,\n\t\t\tSleepTime: 40,\n\t\t\tWakeups: 1, // At 1040\n\t\t\tMigrations: 1,\n\t\t},\n\t}, {\n\t\tdescription: \"Time filtered\",\n\t\tstartTimestamp: 1045,\n\t\tendTimestamp: 1090,\n\t\twantThreadStats: &ThreadStatistics{\n\t\t\tWaitTime: 45, // PID 200\n\t\t\tPostWakeupWaitTime: 45, // All waits\n\t\t\tRunTime: 90, // all the time on CPUs 1 and 2\n\t\t\tSleepTime: 45, // PID 300\n\t\t\tWakeups: 2, // PID 200 at its initial point, PID 300 at 1090\n\t\t\tMigrations: 1, // PID 200\n\t\t},\n\t}}\n\tfor _, test := range tests {\n\t\tt.Run(test.description, func(t *testing.T) {\n\t\t\tgotThreadStats, err := c.ThreadStats(PIDs(test.PIDs...), CPUs(test.CPUs...), TimeRange(test.startTimestamp, test.endTimestamp))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Unexpected error %s\", err)\n\t\t\t}\n\t\t\tdiff := cmp.Diff(gotThreadStats, test.wantThreadStats)\n\t\t\tif len(diff) > 0 {\n\t\t\t\tt.Errorf(\"c.ThreadStats() = %v, diff(got->want) %v\", gotThreadStats, diff)\n\t\t\t}\n\t\t})\n\t}\n}",
"func TestThreadedRegister(t *testing.T) {\n\t// dummyChannel for use for test\n\tdummyChannel := make(chan []byte)\n\tdefer func() {\n\t\t// cleanup the dummyChannel at the end of the test\n\t\tclose(dummyChannel)\n\t}()\n\n\tfor _, testVal := range TestThreadedRegisterProvider {\n\t\tt.Run(testVal.testName, func(t *testing.T) {\n\t\t\treg := GetRegistryProvider()\n\t\t\tvar wg sync.WaitGroup\n\n\t\t\tregisterThread := func(wg *sync.WaitGroup, clientIDs []identifier.Client) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tfor _, inputClientID := range clientIDs {\n\t\t\t\t\treg.Register(dummyChannel, inputClientID)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tIDLength := len(testVal.inputClientIDs)\n\t\t\twg.Add(1)\n\t\t\tgo registerThread(&wg, testVal.inputClientIDs[0:IDLength/2])\n\t\t\twg.Add(1)\n\t\t\tgo registerThread(&wg, testVal.inputClientIDs[IDLength/2:])\n\n\t\t\t// Will wait for the above two threads to finish\n\t\t\twg.Wait()\n\n\t\t\t// now check to see if all the clients are in the map\n\t\t\tfor _, inputClientID := range testVal.inputClientIDs {\n\t\t\t\t_, ok := reg.lookupMap[inputClientID.ClientUUID]\n\t\t\t\tif !ok {\n\t\t\t\t\tt.Errorf(\n\t\t\t\t\t\t\"unable to find: hubID: %v, clientID: %v\",\n\t\t\t\t\t\tinputClientID.HubName.HubName,\n\t\t\t\t\t\tinputClientID.ClientUUID.UUID,\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}",
"func TestParallel(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\texpected map[string]bool\n\t\tsleepDuration time.Duration\n\t}{\n\t\t{\n\t\t\tname: \"simple test\",\n\t\t\texpected: map[string]bool{\"dev1\": true, \"dev2\": true, \"dev3\": true, \"dev4\": true},\n\t\t\tsleepDuration: 200 * time.Millisecond,\n\t\t},\n\t}\n\n\ttestHosts := map[string]*gornir.Host{\n\t\t\"dev1\": {Hostname: \"dev1\"},\n\t\t\"dev2\": {Hostname: \"dev2\"},\n\t\t\"dev3\": {Hostname: \"dev3\"},\n\t\t\"dev4\": {Hostname: \"dev4\"},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\t\tresults := make(chan *gornir.JobResult, len(testHosts))\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\trnr := runner.Parallel()\n\t\t\tstartTime := time.Now()\n\t\t\tif err := rnr.Run(\n\t\t\t\tcontext.Background(),\n\t\t\t\tNewNullLogger(),\n\t\t\t\tmake(gornir.Processors, 0),\n\t\t\t\t&testTaskSleep{sleepDuration: tc.sleepDuration},\n\t\t\t\ttestHosts,\n\t\t\t\tresults,\n\t\t\t); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif err := rnr.Wait(); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tclose(results)\n\n\t\t\t// let's process the results and turn it into a map so we can\n\t\t\t// compare with our expected value\n\t\t\tgot := make(map[string]bool)\n\t\t\tfor res := range results {\n\t\t\t\tgot[res.Host().Hostname] = res.Data().(testTaskSleepResults).success\n\t\t\t}\n\t\t\tif !cmp.Equal(got, tc.expected) {\n\t\t\t\tt.Error(cmp.Diff(got, tc.expected))\n\t\t\t}\n\t\t\t// now we check test took what we expected\n\t\t\tif time.Since(startTime) > (tc.sleepDuration + time.Millisecond*100) {\n\t\t\t\tt.Errorf(\"test took to long, parallelization might not be working: %v\\n\", time.Since(startTime).Seconds())\n\t\t\t}\n\t\t})\n\t}\n}",
"func runTest(t *testing.T, c testConfig) {\n\tfmt.Printf(\"Testing %v jobs on %v workers with jobCap of %v and useDocker %v\\n\", c.jobs, c.workers, c.jobCap, c.useDocker)\n\td := NewDispatcher(c.workers, c.jobCap)\n\n\tvar enqueueError error\n\n\t// wg waits for all the jobs to finish.\n\tvar wg sync.WaitGroup\n\n\t// Start all the jobs.\n\tfor i := 0; i < c.jobs; i++ {\n\t\tres := newMockResponseEventSink()\n\t\tjob := NewJob(mockTestBody, res, c.maxSize, c.maxTime, c.useDocker, c.memLimit)\n\n\t\tresultChan, err := d.Enqueue(job)\n\t\tif err != nil {\n\t\t\tenqueueError = err\n\t\t\tbreak\n\t\t}\n\n\t\twg.Add(1)\n\n\t\tgo func() {\n\t\t\tr := <-resultChan\n\t\t\tassertExpectedResult(t, c, r)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\tif !c.expectEnqueueFail && enqueueError != nil {\n\t\tt.Fatalf(\"Enqueue failed: %v. Test config: %#v\", enqueueError, c)\n\t}\n\n\tif c.expectEnqueueFail && enqueueError == nil {\n\t\tt.Fatalf(\"Expected Enqueue to fail but it did not. Test config: %#v\", c)\n\t}\n\n\t// Test must finish in 30 seconds.\n\ttimeout := time.After(30 * time.Second)\n\n\tjobsFinished := make(chan bool)\n\n\tgo func() {\n\t\twg.Wait()\n\t\tjobsFinished <- true\n\t}()\n\n\t// Wait for timeout or all jobs to finish.\n\tselect {\n\tcase <-timeout:\n\t\tt.Fatalf(\"Expected jobs to complete but got timeout. Test config: %#v\", c)\n\tcase <-jobsFinished:\n\t}\n\n\td.Stop()\n}",
"func (tk *AsyncTestKit) ConcurrentRun(\n\tconcurrent int,\n\tloops int,\n\tprepareFunc func(ctx context.Context, tk *AsyncTestKit, concurrent int, currentLoop int) [][][]interface{},\n\twriteFunc func(ctx context.Context, tk *AsyncTestKit, input [][]interface{}),\n\tcheckFunc func(ctx context.Context, tk *AsyncTestKit),\n) {\n\tchannel := make([]chan [][]interface{}, concurrent)\n\tcontextList := make([]context.Context, concurrent)\n\tdoneList := make([]context.CancelFunc, concurrent)\n\n\tfor i := 0; i < concurrent; i++ {\n\t\tw := i\n\t\tchannel[w] = make(chan [][]interface{}, 1)\n\t\tcontextList[w], doneList[w] = context.WithCancel(context.Background())\n\t\tcontextList[w] = tk.OpenSession(contextList[w], \"test\")\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tr := recover()\n\t\t\t\ttk.require.Nil(r, string(GetStack()))\n\t\t\t\tdoneList[w]()\n\t\t\t}()\n\n\t\t\tfor input := range channel[w] {\n\t\t\t\twriteFunc(contextList[w], tk, input)\n\t\t\t}\n\t\t}()\n\t}\n\n\tdefer func() {\n\t\tfor i := 0; i < concurrent; i++ {\n\t\t\ttk.CloseSession(contextList[i])\n\t\t}\n\t}()\n\n\tctx := tk.OpenSession(context.Background(), \"test\")\n\tdefer tk.CloseSession(ctx)\n\ttk.MustExec(ctx, \"use test\")\n\n\tfor j := 0; j < loops; j++ {\n\t\tdata := prepareFunc(ctx, tk, concurrent, j)\n\t\tfor i := 0; i < concurrent; i++ {\n\t\t\tchannel[i] <- data[i]\n\t\t}\n\t}\n\n\tfor i := 0; i < concurrent; i++ {\n\t\tclose(channel[i])\n\t}\n\n\tfor i := 0; i < concurrent; i++ {\n\t\t<-contextList[i].Done()\n\t}\n\tcheckFunc(ctx, tk)\n}",
"func runTests(inventory Inventory, opts TestOpts) (errs int) {\n\n\tinput := make(chan Job)\n\toutput := make(chan Job)\n\tdone := make(chan bool, len(inventory[\"images\"]))\n\n\tfor i := 0; i < opts.Threads; i++ {\n\t\tgo testWorker(input, output)\n\t}\n\n\tgo reporter(output, done)\n\n\tfor i, image := range inventory[\"images\"] {\n\t\tinput <- Job{\n\t\t\tImage: image,\n\t\t\tRetries: opts.Retries,\n\t\t\tId: i,\n\t\t}\n\t}\n\n\terrs = 0\n\tfor i := 0; i < len(inventory[\"images\"]); i++ {\n\t\tif <-done == false {\n\t\t\terrs++\n\t\t}\n\t}\n\n\treturn\n}",
"func TestTableParallel(t *testing.T) {\n\ttcs := []struct {\n\t\tname string\n\t\tsleep time.Duration\n\t}{\n\t\t{name: \"1s\", sleep: 1 * time.Second},\n\t\t{name: \"2s\", sleep: 2 * time.Second},\n\t\t{name: \"3s\", sleep: 3 * time.Second},\n\t}\n\t// start_for_TestTableParallel OMIT\n\tfor _, tc := range tcs { // HLsharing\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tt.Parallel() // HL\n\t\t\tt.Log(\"running \", tc.name) // HLsharing\n\t\t\ttime.Sleep(tc.sleep)\n\t\t})\n\t}\n\t// end_for_TestTableParallel OMIT\n}",
"func (s *Suite) TestConcurrentExecution() {\n\tsts := SimpleTestSetup{\n\t\tNamespaceName: \"TestConcurrentExecution\",\n\t\tWorkSpecName: \"spec\",\n\t}\n\tsts.SetUp(s)\n\tdefer sts.TearDown(s)\n\n\tnumUnits := 100\n\ts.createWorkUnits(sts.WorkSpec, numUnits)\n\tresults := make(chan map[string]string, 8)\n\tpanics := pooled(func() {\n\t\tworker := createWorker(sts.Namespace)\n\t\tme := worker.Name()\n\t\tdone := make(map[string]string)\n\t\tfor {\n\t\t\tattempts, err := worker.RequestAttempts(coordinate.AttemptRequest{})\n\t\t\tif !s.NoError(err) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(attempts) == 0 {\n\t\t\t\tresults <- done\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, attempt := range attempts {\n\t\t\t\tdone[attempt.WorkUnit().Name()] = me\n\t\t\t\terr = attempt.Finish(nil)\n\t\t\t\tif !s.NoError(err) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\ts.Empty(panics)\n\n\tclose(results)\n\tallResults := make(map[string]string)\n\tfor result := range results {\n\t\tfor name, seq := range result {\n\t\t\tif other, dup := allResults[name]; dup {\n\t\t\t\ts.Fail(\"duplicate work unit\",\n\t\t\t\t\t\"work unit %v done by both %v and %v\", name, other, seq)\n\t\t\t} else {\n\t\t\t\tallResults[name] = seq\n\t\t\t}\n\t\t}\n\t}\n\tfor i := 0; i < numUnits; i++ {\n\t\tname := fmt.Sprintf(\"u%v\", i)\n\t\ts.Contains(allResults, name,\n\t\t\t\"work unit %v not done by anybody\", name)\n\t}\n}",
"func (t *FFXTester) TestMultiple(ctx context.Context, tests []testsharder.Test, stdout, stderr io.Writer, outDir string) ([]*TestResult, error) {\n\tvar testDefs []build.TestListEntry\n\ttestsByURL := make(map[string]testsharder.Test)\n\tfor _, test := range tests {\n\t\tvar numRuns int\n\t\tif test.RunAlgorithm == testsharder.KeepGoing {\n\t\t\tnumRuns = test.Runs\n\t\t} else {\n\t\t\t// StopOnFailure and StopOnSuccess are used to determine retries, which are not\n\t\t\t// supported yet with ffx. Retries are now run at the end after all tests have\n\t\t\t// run, so they can just be rerun with another call to Test() for StopOnSuccess.\n\t\t\t// StopOnFailure is used for multiplier shards to run as many times as test.Runs\n\t\t\t// or until it gets a failure. This will need to be supported in ffx so that we\n\t\t\t// can use the multiple test feature in ffx to run multiplier tests.\n\t\t\tnumRuns = 1\n\t\t}\n\t\ttestsByURL[test.PackageURL] = test\n\n\t\tfor i := 0; i < numRuns; i++ {\n\t\t\ttestDefs = append(testDefs, build.TestListEntry{\n\t\t\t\tName: test.PackageURL,\n\t\t\t\tLabels: []string{test.Label},\n\t\t\t\tExecution: build.ExecutionDef{\n\t\t\t\t\tType: \"fuchsia_component\",\n\t\t\t\t\tComponentURL: test.PackageURL,\n\t\t\t\t\tTimeoutSeconds: int(test.Timeout.Seconds()),\n\t\t\t\t\tParallel: test.Parallel,\n\t\t\t\t\tMaxSeverityLogs: test.LogSettings.MaxSeverity,\n\t\t\t\t},\n\t\t\t\tTags: test.Tags,\n\t\t\t})\n\t\t}\n\t}\n\tt.ffx.SetStdoutStderr(stdout, stderr)\n\tdefer t.ffx.SetStdoutStderr(os.Stdout, os.Stderr)\n\n\textraArgs := []string{\"--filter-ansi\"}\n\tif t.experimentLevel == 3 {\n\t\textraArgs = append(extraArgs, \"--experimental-parallel-execution\", \"8\")\n\t}\n\tstartTime := clock.Now(ctx)\n\trunResult, err := t.ffx.Test(ctx, build.TestList{Data: testDefs, SchemaID: build.TestListSchemaIDExperimental}, outDir, extraArgs...)\n\tif err != nil {\n\t\treturn []*TestResult{}, err\n\t}\n\treturn t.processTestResult(runResult, testsByURL, clock.Now(ctx).Sub(startTime))\n}",
"func TestMultiPutStress(t *testing.T) {\n\n}",
"func TestConcurrentSafety(t *testing.T) {\n\tworkerCount := 10\n\tdoneChan := make(chan struct{})\n\n\tfor i := 0; i < workerCount; i++ {\n\t\tgo func() {\n\t\t\tfor j := 0; j < 1000; j++ {\n\t\t\t\tFirstName()\n\t\t\t\tLastName()\n\t\t\t\tGender()\n\t\t\t\tFullName()\n\t\t\t\tDay()\n\t\t\t\tCountry()\n\t\t\t\tCompany()\n\t\t\t\tIndustry()\n\t\t\t\tStreet()\n\t\t\t}\n\t\t\tdoneChan <- struct{}{}\n\t\t}()\n\t}\n\n\tfor i := 0; i < workerCount; i++ {\n\t\t<-doneChan\n\t}\n}",
"func TestAllTasksLaunchParallel(t *testing.T) {\n\n\tworkTime := time.Second\n\ttasks := []Task{func() error {\n\t\ttime.Sleep(workTime)\n\t\treturn nil\n\t}, func() error {\n\t\ttime.Sleep(workTime)\n\t\treturn nil\n\t}, func() error {\n\t\ttime.Sleep(workTime)\n\t\treturn nil\n\t}, func() error {\n\t\ttime.Sleep(workTime)\n\t\treturn nil\n\t}}\n\tthreadCount := 4\n\terrorLimit := 1\n\tstartTime := time.Now()\n\terr := Run(tasks, threadCount, errorLimit)\n\tendTime := time.Now()\n\n\tassert.Nil(t, err)\n\tassert.Equal(t, int(workTime.Seconds()), endTime.Second()-startTime.Second())\n}",
"func initTests(c *C, p string) error {\n\tfor i, t := range c.Tests {\n\t\ttk, err := task.New(t.TaskName, p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Tests[i].Task = *tk\n\t}\n\treturn nil\n}",
"func TestMultiAttachUesInConcurrencyWithGNBs(numberGNBs int) {\n\n\tvar wg sync.WaitGroup\n\n\tcfg, err := config.GetConfig()\n\tif err != nil {\n\t\tlog.Fatal(\"Error in get configuration\")\n\t}\n\n\tlog.Info(\"Testing attached with \", numberGNBs, \" ues in different GNBs\")\n\tlog.Info(\"[CORE]\", cfg.AMF.Name, \" Core in Testing\")\n\n\tranPort := cfg.GNodeB.ControlIF.Port\n\n\t// Launch several goroutines and increment the WaitGroup counter for each.\n\tfor i := 1; i <= numberGNBs; i++ {\n\t\timsi := control_test_engine.ImsiGenerator(i)\n\t\twg.Add(1)\n\t\tgo attachUeWithGNB(imsi, cfg, int64(i), &wg, ranPort)\n\t\tranPort++\n\t\t// time.Sleep(10* time.Millisecond)\n\t}\n\n\t// wait for multiple goroutines.\n\twg.Wait()\n\n\t// function worked fine.\n\t//return nil\n}",
"func TestMychan(t *testing.T) {\n\tfmt.Println(\"test method : test mychan \")\n}",
"func TestTableParallelFixed(t *testing.T) {\n\ttcs := []struct {\n\t\tname string\n\t\tsleep time.Duration\n\t}{\n\t\t{name: \"1s\", sleep: 1 * time.Second},\n\t\t{name: \"2s\", sleep: 2 * time.Second},\n\t\t{name: \"3s\", sleep: 3 * time.Second},\n\t}\n\t// start_for_TestTableParallelFixed OMIT\n\tfor _, tc := range tcs {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\ttc := tc // rebidding tc so the goroutines will not share it // HL\n\t\t\tt.Parallel()\n\t\t\tt.Log(\"running \", tc.name)\n\t\t\ttime.Sleep(tc.sleep)\n\t\t})\n\t}\n\t// end_for_TestTableParallelFixed OMIT\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Builder returns a builder, filling in the WITH prefix and the joins. It uses the default placeholder format for squirrel.SelectBuilder. To use it against postgres as the main query, chain with `PlaceholderFormat`; subqueries should keep the default placeholder format. Be sure to add paging, sorting, and the like yourself. Conditions are applied automatically.
|
func (d *SelectBuilder) Builder(cols ...string) squirrel.SelectBuilder {
qry := squirrel.Select(cols...).From(d.From).
PrefixExpr(d.WithPrefix)
d.ApplyConditions(&qry)
d.ApplyJoin(&qry)
return qry
}
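A minimal usage sketch, not from the original source: the columns, ordering, and limit are illustrative, and squirrel.Dollar is the postgres placeholder format the comment above refers to.

qry := d.Builder("u.id", "u.name").
    OrderBy("u.name ASC").
    Limit(25).
    PlaceholderFormat(squirrel.Dollar) // main query against postgres
sql, args, err := qry.ToSql()
if err != nil {
    // handle the build error
}
_, _ = sql, args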
|
[
"func (q *BaseQuery) Builder() (*sq.SelectBuilder, error) {\n\tvar (\n\t\tfields []string\n\t\tselectedFields []SchemaField\n\t)\n\tif len(q.selectedFields) == 0 {\n\t\tselectedFields = q.Schema.Columns()\n\t} else {\n\t\tselectedFields = q.selectedFields\n\t}\n\tfields = make([]string, 0, len(selectedFields)-len(q.fieldsExcluded)) // All fields of the schema without the excluded fields.\n\n\t// Generate the fields list for selection\n\tfor _, schemaField := range selectedFields {\n\t\tif _, ok := q.fieldsExcluded[schemaField]; ok {\n\t\t\tcontinue // Skip field...\n\t\t}\n\t\tif aliasField, ok := schemaField.(AliasedSchemaField); ok {\n\t\t\tfields = append(fields, aliasField.Reference())\n\t\t\tcontinue\n\t\t}\n\t\tfields = append(fields, schemaField.QualifiedName(q.Schema))\n\t}\n\n\t// Format the table\n\ttableName := q.Schema.Table()\n\tif q.Schema.Alias() != \"\" {\n\t\ttableName += \" \" + q.Schema.Alias()\n\t}\n\n\t// Creates the initial select\n\tsqQuery := sq.Select(fields...).From(tableName)\n\n\tsqQuery.PlaceholderFormat(sq.Dollar) // TODO(jota): To parametrize it.\n\n\t// If we have conditions to be added ...\n\tif len(q.where) > 0 {\n\t\tq.applyConditions(sqQuery)\n\t}\n\n\t// Applies the limit\n\tif q.limit > 0 {\n\t\tsqQuery.Limit(q.limit)\n\t}\n\n\t// Applies the offset\n\tif q.offset > 0 {\n\t\tsqQuery.Offset(q.offset)\n\t}\n\n\tif len(q.order) > 0 {\n\t\tsqQuery.OrderBy(q.order...)\n\t}\n\n\tif q.forType != nil {\n\t\tsqQuery.For(*q.forType, q.forLockingType)\n\t}\n\treturn sqQuery, nil\n}",
"func (b SelectBuilder) Join(join string, rest ...interface{}) SelectBuilder {\n\treturn b.JoinClause(\"JOIN \"+join, rest...)\n}",
"func (b SelectBuilder) Prefix(sql string, args ...interface{}) SelectBuilder {\n\treturn b.PrefixExpr(Expr(sql, args...))\n}",
"func NewBuilder(table string) *Builder {\n\tb := Builder{\n\t\ttable: table,\n\t\tprocessor: &BuildProcessorDefault{},\n\t\tallowemptywhere: false,\n\t}\n\tb.fields = list.New()\n\treturn &b\n}",
"func (b *Builder) Join(table string, condition string, args ...string) *Builder {\n joinType := \"inner\"\n lenArgs := len(args)\n if lenArgs > 0 {\n joinType = args[0]\n }\n j := &join{table, condition, joinType}\n b.joins = append(b.joins, j)\n return b\n}",
"func (q SelectQuery) AppendSQL(buf *strings.Builder, args *[]interface{}, params map[string]int) {\n\t// WITH\n\tif !q.nested {\n\t\tappendCTEs(buf, args, q.CTEs, q.FromTable, q.JoinTables)\n\t}\n\t// SELECT\n\tif q.SelectType == \"\" {\n\t\tq.SelectType = SelectTypeDefault\n\t}\n\tbuf.WriteString(string(q.SelectType))\n\tif q.SelectType == SelectTypeDistinctOn {\n\t\tbuf.WriteString(\" (\")\n\t\tq.DistinctOn.AppendSQLExclude(buf, args, nil, nil)\n\t\tbuf.WriteString(\")\")\n\t}\n\tif len(q.SelectFields) > 0 {\n\t\tbuf.WriteString(\" \")\n\t\tq.SelectFields.AppendSQLExcludeWithAlias(buf, args, nil, nil)\n\t}\n\t// FROM\n\tif q.FromTable != nil {\n\t\tbuf.WriteString(\" FROM \")\n\t\tswitch v := q.FromTable.(type) {\n\t\tcase Query:\n\t\t\tbuf.WriteString(\"(\")\n\t\t\tv.NestThis().AppendSQL(buf, args, nil)\n\t\t\tbuf.WriteString(\")\")\n\t\tdefault:\n\t\t\tq.FromTable.AppendSQL(buf, args, nil)\n\t\t}\n\t\talias := q.FromTable.GetAlias()\n\t\tif alias != \"\" {\n\t\t\tbuf.WriteString(\" AS \")\n\t\t\tbuf.WriteString(alias)\n\t\t}\n\t}\n\t// JOIN\n\tif len(q.JoinTables) > 0 {\n\t\tbuf.WriteString(\" \")\n\t\tq.JoinTables.AppendSQL(buf, args, nil)\n\t}\n\t// WHERE\n\tif len(q.WherePredicate.Predicates) > 0 {\n\t\tbuf.WriteString(\" WHERE \")\n\t\tq.WherePredicate.toplevel = true\n\t\tq.WherePredicate.AppendSQLExclude(buf, args, nil, nil)\n\t}\n\t// GROUP BY\n\tif len(q.GroupByFields) > 0 {\n\t\tbuf.WriteString(\" GROUP BY \")\n\t\tq.GroupByFields.AppendSQLExclude(buf, args, nil, nil)\n\t}\n\t// HAVING\n\tif len(q.HavingPredicate.Predicates) > 0 {\n\t\tbuf.WriteString(\" HAVING \")\n\t\tq.HavingPredicate.toplevel = true\n\t\tq.HavingPredicate.AppendSQLExclude(buf, args, nil, nil)\n\t}\n\t// WINDOW\n\tif len(q.Windows) > 0 {\n\t\tbuf.WriteString(\" WINDOW \")\n\t\tq.Windows.AppendSQL(buf, args, nil)\n\t}\n\t// ORDER BY\n\tif len(q.OrderByFields) > 0 {\n\t\tbuf.WriteString(\" ORDER BY \")\n\t\tq.OrderByFields.AppendSQLExclude(buf, args, nil, nil)\n\t}\n\t// LIMIT\n\tif q.LimitValue != nil {\n\t\tbuf.WriteString(\" LIMIT ?\")\n\t\tif *q.LimitValue < 0 {\n\t\t\t*q.LimitValue = -*q.LimitValue\n\t\t}\n\t\t*args = append(*args, *q.LimitValue)\n\t}\n\t// OFFSET\n\tif q.OffsetValue != nil {\n\t\tbuf.WriteString(\" OFFSET ?\")\n\t\tif *q.OffsetValue < 0 {\n\t\t\t*q.OffsetValue = -*q.OffsetValue\n\t\t}\n\t\t*args = append(*args, *q.OffsetValue)\n\t}\n\tif !q.nested {\n\t\tquery := buf.String()\n\t\tbuf.Reset()\n\t\tquestionToDollarPlaceholders(buf, query)\n\t\tif q.Log != nil {\n\t\t\tvar logOutput string\n\t\t\tswitch {\n\t\t\tcase Lstats&q.LogFlag != 0:\n\t\t\t\tlogOutput = \"\\n----[ Executing query ]----\\n\" + buf.String() + \" \" + fmt.Sprint(*args) +\n\t\t\t\t\t\"\\n----[ with bind values ]----\\n\" + questionInterpolate(query, *args...)\n\t\t\tcase Linterpolate&q.LogFlag != 0:\n\t\t\t\tlogOutput = questionInterpolate(query, *args...)\n\t\t\tdefault:\n\t\t\t\tlogOutput = buf.String() + \" \" + fmt.Sprint(*args)\n\t\t\t}\n\t\t\tswitch q.Log.(type) {\n\t\t\tcase *log.Logger:\n\t\t\t\t_ = q.Log.Output(q.logSkip+2, logOutput)\n\t\t\tdefault:\n\t\t\t\t_ = q.Log.Output(q.logSkip+1, logOutput)\n\t\t\t}\n\t\t}\n\t}\n}",
"func (sqlbldr *Builder) StartWith( aSql string ) *Builder {\n\tsqlbldr.mySql = aSql\n\treturn sqlbldr\n}",
"func Build(q *Query) (query string, params []interface{}) {\n\tquery += q.padSpace(q.buildQuery())\n\tif q.typ == queryTypeRawQuery || q.typ == queryTypeExists {\n\t\treturn q.trim(query), q.params\n\t}\n\tquery += q.padSpace(q.buildAs())\n\tquery += q.padSpace(q.buildDuplicate())\n\tquery += q.padSpace(q.buildUnion())\n\tquery += q.padSpace(q.buildJoin())\n\tquery += q.padSpace(q.buildWhere())\n\tquery += q.padSpace(q.buildHaving())\n\tquery += q.padSpace(q.buildOrderBy())\n\tquery += q.padSpace(q.buildGroupBy())\n\tquery += q.padSpace(q.buildLimit())\n\tquery += q.padSpace(q.buildOffset())\n\tquery += q.padSpace(q.buildAfterQueryOptions())\n\treturn q.trim(query), q.params\n}",
"func NewBuilder() Builder {\n\treturn &builder{\n\t\tproduct: make(map[string]string),\n\t\tallFieldsPresent: true,\n\t}\n}",
"func (*Concrete) queryBuilder(db *gorm.DB, cond map[string]interface{}) *gorm.DB {\n\tif cond == nil {\n\t\treturn db\n\t}\n\n\tfor key, value := range cond {\n\t\tif c := condAnalyze(key); len(c) > 1 {\n\t\t\tswitch c[1] {\n\t\t\tcase NotIn:\n\t\t\t\tdb = db.Not(value)\n\t\t\tcase In:\n\t\t\t\tdb = db.Where(fmt.Sprintf(\"%s %s (?)\", c[0], c[1]), value)\n\t\t\tcase Like:\n\t\t\t\tdb = db.Where(fmt.Sprintf(\"%s %s ?\", c[0], c[1]), fmt.Sprintf(\"%%%s%%\", value))\n\t\t\tdefault:\n\t\t\t\tdb = db.Where(fmt.Sprintf(\"%s %s ?\", c[0], c[1]), value)\n\t\t\t}\n\t\t} else {\n\t\t\tdb = db.Where(fmt.Sprintf(\"%s = ?\", key), value)\n\t\t}\n\t}\n\treturn db\n}",
"func String(query string) Builder {\n\treturn sb.Prefix(query)\n}",
"func (b *SQLBuilder) From(src Source) {\n\tb.w.WriteLine(`FROM ` + b.SourceToSQL(src))\n}",
"func (ub *UnionBuilder) BuildWithFlavor(flavor Flavor, initialArg ...interface{}) (sql string, args []interface{}) {\n\tbuf := newStringBuilder()\n\tub.injection.WriteTo(buf, unionMarkerInit)\n\n\tif len(ub.builders) > 0 {\n\t\tneedParen := flavor != SQLite\n\n\t\tif needParen {\n\t\t\tbuf.WriteLeadingString(\"(\")\n\t\t\tbuf.WriteString(ub.Var(ub.builders[0]))\n\t\t\tbuf.WriteRune(')')\n\t\t} else {\n\t\t\tbuf.WriteLeadingString(ub.Var(ub.builders[0]))\n\t\t}\n\n\t\tfor _, b := range ub.builders[1:] {\n\t\t\tbuf.WriteString(ub.opt)\n\n\t\t\tif needParen {\n\t\t\t\tbuf.WriteRune('(')\n\t\t\t}\n\n\t\t\tbuf.WriteString(ub.Var(b))\n\n\t\t\tif needParen {\n\t\t\t\tbuf.WriteRune(')')\n\t\t\t}\n\t\t}\n\t}\n\n\tub.injection.WriteTo(buf, unionMarkerAfterUnion)\n\n\tif len(ub.orderByCols) > 0 {\n\t\tbuf.WriteLeadingString(\"ORDER BY \")\n\t\tbuf.WriteString(strings.Join(ub.orderByCols, \", \"))\n\n\t\tif ub.order != \"\" {\n\t\t\tbuf.WriteRune(' ')\n\t\t\tbuf.WriteString(ub.order)\n\t\t}\n\n\t\tub.injection.WriteTo(buf, unionMarkerAfterOrderBy)\n\t}\n\n\tif ub.limit >= 0 {\n\t\tbuf.WriteLeadingString(\"LIMIT \")\n\t\tbuf.WriteString(strconv.Itoa(ub.limit))\n\n\t}\n\n\tif MySQL == flavor && ub.limit >= 0 || PostgreSQL == flavor {\n\t\tif ub.offset >= 0 {\n\t\t\tbuf.WriteLeadingString(\"OFFSET \")\n\t\t\tbuf.WriteString(strconv.Itoa(ub.offset))\n\t\t}\n\t}\n\n\tif ub.limit >= 0 {\n\t\tub.injection.WriteTo(buf, unionMarkerAfterLimit)\n\t}\n\n\treturn ub.args.CompileWithFlavor(buf.String(), flavor, initialArg...)\n}",
"func NewSQLBuilder(d Driver) SQLBuilder {\n\talias := NoAlias()\n\n\treturn SQLBuilder{sqlWriter{}, NewContext(d, alias)}\n}",
"func (q SelectQuery) Join(table Table, predicate Predicate, predicates ...Predicate) SelectQuery {\n\tpredicates = append([]Predicate{predicate}, predicates...)\n\tq.JoinTables = append(q.JoinTables, JoinTable{\n\t\tJoinType: JoinTypeInner,\n\t\tTable: table,\n\t\tOnPredicates: VariadicPredicate{\n\t\t\tPredicates: predicates,\n\t\t},\n\t})\n\treturn q\n}",
"func Select(columns ...string) Builder {\n\treturn sb.Columns(columns...)\n}",
"func WithPrefix(prefix string) Option {\n\treturn func(o *options) {\n\t\to.mapField = func(field string) (string, error) {\n\t\t\treturn prefix + field, nil\n\t\t}\n\t}\n}",
"func ExampleBuilder_WithProjection() {\n\tsvc := dynamodb.New(session.New())\n\n\t// Construct the Key condition builder\n\tkeyCond := expression.Key(\"Artist\").Equal(expression.Value(\"No One You Know\"))\n\n\t// Create the project expression builder with a names list.\n\tproj := expression.NamesList(expression.Name(\"SongTitle\"))\n\n\t// Combine the key condition, and projection together as a DynamoDB expression\n\t// builder.\n\texpr, err := expression.NewBuilder().\n\t\tWithKeyCondition(keyCond).\n\t\tWithProjection(proj).\n\t\tBuild()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\t// Use the built expression to populate the DynamoDB Query's API input\n\t// parameters.\n\tinput := &dynamodb.QueryInput{\n\t\tExpressionAttributeNames: expr.Names(),\n\t\tExpressionAttributeValues: expr.Values(),\n\t\tKeyConditionExpression: expr.KeyCondition(),\n\t\tProjectionExpression: expr.Projection(),\n\t\tTableName: aws.String(\"Music\"),\n\t}\n\n\tresult, err := svc.Query(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase dynamodb.ErrCodeProvisionedThroughputExceededException:\n\t\t\t\tfmt.Println(dynamodb.ErrCodeProvisionedThroughputExceededException, aerr.Error())\n\t\t\tcase dynamodb.ErrCodeResourceNotFoundException:\n\t\t\t\tfmt.Println(dynamodb.ErrCodeResourceNotFoundException, aerr.Error())\n\t\t\tcase dynamodb.ErrCodeInternalServerError:\n\t\t\t\tfmt.Println(dynamodb.ErrCodeInternalServerError, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}",
"func (f *Factory) ConstructJoin(\n\tjoinOp opt.Operator, left, right memo.RelExpr, on memo.FiltersExpr, private *memo.JoinPrivate,\n) memo.RelExpr {\n\tswitch joinOp {\n\tcase opt.InnerJoinOp:\n\t\treturn f.ConstructInnerJoin(left, right, on, private)\n\tcase opt.InnerJoinApplyOp:\n\t\treturn f.ConstructInnerJoinApply(left, right, on, private)\n\tcase opt.LeftJoinOp:\n\t\treturn f.ConstructLeftJoin(left, right, on, private)\n\tcase opt.LeftJoinApplyOp:\n\t\treturn f.ConstructLeftJoinApply(left, right, on, private)\n\tcase opt.RightJoinOp:\n\t\treturn f.ConstructRightJoin(left, right, on, private)\n\tcase opt.FullJoinOp:\n\t\treturn f.ConstructFullJoin(left, right, on, private)\n\tcase opt.SemiJoinOp:\n\t\treturn f.ConstructSemiJoin(left, right, on, private)\n\tcase opt.SemiJoinApplyOp:\n\t\treturn f.ConstructSemiJoinApply(left, right, on, private)\n\tcase opt.AntiJoinOp:\n\t\treturn f.ConstructAntiJoin(left, right, on, private)\n\tcase opt.AntiJoinApplyOp:\n\t\treturn f.ConstructAntiJoinApply(left, right, on, private)\n\t}\n\tpanic(errors.AssertionFailedf(\"unexpected join operator: %v\", log.Safe(joinOp)))\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetFrom sets the base [From] table in the query.
|
func (d *SelectBuilder) SetFrom(table string) *SelectBuilder {
d.From = table
return d
}
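Because SetFrom returns the receiver, it chains with the other builder methods; a hypothetical sketch, assuming a zero-value SelectBuilder is usable here:

d := (&SelectBuilder{}).
    SetFrom("users u") // base table, aliased for use in joins and conditions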
|
[
"func (q SelectQuery) From(table Table) SelectQuery {\n\tq.FromTable = table\n\treturn q\n}",
"func (stmt *SelectStmt) From(table string) *SelectStmt {\n\tstmt.Table = table\n\treturn stmt\n}",
"func From(table Table) SelectQuery {\n\treturn SelectQuery{\n\t\tFromTable: table,\n\t}\n}",
"func (tt *TruthTable) SetAllFrom(from string, value bool) {\n\tdict, ok := tt.Values[from]\n\tif !ok {\n\t\tframework.Failf(\"from-key %s not found\", from)\n\t}\n\tfor _, to := range tt.Tos {\n\t\tdict[to] = value\n\t}\n}",
"func (b *SQLBuilder) From(src Source) {\n\tb.w.WriteLine(`FROM ` + b.SourceToSQL(src))\n}",
"func FromTableCopyQuery(conf mfe.Variant, dbTypeFrom string) (s string) {\n\tqueryFrom := conf.GE(\"query_from\").Str()\n\n\tif queryFrom == \"\" {\n\n\t\tsbf := mfdb.SBulkFieldCreate(conf.GE(\"fields\"))\n\n\t\ttableFrom := conf.GE(\"table_from\").Str()\n\t\tidName := mfe.CoalesceS(conf.GE(\"id_name\").Str(), \"_id\")\n\t\tlimit := mfe.CoalesceI(conf.GE(\"limit\").Int(), 10000)\n\n\t\tvar fieldsList string\n\t\tif sbf.Any() {\n\t\t\tfieldsList = mfe.JoinS(\",\", sbf.Columns()...)\n\t\t} else {\n\t\t\tfieldsList = \"*\"\n\t\t}\n\n\t\tif dbTypeFrom == \"sqlserver\" {\n\t\t\tqueryFrom = \"select top (\" + fmt.Sprint(limit) + \") \" + fieldsList + \" from \" + tableFrom + \" order by \" + idName + \";\"\n\t\t} else {\n\t\t\tqueryFrom = \"select \" + fieldsList + \" from \" + tableFrom + \" order by \" + idName + \" limit \" + fmt.Sprint(limit) + \";\"\n\t\t}\n\n\t}\n\n\treturn queryFrom\n}",
"func (o *FindSensorDataBySensorIDAndTimeRangeParams) SetFrom(from strfmt.DateTime) {\n\to.From = from\n}",
"func (o *SalaryPayslipSearchParams) SetFrom(from *int64) {\n\to.From = from\n}",
"func (o *LedgerAccountingPeriodSearchParams) SetFrom(from *int64) {\n\to.From = from\n}",
"func (db *DB) From(arg interface{}) *From {\n\tt := reflect.Indirect(reflect.ValueOf(arg)).Type()\n\tif t.Kind() != reflect.Struct {\n\t\tpanic(fmt.Errorf(\"From: argument must be struct (or that pointer) type, got %v\", t))\n\t}\n\treturn &From{TableName: db.tableName(t)}\n}",
"func (o *ProductExternalSearchParams) SetFrom(from *int64) {\n\to.From = from\n}",
"func (o *ProductProductPriceSearchParams) SetFrom(from *int64) {\n\to.From = from\n}",
"func (s *CreateExportTaskInput) SetFrom(v int64) *CreateExportTaskInput {\n\ts.From = &v\n\treturn s\n}",
"func (s *ExportTask) SetFrom(v int64) *ExportTask {\n\ts.From = &v\n\treturn s\n}",
"func (o *SupplierSearchParams) SetFrom(from *int64) {\n\to.From = from\n}",
"func (o *GetForResourceGroupParams) SetFrom(from *strfmt.DateTime) {\n\to.From = from\n}",
"func (o *GetRealBrowserCheckRunsParams) SetFrom(from *strfmt.DateTime) {\n\to.From = from\n}",
"func (c *Core) SetFrom(f *Core) {\n\tif v := f.ExplorerURL; v != nil {\n\t\tc.ExplorerURL = v\n\t}\n\tif v := f.InsecureFastScrypt; v != nil {\n\t\tc.InsecureFastScrypt = v\n\t}\n\tif v := f.RootDir; v != nil {\n\t\tc.RootDir = v\n\t}\n\tif v := f.ShutdownGracePeriod; v != nil {\n\t\tc.ShutdownGracePeriod = v\n\t}\n\n\tc.Feature.setFrom(&f.Feature)\n\tc.Database.setFrom(&f.Database)\n\tc.TelemetryIngress.setFrom(&f.TelemetryIngress)\n\tc.AuditLogger.SetFrom(&f.AuditLogger)\n\tc.Log.setFrom(&f.Log)\n\n\tc.WebServer.setFrom(&f.WebServer)\n\tc.JobPipeline.setFrom(&f.JobPipeline)\n\n\tc.FluxMonitor.setFrom(&f.FluxMonitor)\n\tc.OCR2.setFrom(&f.OCR2)\n\tc.OCR.setFrom(&f.OCR)\n\tc.P2P.setFrom(&f.P2P)\n\tc.Keeper.setFrom(&f.Keeper)\n\n\tc.AutoPprof.setFrom(&f.AutoPprof)\n\tc.Pyroscope.setFrom(&f.Pyroscope)\n\tc.Sentry.setFrom(&f.Sentry)\n\tc.Insecure.setFrom(&f.Insecure)\n}",
"func (b *SecureGetSMSHistoryBuilder) DateFrom(v int) *SecureGetSMSHistoryBuilder {\n\tb.Params[\"date_from\"] = v\n\treturn b\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
AddSelectJoin adds an inner join to the query builder. The term "Select" is included to differentiate it from the AddJoin in the StatementBuilder so it is not shadowed.
|
func (d *SelectBuilder) AddSelectJoin(name string, join string, xs ...ConditionOption) *SelectBuilder {
d.addJoin(name, join, JoinTypeInner, xs...)
return d
}
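A hedged usage sketch: the "table alias ON …" join-expression format and the role of the name argument are assumptions about addJoin's contract, not confirmed by the source.

d.SetFrom("orders o").
    AddSelectJoin("u", "users u ON u.id = o.user_id") // inner join: orders without a matching user are dropped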
|
[
"func (b SelectBuilder) Join(join string, rest ...interface{}) SelectBuilder {\n\treturn b.JoinClause(\"JOIN \"+join, rest...)\n}",
"func (stmt *SelectStmt) InnerJoin(table string, conds ...WhereCondition) *SelectStmt {\n\treturn stmt.Join(InnerJoin, table, nil, conds...)\n}",
"func (q SelectQuery) Join(table Table, predicate Predicate, predicates ...Predicate) SelectQuery {\n\tpredicates = append([]Predicate{predicate}, predicates...)\n\tq.JoinTables = append(q.JoinTables, JoinTable{\n\t\tJoinType: JoinTypeInner,\n\t\tTable: table,\n\t\tOnPredicates: VariadicPredicate{\n\t\t\tPredicates: predicates,\n\t\t},\n\t})\n\treturn q\n}",
"func (self *SelectManager) InnerJoin(table interface{}) *SelectManager {\n\tswitch table.(type) {\n\tcase Accessor:\n\t\tself.Context.Source.Right = append(self.Context.Source.Right, nodes.InnerJoin(table.(Accessor).Relation(), nil))\n\tcase *nodes.RelationNode:\n\t\tself.Context.Source.Right = append(self.Context.Source.Right, nodes.InnerJoin(table.(*nodes.RelationNode), nil))\n\t}\n\n\treturn self\n}",
"func (stmt *SelectStmt) Join(\n\tjoinType JoinType,\n\ttable string,\n\tresultSet *SelectStmt,\n\tconds ...WhereCondition,\n) *SelectStmt {\n\tstmt.Joins = append(stmt.Joins, JoinClause{\n\t\tType: joinType,\n\t\tTable: table,\n\t\tResultSet: resultSet,\n\t\tConditions: append([]WhereCondition{}, conds...),\n\t})\n\n\treturn stmt\n}",
"func (b SelectBuilder) JoinClause(pred interface{}, args ...interface{}) SelectBuilder {\n\treturn builder.Append(b, \"Joins\", newPart(pred, args...)).(SelectBuilder)\n}",
"func InnerJoin(baseTable string, baseColumn string, joinTable string, joinColumn string) qm.QueryMod {\n\treturn qm.InnerJoin(fmt.Sprintf(\"%s ON %s.%s=%s.%s\",\n\t\tjoinTable,\n\t\tjoinTable,\n\t\tjoinColumn,\n\t\tbaseTable,\n\t\tbaseColumn))\n}",
"func (b *SQLBuilder) Select(withAlias bool, f ...Field) {\n\tb.w.WriteLine(`SELECT ` + b.List(f, withAlias))\n}",
"func NewInnerJoin(table Table, condition Expression) Join {\n\treturn NewJoin(types.InnerJoin, table, condition)\n}",
"func (r readableTableInterfaceImpl) INNER_JOIN(table ReadableTable, onCondition BoolExpression) joinSelectUpdateTable {\n\treturn newJoinTable(r.parent, table, jet.InnerJoin, onCondition)\n}",
"func (q SelectQuery) Select(fields ...Field) SelectQuery {\n\tq.SelectFields = append(q.SelectFields, fields...)\n\treturn q\n}",
"func (d *SelectBuilder) AddSelectLeftJoin(name string, join string, xs ...ConditionOption) *SelectBuilder {\n\td.addJoin(name, join, JoinTypeLeft, xs...)\n\treturn d\n}",
"func (q SelectQuery) CustomJoin(joinType JoinType, table Table, predicates ...Predicate) SelectQuery {\n\tq.JoinTables = append(q.JoinTables, JoinTable{\n\t\tJoinType: joinType,\n\t\tTable: table,\n\t\tOnPredicates: VariadicPredicate{\n\t\t\tPredicates: predicates,\n\t\t},\n\t})\n\treturn q\n}",
"func (b StatementBuilderType) Select(columns ...string) SelectCondition {\n\treturn SelectBuilder(b).Columns(columns...)\n}",
"func (stmt *UpdateStmt) FromSelect(selStmt *SelectStmt, alias string) *UpdateStmt {\n\tstmt.SelectStmt = selStmt\n\tstmt.SelectStmtAlias = alias\n\n\treturn stmt\n}",
"func (b *Builder) Join(table string, condition string, args ...string) *Builder {\n joinType := \"inner\"\n lenArgs := len(args)\n if lenArgs > 0 {\n joinType = args[0]\n }\n j := &join{table, condition, joinType}\n b.joins = append(b.joins, j)\n return b\n}",
"func (m *PlannerDefault) WalkSelect(p *Select) error {\n\n\t// u.Debugf(\"VisitSelect ctx:%p %+v\", p.Ctx, p.Stmt)\n\n\tneedsFinalProject := true\n\n\tif len(p.Stmt.From) == 0 {\n\n\t\treturn m.WalkLiteralQuery(p)\n\n\t} else if len(p.Stmt.From) == 1 {\n\n\t\tp.Stmt.From[0].Source = p.Stmt // TODO: move to a Finalize() in query parser/planner\n\n\t\tvar srcPlan *Source\n\n\t\tif p.Stmt.Where != nil && p.Stmt.Where.Source != nil { // Where subquery\n\t\t\tnegate := false\n\t\t\tvar parentJoin expr.Node\n\t\t\tif n, ok := p.Stmt.Where.Expr.(*expr.BinaryNode); ok {\n\t\t\t\tparentJoin = n.Args[0]\n\t\t\t} else if n2, ok2 := p.Stmt.Where.Expr.(*expr.UnaryNode); ok2 {\n\t\t\t\tparentJoin = n2.Arg\n\t\t\t\tnegate = true\n\t\t\t}\n\t\t\tp.Stmt.From[0].AddJoin(parentJoin)\n\n\t\t\tvar err error\n\t\t\tsrcPlan, err = NewSource(m.Ctx, p.Stmt.From[0], false)\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t//p.From = append(p.From, srcPlan)\n\t\t\tsub := p.Stmt.Where.Source\n\t\t\t// Inject join criteria (JoinNodes, JoinExpr) on source for subquery (back to parent)\n\t\t\tsubSqlSrc := sub.From[0]\n\t\t\terr = m.Planner.WalkSourceSelect(srcPlan)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsubSrc := rel.NewSqlSource(subSqlSrc.Name)\n\t\t\tsubSrc.Rewrite(sub)\n\t\t\tcols := subSrc.UnAliasedColumns()\n\t\t\tvar childJoin expr.Node\n\t\t\tif len(cols) > 1 {\n\t\t\t\treturn fmt.Errorf(\"subquery must contain only 1 select column for join\")\n\t\t\t}\n\t\t\tfor _, v := range cols {\n\t\t\t\tchildJoin = v.Expr\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif childJoin == nil {\n\t\t\t\treturn fmt.Errorf(\"subquery must contain at least 1 select column for join\")\n\t\t\t}\n\t\t\tp.Stmt.From[0].AddJoin(childJoin)\n\t\t\tsubSrc.AddJoin(childJoin)\n\t\t\tsubSrcPlan, err := NewSource(m.Ctx, subSrc, false)\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tsubSrc.AddJoin(childJoin)\n\t\t\tif negate {\n\t\t\t\tsubSrc.JoinExpr = expr.NewBinaryNode(lex.TokenFromOp(\"!=\"), parentJoin, childJoin)\n\t\t\t\tp.Stmt.From[0].JoinExpr = expr.NewBinaryNode(lex.TokenFromOp(\"!=\"), parentJoin, childJoin)\n\t\t\t} else {\n\t\t\t\tsubSrc.JoinExpr = expr.NewBinaryNode(lex.TokenFromOp(\"=\"), parentJoin, childJoin)\n\t\t\t\tp.Stmt.From[0].JoinExpr = expr.NewBinaryNode(lex.TokenFromOp(\"=\"), parentJoin, childJoin)\n\t\t\t}\n\t\t\terr = m.Planner.WalkSourceSelect(subSrcPlan)\n\t\t\tif err != nil {\n\t\t\t\tu.Errorf(\"Could not visitsubselect %v %s\", err, subSrcPlan)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsubQueryTask := NewJoinMerge(srcPlan, subSrcPlan, srcPlan.Stmt, subSrcPlan.Stmt)\n\t\t\tp.Add(subQueryTask)\n\t\t} else {\n\t\t\tvar err error\n\t\t\tsrcPlan, err = NewSource(m.Ctx, p.Stmt.From[0], true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tp.From = append(p.From, srcPlan)\n\t\t\tp.Add(srcPlan)\n\t\t\terr = m.Planner.WalkSourceSelect(srcPlan)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif srcPlan.Complete && !needsFinalProjection(p.Stmt) {\n\t\t\tgoto finalProjection\n\t\t}\n\n\t} else {\n\n\t\tvar prevSource *Source\n\t\tvar prevTask Task\n\n\t\tfor i, from := range p.Stmt.From {\n\n\t\t\t// Need to rewrite the From statement to ensure all fields necessary to support\n\t\t\t// joins, wheres, etc exist but is standalone query\n\t\t\tfrom.Rewrite(p.Stmt)\n\t\t\tsrcPlan, err := NewSource(m.Ctx, from, false)\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\terr = m.Planner.WalkSourceSelect(srcPlan)\n\t\t\tif err != nil {\n\t\t\t\tu.Errorf(\"Could not 
visitsubselect %v %s\", err, from)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// now fold into previous task\n\t\t\tif i != 0 {\n\t\t\t\tfrom.Seekable = true\n\t\t\t\t// fold this source into previous\n\t\t\t\tcurMergeTask := NewJoinMerge(prevTask, srcPlan, prevSource.Stmt, srcPlan.Stmt)\n\t\t\t\tprevTask = curMergeTask\n\t\t\t} else {\n\t\t\t\tprevTask = srcPlan\n\t\t\t}\n\t\t\tprevSource = srcPlan\n\t\t\t//u.Debugf(\"got task: %T\", lastSource)\n\t\t}\n\t\tp.Add(prevTask)\n\n\t}\n\n\tif p.Stmt.Where != nil {\n\t\tswitch {\n\t\tcase p.Stmt.Where.Source != nil:\n\t\t\t// SELECT id from article WHERE id in (select article_id from comments where comment_ct > 50);\n\t\t\tu.Warnf(\"Found un-supported subquery: %#v\", p.Stmt.Where)\n\t\t\treturn ErrNotImplemented\n\t\tcase p.Stmt.Where.Expr != nil:\n\t\t\tp.Add(NewWhere(p.Stmt))\n\t\tdefault:\n\t\t\tu.Warnf(\"Found un-supported where type: %#v\", p.Stmt.Where)\n\t\t\treturn fmt.Errorf(\"Unsupported Where Type\")\n\t\t}\n\t}\n\n\tif p.Stmt.IsAggQuery() {\n\t\t//u.Debugf(\"Adding aggregate/group by? %#v\", m.Planner)\n\t\tp.Add(NewGroupBy(p.Stmt))\n\t\tneedsFinalProject = false\n\t}\n\n\tif p.Stmt.Having != nil {\n\t\tp.Add(NewHaving(p.Stmt))\n\t}\n\n\tif len(p.Stmt.OrderBy) > 0 {\n\t\tp.Add(NewOrder(p.Stmt))\n\t}\n\n\tif needsFinalProject {\n\t\terr := m.WalkProjectionFinal(p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\nfinalProjection:\n\tif m.Ctx.Projection == nil {\n\t\tproj, err := NewProjectionFinal(m.Ctx, p)\n\t\t//u.Infof(\"Projection: %T:%p %T:%p\", proj, proj, proj.Proj, proj.Proj)\n\t\tif err != nil {\n\t\t\tu.Errorf(\"projection error? %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tm.Ctx.Projection = proj\n\t\t//u.Debugf(\"m.Ctx: %p m.Ctx.Projection: %T:%p\", m.Ctx, m.Ctx.Projection, m.Ctx.Projection)\n\t}\n\n\tif p.Stmt.Into != nil{\n\t\tp.Add(NewInto(p.Stmt.Into))\n\t}\n\n\treturn nil\n}",
"func Select(fields ...Field) SelectQuery {\n\treturn SelectQuery{\n\t\tSelectFields: fields,\n\t}\n}",
"func InnerJoinWithFilter(baseTable string, baseColumn string, joinTable string, joinColumn string, filterColumn string, filterValue interface{}, optFilterTable ...string) qm.QueryMod {\n\tfilterTable := joinTable\n\tif len(optFilterTable) > 0 {\n\t\tfilterTable = optFilterTable[0]\n\t}\n\n\treturn qm.InnerJoin(fmt.Sprintf(\"%s ON %s.%s=%s.%s AND %s.%s=$1\",\n\t\tjoinTable,\n\t\tjoinTable,\n\t\tjoinColumn,\n\t\tbaseTable,\n\t\tbaseColumn,\n\t\tfilterTable,\n\t\tfilterColumn), filterValue)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
AddSelectLeftJoin adds a left (outer) join to the query builder. The term "Select" is included to differentiate it from the AddJoin in the StatementBuilder so it is not shadowed.
|
func (d *SelectBuilder) AddSelectLeftJoin(name string, join string, xs ...ConditionOption) *SelectBuilder {
d.addJoin(name, join, JoinTypeLeft, xs...)
return d
}
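The left-join variant keeps rows from the base table even when the joined table has no match; the same assumed join-expression format as in the inner-join sketch above:

d.AddSelectLeftJoin("p", "profiles p ON p.user_id = u.id") // users without a profile still appear, with NULL profile columns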
|
[
"func (q SelectQuery) LeftJoin(table Table, predicate Predicate, predicates ...Predicate) SelectQuery {\n\tpredicates = append([]Predicate{predicate}, predicates...)\n\tq.JoinTables = append(q.JoinTables, JoinTable{\n\t\tJoinType: JoinTypeLeft,\n\t\tTable: table,\n\t\tOnPredicates: VariadicPredicate{\n\t\t\tPredicates: predicates,\n\t\t},\n\t})\n\treturn q\n}",
"func (f *From) LeftJoin(toTable, toTableAlias string) *Join {\n\treturn f.addJoin(LeftJoin, toTable, toTableAlias)\n}",
"func NewLeftJoin(table Table, condition Expression) Join {\n\treturn NewJoin(types.LeftJoin, table, condition)\n}",
"func LeftOuterJoin(baseTable string, baseColumn string, joinTable string, joinColumn string) qm.QueryMod {\n\treturn qm.LeftOuterJoin(fmt.Sprintf(\"%s ON %s.%s=%s.%s\",\n\t\tjoinTable,\n\t\tjoinTable,\n\t\tjoinColumn,\n\t\tbaseTable,\n\t\tbaseColumn))\n}",
"func (jc *JoinCondition) LeftJoin(table interface{}) *JoinCondition {\n\treturn jc.join(LeftJoin, table)\n}",
"func (ds *joinDataSet) leftJoin() joinDataSet {\n\texp := ResultChunk{\n\t\tColumns: ds.expResults.Columns,\n\t\tValues: append(ds.expResults.Values, ds.expLeftJoin.Values...),\n\t\toffsets: append(ds.expResults.offsets, ds.expLeftJoin.offsets...),\n\t\tFacts: append(ds.expResults.Facts, ds.expLeftJoin.Facts...),\n\t}\n\tleft := joinDataSet{\n\t\tname: ds.name + \"_left\",\n\t\tjoinVars: ds.joinVars,\n\t\tjoinType: parser.MatchOptional,\n\t\tleft: ds.left,\n\t\tright: ds.right,\n\t\tinputBinder: ds.inputBinder,\n\t\texpResults: exp,\n\t}\n\treturn left\n}",
"func (r readableTableInterfaceImpl) LEFT_JOIN(table ReadableTable, onCondition BoolExpression) joinSelectUpdateTable {\n\treturn newJoinTable(r.parent, table, jet.LeftJoin, onCondition)\n}",
"func LeftJoin(table Table, predicates ...Predicate) JoinTable {\n\treturn JoinTable{\n\t\tJoinType: JoinTypeLeft,\n\t\tTable: table,\n\t\tOnPredicates: VariadicPredicate{\n\t\t\tPredicates: predicates,\n\t\t},\n\t}\n}",
"func LeftOuterJoinWithFilter(baseTable string, baseColumn string, joinTable string, joinColumn string, filterColumn string, filterValue interface{}, optFilterTable ...string) qm.QueryMod {\n\tfilterTable := joinTable\n\tif len(optFilterTable) > 0 {\n\t\tfilterTable = optFilterTable[0]\n\t}\n\n\treturn qm.LeftOuterJoin(fmt.Sprintf(\"%s ON %s.%s=%s.%s AND %s.%s=$1\",\n\t\tjoinTable,\n\t\tjoinTable,\n\t\tjoinColumn,\n\t\tbaseTable,\n\t\tbaseColumn,\n\t\tfilterTable,\n\t\tfilterColumn), filterValue)\n}",
"func (stmt *SelectStmt) LeftLateralJoin(rs *SelectStmt, as string, conds ...WhereCondition) *SelectStmt {\n\treturn stmt.Join(LeftLateralJoin, as, rs, conds...)\n}",
"func (stmt *SelectStmt) LeftJoinRS(rs *SelectStmt, as string, conds ...WhereCondition) *SelectStmt {\n\treturn stmt.Join(LeftJoin, as, rs, conds...)\n}",
"func (d *SelectBuilder) AddSelectJoin(name string, join string, xs ...ConditionOption) *SelectBuilder {\n\td.addJoin(name, join, JoinTypeInner, xs...)\n\treturn d\n}",
"func convertToLeftJoin(ajoin *sqlparser.JoinTableExpr) {\n\tnewRHS := ajoin.LeftExpr\n\t// If the LHS is a join, we have to parenthesize it.\n\t// Otherwise, it can be used as is.\n\tif _, ok := newRHS.(*sqlparser.JoinTableExpr); ok {\n\t\tnewRHS = &sqlparser.ParenTableExpr{\n\t\t\tExprs: sqlparser.TableExprs{newRHS},\n\t\t}\n\t}\n\tajoin.LeftExpr, ajoin.RightExpr = ajoin.RightExpr, newRHS\n\tajoin.Join = sqlparser.LeftJoinType\n}",
"func (hj *hashJoiner) leftJoin(ctx context.Context) {\n\tleftIdentityKeysUsed := make(map[string]struct{})\n\n\thj.runEqJoin(func(identityKey string, offset uint32, fs FactSet, rowValues []Value) {\n\t\tleftIdentityKeysUsed[identityKey] = struct{}{}\n\t\thj.outputTo.add(ctx, offset, fs, rowValues)\n\t})\n\n\tfor key, factsets := range hj.leftJoinValues {\n\t\tif _, exists := leftIdentityKeysUsed[key]; !exists {\n\t\t\t// this list of FactSets from the left wasn't joined to any\n\t\t\t// right factSets, so emit the left join version of them\n\t\t\tfor _, left := range factsets {\n\t\t\t\thj.outputTo.add(ctx, left.offset, left.fact, hj.joiner(left.vals, nil))\n\t\t\t}\n\t\t}\n\t}\n}",
"func (b *SQLBuilder) Select(withAlias bool, f ...Field) {\n\tb.w.WriteLine(`SELECT ` + b.List(f, withAlias))\n}",
"func CaseSQLByPassLeftJoin(t *testing.T) {\n\ta := assert.New(t)\n\ttrainSQL := `SELECT f1.user_id, f1.fea1, f2.fea2\nFROM standard_join_test.user_fea1 AS f1 LEFT OUTER JOIN standard_join_test.user_fea2 AS f2\nON f1.user_id = f2.user_id\nWHERE f1.user_id < 3;`\n\n\tconn, err := createRPCConn()\n\ta.NoError(err)\n\tdefer conn.Close()\n\tcli := pb.NewSQLFlowClient(conn)\n\n\tctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)\n\tdefer cancel()\n\n\tstream, err := cli.Run(ctx, sqlRequest(trainSQL))\n\tif err != nil {\n\t\ta.Fail(\"Check if the server started successfully. %v\", err)\n\t}\n\t// wait train finish\n\t_, _, _, e := ParseResponse(stream)\n\ta.NoError(e)\n}",
"func (b SelectBuilder) Join(join string, rest ...interface{}) SelectBuilder {\n\treturn b.JoinClause(\"JOIN \"+join, rest...)\n}",
"func (q SelectQuery) Join(table Table, predicate Predicate, predicates ...Predicate) SelectQuery {\n\tpredicates = append([]Predicate{predicate}, predicates...)\n\tq.JoinTables = append(q.JoinTables, JoinTable{\n\t\tJoinType: JoinTypeInner,\n\t\tTable: table,\n\t\tOnPredicates: VariadicPredicate{\n\t\t\tPredicates: predicates,\n\t\t},\n\t})\n\treturn q\n}",
"func (q SelectQuery) Select(fields ...Field) SelectQuery {\n\tq.SelectFields = append(q.SelectFields, fields...)\n\treturn q\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ApplyConditions applies the Where conditions to the provided SelectBuilder.
|
func (d *SelectBuilder) ApplyConditions(qry *squirrel.SelectBuilder) {
*qry = ApplyConditions(d.Where, &ConditionOptionPreprocessParams{
BaseTable: d.From,
}, *qry)
}
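A minimal usage sketch of the in-place mutation pattern this method relies on: the caller's squirrel.SelectBuilder is rewritten through a pointer rather than returned. The plain squirrel.Eq condition below is an assumption standing in for the package's ConditionOption machinery, which is not shown here.

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

// applyConditions mirrors the pattern above: rewrite the caller's builder
// in place through the pointer. A squirrel.Eq stands in for the package's
// ConditionOption preprocessing (hypothetical simplification).
func applyConditions(qry *sq.SelectBuilder, conds sq.Eq) {
	*qry = qry.Where(conds)
}

func main() {
	qry := sq.Select("id", "name").From("users")
	applyConditions(&qry, sq.Eq{"status": "active"})
	sql, args, _ := qry.ToSql()
	fmt.Println(sql, args)
	// SELECT id, name FROM users WHERE status = ? [active]
}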
|
[
"func (sqlbldr *Builder) ApplyFilter( aFilter *Builder ) *Builder {\n\tif aFilter != nil {\n\t\tif aFilter.mySql != \"\" {\n\t\t\tsqlbldr.mySql += sqlbldr.myParamPrefix + aFilter.mySql\n\t\t}\n\t\t//also merge in any params from the sub-query\n\t\tfor k, v := range aFilter.myParams {\n\t\t\tsqlbldr.myParams[k] = v\n\t\t}\n\t\tfor k, v := range aFilter.mySetParams {\n\t\t\tsqlbldr.mySetParams[k] = v\n\t\t}\n\t}\n\treturn sqlbldr\n}",
"func (dao *GenericDaoDynamodb) BuildConditionBuilder(tableName string, filter godal.FilterOpt) (*expression.ConditionBuilder, error) {\n\tif filter == nil {\n\t\treturn nil, nil\n\t}\n\trm := dao.GetRowMapper()\n\tif rm == nil {\n\t\treturn nil, errors.New(\"row-mapper is required to build ConditionBuilder\")\n\t}\n\n\tswitch filter.(type) {\n\tcase godal.FilterOptFieldOpValue:\n\t\tf := filter.(godal.FilterOptFieldOpValue)\n\t\treturn dao.BuildConditionBuilder(tableName, &f)\n\tcase *godal.FilterOptFieldOpValue:\n\t\tf := filter.(*godal.FilterOptFieldOpValue)\n\t\texp := expression.Name(rm.ToDbColName(tableName, f.FieldName))\n\t\tswitch f.Operator {\n\t\tcase godal.FilterOpEqual:\n\t\t\tt := exp.Equal(expression.Value(f.Value))\n\t\t\treturn &t, nil\n\t\tcase godal.FilterOpNotEqual:\n\t\t\tt := exp.NotEqual(expression.Value(f.Value))\n\t\t\treturn &t, nil\n\t\tcase godal.FilterOpGreater:\n\t\t\tt := exp.GreaterThan(expression.Value(f.Value))\n\t\t\treturn &t, nil\n\t\tcase godal.FilterOpGreaterOrEqual:\n\t\t\tt := exp.GreaterThanEqual(expression.Value(f.Value))\n\t\t\treturn &t, nil\n\t\tcase godal.FilterOpLess:\n\t\t\tt := exp.LessThan(expression.Value(f.Value))\n\t\t\treturn &t, nil\n\t\tcase godal.FilterOpLessOrEqual:\n\t\t\tt := exp.LessThanEqual(expression.Value(f.Value))\n\t\t\treturn &t, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"unknown filter operator: %#v\", f.Operator)\n\tcase godal.FilterOptFieldIsNull:\n\t\tf := filter.(godal.FilterOptFieldIsNull)\n\t\treturn dao.BuildConditionBuilder(tableName, &f)\n\tcase *godal.FilterOptFieldIsNull:\n\t\tf := filter.(*godal.FilterOptFieldIsNull)\n\t\tt := expression.Name(rm.ToDbColName(tableName, f.FieldName)).AttributeNotExists()\n\t\treturn &t, nil\n\tcase godal.FilterOptFieldIsNotNull:\n\t\tf := filter.(godal.FilterOptFieldIsNotNull)\n\t\treturn dao.BuildConditionBuilder(tableName, &f)\n\tcase *godal.FilterOptFieldIsNotNull:\n\t\tf := filter.(*godal.FilterOptFieldIsNotNull)\n\t\tt := expression.Name(rm.ToDbColName(tableName, f.FieldName)).AttributeExists()\n\t\treturn &t, nil\n\tcase godal.FilterOptAnd:\n\t\tf := filter.(godal.FilterOptAnd)\n\t\treturn dao.BuildConditionBuilder(tableName, &f)\n\tcase *godal.FilterOptAnd:\n\t\tf := filter.(*godal.FilterOptAnd)\n\t\tvar result *expression.ConditionBuilder = nil\n\t\tfor _, innerF := range f.Filters {\n\t\t\tinnerResult, err := dao.BuildConditionBuilder(tableName, innerF)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif result == nil {\n\t\t\t\tresult = innerResult\n\t\t\t} else {\n\t\t\t\tt := result.And(*innerResult)\n\t\t\t\tresult = &t\n\t\t\t}\n\t\t}\n\t\treturn result, nil\n\tcase godal.FilterOptOr:\n\t\tf := filter.(godal.FilterOptOr)\n\t\treturn dao.BuildConditionBuilder(tableName, &f)\n\tcase *godal.FilterOptOr:\n\t\tf := filter.(*godal.FilterOptOr)\n\t\tvar result *expression.ConditionBuilder = nil\n\t\tfor _, innerF := range f.Filters {\n\t\t\tinnerResult, err := dao.BuildConditionBuilder(tableName, innerF)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif result == nil {\n\t\t\t\tresult = innerResult\n\t\t\t} else {\n\t\t\t\tt := result.Or(*innerResult)\n\t\t\t\tresult = &t\n\t\t\t}\n\t\t}\n\t\treturn result, nil\n\t}\n\treturn nil, fmt.Errorf(\"cannot build filter from %T\", filter)\n}",
"func (b *SQLBuilder) Conditions(c []Condition, newline bool) {\n\tfn := b.w.WriteString\n\tif newline {\n\t\tfn = b.w.WriteLine\n\t}\n\n\tif len(c) == 0 {\n\t\treturn\n\t}\n\tfn(c[0](b.Context))\n\n\tif newline {\n\t\tb.w.AddIndent()\n\t\tdefer b.w.SubIndent()\n\t}\n\n\tfor _, v := range c[1:] {\n\t\tif !newline {\n\t\t\tfn(` `)\n\t\t}\n\t\tfn(`AND ` + v(b.Context))\n\t}\n}",
"func FilterByCustomizeCondition(ok bool, k string, v ...interface{}) func(db *gorm.DB) *gorm.DB {\n\tif ok {\n\t\tfmt.Println(v...)\n\t\tif len(v) == 0 {\n\t\t\treturn func(db *gorm.DB) *gorm.DB {\n\t\t\t\treturn db.Where(k)\n\t\t\t}\n\t\t}\n\t\treturn func(db *gorm.DB) *gorm.DB {\n\t\t\treturn db.Where(k, v...)\n\t\t}\n\n\t}\n\treturn func(db *gorm.DB) *gorm.DB {\n\t\treturn db\n\t}\n\n}",
"func (b *SQLBuilder) Where(c ...Condition) {\n\tif len(c) == 0 {\n\t\treturn\n\t}\n\tb.w.WriteString(`WHERE `)\n\tb.Conditions(c, true)\n}",
"func (b StatementBuilderType) Select(columns ...string) SelectCondition {\n\treturn SelectBuilder(b).Columns(columns...)\n}",
"func (b *Builder) BuildWhere(join string) string {\n\tif len(b.Where) < 1 {\n\t\treturn \"\"\n\t}\n\n\tjoin = strings.ToLower(join)\n\tif join != \"and\" && join != \"or\" {\n\t\tjoin = \"AND\"\n\t}\n\tjoin = strings.ToUpper(join)\n\n\treturn \"WHERE (\" + strings.Join(b.Where, \") \"+join+\" (\") + \")\"\n}",
"func (q SelectQuery) Where(predicates ...Predicate) SelectQuery {\n\tq.WherePredicate.Predicates = append(q.WherePredicate.Predicates, predicates...)\n\treturn q\n}",
"func (d *SelectBuilder) ApplySort(qry *squirrel.SelectBuilder) {\n\t*qry = ApplySort(d.StatementBuilder, &ConditionOptionPreprocessParams{\n\t\tBaseTable: d.From,\n\t}, *qry)\n}",
"func (*Concrete) queryBuilder(db *gorm.DB, cond map[string]interface{}) *gorm.DB {\n\tif cond == nil {\n\t\treturn db\n\t}\n\n\tfor key, value := range cond {\n\t\tif c := condAnalyze(key); len(c) > 1 {\n\t\t\tswitch c[1] {\n\t\t\tcase NotIn:\n\t\t\t\tdb = db.Not(value)\n\t\t\tcase In:\n\t\t\t\tdb = db.Where(fmt.Sprintf(\"%s %s (?)\", c[0], c[1]), value)\n\t\t\tcase Like:\n\t\t\t\tdb = db.Where(fmt.Sprintf(\"%s %s ?\", c[0], c[1]), fmt.Sprintf(\"%%%s%%\", value))\n\t\t\tdefault:\n\t\t\t\tdb = db.Where(fmt.Sprintf(\"%s %s ?\", c[0], c[1]), value)\n\t\t\t}\n\t\t} else {\n\t\t\tdb = db.Where(fmt.Sprintf(\"%s = ?\", key), value)\n\t\t}\n\t}\n\treturn db\n}",
"func (stmt *UpdateStmt) Where(conditions ...WhereCondition) *UpdateStmt {\n\tstmt.Conditions = append(stmt.Conditions, conditions...)\n\treturn stmt\n}",
"func (b *IngressControllerStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *IngressControllerStatusApplyConfiguration {\n\tfor i := range values {\n\t\tif values[i] == nil {\n\t\t\tpanic(\"nil value passed to WithConditions\")\n\t\t}\n\t\tb.Conditions = append(b.Conditions, *values[i])\n\t}\n\treturn b\n}",
"func (c *ProjectsLocationsConnectionsEntityTypesEntitiesUpdateEntitiesWithConditionsCall) Conditions(conditions string) *ProjectsLocationsConnectionsEntityTypesEntitiesUpdateEntitiesWithConditionsCall {\n\tc.urlParams_.Set(\"conditions\", conditions)\n\treturn c\n}",
"func (o *ListPatientParams) SetConditions(conditions *string) {\n\to.Conditions = conditions\n}",
"func (_u *_usersQueryBuilder) Where(filter string, values ...interface{}) (*_usersQueryBuilder) {\n if _u.err != nil {\n return _u\n }\n _u.builder.Where(filter, values...)\n return _u\n}",
"func (stmt DeleteStmt) Where(conds ...Clause) DeleteStmt {\n\tif len(conds) > 1 {\n\t\t// By default, multiple where clauses will be joined will AllOf\n\t\tstmt.cond = AllOf(conds...)\n\t} else if len(conds) == 1 {\n\t\tstmt.cond = conds[0]\n\t}\n\treturn stmt\n}",
"func (q SelectQuery) AppendSQL(buf *strings.Builder, args *[]interface{}, params map[string]int) {\n\t// WITH\n\tif !q.nested {\n\t\tappendCTEs(buf, args, q.CTEs, q.FromTable, q.JoinTables)\n\t}\n\t// SELECT\n\tif q.SelectType == \"\" {\n\t\tq.SelectType = SelectTypeDefault\n\t}\n\tbuf.WriteString(string(q.SelectType))\n\tif q.SelectType == SelectTypeDistinctOn {\n\t\tbuf.WriteString(\" (\")\n\t\tq.DistinctOn.AppendSQLExclude(buf, args, nil, nil)\n\t\tbuf.WriteString(\")\")\n\t}\n\tif len(q.SelectFields) > 0 {\n\t\tbuf.WriteString(\" \")\n\t\tq.SelectFields.AppendSQLExcludeWithAlias(buf, args, nil, nil)\n\t}\n\t// FROM\n\tif q.FromTable != nil {\n\t\tbuf.WriteString(\" FROM \")\n\t\tswitch v := q.FromTable.(type) {\n\t\tcase Query:\n\t\t\tbuf.WriteString(\"(\")\n\t\t\tv.NestThis().AppendSQL(buf, args, nil)\n\t\t\tbuf.WriteString(\")\")\n\t\tdefault:\n\t\t\tq.FromTable.AppendSQL(buf, args, nil)\n\t\t}\n\t\talias := q.FromTable.GetAlias()\n\t\tif alias != \"\" {\n\t\t\tbuf.WriteString(\" AS \")\n\t\t\tbuf.WriteString(alias)\n\t\t}\n\t}\n\t// JOIN\n\tif len(q.JoinTables) > 0 {\n\t\tbuf.WriteString(\" \")\n\t\tq.JoinTables.AppendSQL(buf, args, nil)\n\t}\n\t// WHERE\n\tif len(q.WherePredicate.Predicates) > 0 {\n\t\tbuf.WriteString(\" WHERE \")\n\t\tq.WherePredicate.toplevel = true\n\t\tq.WherePredicate.AppendSQLExclude(buf, args, nil, nil)\n\t}\n\t// GROUP BY\n\tif len(q.GroupByFields) > 0 {\n\t\tbuf.WriteString(\" GROUP BY \")\n\t\tq.GroupByFields.AppendSQLExclude(buf, args, nil, nil)\n\t}\n\t// HAVING\n\tif len(q.HavingPredicate.Predicates) > 0 {\n\t\tbuf.WriteString(\" HAVING \")\n\t\tq.HavingPredicate.toplevel = true\n\t\tq.HavingPredicate.AppendSQLExclude(buf, args, nil, nil)\n\t}\n\t// WINDOW\n\tif len(q.Windows) > 0 {\n\t\tbuf.WriteString(\" WINDOW \")\n\t\tq.Windows.AppendSQL(buf, args, nil)\n\t}\n\t// ORDER BY\n\tif len(q.OrderByFields) > 0 {\n\t\tbuf.WriteString(\" ORDER BY \")\n\t\tq.OrderByFields.AppendSQLExclude(buf, args, nil, nil)\n\t}\n\t// LIMIT\n\tif q.LimitValue != nil {\n\t\tbuf.WriteString(\" LIMIT ?\")\n\t\tif *q.LimitValue < 0 {\n\t\t\t*q.LimitValue = -*q.LimitValue\n\t\t}\n\t\t*args = append(*args, *q.LimitValue)\n\t}\n\t// OFFSET\n\tif q.OffsetValue != nil {\n\t\tbuf.WriteString(\" OFFSET ?\")\n\t\tif *q.OffsetValue < 0 {\n\t\t\t*q.OffsetValue = -*q.OffsetValue\n\t\t}\n\t\t*args = append(*args, *q.OffsetValue)\n\t}\n\tif !q.nested {\n\t\tquery := buf.String()\n\t\tbuf.Reset()\n\t\tquestionToDollarPlaceholders(buf, query)\n\t\tif q.Log != nil {\n\t\t\tvar logOutput string\n\t\t\tswitch {\n\t\t\tcase Lstats&q.LogFlag != 0:\n\t\t\t\tlogOutput = \"\\n----[ Executing query ]----\\n\" + buf.String() + \" \" + fmt.Sprint(*args) +\n\t\t\t\t\t\"\\n----[ with bind values ]----\\n\" + questionInterpolate(query, *args...)\n\t\t\tcase Linterpolate&q.LogFlag != 0:\n\t\t\t\tlogOutput = questionInterpolate(query, *args...)\n\t\t\tdefault:\n\t\t\t\tlogOutput = buf.String() + \" \" + fmt.Sprint(*args)\n\t\t\t}\n\t\t\tswitch q.Log.(type) {\n\t\t\tcase *log.Logger:\n\t\t\t\t_ = q.Log.Output(q.logSkip+2, logOutput)\n\t\t\tdefault:\n\t\t\t\t_ = q.Log.Output(q.logSkip+1, logOutput)\n\t\t\t}\n\t\t}\n\t}\n}",
"func whereApplies(where *WhereClause, colNames map[string]int, row []db.Value) bool {\n\tif where == nil {\n\t\treturn true\n\t}\n\tcolIndex := colNames[where.ColName]\n\n\trowValue := row[colIndex]\n\n\t// FIXME might want to check if types match before comparison\n\tif where.Comparison == \"=\" {\n\t\treturn rowValue.GetValue() == where.ComparisonValue.GetValue()\n\t} else if where.Comparison == \"!=\" {\n\t\treturn rowValue.GetValue() != where.ComparisonValue.GetValue()\n\t} else if where.Comparison == \"<\" { // assuming numerical types for less/greater than\n\t\treturn rowValue.GetValue().(float64) < where.ComparisonValue.GetValue().(float64)\n\t} else if where.Comparison == \"<=\" {\n\t\treturn rowValue.GetValue().(float64) <= where.ComparisonValue.GetValue().(float64)\n\t} else if where.Comparison == \">\" {\n\t\treturn rowValue.GetValue().(float64) > where.ComparisonValue.GetValue().(float64)\n\t} else if where.Comparison == \">=\" {\n\t\treturn rowValue.GetValue().(float64) >= where.ComparisonValue.GetValue().(float64)\n\t}\n\n\treturn false\n}",
"func (p *LogicalJoin) buildSelectionWithConds(leftAsOuter bool) (*Selection, []*expression.CorrelatedColumn) {\n\tvar (\n\t\touterSchema *expression.Schema\n\t\tinnerChild Plan\n\t\tinnerConditions []expression.Expression\n\t)\n\tif leftAsOuter {\n\t\touterSchema = p.children[0].Schema()\n\t\tinnerConditions = p.RightConditions\n\t\tinnerChild = p.children[1]\n\t} else {\n\t\touterSchema = p.children[1].Schema()\n\t\tinnerConditions = p.LeftConditions\n\t\tinnerChild = p.children[0]\n\t}\n\tif sel, ok := innerChild.(*Selection); ok {\n\t\tinnerConditions = append(innerConditions, sel.Conditions...)\n\t\tinnerChild = sel.children[0]\n\t}\n\tcorCols := make([]*expression.CorrelatedColumn, 0, outerSchema.Len())\n\tfor _, col := range outerSchema.Columns {\n\t\tcorCol := &expression.CorrelatedColumn{Column: *col, Data: new(types.Datum)}\n\t\tcorCol.Column.ResolveIndices(outerSchema)\n\t\tcorCols = append(corCols, corCol)\n\t}\n\tselection := Selection{}.init(p.allocator, p.ctx)\n\tselection.SetSchema(innerChild.Schema().Clone())\n\tselection.SetChildren(innerChild)\n\tconds := make([]expression.Expression, 0, len(p.EqualConditions)+len(innerConditions)+len(p.OtherConditions))\n\tfor _, cond := range p.EqualConditions {\n\t\tnewCond := expression.ConvertCol2CorCol(cond, corCols, outerSchema)\n\t\tconds = append(conds, newCond)\n\t}\n\tselection.Conditions = conds\n\t// Currently only eq conds will be considered when we call checkScanController, and innerConds from the below sel may contain correlated column,\n\t// which will have side effect when we do check. So we do check before append other conditions into selection.\n\tselection.controllerStatus = selection.checkScanController()\n\tif selection.controllerStatus == notController {\n\t\treturn nil, nil\n\t}\n\tfor _, cond := range innerConditions {\n\t\tconds = append(conds, cond)\n\t}\n\tfor _, cond := range p.OtherConditions {\n\t\tnewCond := expression.ConvertCol2CorCol(cond, corCols, outerSchema)\n\t\tnewCond.ResolveIndices(innerChild.Schema())\n\t\tconds = append(conds, newCond)\n\t}\n\tselection.Conditions = conds\n\treturn selection, corCols\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ApplyPaging applies the Limit and Offset to the provided SelectBuilder.
|
func (d *SelectBuilder) ApplyPaging(qry *squirrel.SelectBuilder) {
*qry = ApplyPaging(d.StatementBuilder, *qry)
}
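The same pointer-rewriting style extends naturally to LIMIT/OFFSET. In the sketch below, page and size are hypothetical stand-ins for whatever the package's StatementBuilder actually carries; squirrel's Limit and Offset both take uint64.

import sq "github.com/Masterminds/squirrel"

// applyPaging applies LIMIT/OFFSET in place; page and size are assumed
// inputs, with page numbering starting at 1.
func applyPaging(qry *sq.SelectBuilder, page, size uint64) {
	if size == 0 {
		return // treat zero as "no paging requested"
	}
	if page == 0 {
		page = 1 // guard against underflow in the offset arithmetic
	}
	*qry = qry.Limit(size).Offset((page - 1) * size)
}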
|
[
"func (r *Request) ApplyPaginationToSelect(sb *squirrel.SelectBuilder) *squirrel.SelectBuilder {\n\tif r == nil || r.Pagination == nil {\n\t\treturn sb\n\t}\n\n\treturn r.Pagination.ApplyToSelect(sb)\n}",
"func (opt *SQLOptions) Apply(db *gorm.DB, defaultLimit, defaultPage, maxRecords int,\n\tallowNoLimitArg ...bool) (\n\tresult *gorm.DB, limit, page, totalPage, totalCount int, err error) {\n\n\tallowNoLimit := false\n\tif len(allowNoLimitArg) > 0 {\n\t\tallowNoLimit = allowNoLimitArg[0]\n\t}\n\n\tresult = db\n\n\tresult, err = opt.applyFilterIfNeeded(result)\n\tif err != nil {\n\t\treturn nil, -1, -1, -1, -1, err\n\t}\n\n\tresult, err = opt.applyOrderIfNeeded(result)\n\tif err != nil {\n\t\treturn nil, -1, -1, -1, -1, err\n\t}\n\n\tif defaultLimit < 1 {\n\t\terr = errors.New(\"default limit should be greater than 1\")\n\t\treturn nil, -1, -1, -1, -1, err\n\t}\n\n\tif defaultPage < 1 {\n\t\terr = errors.New(\"default page should be greater than 1\")\n\t\treturn nil, -1, -1, -1, -1, err\n\t}\n\n\tvar count int\n\tif maxRecords != 0 {\n\t\tcount = maxRecords\n\t} else {\n\t\tcountResult := result.Count(&count)\n\t\tif countResult.Error != nil {\n\t\t\terr = fmt.Errorf(\"failed to count result. err: %v\", countResult.Error)\n\t\t\treturn nil, -1, -1, -1, 1, err\n\t\t}\n\t}\n\n\ttotalCount = count\n\tlimitPtr := opt.limit(defaultLimit, allowNoLimit)\n\tif limitPtr != nil {\n\t\tlimit = *limitPtr\n\t\tpage = opt.page(defaultPage)\n\t\toffset := opt.offset(limit, page)\n\t\tresult = result.Limit(limit).Offset(offset)\n\t\ttotalPage = count / limit\n\t\tif count%limit != 0 {\n\t\t\ttotalPage++\n\t\t}\n\t}\n\n\treturn\n}",
"func (b *PeopleBuilder) Limit(maxRowCount int, offset int) *PeopleBuilder {\n\tb.builder.Limit(maxRowCount, offset)\n\treturn b\n}",
"func paginate(pageSize int, choices []core.OptionAnswer, sel int) ([]core.OptionAnswer, int) {\n\tvar start, end, cursor int\n\n\tif len(choices) < pageSize {\n\t\t// if we dont have enough options to fill a page\n\t\tstart = 0\n\t\tend = len(choices)\n\t\tcursor = sel\n\n\t} else if sel < pageSize/2 {\n\t\t// if we are in the first half page\n\t\tstart = 0\n\t\tend = pageSize\n\t\tcursor = sel\n\n\t} else if len(choices)-sel-1 < pageSize/2 {\n\t\t// if we are in the last half page\n\t\tstart = len(choices) - pageSize\n\t\tend = len(choices)\n\t\tcursor = sel - start\n\n\t} else {\n\t\t// somewhere in the middle\n\t\tabove := pageSize / 2\n\t\tbelow := pageSize - above\n\n\t\tcursor = pageSize / 2\n\t\tstart = sel - above\n\t\tend = sel + below\n\t}\n\n\t// return the subset we care about and the index\n\treturn choices[start:end], cursor\n}",
"func Paginate(db *gorm.DB, value interface{}, options ...Option) (*Result, error) {\n\treturn New(db, options...).Paginate(value)\n}",
"func PaginateSlice(options *v1alpha1.ListOptions, pointer interface{}) error {\n\tv := reflect.ValueOf(pointer)\n\tif v.Kind() != reflect.Ptr {\n\t\treturn fmt.Errorf(\"non-pointer %v\", v.Type())\n\t}\n\t// get the value that the pointer v points to.\n\tv = v.Elem()\n\tif v.Kind() != reflect.Slice {\n\t\treturn fmt.Errorf(\"can't fill non-slice value\")\n\t}\n\n\tpage := options.Page\n\tif page < 1 {\n\t\tpage = 1\n\t}\n\toffset := (page - 1) * options.Limit\n\tlimit := options.Limit\n\tcount := uint64(v.Len())\n\tif offset >= count {\n\t\toffset = 0\n\t\tlimit = 0\n\t}\n\tif offset+limit > count {\n\t\tlimit = count - offset\n\t}\n\tv.Set(v.Slice(int(offset), int(offset+limit)))\n\treturn nil\n}",
"func (q *OrderBookSummaryQuery) Select(ctx context.Context, dest interface{}) error {\n\tvar sql bytes.Buffer\n\n\t// append the limit first to the arguments, so we can use\n\t// a fixed placeholder (in this case $1)\n\tq.pushArg(OrderBookSummaryPageSize)\n\n\terr := OrderBookSummaryTemplate.Execute(&sql, q)\n\tif err != nil {\n\t\treturn errors.Wrap(err, 1)\n\t}\n\n\terr = q.SqlQuery.SelectRaw(ctx, sql.String(), q.args, dest)\n\tif err != nil {\n\t\treturn errors.Wrap(err, 1)\n\t}\n\n\treturn nil\n}",
"func BuildPagination(query *orm.Query, sort string, limit, offset int) (*orm.Query, int, error) {\n\tvar (\n\t\ttmp string\n\t\tresult []string\n\t\tcount int\n\t)\n\n\tsortItem := strings.Split(sort, \",\")\n\n\tfor _, item := range sortItem {\n\t\tif strings.HasPrefix(item, \"-\") {\n\t\t\tcolumnName := item\n\t\t\ttmp = columnName[1:] + \" DESC\"\n\t\t} else {\n\t\t\ttmp = item\n\t\t}\n\n\t\tresult = append(result, tmp)\n\t}\n\n\tfor _, srt := range result {\n\t\tif len(srt) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tquery.Order(srt)\n\t}\n\n\tcount, err := query.Count()\n\tif err != nil {\n\t\treturn query, count, err\n\t}\n\n\tif limit > 0 {\n\t\tquery.Limit(limit)\n\t}\n\n\tif offset > 0 {\n\t\tquery.Offset(offset)\n\t}\n\n\treturn query, count, nil\n}",
"func Apply(opts []Option) *LimitOpts {\n\tlimitOpts := &LimitOpts{}\n\tfor _, opt := range opts {\n\t\topt(limitOpts)\n\t}\n\n\treturn limitOpts\n}",
"func (s *SelectStmt) LimitOffset(limit, offset int) *SelectStmt {\n\tvar t = *s\n\tt.limit = &limit\n\tt.offset = &offset\n\treturn &t\n}",
"func TestPaginate_MakePageOptions(t *testing.T) {\n\tdefaultRequest := pageHandler.DefaultPageRequest()\n\tpageRequestFor10 := &paginatepb.PageRequest{\n\t\tPage: 10,\n\t\tPageSize: 10,\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tgiven *paginatepb.PageRequest\n\t\twant *Options\n\t}{\n\t\t{\n\t\t\tname: \"#准备分页选项#defaut\",\n\t\t\tgiven: defaultRequest,\n\t\t\twant: &Options{\n\t\t\t\tRequest: defaultRequest,\n\n\t\t\t\tWhere: []*Where{},\n\t\t\t\tOrder: []*OrderBy{},\n\t\t\t\tLimit: defaultRequest.PageSize,\n\t\t\t\tOffset: 0,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"#准备分页选项#每页10条之第10页\",\n\t\t\tgiven: pageRequestFor10,\n\t\t\twant: &Options{\n\t\t\t\tRequest: pageRequestFor10,\n\n\t\t\t\tWhere: []*Where{},\n\t\t\t\tOrder: []*OrderBy{},\n\t\t\t\tLimit: pageRequestFor10.PageSize,\n\t\t\t\tOffset: (pageRequestFor10.Page - 1) * pageRequestFor10.PageSize,\n\t\t\t},\n\t\t},\n\t}\n\tfor _, param := range tests {\n\t\tt.Run(param.name, func(t *testing.T) {\n\t\t\tgot := pageHandler.MakePageOptions(param.given)\n\t\t\tassert.Equal(t, param.want.Limit, got.Limit, \"Limit\")\n\t\t\tassert.Equal(t, param.want.Offset, got.Offset, \"Offset\")\n\t\t})\n\t}\n}",
"func (self *SelectManager) Offset(skip int) *SelectManager {\n\tself.Tree.Offset = nodes.Offset(skip)\n\treturn self\n}",
"func (b *Builder) buildLimitOffset(ev memo.ExprView) (execPlan, error) {\n\tinput, err := b.buildRelational(ev.Child(0))\n\tif err != nil {\n\t\treturn execPlan{}, err\n\t}\n\tvalueExpr := ev.Child(1)\n\tif valueExpr.Operator() != opt.ConstOp {\n\t\treturn execPlan{}, errors.Errorf(\"only constant LIMIT/OFFSET supported\")\n\t}\n\tdatum := valueExpr.Private().(tree.Datum)\n\tvalue, ok := datum.(*tree.DInt)\n\tif !ok {\n\t\treturn execPlan{}, errors.Errorf(\"non-integer LIMIT/OFFSET\")\n\t}\n\tvar limit, offset int64\n\tif ev.Operator() == opt.LimitOp {\n\t\tlimit, offset = int64(*value), 0\n\t} else {\n\t\tlimit, offset = math.MaxInt64, int64(*value)\n\t}\n\tnode, err := b.factory.ConstructLimit(input.root, limit, offset)\n\tif err != nil {\n\t\treturn execPlan{}, err\n\t}\n\treturn execPlan{root: node, outputCols: input.outputCols}, nil\n}",
"func (d *SelectBuilder) ApplySort(qry *squirrel.SelectBuilder) {\n\t*qry = ApplySort(d.StatementBuilder, &ConditionOptionPreprocessParams{\n\t\tBaseTable: d.From,\n\t}, *qry)\n}",
"func (dc PaginationConfig) DoPagination(datas interface{}) (interface{}, error) {\r\n\tv := reflect.ValueOf(datas)\r\n\tif !(v.Kind() == reflect.Slice || v.Kind() == reflect.Array) {\r\n\t\treturn nil, errors.New(\"Only Array or Slice\")\r\n\t}\r\n\r\n\tif v.Len() <= 0 {\r\n\t\treturn nil, errors.New(\"Empty data\")\r\n\t}\r\n\r\n\tvar FilterTypes []filters\r\n\tfor _, filter := range dc.ExpressionFilters {\r\n\t\tfieldtype, err := getexiststype(datas, filter.PropertyName)\r\n\t\tif err == nil {\r\n\r\n\t\t\tfilter := filters{\r\n\t\t\t\tComparison: filter.Comparison,\r\n\t\t\t\tPropertyName: filter.PropertyName,\r\n\t\t\t\tType: fieldtype,\r\n\t\t\t\tValue: filter.Value,\r\n\t\t\t}\r\n\r\n\t\t\tFilterTypes = append(FilterTypes, filter)\r\n\t\t}\r\n\t}\r\n\tif dc.SortColumn != \"\" {\r\n\t\tsortdata(datas, dc.SortColumn, dc.IsAscending)\r\n\r\n\t}\r\n\r\n\tif len(FilterTypes) > 0 {\r\n\t\tif dc.AndLogic {\r\n\t\t\tfilterdatas, err := dofilter(datas, FilterTypes, true)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn nil, err\r\n\t\t\t}\r\n\t\t\tdatas = filterdatas\r\n\t\t} else {\r\n\t\t\tfilterdatas, err := dofilter(datas, FilterTypes, false)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn nil, err\r\n\t\t\t}\r\n\t\t\tdatas = filterdatas\r\n\t\t}\r\n\t}\r\n\tif dc.Take != 0 {\r\n\t\tdatas = takecountofdatas(datas, dc.Skip, dc.Take)\r\n\t}\r\n\r\n\treturn datas, nil\r\n}",
"func (m *MGO) paginatedFilterBuilder(printer *message.Printer, filter bson.D, orderBy string, obj interface{}) (bson.D, error) {\n\tv := reflect.ValueOf(obj)\n\tif v.Kind() != reflect.Struct {\n\t\tm.errorLogger.Printf(\"unexpected type of given object in filterBuilder(), expected Struct, got %s\", v.Kind().String())\n\t\treturn nil, status.Errorf(codes.Internal, printer.Sprintf(\"internal error while building filter\"))\n\t}\n\tid := v.FieldByName(\"Id\")\n\tif !id.IsValid() {\n\t\tm.errorLogger.Printf(\"given object does not have an 'Id' - %+v\", obj)\n\t\treturn nil, status.Errorf(codes.Internal, printer.Sprintf(\"internal error while building filter\"))\n\t}\n\tidFilter := bson.E{\n\t\tKey: \"_id\",\n\t\tValue: bson.E{\n\t\t\tKey: \"$oid\",\n\t\t\tValue: bson.E{\n\t\t\t\tKey: \"$gt\",\n\t\t\t\tValue: id.Interface(),\n\t\t\t},\n\t\t},\n\t}\n\t// build pagination filter which makes sure that the\n\t// next page starts with the document coming after the given one\n\t// (depending on how the documents are / will be sorted)\n\tvar pageFilter bson.D\n\tvar next, exact bson.D\n\tif orderBy != \"\" {\n\t\t// if sorting is requested\n\t\tsorts := strings.Split(orderBy, \",\")\n\t\tfor _, name := range sorts {\n\t\t\tname = strings.TrimSpace(name)\n\t\t\tif len(name) == 0 {\n\t\t\t\treturn nil, status.Errorf(codes.InvalidArgument, printer.Sprintf(\"orderBy field has a length of 0\"))\n\t\t\t}\n\t\t\t// ensure snake case\n\t\t\tname = strings.ToLower(name[:1]) + name[1:]\n\t\t\t// operator\n\t\t\top := \"$gt\"\n\t\t\tif name[0:1] == \"-\" {\n\t\t\t\top = \"$lt\"\n\t\t\t\tname = name[1:]\n\n\t\t\t}\n\t\t\tif name[0:1] == \"+\" {\n\t\t\t\tname = name[1:]\n\t\t\t}\n\t\t\t// handle id field which is special\n\t\t\tif name == \"id\" {\n\t\t\t\t// like id filter but considering\n\t\t\t\t// the operator (which might be $lt)\n\t\t\t\tflt := bson.E{\n\t\t\t\t\tKey: \"_id\",\n\t\t\t\t\tValue: bson.E{\n\t\t\t\t\t\tKey: \"$oid\",\n\t\t\t\t\t\tValue: bson.E{\n\t\t\t\t\t\t\tKey: op,\n\t\t\t\t\t\t\tValue: id.Interface(),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\t// if the only orderBy string is id\n\t\t\t\tif len(sorts) == 1 {\n\t\t\t\t\tpageFilter = bson.D{flt}\n\t\t\t\t\t// no need to do more\n\t\t\t\t\t// as id is the only field\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\t// for id, we know that it is unique\n\t\t\t\t// therefore there cannot be exact\n\t\t\t\t// matches so we only set next and continue\n\t\t\t\tnext = append(next, flt)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// get field value by it's name (camel case)\n\t\t\tf := v.FieldByName(strings.Title(name))\n\t\t\tif !f.IsValid() {\n\t\t\t\t// if field does not exist or has zero value\n\t\t\t\t// it cannot be used as a filter\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvalue := f.Interface()\n\t\t\t// A) either the next document needs to have a\n\t\t\t// greater or smaller value for the given field\n\t\t\t// (depending on sort direction)\n\t\t\tnext = append(next, bson.E{\n\t\t\t\tKey: name,\n\t\t\t\tValue: bson.E{\n\t\t\t\t\tKey: op,\n\t\t\t\t\tValue: value,\n\t\t\t\t},\n\t\t\t})\n\t\t\t// B) or it needs to be an exact match\n\t\t\t// but the id needs to be greater, a criteria\n\t\t\t// we add after the loop\n\t\t\texact = append(exact, bson.E{\n\t\t\t\tKey: name,\n\t\t\t\tValue: value,\n\t\t\t})\n\t\t}\n\t\t// if only id was given as orderBy string\n\t\t// exact has a length of 0\n\t\tif len(exact) > 0 {\n\t\t\t// ensure the id is greater for exact matches\n\t\t\texact = append(exact, idFilter)\n\t\t\t// put together page filter\n\t\t\tpageFilter = 
bson.D{\n\t\t\t\t{\n\t\t\t\t\tKey: \"$or\",\n\t\t\t\t\tValue: bson.A{\n\t\t\t\t\t\tnext,\n\t\t\t\t\t\texact,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\t}\n\t// if no (valid) sorting is given, it will be sorted by\n\t// id, which makes the pagination filter quite simple\n\tif len(pageFilter) == 0 {\n\t\tpageFilter = bson.D{idFilter}\n\t}\n\tif len(filter) > 0 {\n\t\t// merge given and pagination filters\n\t\tfilter = bson.D{\n\t\t\t{\n\t\t\t\tKey: \"$and\",\n\t\t\t\tValue: bson.A{\n\t\t\t\t\tfilter,\n\t\t\t\t\tpageFilter,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t} else {\n\t\t// no filter given as input\n\t\t// resulting filter equals the pagination filter\n\t\tfilter = pageFilter\n\t}\n\treturn filter, nil\n}",
"func (q SelectQuery) Offset(offset int) SelectQuery {\n\tnum := int64(offset)\n\tq.OffsetValue = &num\n\treturn q\n}",
"func Paginate(\n\tindex Index,\n\tctx context.Context,\n\trequest *PaginationRequest,\n\tonItem func(proto.Message),\n\toptions ...ormlist.Option,\n) (*PaginationResponse, error) {\n\toffset := int(request.Offset)\n\tif len(request.Key) != 0 {\n\t\tif offset > 0 {\n\t\t\treturn nil, fmt.Errorf(\"can only specify one of cursor or offset\")\n\t\t}\n\n\t\toptions = append(options, ormlist.Cursor(request.Key))\n\t}\n\n\tif request.Reverse {\n\t\toptions = append(options, ormlist.Reverse())\n\t}\n\n\tit, err := index.Iterator(ctx, options...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer it.Close()\n\n\tlimit := int(request.Limit)\n\tif limit == 0 {\n\t\treturn nil, fmt.Errorf(\"limit not specified\")\n\t}\n\n\ti := 0\n\tif offset != 0 {\n\t\tfor ; i < offset; i++ {\n\t\t\tif !it.Next() {\n\t\t\t\treturn &PaginationResponse{\n\t\t\t\t\tPageResponse: &queryv1beta1.PageResponse{Total: uint64(i)},\n\t\t\t\t}, nil\n\t\t\t}\n\t\t}\n\t}\n\n\thaveMore := false\n\tcursors := make([]ormlist.CursorT, 0, limit)\n\tdone := limit + offset\n\tfor it.Next() {\n\t\tif i == done {\n\t\t\thaveMore = true\n\t\t\tif request.CountTotal {\n\t\t\t\tfor {\n\t\t\t\t\ti++\n\t\t\t\t\tif !it.Next() {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\tmessage, err := it.GetMessage()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif request.Filter != nil && !request.Filter(message) {\n\t\t\tcontinue\n\t\t}\n\n\t\ti++\n\t\tcursors = append(cursors, it.Cursor())\n\t\tonItem(message)\n\t}\n\n\tpageRes := &queryv1beta1.PageResponse{}\n\tif request.CountTotal {\n\t\tpageRes.Total = uint64(i)\n\t}\n\tn := len(cursors)\n\tif n != 0 {\n\t\tpageRes.NextKey = cursors[n-1]\n\t}\n\treturn &PaginationResponse{\n\t\tPageResponse: pageRes,\n\t\tHaveMore: haveMore,\n\t\tCursors: cursors,\n\t}, nil\n}",
"func createPaginatedControl(query elastic.Query, filters map[string][]string) (*elastic.NestedQuery, error) {\n\tfrom, size, err := paginatedParams(filters)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnestedQuery := elastic.NewNestedQuery(\"profiles.controls\", query)\n\tnestedQuery = nestedQuery.InnerHit(elastic.NewInnerHit().From(from).Size(size))\n\treturn nestedQuery, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ApplySort applies the sort fields to the provided SelectBuilder.
|
func (d *SelectBuilder) ApplySort(qry *squirrel.SelectBuilder) {
*qry = ApplySort(d.StatementBuilder, &ConditionOptionPreprocessParams{
BaseTable: d.From,
}, *qry)
}
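ApplySort shares the ConditionOptionPreprocessParams preprocessing with ApplyConditions, so sort fields can be qualified against the base table. A hedged sketch of what that qualification might look like; the leading "-" convention for descending order is an assumption, not this package's documented behavior.

import (
	"strings"

	sq "github.com/Masterminds/squirrel"
)

// applySort prefixes each field with the base table before handing it to
// squirrel's OrderBy; "-field" sorts descending (assumed convention).
func applySort(qry *sq.SelectBuilder, baseTable string, fields []string) {
	for _, f := range fields {
		dir := " ASC"
		if strings.HasPrefix(f, "-") {
			dir = " DESC"
			f = strings.TrimPrefix(f, "-")
		}
		*qry = qry.OrderBy(baseTable + "." + f + dir)
	}
}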
|
[
"func (sqlbldr *Builder) ApplySortList( aSortList *OrderByList ) *Builder {\n\treturn sqlbldr.ApplyOrderByList(aSortList)\n}",
"func (sqlbldr *Builder) ApplyOrderByList( aOrderByList *OrderByList ) *Builder {\n\tif aOrderByList != nil && sqlbldr.myDbModel != nil {\n\t\ttheSortKeyword := \"ORDER BY\"\n\t\t/* in case we find diff keywords later...\n\t\tdriverName := sqlbldr.myDbModel.GetDbMeta().Name\n\t\tswitch driverName {\n\t\tcase MySQL:\n\t\tcase PostgreSQL:\n\t\tdefault:\n\t\t\ttheSortKeyword = \"ORDER BY\"\n\t\t}//switch\n\t\t*/\n\t\tsqlbldr.Add(theSortKeyword)\n\n\t\ttheOrderByList := make([]string, len(*aOrderByList))\n\t\tidx := 0\n\t\tfor k, v := range *aOrderByList {\n\t\t\ttheEntry := k + \" \"\n\t\t\tif strings.ToUpper(strings.TrimSpace(v)) == ORDER_BY_DESCENDING {\n\t\t\t\ttheEntry += ORDER_BY_DESCENDING\n\t\t\t} else {\n\t\t\t\ttheEntry += ORDER_BY_ASCENDING\n\t\t\t}\n\t\t\ttheOrderByList[idx] = theEntry\n\t\t\tidx += 1\n\t\t}\n\t\tsqlbldr.Add(strings.Join(theOrderByList, \",\"))\n\t}\n\treturn sqlbldr\n}",
"func (q SelectQuery) OrderBy(fields ...Field) SelectQuery {\n\tq.OrderByFields = append(q.OrderByFields, fields...)\n\treturn q\n}",
"func (_u *_usersQueryBuilder) OrderBy(fields ...string) (*_usersQueryBuilder) {\n if _u.err != nil {\n return _u\n }\n sortFields := make([]query.Sort, len(fields))\n for i, field := range fields {\n if len(field) == 0 {\n _u.err = errors.Wrap(mapping.ErrInvalidModelField, \"cannot set sorting order for an empty field for model: 'NRN_Users'\")\n return _u\n }\n var order query.SortOrder\n if field[0] == '-' {\n order = query.DescendingOrder\n field = field[1:]\n }\n structField, ok := _u.builder.Scope().ModelStruct.FieldByName(field)\n if !ok {\n _u.err = errors.Wrapf(mapping.ErrInvalidModelField, \"field: '%s' is not valid for model: 'NRN_Users'\", field)\n return _u\n }\n sortFields[i] = query.SortField{StructField: structField, SortOrder: order}\n }\n _u.builder.OrderBy(sortFields...)\n return _u\n}",
"func ApplyOrderFilter(param string, value string, args interface{}) dao.FilterFunc {\n\tallowedFilters := args.([]string)\n\treturn func(s *utils.Context) *utils.Context {\n\t\tisdesc := \"\"\n\t\tif strings.Index(value, \"-\") == 0 {\n\t\t\tisdesc = \" DESC\"\n\t\t}\n\t\treplacer := strings.NewReplacer(\"+\", \"\", \"-\", \"\")\n\t\tval := strings.TrimSpace(replacer.Replace(value))\n\n\t\tfor _, f := range allowedFilters {\n\t\t\tif f == val {\n\t\t\t\ts.Get(\"c\").(*gorm.DB).Order(\"`\" + val + \"`\" + isdesc)\n\t\t\t}\n\t\t}\n\t\treturn s\n\t}\n}",
"func (s *CatFielddataService) Sort(fields ...string) *CatFielddataService {\n\ts.sort = fields\n\treturn s\n}",
"func doSort(stat *stat.PGresult, opts *ReportOptions) {\n\tvar sortKey int\n\n\t// set ascending order if required\n\tif opts.OrderColName[0] == ascFlag[0] {\n\t\topts.OrderDesc = false // set to Asc\n\t\topts.OrderColName = strings.TrimLeft(opts.OrderColName, ascFlag)\n\t}\n\n\tfor k, v := range stat.Cols {\n\t\tif v == opts.OrderColName {\n\t\t\tsortKey = k\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// use descending order by default\n\tstat.Sort(sortKey, opts.OrderDesc)\n}",
"func (o *CombinedQueryVulnerabilitiesParams) SetSort(sort *string) {\n\to.Sort = sort\n}",
"func (b SelectBuilder) OrderByClause(pred interface{}, args ...interface{}) SelectBuilder {\n\treturn builder.Append(b, \"OrderByParts\", newPart(pred, args...)).(SelectBuilder)\n}",
"func NewNestedSortValueBuilder() *NestedSortValueBuilder {\n\tr := NestedSortValueBuilder{\n\t\t&NestedSortValue{},\n\t}\n\n\treturn &r\n}",
"func (b *SQLBuilder) OrderBy(o ...FieldOrder) {\n\tif len(o) == 0 {\n\t\treturn\n\t}\n\ts := `ORDER BY `\n\tfor k, v := range o {\n\t\tif k > 0 {\n\t\t\ts += COMMA\n\t\t}\n\t\ts += b.FieldToSQL(v.Field) + ` ` + v.Order\n\t}\n\tb.w.WriteLine(s)\n}",
"func (sqlbldr *Builder) ApplyFilter( aFilter *Builder ) *Builder {\n\tif aFilter != nil {\n\t\tif aFilter.mySql != \"\" {\n\t\t\tsqlbldr.mySql += sqlbldr.myParamPrefix + aFilter.mySql\n\t\t}\n\t\t//also merge in any params from the sub-query\n\t\tfor k, v := range aFilter.myParams {\n\t\t\tsqlbldr.myParams[k] = v\n\t\t}\n\t\tfor k, v := range aFilter.mySetParams {\n\t\t\tsqlbldr.mySetParams[k] = v\n\t\t}\n\t}\n\treturn sqlbldr\n}",
"func (r *Request) ApplyPaginationToSelect(sb *squirrel.SelectBuilder) *squirrel.SelectBuilder {\n\tif r == nil || r.Pagination == nil {\n\t\treturn sb\n\t}\n\n\treturn r.Pagination.ApplyToSelect(sb)\n}",
"func (qs *QueryStatement) parseOrderBy(orderByClause *parser.OrderByClauseContext) {\n\tif orderByClause != nil {\n\t\tiSortFieldsContext := orderByClause.SortFields()\n\t\tif iSortFieldsContext != nil {\n\t\t\tsortFieldsContext := iSortFieldsContext.(*parser.SortFieldsContext)\n\t\t\tsortFieldContextList := sortFieldsContext.AllSortField()\n\t\t\tif len(sortFieldContextList) > 0 {\n\t\t\t\tsortFieldContext := sortFieldContextList[0].(*parser.SortFieldContext)\n\t\t\t\tif len(sortFieldContext.AllT_ASC()) > 0 {\n\t\t\t\t\tqs.desc = false\n\t\t\t\t} else {\n\t\t\t\t\tqs.desc = true\n\t\t\t\t}\n\n\t\t\t\texpr := new(proto.Expr)\n\t\t\t\texprContext := sortFieldContext.Expr().(*parser.ExprContext)\n\t\t\t\tqs.parseExpr(exprContext)\n\t\t\t\t//todo\n\t\t\t\t//if new(proto.Expr_Ref).Ref == expr.GetRef() {\n\t\t\t\t//\tref := expr.GetRef().RefName\n\t\t\t\t//\ti := 0\n\t\t\t\t//\tlength := len(qs.fieldExprList)\n\t\t\t\t//\tfor ; i < length; i++ {\n\t\t\t\t//\t\tfieldExpr := qs.fieldExprList[0]\n\t\t\t\t//\t\tif strings.EqualFold(fieldExpr.Alias, ref) {\n\t\t\t\t//\t\t\tqs.orderByExpr = fieldExpr.GetExpr()\n\t\t\t\t//\t\t\treturn\n\t\t\t\t//\t\t}\n\t\t\t\t//\t}\n\t\t\t\t//}\n\t\t\t\tqs.orderByExpr = expr\n\t\t\t}\n\t\t}\n\t}\n}",
"func (d *SelectBuilder) ApplyPaging(qry *squirrel.SelectBuilder) {\n\t*qry = ApplyPaging(d.StatementBuilder, *qry)\n}",
"func ByBio(opts ...sql.OrderTermOption) OrderOption {\n\treturn sql.OrderByField(FieldBio, opts...).ToFunc()\n}",
"func (cq *CollectionQuery) Sort(sorters ...*Sorter) *CollectionQuery {\n\tc := *cq\n\n\tfinalSorters := bson.M{}\n\n\tfor _, s := range sorters {\n\t\tfinalSorters[s.field] = s.order\n\t}\n\n\tc.pipes = append(c.pipes, &bson.M{\n\t\t\"$sort\": finalSorters,\n\t})\n\n\treturn &c\n}",
"func (mu *MenuUpdate) AddSort(i int) *MenuUpdate {\n\tmu.mutation.AddSort(i)\n\treturn mu\n}",
"func SelectSort(data Sortable) {\n\tfor i := 0; i < data.Len(); i++ {\n\t\tmin := i\n\t\tfor j := i + 1; j < data.Len(); j++ {\n\t\t\tif data.Less(j, min) {\n\t\t\t\tmin = j\n\t\t\t}\n\t\t}\n\t\tdata.Swap(min, i)\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
RunPreparation `prepares` the database, meaning that it will create one or more tables for each entity, depending on their Create method. Dropping each table requires an explicit opt-in flag.
|
func (m *Manger) RunPreparation() {
var err error
if m.dropSchema {
for _, s := range m.entities {
if err = s.Drop(); err != nil {
log.Fatal(err)
}
}
}
for _, s := range m.entities {
if err = s.Create(); err != nil {
log.Fatal(err)
}
}
}
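A sketch of the Create/Drop contract that RunPreparation iterates over. Only the two method calls are visible above, so the Entity interface name, the database/sql backing, and the SQL itself are assumptions.

import "database/sql"

// Entity is a hypothetical name for what each registered entity must
// satisfy; RunPreparation only ever calls Create and Drop.
type Entity interface {
	Create() error
	Drop() error
}

// usersTable is one possible implementation backed by database/sql.
type usersTable struct{ db *sql.DB }

func (u usersTable) Create() error {
	_, err := u.db.Exec(`CREATE TABLE IF NOT EXISTS users (id SERIAL PRIMARY KEY, name TEXT NOT NULL)`)
	return err
}

func (u usersTable) Drop() error {
	_, err := u.db.Exec(`DROP TABLE IF EXISTS users`)
	return err
}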
|
[
"func PrepareTestData(clearSqls []string, initSqls []string) {\n\to := GetOrmer()\n\n\tfor _, sql := range clearSqls {\n\t\tfmt.Printf(\"Exec sql:%v\\n\", sql)\n\t\t_, err := o.Raw(sql).Exec()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"failed to clear database, sql:%v, error: %v\", sql, err)\n\t\t}\n\t}\n\n\tfor _, sql := range initSqls {\n\t\t_, err := o.Raw(sql).Exec()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"failed to init database, sql:%v, error: %v\", sql, err)\n\t\t}\n\t}\n}",
"func Prepare(conn *pgx.Conn, schemas []string, includeTables, excludeTables []string) error {\n\ttx, err := conn.Begin()\n\tif err != nil {\n\t\treturn errTransactionBegin\n\t}\n\n\terr = createSchema(tx)\n\tif err != nil {\n\t\t// https://www.postgresql.org/docs/10/errcodes-appendix.html\n\t\tpgErr, ok := err.(pgx.PgError)\n\t\tif ok && pgErr.Code == \"42P06\" {\n\t\t\treturn errDuplicateSchema\n\t\t}\n\t\tif ok {\n\t\t\tlog.Printf(\"%v+\", pgErr)\n\t\t}\n\t\treturn errCreateSchema\n\t}\n\n\terr = createChangesetsTable(tx)\n\tif err != nil {\n\t\t// https://www.postgresql.org/docs/10/errcodes-appendix.html\n\t\tpgErr, ok := err.(pgx.PgError)\n\t\tif ok && pgErr.Code == \"42P07\" {\n\t\t\treturn errDuplicateTable\n\t\t}\n\t\treturn errCreateTable\n\t}\n\n\terr = createTriggerFunc(tx)\n\tif err != nil {\n\t\treturn errCreateTriggerFunc\n\t}\n\n\tregisterTables, err := GenerateTablesList(conn, schemas, includeTables, excludeTables)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, table := range registerTables {\n\t\tif len(table.PKeyFields) == 0 {\n\t\t\treturn fmt.Errorf(`table \"%s\".\"%s\" has no primary key.`, table.Schema, table.Name)\n\t\t}\n\t\terr = registerTrigger(tx, table.Schema, table.Name)\n\t\tif err != nil {\n\t\t\tpgErr, ok := err.(pgx.PgError)\n\t\t\tif ok {\n\t\t\t\tlog.Printf(\"%+v\", pgErr)\n\t\t\t}\n\t\t\treturn errRegisterTrigger\n\t\t}\n\t}\n\n\tif err = tx.Commit(); err != nil {\n\t\tlog.WithError(err).Error(errTransactionCommit.Error())\n\t\treturn errTransactionCommit\n\t}\n\n\treturn nil\n}",
"func preProcess(_ context.Context, _ dispatcher.TaskHandle, gTask *proto.Task, taskMeta *TaskMeta, logger *zap.Logger) error {\n\tlogger.Info(\"pre process\")\n\t// TODO: drop table indexes depends on the option.\n\t// if err := dropTableIndexes(ctx, handle, taskMeta, logger); err != nil {\n\t// \treturn err\n\t// }\n\treturn updateMeta(gTask, taskMeta)\n}",
"func (m *Model) CreateTablesIfNeeded() {\n\tm.db.Exec(`\ncreate table if not exists Drinks (\nbarcode varchar(255) primary key,\nbrand varchar(255),\nname varchar(255),\nabv real,\nibu real,\ntype varchar(255),\nshorttype varchar(255),\nlogo varchar(255),\ncountry varchar(255),\ndate integer)\n`)\n\tm.db.Exec(`\ncreate table if not exists Input (\nid integer primary key,\nbarcode varchar(255),\nquantity integer,\ndate integer)\n`)\n\tm.db.Exec(`\ncreate table if not exists Output (\nid integer primary key,\nbarcode varchar(255),\nquantity integer,\ndate integer)\n`)\n}",
"func PrepareDatabase(ctx context.Context, db *sqlx.DB, schemaName string) error {\n\tif _, err := db.ExecContext(ctx, fmt.Sprintf(\"CREATE SCHEMA IF NOT EXISTS %s;\", schemaName)); err != nil {\n\t\treturn fmt.Errorf(\"creating schema '%s': %w\", schemaName, err)\n\t}\n\n\tif _, err := db.ExecContext(ctx, strings.ReplaceAll(bootstrapDB, \"{schemaName}\", schemaName)); err != nil {\n\t\treturn fmt.Errorf(\"bootstrapping tables and indexes schema '%s': %w\", schemaName, err)\n\t}\n\n\treturn nil\n}",
"func PrepareDatabase(traceInfo trace.Info) (*sql.DB, string, error) {\n\n\tschema := \"schema_\" + fmt.Sprintf(\"%x\", md5.Sum([]byte(traceInfo.FunctionName)))\n\tmigrateDbConnPool := db.InitDatabase(os.Getenv(\"DATABASE_URL\"))\n\tdefer func() {\n\t\tmigrateDbConnPool.Close()\n\t}()\n\tmigrateDbConnPool.Exec(\"DROP SCHEMA IF EXISTS \" + schema + \" CASCADE\")\n\t_, err := migrateDbConnPool.Exec(\"CREATE SCHEMA \" + schema)\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %s\", err.Error())\n\t}\n\tdbConnPool := db.InitDatabase(os.Getenv(\"DATABASE_URL\") + \"&search_path=\" + schema)\n\tdriver, err := postgres.WithInstance(dbConnPool, &postgres.Config{})\n\tif err != nil {\n\t\tlog.Fatalf(\"=====error: %s\", err.Error())\n\t\treturn nil, schema, err\n\t}\n\tm, err := migrate.NewWithDatabaseInstance(\n\t\t\"file://\"+projectpath.Root+\"/data/migrations\",\n\t\t\"postgres\", driver)\n\tif err != nil {\n\t\tlog.Fatalf(\"=====error: %s\", err.Error())\n\t\treturn nil, schema, err\n\t}\n\tm.Up()\n\treturn dbConnPool, schema, err\n}",
"func InitDB(m ...*Model) {\n\t// TODO: refactor this to create the database file ONLY. Table creation should be done by another function.\n\n\tdb, err := sql.Open(\"sqlite3\", DB_NAME)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer db.Close()\n\n\t// Loop through the model args, and create a table for each model\n\tfor _, v := range m {\n\t\tcommand := v.GenCreateTable()\n\t\t_, err = db.Exec(command)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%q: %s\\n\", err, command)\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (db *DB) Setup() (err error) {\n\tq := `\tDROP TABLE IF EXISTS pairs;\n\t\tCREATE TABLE pairs (\n\t\ttoken TEXT PRIMARY KEY,\n\t\ttarget TEXT,\n\t\ttimes_used INTEGER,\n\t\tlast_used TIMESTAMP,\n\t\tcreated_at TIMESTAMP\n\t\t);`\n\t_, err = db.pool.Exec(q)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn\n}",
"func setupTables(session *r.Session) {\n\t// TODO: Make this use the default database param.\n\tr.DB(\"test\").TableCreate(\"users\").Exec(session)\n}",
"func setupDatabase(db *sql.DB, ddlMap *ddl.Map) error {\n\t// ping the database\n\terr := pingDatabase(db)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create the tables in the database\n\terr = createTables(db, ddlMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create the indexes in the database\n\terr = createIndexes(db, ddlMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func InitDB() {\n\tCreateDatabase(dbName)\n\tfor runtime := range runtimes {\n\t\tCreateProcessingTimeTable(dbName, runtime)\n\t}\n\tCreateDeploymentTimeTable(dbName)\n\tCreateAppVersionTable(dbName)\n\tCreateLogTimeTable(dbName)\n}",
"func (q Query) Prepare(ctx context.Context, db *database.Database) error {\n\tvar err error\n\tvar tx *database.Transaction\n\n\tfor _, stmt := range q.Statements {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t}\n\n\t\tp, ok := stmt.(statement.Preparer)\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\n\t\tif tx == nil {\n\t\t\ttx = db.GetAttachedTx()\n\t\t\tif tx == nil {\n\t\t\t\ttx, err = db.BeginTx(ctx, &database.TxOptions{\n\t\t\t\t\tReadOnly: true,\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tdefer tx.Rollback()\n\t\t\t}\n\t\t}\n\n\t\terr = p.Prepare(tx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func CreateDevDB() {\n\t// creating new tables in exact order\n\tcreateUserTable()\n\tcreatePollTable()\n\tcreatePollOption()\n\tcreateVoteTable()\n}",
"func (m Migrator) CreateTable(values ...interface{}) error {\n\tfor _, value := range m.ReorderModels(values, false) {\n\t\ttx := m.DB.Session(&gorm.Session{})\n\t\tif err := m.RunWithValue(value, func(stmt *gorm.Statement) (err error) {\n\t\t\tvar (\n\t\t\t\tcreateTableSQL = \"CREATE TABLE ? (\"\n\t\t\t\tvalues = []interface{}{m.CurrentTable(stmt)}\n\t\t\t\thasPrimaryKeyInDataType bool\n\t\t\t)\n\n\t\t\tfor _, dbName := range stmt.Schema.DBNames {\n\t\t\t\tfield := stmt.Schema.FieldsByDBName[dbName]\n\t\t\t\tif !field.IgnoreMigration {\n\t\t\t\t\tcreateTableSQL += \"? ?\"\n\t\t\t\t\thasPrimaryKeyInDataType = hasPrimaryKeyInDataType || strings.Contains(strings.ToUpper(string(field.DataType)), \"PRIMARY KEY\")\n\t\t\t\t\tvalues = append(values, clause.Column{Name: dbName}, m.DB.Migrator().FullDataTypeOf(field))\n\t\t\t\t\tcreateTableSQL += \",\"\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !hasPrimaryKeyInDataType && len(stmt.Schema.PrimaryFields) > 0 {\n\t\t\t\tcreateTableSQL += \"PRIMARY KEY ?,\"\n\t\t\t\tprimaryKeys := make([]interface{}, 0, len(stmt.Schema.PrimaryFields))\n\t\t\t\tfor _, field := range stmt.Schema.PrimaryFields {\n\t\t\t\t\tprimaryKeys = append(primaryKeys, clause.Column{Name: field.DBName})\n\t\t\t\t}\n\n\t\t\t\tvalues = append(values, primaryKeys)\n\t\t\t}\n\n\t\t\tfor _, idx := range stmt.Schema.ParseIndexes() {\n\t\t\t\tif m.CreateIndexAfterCreateTable {\n\t\t\t\t\tdefer func(value interface{}, name string) {\n\t\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t\terr = tx.Migrator().CreateIndex(value, name)\n\t\t\t\t\t\t}\n\t\t\t\t\t}(value, idx.Name)\n\t\t\t\t} else {\n\t\t\t\t\tif idx.Class != \"\" {\n\t\t\t\t\t\tcreateTableSQL += idx.Class + \" \"\n\t\t\t\t\t}\n\t\t\t\t\tcreateTableSQL += \"INDEX ? ?\"\n\n\t\t\t\t\tif idx.Comment != \"\" {\n\t\t\t\t\t\tcreateTableSQL += fmt.Sprintf(\" COMMENT '%s'\", idx.Comment)\n\t\t\t\t\t}\n\n\t\t\t\t\tif idx.Option != \"\" {\n\t\t\t\t\t\tcreateTableSQL += \" \" + idx.Option\n\t\t\t\t\t}\n\n\t\t\t\t\tcreateTableSQL += \",\"\n\t\t\t\t\tvalues = append(values, clause.Column{Name: idx.Name}, tx.Migrator().(BuildIndexOptionsInterface).BuildIndexOptions(idx.Fields, stmt))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !m.DB.DisableForeignKeyConstraintWhenMigrating && !m.DB.IgnoreRelationshipsWhenMigrating {\n\t\t\t\tfor _, rel := range stmt.Schema.Relationships.Relations {\n\t\t\t\t\tif rel.Field.IgnoreMigration {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif constraint := rel.ParseConstraint(); constraint != nil {\n\t\t\t\t\t\tif constraint.Schema == stmt.Schema {\n\t\t\t\t\t\t\tsql, vars := buildConstraint(constraint)\n\t\t\t\t\t\t\tcreateTableSQL += sql + \",\"\n\t\t\t\t\t\t\tvalues = append(values, vars...)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, chk := range stmt.Schema.ParseCheckConstraints() {\n\t\t\t\tcreateTableSQL += \"CONSTRAINT ? CHECK (?),\"\n\t\t\t\tvalues = append(values, clause.Column{Name: chk.Name}, clause.Expr{SQL: chk.Constraint})\n\t\t\t}\n\n\t\t\tcreateTableSQL = strings.TrimSuffix(createTableSQL, \",\")\n\n\t\t\tcreateTableSQL += \")\"\n\n\t\t\tif tableOption, ok := m.DB.Get(\"gorm:table_options\"); ok {\n\t\t\t\tcreateTableSQL += fmt.Sprint(tableOption)\n\t\t\t}\n\n\t\t\terr = tx.Exec(createTableSQL, values...).Error\n\t\t\treturn err\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func CreateTables(db *sql.DB) error {\n\t// Create all of the data tables.\n\tfor _, pair := range createTableStatements {\n\t\terr := createTable(db, pair[0], pair[1])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn ClearTestingTable(db)\n}",
"func createTables() {\r\n\tdb.AutoMigrate(&Note{})\r\n}",
"func setupDatabase(db *gorm.DB) error {\n\t// auto migrate all of the models\n\tdb.AutoMigrate(&models.Category{})\n\tdb.AutoMigrate(&models.Group{})\n\tdb.AutoMigrate(&models.Permission{})\n\tdb.AutoMigrate(&models.Post{})\n\tdb.AutoMigrate(&models.Thread{})\n\tdb.AutoMigrate(&models.User{})\n\tdb.AutoMigrate(&models.Token{})\n\tdb.AutoMigrate(&models.File{})\n\tdb.AutoMigrate(&models.Permission{})\n\treturn db.Error\n}",
"func (p *testFixturePreImpl) Prepare(ctx context.Context, s *testing.PreState) interface{} {\n\tctx, st := timing.Start(ctx, p.String()+\"_prepare\")\n\tdefer st.End()\n\n\tif p.tf != nil {\n\t\terr := p.tf.Reinit(ctx)\n\t\tif err == nil { // Reinit succeeded, we're done.\n\t\t\treturn p.tf\n\t\t}\n\t\t// Reinit failed, close the old TestFixture.\n\t\ts.Log(\"Try recreating the TestFixture as it failed to re-initialize, err: \", err)\n\t\tif err := p.tf.Close(ctx); err != nil {\n\t\t\ts.Log(\"Failed to close the broken TestFixture, err: \", err)\n\t\t}\n\t\tp.tf = nil\n\t\t// Fallthrough the creation of TestFixture.\n\t}\n\n\t// Create TestFixture.\n\tvar ops []TFOption\n\t// Read router/pcap variable. If not available or empty, NewTestFixture\n\t// will fall back to Default{Router,Pcap}Host.\n\tif p.features&TFFeaturesRouters != 0 {\n\t\tif routers, ok := s.Var(\"routers\"); ok && routers != \"\" {\n\t\t\ts.Log(\"routers: \", routers)\n\t\t\tslice := strings.Split(routers, \",\")\n\t\t\tif len(slice) < 2 {\n\t\t\t\ts.Fatal(\"You must provide at least two router names\")\n\t\t\t}\n\t\t\tops = append(ops, TFRouter(slice...))\n\t\t} else {\n\t\t\trouters := []string{\n\t\t\t\tcompanionName(s, dut.CompanionSuffixRouter),\n\t\t\t\t// Use AP named as packet capturer as the second router\n\t\t\t\t// when TFFeaturesRouters is set.\n\t\t\t\tcompanionName(s, dut.CompanionSuffixPcap),\n\t\t\t}\n\t\t\ts.Log(\"companion routers: \", routers)\n\t\t\tops = append(ops, TFRouter(routers...))\n\t\t}\n\t} else {\n\t\trouter, ok := s.Var(\"router\")\n\t\tif ok && router != \"\" {\n\t\t\ts.Log(\"router: \", router)\n\t\t\tops = append(ops, TFRouter(router))\n\t\t} // else: let TestFixture resolve the name.\n\t}\n\tpcap, ok := s.Var(\"pcap\")\n\tif ok && pcap != \"\" {\n\t\ts.Log(\"pcap: \", pcap)\n\t\tops = append(ops, TFPcap(pcap))\n\t} // else: let TestFixture resolve the name.\n\t// Read attenuator variable.\n\tif p.features&TFFeaturesAttenuator != 0 {\n\t\tatten, ok := s.Var(\"attenuator\")\n\t\tif !ok || atten == \"\" {\n\t\t\t// Attenuator is not typical companion, so we synthesize its name here.\n\t\t\tatten = companionName(s, \"-attenuator\")\n\t\t}\n\t\ts.Log(\"attenuator: \", atten)\n\t\tops = append(ops, TFAttenuator(atten))\n\t}\n\t// Enable capturing.\n\tif p.features&TFFeaturesCapture != 0 {\n\t\tops = append(ops, TFCapture(true))\n\t}\n\ttf, err := NewTestFixture(ctx, s.PreCtx(), s.DUT(), s.RPCHint(), ops...)\n\tif err != nil {\n\t\ts.Fatal(\"Failed to set up test fixture: \", err)\n\t}\n\tp.tf = tf\n\n\treturn p.tf\n}",
"func TSetUpDB() {\n\tcmd := exec.Command(\"make\", \"-C\", \"../\", \"reset-db-test\")\n\tfmt.Println(\"Resetting the test database...\")\n\terr := cmd.Run()\n\tif err != nil {\n\t\tpanic(fmt.Sprint(\"Failed to reset the database:\", err))\n\t}\n\n\tdatabaseURL := \"postgres://goblog:password@localhost:5432/blog_test\"\n\terr = InitDB(databaseURL)\n\tif err != nil {\n\t\tpanic(fmt.Sprint(\"Could not connect to database\", err))\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewLabel creates a new label from the given names.
|
func NewLabel(names ...string) *Label {
simpleName := strings.Join(names, ".")
return &Label{
names: names,
simpleName: simpleName,
wildcard: strings.Contains(simpleName, "*"),
}
}
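A short usage sketch tying the constructor to the accessors defined further down in this section (Wildcard and Names):

l := NewLabel("metrics", "cpu", "*")
fmt.Println(l.Wildcard()) // true: the joined name "metrics.cpu.*" contains "*"
fmt.Println(l.Names())    // [metrics cpu *]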
|
[
"func NewLabels(labels map[string]string) *Labels {\n\treturn &Labels{\n\t\tbase: newBase(typeLabels, nil),\n\t\tConfig: LabelsConfig{\n\t\t\tLabels: labels,\n\t\t},\n\t}\n}",
"func NewLabel() Label {\n\tuniqId++\n\treturn Label{\n\t\tId: uniqId,\n\t}\n}",
"func NewLabel(id string) (l Label) {\n\tparams := []interface{}{id}\n\tl = Label(NewTuple(params...))\n\treturn l\n}",
"func LabelNew(str string) (*Label, error) {\n\tcstr := C.CString(str)\n\tdefer C.free(unsafe.Pointer(cstr))\n\tc := C.gtk_label_new((*C.gchar)(cstr))\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\tobj := wrapObject(unsafe.Pointer(c))\n\treturn wrapLabel(obj), nil\n}",
"func NewLabel(prefix string, offset uint64, k uint) (*Label, error) {\n\tif len(prefix) > 0 && !(strings.HasSuffix(prefix, \":\") || strings.HasSuffix(prefix, \",\")) {\n\t\tprefix = prefix + \":\"\n\t}\n\treturn &Label{\n\t\tPrefix: prefix,\n\t\tCategories: categoriesForOffset(offset, maxCategories, k),\n\t}, nil\n}",
"func NewLabel(properties LabelProperties) *Label {\n\tthis := Label{}\n\n\tthis.Properties = &properties\n\n\treturn &this\n}",
"func (ls *LabelStore) New(label *models.Label) error {\n\terr := ls.db.QueryRow(`INSERT INTO labels (name) VALUES ($1)\n\t\t\t\t\t\t RETURNING id;`, label.Name).\n\t\tScan(&label.ID)\n\treturn handlePqErr(err)\n}",
"func (cvr *CStorVolumeReplica) WithLabelsNew(labels map[string]string) *CStorVolumeReplica {\n\tcvr.Labels = make(map[string]string)\n\tfor key, value := range labels {\n\t\tcvr.Labels[key] = value\n\t}\n\treturn cvr\n}",
"func LabelNewWithMnemonic(str string) (*Label, error) {\n\tcstr := C.CString(str)\n\tdefer C.free(unsafe.Pointer(cstr))\n\tc := C.gtk_label_new_with_mnemonic((*C.gchar)(cstr))\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\tobj := wrapObject(unsafe.Pointer(c))\n\treturn wrapLabel(obj), nil\n}",
"func NewLabelEvent(prev, current *v1.Node) *LabelEvent {\n\tevent := &LabelEvent{\n\t\tNodeName: current.Name,\n\t\tModified: make(map[string]string),\n\t\tAdded: make(map[string]string),\n\t\tRemoved: make(map[string]string),\n\t}\n\n\tswitch {\n\tcase prev == nil && current != nil:\n\t\tevent.Added = current.Labels\n\tcase prev != nil && current == nil:\n\t\tevent.Removed = prev.Labels\n\tcase prev != nil && current != nil:\n\t\tevent.addModified(prev, current)\n\t\tevent.addAdded(prev, current)\n\t\tevent.addRemoved(prev, current)\n\t}\n\n\treturn event\n}",
"func genLabel(name, label string) string { return fmt.Sprintf(\"%s$%s\", name, label) }",
"func NewCreateLabel() *CreateLabel {\n\tthis := CreateLabel{}\n\treturn &this\n}",
"func (trello *Trello) AddLabel(name string) string {\n /* Pick up a color first */\n colors := [...]string { \"green\", \"yellow\", \"orange\", \"red\", \"purple\", \"blue\", \"sky\", \"lime\", \"pink\", \"black\" }\n\n var labels []Object\n GenGET(trello, \"/boards/\" + trello.BoardId + \"/labels/\", &labels)\n\n /* TODO: avoid duplicates too */\n\n /* Create a label with appropriate color */\n col := colors[ (len(labels)-6) % len(colors) ]\n log.Printf(\"Creating a new %s label name %s in Trello.\", col, name)\n data := Object{}\n GenPOSTForm(trello, \"/labels/\", &data, url.Values{\n \"name\": { name },\n \"idBoard\": { trello.BoardId },\n \"color\": { col } })\n\n trello.labelCache[name] = data.Id\n\n return data.Id\n}",
"func (cvc *CStorVolumeConfig) WithLabelsNew(labels map[string]string) *CStorVolumeConfig {\n\tcvc.Labels = make(map[string]string)\n\tfor key, value := range labels {\n\t\tcvc.Labels[key] = value\n\t}\n\treturn cvc\n}",
"func NewLabeller(len int, fn LabelFunc) Labeller {\n\treturn Labeller{len: len, fn: fn}\n}",
"func NewLabel(frame foundation.Rect) TextField {\n\ttf := NewTextField(frame)\n\ttf.SetBezeled(false)\n\ttf.SetDrawsBackground(false)\n\ttf.SetEditable(false)\n\ttf.SetSelectable(false)\n\treturn tf\n}",
"func NewLabelStack() *LabelStack {\n\treturn &LabelStack{\n\t\tLabels: make([]*Label, InitialLabelStackHeight),\n\t\tPtr: -1,\n\t}\n}",
"func NewLabelData(id string) *LabelModel {\n\treturn &LabelModel{\n\t\tLabelID: id,\n\t}\n}",
"func makeLabel(labelName string, labelVal string) map[string]string {\n\treturn map[string]string{labelName: labelVal}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Wildcard returns true iff the label contains a wildcard.
|
func (label *Label) Wildcard() bool {
return label.wildcard
}
|
[
"func wildcard(s string) bool {\n\treturn (s == \"*\" || s == \"any\")\n}",
"func wildcard(s string) bool {\n\treturn s == \"*\" || s == \"any\"\n}",
"func ContainsWildcard(topic string) bool {\n\treturn strings.Contains(topic, SingleWildCard) || strings.Contains(topic, MultipleWildCard)\n}",
"func (s *selectorManager) IsWildcard() bool {\n\treturn s.key == wildcardSelectorKey\n}",
"func (s StringLabel) Wildcards() bool {\n\treturn false\n}",
"func (k Key) Wildcards() bool {\n\tif len(k) > 0 {\n\t\tfor _, label := range k {\n\t\t\tif !label.Wildcards() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func ContainsLabel(labels []*scm.Label, label string) bool {\n\tfor _, l := range labels {\n\t\tif l != nil && l.Name == label {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (s Subject) HasWildCards() bool {\n\tv := string(s)\n\treturn strings.HasSuffix(v, \".>\") ||\n\t\tstrings.Contains(v, \".*.\") ||\n\t\tstrings.HasSuffix(v, \".*\") ||\n\t\tstrings.HasPrefix(v, \"*.\") ||\n\t\tv == \"*\" ||\n\t\tv == \">\"\n}",
"func (label *Label) Match(other *Label) bool {\n\tif label == other {\n\t\treturn true\n\t}\n\tif !label.wildcard && !other.wildcard {\n\t\treturn label.simpleName == other.simpleName\n\t}\n\tif len(label.names) != len(other.names) {\n\t\treturn false\n\t}\n\tfor i := range label.names {\n\t\tif !label.nameMatch(label.names[i], other.names[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func WildcardMatchSimple(pattern, name string) bool {\n\tif pattern == \"\" {\n\t\treturn name == pattern\n\t}\n\tif pattern == \"*\" {\n\t\treturn true\n\t}\n\tsimple := true // Does only wildcard '*' match.\n\treturn deepMatchRune(name, pattern, simple)\n}",
"func (g *GlobStringMatcher) Contains(value string) bool {\n\treturn g.glob.Contains(value)\n}",
"func (m *LabelMatcher) Match(labels *BackendLabels) bool {\n\tfor _, item := range labels.Labels {\n\t\tif m.backendLabel == item {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (usage Usage) matchesLabel(labels []string) bool {\n\t// if asset has no labels always match\n\tif len(labels) == 0 {\n\t\treturn true\n\t}\n\n\tfor _, validLabel := range usage.Labels {\n\t\tfor _, label := range labels {\n\t\t\tif label == validLabel {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func hostnameMatchesWildcardHostname(hostname, wildcardHostname string) bool {\n\tif !strings.HasSuffix(hostname, strings.TrimPrefix(wildcardHostname, \"*\")) {\n\t\treturn false\n\t}\n\n\twildcardMatch := strings.TrimSuffix(hostname, strings.TrimPrefix(wildcardHostname, \"*\"))\n\treturn len(wildcardMatch) > 0\n}",
"func hasLabel(n []parser.Node, file int, target string) bool {\n\tfor i := range n {\n\t\tswitch tt := n[i].(type) {\n\t\tcase parser.NodeCollection:\n\t\t\tif hasLabel(tt.Children(), file, target) {\n\t\t\t\treturn true\n\t\t\t}\n\n\t\tcase *parser.Label:\n\t\t\tif tt.File() == file && tt.Data == target {\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\t// @_@ These are not the labels you are looking for. @_@\n\t\t}\n\t}\n\n\treturn false\n}",
"func filterWildcards(delta gojsondiff.Delta) bool {\n\tswitch delta.(type) {\n\tcase *gojsondiff.Modified:\n\t\td := delta.(*gojsondiff.Modified)\n\t\tif v, ok := d.NewValue.(string); ok && v == wildcard {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func LabelMatch(pod api.Pod, queryKey, queryValue string) bool {\n\tfor key, value := range pod.Labels {\n\t\tif queryKey == key && queryValue == value {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (config *HTTPConfig) LabelFilter(filter map[string]string) bool {\n\tif len(filter) == 0 {\n\t\treturn false\n\t}\n\tif len(config.Label) == 0 {\n\t\treturn false\n\t}\n\tfor k, v := range filter {\n\t\tif value, ok := config.Label[k]; ok && v == value {\n\t\t\tcontinue\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func match(a, b string) bool {\n\tif wildcard(a) {\n\t\treturn true\n\t}\n\tif wildcard(b) {\n\t\treturn true\n\t}\n\treturn strings.EqualFold(a, b)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Names returns a copy of the names in the label.
|
func (label *Label) Names() []string {
result := make([]string, 0, len(label.names))
result = append(result, label.names...)
return result
}
|
[
"func (ii *InvertedIndex) LabelNames(shard *astmapper.ShardAnnotation) ([]string, error) {\n\tif err := ii.validateShard(shard); err != nil {\n\t\treturn nil, err\n\t}\n\tshards := ii.getShards(shard)\n\tresults := make([][]string, 0, len(shards))\n\tfor i := range shards {\n\t\tshardResult := shards[i].labelNames(nil)\n\t\tresults = append(results, shardResult)\n\t}\n\n\treturn mergeStringSlices(results), nil\n}",
"func onlyLabelNames(labels []github.Label) []string {\n\tlist := make([]string, len(labels))\n\tfor index, label := range labels {\n\t\tlist[index] = label.GetName()\n\t}\n\tsort.Strings(list)\n\treturn list\n}",
"func (o *ContextOptions) GetLabelNames() (labels []string) {\n\tif o.Source != ContextDisabled {\n\t\tlabels = append(labels, \"source\")\n\t}\n\n\tif o.Destination != ContextDisabled {\n\t\tlabels = append(labels, \"destination\")\n\t}\n\n\treturn\n}",
"func (c *CoordinatorClient) LabelNames(\n\treq LabelNamesRequest,\n\theaders Headers,\n) (model.LabelNames, error) {\n\turlPathAndQuery := fmt.Sprintf(\"%s?%s\", route.LabelNamesURL, req.String())\n\tresp, err := c.runQuery(urlPathAndQuery, headers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar parsedResp labelResponse\n\tif err := json.Unmarshal([]byte(resp), &parsedResp); err != nil {\n\t\treturn nil, err\n\t}\n\n\tlabelNames := make(model.LabelNames, 0, len(parsedResp.Data))\n\tfor _, label := range parsedResp.Data {\n\t\tlabelNames = append(labelNames, model.LabelName(label))\n\t}\n\n\treturn labelNames, nil\n}",
"func (l *Labels) NamesAndValues() []string {\n\tif l == nil {\n\t\treturn nil\n\t}\n\n\treturn l.pairs\n}",
"func (h *headIndexReader) LabelNames(matchers ...*labels.Matcher) ([]string, error) {\n\tif h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() {\n\t\treturn []string{}, nil\n\t}\n\n\tif len(matchers) == 0 {\n\t\tlabelNames := h.head.postings.LabelNames()\n\n\t\tsort.Strings(labelNames)\n\t\treturn labelNames, nil\n\t}\n\n\treturn labelNamesWithMatchers(h, matchers...)\n}",
"func TestGetNames_NamesAndLabelsArray(t *testing.T) {\n\tusmcLabel := map[string]string{\"expose.name\": \"usmc1\"}\n\tusmcName := []string{\"testconnector-connector\"}\n\tnameLocal, err := getNames(usmcLabel, usmcName, namespace)\n\tif err != nil || !reflect.DeepEqual(nameLocal, usmcName) {\n\t\tt.Errorf(\"Returned incorrectly names for %v, got: %v, want: %v, error: %v\", usmcName, nameLocal, usmcName, err)\n\t}\n}",
"func (s step) Names() []string {\n\tif s.seq.count == 0 {\n\t\tif s.srvc == \"\" {\n\t\t\treturn []string{}\n\t\t}\n\t\treturn []string{s.srvc}\n\t}\n\n\tnames := make([]string, 0, s.seq.count)\n\tcurr := s.seq.head\n\tfor curr != nil {\n\t\tnames = append(names, curr.Names()...)\n\t\tcurr = curr.next\n\t}\n\n\treturn names\n}",
"func (h *headIndexReader) LabelNamesFor(ids ...uint64) ([]string, error) {\n\tnamesMap := make(map[string]struct{})\n\tfor _, id := range ids {\n\t\tmemSeries := h.head.series.getByID(id)\n\t\tif memSeries == nil {\n\t\t\treturn nil, ErrNotFound\n\t\t}\n\t\tfor _, lbl := range memSeries.lset {\n\t\t\tnamesMap[lbl.Name] = struct{}{}\n\t\t}\n\t}\n\tnames := make([]string, 0, len(namesMap))\n\tfor name := range namesMap {\n\t\tnames = append(names, name)\n\t}\n\tsort.Strings(names)\n\treturn names, nil\n}",
"func (l *Labels) StringSlice() []string {\n\tlabelsString := []string{}\n\n\tfor _, label := range *l {\n\t\tlabelsString = append(labelsString, label.String())\n\t}\n\n\treturn labelsString\n}",
"func (m MapAssigner) Names() []string { return stringset.FromKeys(m).Elements() }",
"func (m *Metric) LabelValues() []string {\n\tvalues := []string{}\n\n\tfor _, v := range m.Labels {\n\t\tvalues = append(values, v)\n\t}\n\n\treturn values\n}",
"func (o RegionDiskOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *RegionDisk) pulumi.StringMapOutput { return v.Labels }).(pulumi.StringMapOutput)\n}",
"func (e naiveLabeler) Label(keys []string) []LabelKey {\n\tlabelKeys := make([]LabelKey, len(keys))\n\tfor i, key := range keys {\n\t\tstr := hex.EncodeToString([]byte(key))\n\t\tlabelKeys[i] = LabelKey{\n\t\t\tKey: str,\n\t\t\tLabels: []string{str},\n\t\t}\n\t}\n\treturn labelKeys\n}",
"func (s *Scope) Names() []string {}",
"func (node *nodeValue) Labels() []string {\n\treturn node.labels\n}",
"func (s *trieStringly) Labels(node interface{}) []interface{} {\n\tn := s.trieNode(node)\n\trst := []interface{}{}\n\tfor _, b := range n.Branches {\n\t\trst = append(rst, b)\n\t}\n\treturn rst\n}",
"func (o ManagedZoneOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *ManagedZone) pulumi.StringMapOutput { return v.Labels }).(pulumi.StringMapOutput)\n}",
"func TestGetDeploymentNamesWithLabels_MultipleExistingLabels(t *testing.T) {\n\tusmcLabel := map[string]string{\"expose.name\": \"usmc1\", \"expose.group\": \"usmc\"}\n\tusmcName := []string{\"testconnector-connector\"}\n\tnameLocal, err := GetDeploymentNamesWithLabels(usmcLabel, namespace)\n\tif err != nil || !reflect.DeepEqual(nameLocal, usmcName) {\n\t\tt.Errorf(\"Returned incorrectly names for %v, got: %v, want: %v, error: %v\", usmcLabel, nameLocal, usmcName, err)\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Match returns true iff the label matches the other label (or vice versa), taking wildcards into account. When matching one label against another, a wildcard matches any name at the same position in the other label.
|
func (label *Label) Match(other *Label) bool {
if label == other {
return true
}
if !label.wildcard && !other.wildcard {
return label.simpleName == other.simpleName
}
if len(label.names) != len(other.names) {
return false
}
for i := range label.names {
if !label.nameMatch(label.names[i], other.names[i]) {
return false
}
}
return true
}
|
[
"func (m *LabelMatcher) Match(labels *BackendLabels) bool {\n\tfor _, item := range labels.Labels {\n\t\tif m.backendLabel == item {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func LabelMatch(pod api.Pod, queryKey, queryValue string) bool {\n\tfor key, value := range pod.Labels {\n\t\tif queryKey == key && queryValue == value {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (b *Bucket) Match(s Selector) bool {\n\tif s == nil {\n\t\treturn false\n\t}\n\n\tfor k, v := range s {\n\t\tvv, ok := b.Labels[k]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\tfound := false\n\t\tfor _, vvor := range strings.Split(v, \"|\") {\n\t\t\tfound = (found || (vvor == vv))\n\t\t}\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (usage Usage) matchesLabel(labels []string) bool {\n\t// if asset has no labels always match\n\tif len(labels) == 0 {\n\t\treturn true\n\t}\n\n\tfor _, validLabel := range usage.Labels {\n\t\tfor _, label := range labels {\n\t\t\tif label == validLabel {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func (m *Matcher) Match(lset model.LabelSet) bool {\n\t// Unset labels are treated as unset labels globally. Thus, if a\n\t// label is not set we retrieve the empty label which is correct\n\t// for the comparison below.\n\tv := lset[m.Name]\n\n\tif m.isRegex {\n\t\treturn m.regex.MatchString(string(v))\n\t}\n\treturn string(v) == m.Value\n}",
"func (w *Workload) LabelsMatch(role, app, env, loc string, labelMap map[string]Label) bool {\n\tif (role == \"*\" || w.GetRole(labelMap).Value == role) &&\n\t\t(app == \"*\" || w.GetApp(labelMap).Value == app) &&\n\t\t(env == \"*\" || w.GetEnv(labelMap).Value == env) &&\n\t\t(loc == \"*\" || w.GetLoc(labelMap).Value == loc) {\n\t\treturn true\n\t}\n\treturn false\n}",
"func LabelsMatch(pod api.Pod, labelQuery *map[string]string) bool {\n\tif labelQuery == nil {\n\t\treturn true\n\t}\n\tfor key, value := range *labelQuery {\n\t\tif !LabelMatch(pod, key, value) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func match(a, b string) bool {\n\tif wildcard(a) {\n\t\treturn true\n\t}\n\tif wildcard(b) {\n\t\treturn true\n\t}\n\treturn strings.EqualFold(a, b)\n}",
"func (p LabelPrefix) matches(l labels.Label) (bool, int) {\n\tif p.Source != \"\" && p.Source != l.Source {\n\t\treturn false, 0\n\t}\n\n\t// If no regular expression is available, fall back to prefix matching\n\tif p.expr == nil {\n\t\treturn strings.HasPrefix(l.Key, p.Prefix), len(p.Prefix)\n\t}\n\n\tres := p.expr.FindStringIndex(l.Key)\n\n\t// No match if regexp was not found\n\tif res == nil {\n\t\treturn false, 0\n\t}\n\n\t// Otherwise match if match was found at start of key\n\treturn res[0] == 0, res[1]\n}",
"func (nin *NotInType) Match(match *Element, labels map[string]string) (bool, error) {\n\tvalues, ok := match.Value.([]interface{})\n\tif !ok {\n\t\treturn false, fmt.Errorf(\"invalid nin oper with value: %v, should be array string\", match.Value)\n\t}\n\n\tto, exists := labels[match.Key]\n\tif !exists {\n\t\treturn false, nil\n\t}\n\n\tfor _, val := range values {\n\t\tif val == to {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\treturn true, nil\n}",
"func (m LabelSelector) Matches(s LabelSet) bool {\n\tif matchSelector(m.Exclude.Labels, s.Labels, false) {\n\t\treturn false // At least one excluded label is matched.\n\t} else if !matchSelector(m.Include.Labels, s.Labels, true) {\n\t\treturn false // Not every included label is matched.\n\t}\n\treturn true\n}",
"func (s *KubeflowLabels) matchLabels(parameters map[string]string) []string {\n\tregex := []*regexp.Regexp{}\n\n\tfor prefix, value := range parameters {\n\t\tif strings.TrimSpace(value) == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\texpr := fmt.Sprintf(\"%v.*/.*%v.*\", strings.ToLower(prefix), strings.ToLower(value))\n\t\tm, err := regexp.Compile(expr)\n\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Could not compile the regex: %v ; error %v\", expr, err)\n\t\t\tcontinue\n\t\t}\n\t\tregex = append(regex, m)\n\t}\n\n\tlabels := []string{}\n\tfor label, _ := range s.Labels {\n\t\tfor _, p := range regex {\n\t\t\ts := strings.ToLower(label)\n\t\t\tif match := p.MatchString(s); match {\n\t\t\t\tlabels = append(labels, label)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn labels\n}",
"func (s *LabelSelector) Matches(labels map[string]string) bool {\n\tfor label, expectedValue := range s.Selector {\n\t\tvalue, ok := labels[label]\n\t\tif !ok || expectedValue != value {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (m SuffixMatcher) Match(chain []string) bool {\n\treturn EqualSlicesFoldSuffix(chain, m)\n}",
"func (b *bucketBlock) matchRelabelLabels(matchers []*labels.Matcher) bool {\n\tfor _, m := range matchers {\n\t\tif !m.Matches(b.relabelLabels.Get(m.Name)) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (lt *LessThanType) Match(match *Element, labels map[string]string) (bool, error) {\n\tif !isNumeric(match.Value) {\n\t\treturn false, fmt.Errorf(\"invalid lt oper with value: %v, should be number\", match.Value)\n\t}\n\n\tfrom := mustFloat64(match.Value)\n\n\tcompare, exists := labels[match.Key]\n\tif !exists {\n\t\treturn false, nil\n\t}\n\n\tto, err := strconv.ParseFloat(compare, 32)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"parse lt oper's target label value: %s to float failed, err: %v\", compare, err)\n\t}\n\n\treturn to < from, nil\n}",
"func (n *NotStartsWithOp) Match(match interface{}, with interface{}) (bool, error) {\n\tm, ok := match.(string)\n\tif !ok {\n\t\treturn false, errors.New(\"invalid parameter\")\n\t}\n\n\tw, ok := with.(string)\n\tif !ok {\n\t\treturn false, errors.New(\"invalid parameter\")\n\t}\n\n\treturn !strings.HasPrefix(m, w), nil\n}",
"func (m PrefixMatcher) Match(chain []string) bool {\n\treturn EqualSlicesFoldPrefix(chain, m)\n}",
"func (w *WebhookConstraintMatcher) Match(\n\tr admissionregistrationv1beta1.RuleWithOperations,\n\tobjLabelSelector *metav1.LabelSelector,\n\tnamespaceLabelSelector *metav1.LabelSelector,\n) bool {\n\tvar (\n\t\tobjLabels = w.ObjectLabels\n\t\tnsLabels = w.NamespaceLabels\n\t)\n\n\tif objLabels == nil {\n\t\tobjLabels = labels.Set{}\n\t}\n\n\tif nsLabels == nil {\n\t\tnsLabels = labels.Set{}\n\t}\n\n\tnsSelector, err := defaultEmptySelector(namespaceLabelSelector)\n\tif err != nil {\n\t\t// this should really not happen\n\t\treturn true\n\t}\n\n\tobjSelector, err := defaultEmptySelector(objLabelSelector)\n\tif err != nil {\n\t\t// this should really not happen\n\t\treturn true\n\t}\n\n\tmatchObj := objSelector.Empty() || objSelector.Matches(objLabels)\n\tmatchNS := nsSelector.Empty() || nsSelector.Matches(nsLabels)\n\n\trm := ruleMatcher{rule: r, gvr: w.GVR, subresource: w.Subresource}\n\tif !w.ClusterScoped {\n\t\trm.namespace = \"dummy\"\n\t}\n\n\treturn matchObj && (w.ClusterScoped || matchNS) && rm.Matches()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
LoadPackages loads the available packages in the user's folder
|
func LoadPackages(systemPackageFolder, userPackageFolder string) {
availablePackages = nil
packagesDriversFileInfo = nil
unzipPackages(systemPackageFolder, userPackageFolder)
_, dirErr := os.Stat(systemPackageFolder)
if os.IsNotExist(dirErr) {
return
}
loadAvailablePackages(systemPackageFolder)
}
|
[
"func loadPackages(mod *Module) ([]*Package, error) {\n\tattr := attr{\n\t\tDir: mod.Dir,\n\t}\n\targv := []string{\"-json\", \"./...\"}\n\tstdout, err := invokeGo(\"list\", argv, &attr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpkglist, err := decode(stdout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn pkglist, nil\n}",
"func LoadPackages(names ...string) (a []*Package, err error) {\n\tdebugln(\"LoadPackages\", names)\n\tif len(names) == 0 {\n\t\treturn nil, nil\n\t}\n\tfor _, i := range importPaths(names) {\n\t\tp, err := listPackage(i)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ta = append(a, p)\n\t}\n\treturn a, nil\n}",
"func LoadInstalledPackages(path string) PackageList {\n\tpackages := make(PackageList)\n\t/* #nosec G304 */\n\treader, err := os.Open(path)\n\tif err != nil {\n\t\tfmt.Printf(\"File %s does not exists. \\nOnly DEBIAN based distributions are supported.\\n\", path)\n\t\tos.Exit(1)\n\t}\n\n\tscanner := bufio.NewScanner(reader)\n\tpkgName := \"\"\n\tpkgVersion := \"\"\n\tpkgInstalled := false\n\tfor scanner.Scan() {\n\t\trow := scanner.Text()\n\t\titems := strings.Split(row, \":\")\n\t\tkey := items[0]\n\n\t\tif len(items) > 1 {\n\t\t\tvalue := strings.Trim(items[1], \" \")\n\t\t\tif key == \"Package\" {\n\t\t\t\tpkgName = value\n\t\t\t\tpkgInstalled = false\n\t\t\t} else if key == \"Version\" && pkgInstalled {\n\t\t\t\tpkgVersion = value\n\t\t\t\tif len(items) > 2 {\n\t\t\t\t\tpkgVersion = items[1] + \":\" + items[2]\n\t\t\t\t}\n\n\t\t\t\tpackages[pkgName] = pkgVersion\n\t\t\t} else if key == \"Status\" {\n\t\t\t\tif strings.Contains(value, \"installed\") {\n\t\t\t\t\tpkgInstalled = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn packages\n}",
"func (p *dummyPersistency) LoadPackages(packageList *[]PackageInfo) bool {\n\treturn true\n}",
"func (s *Module) Load(filterFns ...func(d fs.DirEntry) bool) error {\n\tif len(filterFns) == 0 {\n\t\tfilterFns = append(filterFns, DefaultFilterGoPackages)\n\t}\n\n\tpaths, fileSources, err := s.loadFiles(filterFns...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif r := s.loadPackages(s.dir, paths); r != nil {\n\t\treturn r\n\t}\n\n\ts.fillRawFiles(fileSources)\n\n\tif r := s.processPackages(); r != nil {\n\t\treturn r\n\t}\n\n\ts.buildCallGraph()\n\n\treturn nil\n}",
"func loadPackage(dir string) (pkg pkg, err error) {\n\tpkg.files = map[string][]byte{}\n\tpkg.f = token.NewFileSet()\n\tpackages, err := parser.ParseDir(pkg.f, dir, func(f os.FileInfo) bool {\n\t\t// exclude test files\n\t\treturn !strings.HasSuffix(f.Name(), \"_test.go\")\n\t}, 0)\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(packages) < 1 {\n\t\terr = fmt.Errorf(\"didn't find any packages in '%s'. Length: %d\", dir, len(packages))\n\t\treturn\n\t}\n\tif len(packages) > 1 {\n\t\terr = fmt.Errorf(\"found more than one package in '%s'. Length: %d\", dir, len(packages))\n\t\treturn\n\t}\n\tfor pn := range packages {\n\t\tp := packages[pn]\n\t\t// trim any non-exported nodes\n\t\tif exp := ast.PackageExports(p); !exp {\n\t\t\terr = fmt.Errorf(\"package '%s' doesn't contain any exports\", pn)\n\t\t\treturn\n\t\t}\n\t\tpkg.p = p\n\t\treturn\n\t}\n\t// shouldn't ever get here...\n\tpanic(\"failed to return package\")\n}",
"func (r *resolver) loadPackages(ctx context.Context, patterns []string, findPackage func(ctx context.Context, path string, m module.Version) (versionOk bool)) {\n\topts := modload.PackageOpts{\n\t\tTags: imports.AnyTags(),\n\t\tVendorModulesInGOROOTSrc: true,\n\t\tLoadTests: *getT,\n\t\tAssumeRootsImported: true, // After 'go get foo', imports of foo should build.\n\t\tSilencePackageErrors: true, // May be fixed by subsequent upgrades or downgrades.\n\t}\n\n\topts.AllowPackage = func(ctx context.Context, path string, m module.Version) error {\n\t\tif m.Path == \"\" || m.Version == \"\" {\n\t\t\t// Packages in the standard library and main modules are already at their\n\t\t\t// latest (and only) available versions.\n\t\t\treturn nil\n\t\t}\n\t\tif ok := findPackage(ctx, path, m); !ok {\n\t\t\treturn errVersionChange\n\t\t}\n\t\treturn nil\n\t}\n\n\t_, pkgs := modload.LoadPackages(ctx, opts, patterns...)\n\tfor _, path := range pkgs {\n\t\tconst (\n\t\t\tparentPath = \"\"\n\t\t\tparentIsStd = false\n\t\t)\n\t\t_, _, err := modload.Lookup(parentPath, parentIsStd, path)\n\t\tif err == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif errors.Is(err, errVersionChange) {\n\t\t\t// We already added candidates during loading.\n\t\t\tcontinue\n\t\t}\n\n\t\tvar (\n\t\t\timportMissing *modload.ImportMissingError\n\t\t\tambiguous *modload.AmbiguousImportError\n\t\t)\n\t\tif !errors.As(err, &importMissing) && !errors.As(err, &ambiguous) {\n\t\t\t// The package, which is a dependency of something we care about, has some\n\t\t\t// problem that we can't resolve with a version change.\n\t\t\t// Leave the error for the final LoadPackages call.\n\t\t\tcontinue\n\t\t}\n\n\t\tpath := path\n\t\tr.work.Add(func() {\n\t\t\tfindPackage(ctx, path, module.Version{})\n\t\t})\n\t}\n\t<-r.work.Idle()\n}",
"func Load(ctx context.Context, pattern string) (*packages.Package, error) {\n\tpkgs, err := loadPackages(ctx, \"\", pattern, os.Environ())\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"load packages\")\n\t}\n\n\tfor _, pkg := range pkgs {\n\t\tif pkg.ID == pattern {\n\t\t\treturn pkg, nil\n\t\t}\n\t}\n\n\treturn nil, errors.Errorf(\"package %s not found\", pattern)\n}",
"func Load(pkg string) (*Package, error) {\n\tpackageMutex.Lock()\n\tdefer packageMutex.Unlock()\n\tp := packages[pkg]\n\tif p != nil {\n\t\treturn p, nil\n\t}\n\tp = &Package{Path: pkg}\n\tif err := p.load(); err != nil {\n\t\treturn nil, err\n\t}\n\tpackages[pkg] = p\n\treturn p, nil\n}",
"func (g *Golang) DiscoverPackages() error {\n\tif err := g.validateEnvVars(); err != nil {\n\t\treturn errors.Wrap(err, \"invalid environment variables\")\n\t}\n\n\tprojectPath, err := g.absProjectPath()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting project path as an absolute path\")\n\t}\n\n\tif err := filepath.Walk(projectPath, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfileName := filepath.Base(info.Name())\n\t\tif fileName == golangVendorDir {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tif !strings.Contains(fileName, golangTestFileSuffix) {\n\t\t\treturn nil\n\t\t}\n\t\tdir := filepath.Dir(path)\n\t\tdir, err = filepath.Rel(projectPath, dir)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"making package path '%s' relative to root package\", path)\n\t\t}\n\t\t// If package has already been added, skip adding it.\n\t\tif _, err = g.GetPackageByPath(dir); err == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tpkg := GolangPackage{\n\t\t\tPath: dir,\n\t\t}\n\n\t\tg.Packages = append(g.Packages, pkg)\n\n\t\treturn nil\n\t}); err != nil {\n\t\treturn errors.Wrapf(err, \"walking the file system tree starting from path '%s'\", projectPath)\n\t}\n\n\treturn nil\n}",
"func allPackages(deps []*cfg.Dependency, res *dependency.Resolver, addTest bool) ([]string, error) {\n\tif len(deps) == 0 {\n\t\treturn []string{}, nil\n\t}\n\n\tvdir, err := gpath.Vendor()\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\tvdir += string(os.PathSeparator)\n\tll, err := res.ResolveAll(deps, addTest)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\n\tfor i := 0; i < len(ll); i++ {\n\t\tll[i] = strings.TrimPrefix(ll[i], vdir)\n\t}\n\treturn ll, nil\n}",
"func (ws *workspace) getPackages(packagePattern, version string) ([]*packages.Package, error) {\n\tif packagePattern == \"\" {\n\t\treturn nil, fmt.Errorf(\"package path is empty\")\n\t}\n\n\tpkgKey := packagePattern\n\tif version != \"\" {\n\t\tpkgKey += \"@\" + version\n\t}\n\n\t// if we've already processed this pattern, reuse it\n\tif cached := ws.cachedPackages(pkgKey); len(cached) > 0 {\n\t\treturn cached, nil\n\t}\n\n\t// as of Go 1.16, running \"go get\" is always required for module tooling to work\n\t// properly (https://golang.org/issue/40728) - only need to do it once per workspace\n\tws.mu.Lock()\n\tdefer ws.mu.Unlock()\n\tif !ws.alreadyGotModule(packagePattern) {\n\t\tcmd := exec.Command(\"go\", \"get\", pkgKey)\n\t\tcmd.Dir = ws.dir\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\t\terr := cmd.Run()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"exec %v: %v\", cmd.Args, err)\n\t\t}\n\n\t\t// remember that we 'go got' this package's module, so we don't have to do it again\n\t\tpkgInfo, err := runGoList(ws.dir, packagePattern)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"listing package to get module: %v\", err)\n\t\t}\n\t\tws.goGets[pkgInfo.Module.Path] = struct{}{}\n\t}\n\n\t// finally, load and parse the package\n\tcfg := &packages.Config{\n\t\tDir: ws.dir,\n\t\tMode: packages.NeedSyntax |\n\t\t\tpackages.NeedImports |\n\t\t\tpackages.NeedDeps |\n\t\t\tpackages.NeedTypes |\n\t\t\tpackages.NeedModule |\n\t\t\tpackages.NeedTypesInfo,\n\n\t\t// on Linux, leaving CGO_ENABLED to the default value of 1 would\n\t\t// cause an error: \"could not import C (no metadata for C)\", but\n\t\t// only on Linux... on my Mac it worked fine either way (ca. 2020)\n\t\tEnv: append(os.Environ(), \"CGO_ENABLED=0\"),\n\t}\n\tpkgs, err := packages.Load(cfg, packagePattern)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"packages.Load: %v\", err)\n\t}\n\n\t// generate and cache the list of top-level packages from the single input pattern;\n\t// this allows us to recall the parsed packages later without recomputing it all\n\tvar pkgNames []string\n\tfor _, pkg := range pkgs {\n\t\tpkgNames = append(pkgNames, packageKey(pkg))\n\t}\n\t// TODO: these should probably expire, esp. if using 'latest' or a branch name\n\tws.packagePatterns[pkgKey] = pkgNames\n\n\t// visit all packages (including imported ones) to cache them for future use,\n\t// (shaves a *ton* of time off future processing; core Caddy package goes from\n\t// taking 5 minutes to 5 seconds); and also to see if there are any errors in\n\t// the import graph\n\tpackages.Visit(pkgs, nil, func(pkg *packages.Package) {\n\t\t// cache parsed package for future use; key by both the versioned and\n\t\t// non-versioned form of the package key, since future gets might not\n\t\t// have or know a version (not perfect, but no harm yet?)\n\t\t// TODO: make this cache ephemeral (workspace-scoped), there's just not enough memory for all the versions.\n\t\tws.parsedPackages[pkg.ID] = pkg\n\t\tws.parsedPackages[packageKey(pkg)] = pkg\n\n\t\t// check for errors\n\t\tfor i, e := range pkg.Errors {\n\t\t\tvar prefix string\n\t\t\tif i > 0 {\n\t\t\t\tprefix = \"\\n\"\n\t\t\t}\n\t\t\tlog.Printf(\"[WARNING] Load '%s': found error while visiting package on import graph %s: %v - skipping\",\n\t\t\t\tpackagePattern, prefix, e)\n\t\t}\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn pkgs, nil\n}",
"func (i Import) LoadPackage() (*Package, error) {\n\tif i.Folder == \"\" {\n\t\treturn nil, fmt.Errorf(\"the package '%s' is not resolved\", i.Path)\n\t}\n\tif i.pkg == nil {\n\t\tp, err := parsePackageFullPath(i.Path, i.Folder)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ti.pkg = p\n\t}\n\treturn i.pkg, nil\n\n}",
"func loadPackageDynamic(parentDir string, pkgPath string) (*load.Package, error) {\n\t/*if len(cfg.Gopath) < 10 {\n\t\tlog.Debugf(\"Adding path=%q to GOPATH\", filepath.Dir(parentDir))\n\t\tcfg.Gopath = append(cfg.Gopath, filepath.Dir(parentDir)) // GOROOTsrc = parentDir\n\t}*/\n\t//cfg.GOROOTsrc = parentDir\n\t//cfg.BuildContext.GOROOT = filepath.Dir(parentDir)\n\tcfg.BuildContext.GOPATH = filepath.Dir(parentDir)\n\n\t//cfg.Gopath = filepath.SplitList(cfg.BuildContext.GOPATH + \":\" + parentDir)\n\t//defer func() { cfg.GOROOTsrc = GOROOTsrcBackup }()\n\n\tlps := load.Packages([]string{pkgPath})\n\tfor _, lp := range lps {\n\t\tif lp.Error != nil {\n\t\t\treturn lp, errors.New(lp.Error.Error())\n\t\t}\n\t\treturn lp, nil\n\t}\n\treturn nil, fmt.Errorf(\"no pkg found\")\n}",
"func Load() {\n\tfor i, path := range i18nConfigPath {\n\t\ti18nLoadData := parseI18nConfig(path)\n\t\tif i > 0 {\n\t\t\ti18nLoadData = combineLanguageConfig(i18nCachedData[0], i18nLoadData)\n\t\t}\n\t\ti18nCachedData = append(i18nCachedData, i18nLoadData)\n\t\tzap.S().Infof(\"[i18n] Load %v success\", path)\n\t}\n}",
"func loadPackagesFromDir(baseDir string) (map[string]*packages.Package, error) {\n\toutPkgs := make(map[string]*packages.Package)\n\n\tif err := filepath.WalkDir(baseDir, func(path string, dirEntry fs.DirEntry, _ error) error {\n\t\tif !dirEntry.IsDir() {\n\t\t\t// We only care about directories.\n\t\t\treturn nil\n\t\t}\n\n\t\tif skipDirs.Has(filepath.Base(path)) {\n\t\t\t// This directory and any subdirectories should be skipped.\n\t\t\treturn filepath.SkipDir\n\t\t}\n\n\t\tcfg := &packages.Config{\n\t\t\tDir: path,\n\t\t\tLogf: klog.V(4).Infof,\n\t\t}\n\n\t\tpkgs, err := packages.Load(cfg)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not load package at path %s: %w\", path, err)\n\t\t}\n\n\t\tif len(pkgs) != 1 {\n\t\t\treturn fmt.Errorf(\"unexpected number of go packages found for path %s: %d\", path, len(pkgs))\n\t\t}\n\n\t\tif len(pkgs[0].GoFiles) == 0 {\n\t\t\t// No go files means there's nothing parse, skip this directory but continue to subfolders.\n\t\t\treturn nil\n\t\t}\n\n\t\toutPkgs[path] = pkgs[0]\n\n\t\tklog.V(3).Infof(\"Found directory: %s and package: %+v\", path, pkgs[0])\n\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not walk directory %s: %w\", baseDir, err)\n\t}\n\n\treturn outPkgs, nil\n}",
"func Load() {\n\tdir := \"/usr/lib/sackson-server\"\n\tfiles, _ := ioutil.ReadDir(dir)\n\tif len(files) == 0 {\n\t\tlog.Printf(\"No files found in %s\\n\", dir)\n\t\treturn\n\t}\n\n\tfor _, f := range files {\n\t\tplug, err := plugin.Open(dir + \"/\" + f.Name())\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tname := driverName(f)\n\t\tdriver, err := plug.Lookup(\"New\")\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tos.Exit(1)\n\t\t} else {\n\t\t\tdrivers[name] = driver\n\t\t}\n\t\tlog.Printf(\"Loaded driver \\\"%s\\\"\\n\", name)\n\t}\n}",
"func importPaths(ctx Context, cwd string, args []string) []string {\n\targs = importPathsNoDotExpansion(ctx, cwd, args)\n\tvar out []string\n\tfor _, a := range args {\n\t\tif strings.Contains(a, \"...\") {\n\t\t\tpkgs, err := ctx.AllPackages(a)\n\t\t\tif err != nil {\n\t\t\t\tfatalf(\"could not load all packages: %v\", err)\n\t\t\t}\n\t\t\tout = append(out, pkgs...)\n\t\t\tcontinue\n\t\t}\n\t\tout = append(out, a)\n\t}\n\treturn out\n}",
"func (bp *Blueprint) LoadTemplates() error {\n\ttemplates, err := template.ParseGlob(bp.TemplateDir() + \"/*\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tbp.templates = templates\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetAvailablePackages returns a list of available packages
|
func GetAvailablePackages() []model.Package {
return availablePackages
}
|
[
"func AllAvailableByPackageName(store *storage.Service) (map[string]*Package, error) {\n\tallAvailable, err := AllAvailable(store)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to retrieve all available packages: %s\", err)\n\t}\n\tret := map[string]*Package{}\n\tfor _, ps := range allAvailable {\n\t\tfor _, p := range ps {\n\t\t\tret[p.Name] = p\n\t\t}\n\t}\n\treturn ret, nil\n}",
"func (a Plugin) GetMissingPackages() ([]string, error) {\n\tcmd := \"mas list | awk '{print $1;}'\"\n\tstdout, err := a.Commander(\"bash\", \"-c\", cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogrus.WithField(\"output\", string(stdout)).Debug(\"mas stdout\")\n\tinstalledApps := strings.Split(string(stdout), \"\\n\")\n\tinstalledMap := map[string]bool{}\n\tfor _, p := range installedApps {\n\t\tinstalledMap[p] = true\n\t}\n\n\tmissingApps := []string{}\n\tfor _, app := range a.Apps {\n\t\tif ok := installedMap[app.ID]; !ok {\n\t\t\tmissingApps = append(missingApps, app.ID)\n\t\t}\n\t}\n\n\treturn missingApps, nil\n}",
"func AllAvailable(store *storage.Service) (map[string][]*Package, error) {\n\treq := store.Objects.List(bucketName).Prefix(\"debs\")\n\tret := map[string][]*Package{}\n\tfor {\n\t\tobjs, err := req.Do()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to list debian packages in Google Storage: %s\", err)\n\t\t}\n\t\tfor _, o := range objs.Items {\n\t\t\tkey := safeGet(o.Metadata, \"appname\", \"\")\n\t\t\tif key == \"\" {\n\t\t\t\tsklog.Errorf(\"Debian package without proper metadata: %s\", o.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tp := &Package{\n\t\t\t\tName: o.Name[5:], // Strip of debs/ from the beginning.\n\t\t\t\tHash: safeGet(o.Metadata, \"hash\", \"\"),\n\t\t\t\tUserID: safeGet(o.Metadata, \"userid\", \"\"),\n\t\t\t\tBuilt: safeGetTime(o.Metadata, \"datetime\"),\n\t\t\t\tDirty: safeGetBool(o.Metadata, \"dirty\"),\n\t\t\t\tNote: safeGet(o.Metadata, \"note\", \"\"),\n\t\t\t\tServices: safeGetStringSlice(o.Metadata, \"services\"),\n\t\t\t}\n\t\t\tif _, ok := ret[key]; !ok {\n\t\t\t\tret[key] = []*Package{}\n\t\t\t}\n\t\t\tret[key] = append(ret[key], p)\n\t\t}\n\t\tif objs.NextPageToken == \"\" {\n\t\t\tbreak\n\t\t}\n\t\treq.PageToken(objs.NextPageToken)\n\t}\n\tfor _, value := range ret {\n\t\tsort.Sort(PackageSlice(value))\n\t}\n\treturn ret, nil\n}",
"func AllAvailableApp(store *storage.Service, appName string) ([]*Package, error) {\n\tprefix := fmt.Sprintf(\"debs/%s/\", appName)\n\treq := store.Objects.List(bucketName).Prefix(prefix)\n\tret := []*Package{}\n\tfor {\n\t\tobjs, err := req.Do()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to list debian packages in Google Storage: %s\", err)\n\t\t}\n\t\tfor _, o := range objs.Items {\n\t\t\tkey := safeGet(o.Metadata, \"appname\", \"\")\n\t\t\tif key == \"\" {\n\t\t\t\tsklog.Errorf(\"Debian package without proper metadata: %s\", o.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tp := &Package{\n\t\t\t\tName: o.Name[len(prefix):], // Strip of debs/ from the beginning.\n\t\t\t\tHash: safeGet(o.Metadata, \"hash\", \"\"),\n\t\t\t\tUserID: safeGet(o.Metadata, \"userid\", \"\"),\n\t\t\t\tBuilt: safeGetTime(o.Metadata, \"datetime\"),\n\t\t\t\tDirty: safeGetBool(o.Metadata, \"dirty\"),\n\t\t\t\tNote: safeGet(o.Metadata, \"note\", \"\"),\n\t\t\t\tServices: safeGetStringSlice(o.Metadata, \"services\"),\n\t\t\t}\n\t\t\tret = append(ret, p)\n\t\t}\n\t\tif objs.NextPageToken == \"\" {\n\t\t\tbreak\n\t\t}\n\t\treq.PageToken(objs.NextPageToken)\n\t}\n\tsort.Sort(PackageSlice(ret))\n\treturn ret, nil\n}",
"func (client *ClientImpl) GetPackages(ctx context.Context, args GetPackagesArgs) (*[]Package, error) {\n\trouteValues := make(map[string]string)\n\tif args.Project != nil && *args.Project != \"\" {\n\t\trouteValues[\"project\"] = *args.Project\n\t}\n\tif args.FeedId == nil || *args.FeedId == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.FeedId\"}\n\t}\n\trouteValues[\"feedId\"] = *args.FeedId\n\n\tqueryParams := url.Values{}\n\tif args.ProtocolType != nil {\n\t\tqueryParams.Add(\"protocolType\", *args.ProtocolType)\n\t}\n\tif args.PackageNameQuery != nil {\n\t\tqueryParams.Add(\"packageNameQuery\", *args.PackageNameQuery)\n\t}\n\tif args.NormalizedPackageName != nil {\n\t\tqueryParams.Add(\"normalizedPackageName\", *args.NormalizedPackageName)\n\t}\n\tif args.IncludeUrls != nil {\n\t\tqueryParams.Add(\"includeUrls\", strconv.FormatBool(*args.IncludeUrls))\n\t}\n\tif args.IncludeAllVersions != nil {\n\t\tqueryParams.Add(\"includeAllVersions\", strconv.FormatBool(*args.IncludeAllVersions))\n\t}\n\tif args.IsListed != nil {\n\t\tqueryParams.Add(\"isListed\", strconv.FormatBool(*args.IsListed))\n\t}\n\tif args.GetTopPackageVersions != nil {\n\t\tqueryParams.Add(\"getTopPackageVersions\", strconv.FormatBool(*args.GetTopPackageVersions))\n\t}\n\tif args.IsRelease != nil {\n\t\tqueryParams.Add(\"isRelease\", strconv.FormatBool(*args.IsRelease))\n\t}\n\tif args.IncludeDescription != nil {\n\t\tqueryParams.Add(\"includeDescription\", strconv.FormatBool(*args.IncludeDescription))\n\t}\n\tif args.Top != nil {\n\t\tqueryParams.Add(\"$top\", strconv.Itoa(*args.Top))\n\t}\n\tif args.Skip != nil {\n\t\tqueryParams.Add(\"$skip\", strconv.Itoa(*args.Skip))\n\t}\n\tif args.IncludeDeleted != nil {\n\t\tqueryParams.Add(\"includeDeleted\", strconv.FormatBool(*args.IncludeDeleted))\n\t}\n\tif args.IsCached != nil {\n\t\tqueryParams.Add(\"isCached\", strconv.FormatBool(*args.IsCached))\n\t}\n\tif args.DirectUpstreamId != nil {\n\t\tqueryParams.Add(\"directUpstreamId\", (*args.DirectUpstreamId).String())\n\t}\n\tlocationId, _ := uuid.Parse(\"7a20d846-c929-4acc-9ea2-0d5a7df1b197\")\n\tresp, err := client.Client.Send(ctx, http.MethodGet, locationId, \"7.1-preview.1\", routeValues, queryParams, nil, \"\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue []Package\n\terr = client.Client.UnmarshalCollectionBody(resp, &responseValue)\n\treturn &responseValue, err\n}",
"func (provider BindataBuildpacksProvider) Available() []string {\n\tm := map[string]struct{}{}\n\tfor _, name := range data.AssetNames() {\n\t\tname = strings.Split(name, \"/\")[0]\n\t\tif _, ok := m[name]; !ok {\n\t\t\tm[name] = struct{}{}\n\t\t}\n\t}\n\tnames := []string{}\n\tfor name, _ := range m {\n\t\tnames = append(names, name)\n\t}\n\tsort.Strings(names)\n\treturn names\n}",
"func (p Plugin) GetMissingPackages() ([]string, error) {\n\tmissingModules := []string{}\n\tstdout, err := p.Commander(juliaExe, \"-e\", \"import Pkg;Pkg.status()\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogrus.WithField(\"output\", string(stdout)).Debug(\"julia stdout\")\n\tinstalledFormulae := strings.Split(strings.Trim(string(stdout), \"\\n\"), \"\\n\")[1:]\n\tlogrus.WithField(\"output\", installedFormulae).Debug(\"julia installed modules\")\n\tinstalledMap := map[string]bool{}\n\tfor _, p := range installedFormulae {\n\t\tlogrus.WithField(\"output\", p).Debug(\"processing julia installed module\")\n\t\twords := strings.Fields(p)\n\t\tinstalledMap[words[1]] = true\n\t}\n\n\tfor _, module := range p.Modules {\n\t\tif ok := installedMap[module]; !ok {\n\t\t\tmissingModules = append(missingModules, module)\n\t\t}\n\t}\n\n\treturn missingModules, nil\n}",
"func ListAptPackages() ([]string, error) {\n\tcmd := exec.Command(\"apt\", \"list\")\n\tstdout, err := cmd.Output()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toutput := strings.Split(string(stdout), \"\\n\")\n\tinstalled := []string{}\n\n\tfor ith, val := range output {\n\t\tif ith > 0 {\n\t\t\tparts := strings.Split(val, \"/\")\n\t\t\tinstalled = append(installed, parts[0])\n\t\t}\n\t}\n\n\treturn installed, nil\n}",
"func ListPackages() {\n\tappdb.View(func(tx *bolt.Tx) error {\n\t\tc := tx.Bucket([]byte(bucketName)).Cursor()\n\t\tfor k, v := c.First(); k != nil; k, v = c.Next() {\n\t\t\tlog.LogStd(fmt.Sprintf(\"key=%s\", k), true)\n\t\t\tlog.LogStd(fmt.Sprintf(\"value=%s\\n\", pretty.Formatter(v)), true)\n\t\t}\n\t\treturn nil\n\t})\n}",
"func (c *client) ListPackages(packageName, namespace string) (*kapppkg.PackageList, error) {\n\tvar selectors []crtclient.ListOption\n\tpackageVersionList := &kapppkg.PackageList{}\n\n\tif packageName != \"\" {\n\t\tselectors = []crtclient.ListOption{\n\t\t\tcrtclient.MatchingFields(map[string]string{\"spec.refName\": packageName}),\n\t\t\tcrtclient.InNamespace(namespace),\n\t\t}\n\t}\n\n\tif err := c.client.List(context.Background(), packageVersionList, selectors...); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn packageVersionList, nil\n}",
"func ListPackages(ctx *context.Context) {\n\tshared_user.PrepareContextForProfileBigAvatar(ctx)\n\tpage := ctx.FormInt(\"page\")\n\tif page <= 1 {\n\t\tpage = 1\n\t}\n\tquery := ctx.FormTrim(\"q\")\n\tpackageType := ctx.FormTrim(\"type\")\n\n\tpvs, total, err := packages_model.SearchLatestVersions(ctx, &packages_model.PackageSearchOptions{\n\t\tPaginator: &db.ListOptions{\n\t\t\tPageSize: setting.UI.PackagesPagingNum,\n\t\t\tPage: page,\n\t\t},\n\t\tOwnerID: ctx.ContextUser.ID,\n\t\tType: packages_model.Type(packageType),\n\t\tName: packages_model.SearchValue{Value: query},\n\t\tIsInternal: util.OptionalBoolFalse,\n\t})\n\tif err != nil {\n\t\tctx.ServerError(\"SearchLatestVersions\", err)\n\t\treturn\n\t}\n\n\tpds, err := packages_model.GetPackageDescriptors(ctx, pvs)\n\tif err != nil {\n\t\tctx.ServerError(\"GetPackageDescriptors\", err)\n\t\treturn\n\t}\n\n\trepositoryAccessMap := make(map[int64]bool)\n\tfor _, pd := range pds {\n\t\tif pd.Repository == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif _, has := repositoryAccessMap[pd.Repository.ID]; has {\n\t\t\tcontinue\n\t\t}\n\n\t\tpermission, err := access_model.GetUserRepoPermission(ctx, pd.Repository, ctx.Doer)\n\t\tif err != nil {\n\t\t\tctx.ServerError(\"GetUserRepoPermission\", err)\n\t\t\treturn\n\t\t}\n\t\trepositoryAccessMap[pd.Repository.ID] = permission.HasAccess()\n\t}\n\n\thasPackages, err := packages_model.HasOwnerPackages(ctx, ctx.ContextUser.ID)\n\tif err != nil {\n\t\tctx.ServerError(\"HasOwnerPackages\", err)\n\t\treturn\n\t}\n\n\tshared_user.RenderUserHeader(ctx)\n\n\tctx.Data[\"Title\"] = ctx.Tr(\"packages.title\")\n\tctx.Data[\"IsPackagesPage\"] = true\n\tctx.Data[\"Query\"] = query\n\tctx.Data[\"PackageType\"] = packageType\n\tctx.Data[\"AvailableTypes\"] = packages_model.TypeList\n\tctx.Data[\"HasPackages\"] = hasPackages\n\tctx.Data[\"PackageDescriptors\"] = pds\n\tctx.Data[\"Total\"] = total\n\tctx.Data[\"RepositoryAccessMap\"] = repositoryAccessMap\n\n\terr = shared_user.LoadHeaderCount(ctx)\n\tif err != nil {\n\t\tctx.ServerError(\"LoadHeaderCount\", err)\n\t\treturn\n\t}\n\n\t// TODO: context/org -> HandleOrgAssignment() can not be used\n\tif ctx.ContextUser.IsOrganization() {\n\t\torg := org_model.OrgFromUser(ctx.ContextUser)\n\t\tctx.Data[\"Org\"] = org\n\t\tctx.Data[\"OrgLink\"] = ctx.ContextUser.OrganisationLink()\n\n\t\tif ctx.Doer != nil {\n\t\t\tctx.Data[\"IsOrganizationMember\"], _ = org_model.IsOrganizationMember(ctx, org.ID, ctx.Doer.ID)\n\t\t\tctx.Data[\"IsOrganizationOwner\"], _ = org_model.IsOrganizationOwner(ctx, org.ID, ctx.Doer.ID)\n\t\t} else {\n\t\t\tctx.Data[\"IsOrganizationMember\"] = false\n\t\t\tctx.Data[\"IsOrganizationOwner\"] = false\n\t\t}\n\t}\n\n\tpager := context.NewPagination(int(total), setting.UI.PackagesPagingNum, page, 5)\n\tpager.AddParam(ctx, \"q\", \"Query\")\n\tpager.AddParam(ctx, \"type\", \"PackageType\")\n\tctx.Data[\"Page\"] = pager\n\n\tctx.HTML(http.StatusOK, tplPackagesList)\n}",
"func (r *Repository) GetPackages(db libdb.Database, pool *Pool, pkgName string) ([]*libeopkg.MetaPackage, error) {\n\tvar pkgs []*libeopkg.MetaPackage\n\n\tentry, err := r.GetEntry(db, pkgName)\n\tif err != nil || entry == nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, id := range entry.Available {\n\t\tp, err := pool.GetEntry(db, id)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpkgs = append(pkgs, p.Meta)\n\t}\n\n\treturn pkgs, nil\n}",
"func (s *Server) GetAvailablePackageVersions(ctx context.Context, request *corev1.GetAvailablePackageVersionsRequest) (*corev1.GetAvailablePackageVersionsResponse, error) {\n\tlog.Infof(\"+fluxv2 GetAvailablePackageVersions [%v]\", request)\n\tdefer log.Infof(\"-fluxv2 GetAvailablePackageVersions\")\n\n\tif request.GetPkgVersion() != \"\" {\n\t\treturn nil, status.Errorf(\n\t\t\tcodes.Unimplemented,\n\t\t\t\"not supported yet: request.GetPkgVersion(): [%v]\",\n\t\t\trequest.GetPkgVersion())\n\t}\n\n\tpackageRef := request.GetAvailablePackageRef()\n\tnamespace := packageRef.GetContext().GetNamespace()\n\tif namespace == \"\" || packageRef.GetIdentifier() == \"\" {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"required context or identifier not provided\")\n\t}\n\n\tcluster := packageRef.Context.Cluster\n\tif cluster != \"\" && cluster != s.kubeappsCluster {\n\t\treturn nil, status.Errorf(\n\t\t\tcodes.Unimplemented,\n\t\t\t\"not supported yet: request.AvailablePackageRef.Context.Cluster: [%v]\",\n\t\t\tcluster)\n\t}\n\n\trepoName, chartName, err := pkgutils.SplitChartIdentifier(packageRef.Identifier)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Infof(\"Requesting chart [%s] in namespace [%s]\", chartName, namespace)\n\trepo := types.NamespacedName{Namespace: namespace, Name: repoName}\n\tchart, err := s.getChart(ctx, repo, chartName)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if chart != nil {\n\t\t// found it\n\t\treturn &corev1.GetAvailablePackageVersionsResponse{\n\t\t\tPackageAppVersions: pkgutils.PackageAppVersionsSummary(\n\t\t\t\tchart.ChartVersions,\n\t\t\t\ts.pluginConfig.VersionsInSummary),\n\t\t}, nil\n\t} else {\n\t\treturn nil, status.Errorf(codes.Internal, \"unable to retrieve versions for chart: [%s]\", packageRef.Identifier)\n\t}\n}",
"func (opts *goTest) packages() []string {\n\tpkgs := opts.pkgs\n\tif opts.pkg != \"\" {\n\t\tpkgs = append(pkgs[:len(pkgs):len(pkgs)], opts.pkg)\n\t}\n\tif len(pkgs) == 0 {\n\t\tpanic(\"no packages\")\n\t}\n\treturn pkgs\n}",
"func GetAvailableVersions() ([]string, error) {\n\tcfg := getConfig()\n\n\tminVersion, err := semver.Make(minLegalVersion)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tversions, err := getPythonVersions(cfg.PMirror)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tversionStrs := make([]string, 0, len(versions))\n\tfor _, semver := range versions {\n\t\tif semver.Compare(minVersion) < 0 {\n\t\t\tcontinue\n\t\t}\n\t\tversionStrs = append(versionStrs, semver.String())\n\t}\n\treturn versionStrs, nil\n}",
"func List() ([]string, error) {\n\tpkgDir := filepath.Join(homedir.SecPkg(), \"pkgs\")\n\texists, err := file.Exists(pkgDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, fmt.Errorf(\"no package installed: '%s' does not exist\", pkgDir)\n\t}\n\tfiles, err := ioutil.ReadDir(pkgDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar pkgs []string\n\tfor _, file := range files {\n\t\tpkgs = append(pkgs, file.Name())\n\t}\n\treturn pkgs, nil\n}",
"func (s packagesServer) GetAvailablePackageVersions(ctx context.Context, request *packages.GetAvailablePackageVersionsRequest) (*packages.GetAvailablePackageVersionsResponse, error) {\n\tcontextMsg := fmt.Sprintf(\"(cluster=%q, namespace=%q)\", request.GetAvailablePackageRef().GetContext().GetCluster(), request.GetAvailablePackageRef().GetContext().GetNamespace())\n\tlog.Infof(\"+core GetAvailablePackageVersions %s\", contextMsg)\n\n\tif request.GetAvailablePackageRef().GetPlugin() == nil {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"Unable to retrieve the plugin (missing AvailablePackageRef.Plugin)\")\n\t}\n\n\t// Retrieve the plugin with server matching the requested plugin name\n\tpluginWithServer := s.getPluginWithServer(request.AvailablePackageRef.Plugin)\n\tif pluginWithServer == nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"Unable to get the plugin %v\", request.AvailablePackageRef.Plugin)\n\t}\n\n\t// Get the response from the requested plugin\n\tresponse, err := pluginWithServer.server.GetAvailablePackageVersions(ctx, request)\n\tif err != nil {\n\t\treturn nil, status.Errorf(status.Convert(err).Code(), \"Unable to get the available package versions for the package %q using the plugin %q: %v\", request.AvailablePackageRef.Identifier, request.AvailablePackageRef.Plugin.Name, err)\n\t}\n\n\t// Validate the plugin response\n\tif response.PackageAppVersions == nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"Invalid GetAvailablePackageVersions response from the plugin %v: %v\", pluginWithServer.plugin.Name, err)\n\t}\n\n\t// Build the response\n\treturn &packages.GetAvailablePackageVersionsResponse{\n\t\tPackageAppVersions: response.PackageAppVersions,\n\t}, nil\n}",
"func (o *PackagesRequest) GetPackageList() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.PackageList\n}",
"func allPackages(deps []*cfg.Dependency, res *dependency.Resolver, addTest bool) ([]string, error) {\n\tif len(deps) == 0 {\n\t\treturn []string{}, nil\n\t}\n\n\tvdir, err := gpath.Vendor()\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\tvdir += string(os.PathSeparator)\n\tll, err := res.ResolveAll(deps, addTest)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\n\tfor i := 0; i < len(ll); i++ {\n\t\tll[i] = strings.TrimPrefix(ll[i], vdir)\n\t}\n\treturn ll, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ProcessPackageFile checks whether the driver needs transformation and creates the write XOG files according to the installation environment
|
func ProcessPackageFile(file *model.DriverFile, selectedVersion *model.Version, packageFolder, writeFolder string, environments *model.Environments, soapFunc util.Soap) model.Output {
if file.PackageTransform && file.NeedPackageTransform() {
file.InitXML(constant.Read, constant.Undefined)
file.RunAuxXML(environments.Target, soapFunc)
}
return transform.ProcessPackageFile(file, packageFolder, writeFolder, selectedVersion.Definitions)
}
|
[
"func InstallPackageFile(file *model.DriverFile, environments *model.Environments, soapFunc util.Soap) model.Output {\n\toutput := model.Output{Code: constant.OutputSuccess, Debug: constant.Undefined}\n\n\tutil.ValidateFolder(constant.FolderDebug + file.Type + util.GetPathFolder(file.Path))\n\n\tfile.InitXML(constant.Write, constant.FolderWrite)\n\n\tiniTagRegexpStr, endTagRegexpStr := file.TagCDATA()\n\tif iniTagRegexpStr != constant.Undefined && endTagRegexpStr != constant.Undefined {\n\t\tresponseString := transform.IncludeCDATA(file.GetXML(), iniTagRegexpStr, endTagRegexpStr)\n\t\tfile.SetXML(responseString)\n\t}\n\n\terr := file.RunXML(constant.Write, constant.FolderWrite, environments, soapFunc)\n\txogResponse := etree.NewDocument()\n\txogResponse.ReadFromString(file.GetXML())\n\toutput, err = validate.Check(xogResponse)\n\tif err != nil {\n\t\treturn output\n\t}\n\tfile.Write(constant.FolderDebug)\n\treturn output\n}",
"func ProcessFile(logger *log.Logger, path string, file string, pack string, output string, format bool) error {\n\n\text := filepath.Ext(file)\n\tbase := strings.TrimSuffix(file, ext)\n\n\tif output == \"\" {\n\t\toutput = filepath.Join(path, base+\"_yago\"+ext)\n\t}\n\n\tfiledata := FileData{Package: pack, Imports: make(map[string]bool)}\n\n\tstructs, err := ParseFile(filepath.Join(path, file))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstructsByName := make(map[string]*StructData)\n\tfor _, str := range structs {\n\t\tprepareStructData(str, filedata)\n\t\tstructsByName[str.Name] = str\n\t\tif !str.NoTable {\n\t\t\tfiledata.HasTables = true\n\t\t}\n\t}\n\totherStructs := loadEmbedded(path, structsByName, filedata)\n\tfor _, str := range otherStructs {\n\t\tif _, ok := structsByName[str.Name]; !ok {\n\t\t\tstr.Imported = true\n\t\t\tstructsByName[str.Name] = str\n\t\t}\n\t}\n\tpostPrepare(&filedata, structsByName)\n\n\toutf, err := os.Create(output)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t{\n\t\tdefer outf.Close()\n\n\t\tif err := prologTemplate.Execute(outf, &filedata); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, str := range structs {\n\t\t\tif err := structPreambleTemplate.Execute(outf, &str); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif str.NoTable {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := structTemplate.Execute(outf, &str); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif format {\n\t\tcmd := exec.Command(\"gofmt\", \"-s\", \"-w\", output)\n\t\tif err := cmd.Run(); err != nil {\n\t\t\tlogger.Fatal(err)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (m *reportModule) Execute(targets map[string]pgs.File, pkgs map[string]pgs.Package) []pgs.Artifact {\n\tbuf := &bytes.Buffer{}\n\n\t// firstly, understanding what to generate - servicemodel (E2SM) or E2AP\n\tsm, _ := m.Parameters().Bool(\"sm\")\n\n\tdir, err := os.Getwd()\n\t// handle err\n\tif err != nil {\n\t\treturn nil\n\t}\n\t//printFiles(path)\n\t_, err = fmt.Fprintf(buf, \"Working directory is %v\\n\", dir)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tfiles, err := os.ReadDir(dir)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tfor _, file := range files {\n\t\t_, err = fmt.Fprintf(buf, \"Found file %v\\n\", file.Name())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// creating structure to generate encoder package\n\tenc := make([]encoder, 0)\n\tloggerPresence := true\n\n\t//creating structure to generate servicemodel package\n\tsmodel := servicemodel{}\n\n\tif sm {\n\t\t_, err = fmt.Fprintf(buf, \"Gathering data for encoder and servicemodel packages generation\\n\")\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tsmBasicInfoFilled := false\n\t\tfor _, f := range targets { // Input .proto files\n\t\t\tm.Push(f.Name().String()).Debug(\"reporting\")\n\t\t\t_, err = fmt.Fprintf(buf, \"Leading target comments were found, they are:\\n%v\\n\", f.SourceCodeInfo().LeadingDetachedComments())\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// understanding if canonical choice ordering is present\n\t\t\tcanonicalChoice := canonicalOrderingIsPresent(f.AllMessages())\n\n\t\t\t// looking for a proto path here\n\t\t\tprotoFilePath := lookUpProtoFilePath(dir, f.File().InputPath())\n\n\t\t\tfor _, msg := range f.AllMessages() {\n\t\t\t\t_, err = fmt.Fprintf(buf, \"Message name is %v\\n\", msg.Name().String())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\t// This indicates us that we've found top-level message\n\t\t\t\tif strings.Contains(msg.Name().String(), \"E2Sm\") &&\n\t\t\t\t\t!strings.Contains(msg.Name().String(), \"Format\") && !strings.Contains(msg.Name().String(), \"Item\") {\n\n\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Top-level message was found!! 
It is %v\\n\", msg.Name().String())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\n\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Comments were found, they are:\\n%v\\n\", msg.SourceCodeInfo().LeadingComments())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\n\t\t\t\t\tpdu := encoder{\n\t\t\t\t\t\tProtoName: adjustPackageName(adjustProtoFileName(extractProtoFileName(f.Name().Split()[0])), f.File().InputPath().Dir().String()),\n\t\t\t\t\t\tMessageName: msg.Name().String(),\n\t\t\t\t\t\tMessageNameInLogging: adjustMessageNameForLogging(msg.Name().String()),\n\t\t\t\t\t\tChoiceMapName: adjustMapVariableName(extractPackageName(f.Name().Split()[0])) + \"Choicemap\",\n\t\t\t\t\t\tCanonicalChoiceMapName: adjustMapVariableName(extractPackageName(f.Name().Split()[0])) + \"CanonicalChoicemap\",\n\t\t\t\t\t\tCanonicalChoicePresence: canonicalChoice,\n\t\t\t\t\t\tParameters: lookUpMessageParameters(msg.SourceCodeInfo().LeadingComments()),\n\t\t\t\t\t}\n\t\t\t\t\tif loggerPresence {\n\t\t\t\t\t\tpdu.Logger = true\n\t\t\t\t\t\tloggerPresence = false\n\t\t\t\t\t} else {\n\t\t\t\t\t\tpdu.Logger = false\n\t\t\t\t\t}\n\t\t\t\t\tpdu.Imports = pdu.ProtoName + \" \\\"\" + protoFilePath + \"\\\"\" + \"\\n\"\n\t\t\t\t\tenc = append(enc, pdu)\n\n\t\t\t\t\t// filling in some information about SM\n\t\t\t\t\tsmodel.ParsePdu(msg.Name().String())\n\t\t\t\t\tif !smBasicInfoFilled {\n\t\t\t\t\t\tsmodel.ParseSmData(f.SourceCodeInfo().LeadingDetachedComments())\n\t\t\t\t\t\tsmodel.Imports = smodel.SmName + \" \\\"\" + protoFilePath + \"\\\"\" + \"\\n\"\n\t\t\t\t\t\tsmodel.AddEncoderImport(protoFilePath)\n\t\t\t\t\t\tsmBasicInfoFilled = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// There is only single top-level PDU so far, but leaving it here for future\n\t\tfor _, f := range targets { // Input .proto files\n\t\t\tm.Push(f.Name().String()).Debug(\"reporting\")\n\t\t\t// understanding if canonical choice ordering is present\n\t\t\tcanonicalChoice := canonicalOrderingIsPresent(f.AllMessages())\n\n\t\t\t// looking for a proto path here\n\t\t\tprotoFilePath := lookUpProtoFilePath(dir, f.File().InputPath())\n\n\t\t\tfor _, msg := range f.AllMessages() {\n\t\t\t\t_, err = fmt.Fprintf(buf, \"Message name is %v\\n\", msg.Name().String())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\t// This indicates us that we've found top-level message\n\t\t\t\tif strings.Contains(msg.Name().String(), \"E2ApPdu\") &&\n\t\t\t\t\t!strings.Contains(msg.Name().String(), \"Format\") && !strings.Contains(msg.Name().String(), \"Item\") {\n\n\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Top-level message was found!! 
It is %v\\n\", msg.Name().String())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\t// Stick to the E2AP message\n\t\t\t\t\tpdu := encoder{\n\t\t\t\t\t\tLogger: true,\n\t\t\t\t\t\tProtoName: adjustPackageName(adjustProtoFileName(extractProtoFileName(f.Name().Split()[0])), f.File().InputPath().Dir().String()),\n\t\t\t\t\t\tMessageName: msg.Name().String(),\n\t\t\t\t\t\tMessageNameInLogging: \"E2AP\",\n\t\t\t\t\t\tChoiceMapName: adjustMapVariableName(extractPackageName(f.Name().Split()[0])) + \"Choicemap\",\n\t\t\t\t\t\tCanonicalChoiceMapName: adjustMapVariableName(extractPackageName(f.Name().Split()[0])) + \"CanonicalChoicemap\",\n\t\t\t\t\t\tCanonicalChoicePresence: canonicalChoice,\n\t\t\t\t\t\tParameters: lookUpMessageParameters(msg.SourceCodeInfo().LeadingComments()),\n\t\t\t\t\t}\n\t\t\t\t\tpdu.Imports = pdu.ProtoName + \" \\\"\" + protoFilePath + \"\\\"\" + \"\\n\"\n\t\t\t\t\tenc = append(enc, pdu)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// creating list of all messages and their correspondence to certain .proto file/package\n\ttree := map[string]protoItem{}\n\t_, err = fmt.Fprintf(buf, \"There are multiple .proto files passed at input, building a simple tree of the messages\")\n\tif err != nil {\n\t\treturn nil\n\t}\n\tfor _, f := range targets { // Input .proto files\n\t\t_, err = fmt.Fprintf(buf, \"Proto package name is %v\\n\", f.Package().ProtoName().String())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tpackageName := adjustPackageName(adjustProtoFileName(extractProtoFileName(f.Name().Split()[0])), f.File().InputPath().Dir().String())\n\n\t\t_, err = fmt.Fprintf(buf, \"Iterating over the messages in %v\\n\", f.Package().ProtoName().String())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tfor _, msg := range f.AllMessages() {\n\t\t\t_, err = fmt.Fprintf(buf, \"Message name is %v\\n\", msg.Name().String())\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\titemName := adjustFieldName(msg.Name().String())\n\t\t\tleaf := protoItem{\n\t\t\t\tPackageName: packageName,\n\t\t\t\tProtoFilePath: lookUpProtoFilePath(dir, f.File().InputPath()),\n\t\t\t}\n\n\t\t\t_, err = fmt.Fprintf(buf, \"Message name is %v, oneOf items are %v, non-oneOf items are %v\\n\", msg.Name().String(), len(msg.OneOfs()), len(msg.NonOneOfFields()))\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// avoiding parsing constants && checking if this is a list\n\t\t\tif !strings.Contains(msg.SourceCodeInfo().LeadingComments(), \"constant\") &&\n\t\t\t\tlen(msg.Fields()) == 1 && strings.Contains(strings.ToLower(msg.Name().String()), \"list\") {\n\t\t\t\tleaf.IsList = msg.Fields()[0].Type().IsRepeated()\n\t\t\t}\n\t\t\t// avoiding parsing constants && checking if this is a oneOf (CHOICE)\n\t\t\tif !strings.Contains(msg.SourceCodeInfo().LeadingComments(), \"constant\") &&\n\t\t\t\tlen(msg.OneOfs()) > 0 && len(msg.NonOneOfFields()) == 0 { // it also excludes the case when optional items is interpreted as a oneOf\n\t\t\t\tleaf.IsChoice = true\n\t\t\t}\n\n\t\t\ttree[itemName] = leaf\n\t\t}\n\n\t\t// processing enumerators and adding to the tree\n\t\tfor _, en := range f.AllEnums() { // Constants\n\t\t\tleaf := protoItem{\n\t\t\t\tPackageName: packageName,\n\t\t\t\tProtoFilePath: lookUpProtoFilePath(dir, f.File().InputPath()),\n\t\t\t\tIsEnum: true,\n\t\t\t}\n\t\t\ttree[en.Name().String()] = leaf\n\t\t}\n\t}\n\t_, err = fmt.Fprintf(buf, \"Obtained Protobuf tree:\\n%v\\n\", tree)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\t// gathering data for builder\n\t_, err = 
fmt.Fprintf(buf, \"Gathering data for builder package generation\\n\")\n\tif err != nil {\n\t\treturn nil\n\t}\n\tfor _, f := range targets { // Input .proto files\n\t\tm.Push(f.Name().String()).Debug(\"reporting\")\n\n\t\tpackageName := adjustPackageName(adjustProtoFileName(extractProtoFileName(f.Name().Split()[0])), f.File().InputPath().Dir().String())\n\t\t// this package should be located in the same directory as .pb.go\n\t\tbldr := builder{\n\t\t\tPackageName: packageName,\n\t\t\tImports: \"\",\n\t\t\tInstances: make([]builderInstance, 0),\n\t\t}\n\n\t\t_, err = fmt.Fprintf(buf, \"Processing file %v\\n\", f.Name().String())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\t// iterating over messages and collecting set of OPTIONAL items\n\t\tfor _, msg := range f.AllMessages() {\n\t\t\t_, err = fmt.Fprintf(buf, \"Message name is %v\\n\", msg.Name().String())\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// avoiding parsing constants\n\t\t\tif !strings.Contains(msg.SourceCodeInfo().LeadingComments(), \"constant\") {\n\t\t\t\t// iterating over fields of the message\n\t\t\t\tfor _, dep := range msg.Fields() {\n\t\t\t\t\t// This indicates us that we've found OPTIONAL item in the message\n\t\t\t\t\tif strings.Contains(dep.SourceCodeInfo().LeadingComments(), \"optional\") {\n\t\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Hooray! Found OPTIONAL item - %v\\n\", dep.Name().String())\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\tinstanceName := msg.Name().String()\n\t\t\t\t\t\titemType := adjustFieldName(extractItemMessageType(dep))\n\t\t\t\t\t\t// checking if the message is of type BitString (special case)\n\t\t\t\t\t\tif strings.Contains(itemType, \"BitString\") {\n\t\t\t\t\t\t\tbldr.Imports = bldr.Imports + \"\\n\\\"github.com/onosproject/onos-lib-go/api/asn1/v1/asn1\\\"\\n\"\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// checking if the message is of the elementary type\n\t\t\t\t\t\telementaryType := false\n\t\t\t\t\t\telementaryType = isElementaryType(itemType)\n\n\t\t\t\t\t\titemName := adjustFieldName(composeItemName(dep.Name().String()))\n\t\t\t\t\t\tinstance := builderInstance{\n\t\t\t\t\t\t\tInstance: instanceName,\n\t\t\t\t\t\t\tFunctionName: itemName,\n\t\t\t\t\t\t\tItemName: itemName,\n\t\t\t\t\t\t\tItemType: itemType,\n\t\t\t\t\t\t\tVariableName: doLinting(strings.ToLower(itemName[:1]) + itemName[1:]),\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// getting information about the item from the own Protobuf tree\n\t\t\t\t\t\tie, ok := tree[itemType]\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Couldn't find the message %v in the Protobuf tree\\n\", itemName)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// checking if the message is defined in the other Protobuf file\n\t\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Current package name is %v, item %v is from package %v\\n\", packageName, adjustFieldName(itemType), ie.PackageName)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif ie.fromOtherProto(packageName) && !elementaryType {\n\t\t\t\t\t\t\ttmp := instance.ItemType\n\t\t\t\t\t\t\tinstance.ItemType = ie.PackageName + \".\" + tmp\n\t\t\t\t\t\t\tif !strings.Contains(bldr.Imports, ie.PackageName) {\n\t\t\t\t\t\t\t\tbldr.Imports = bldr.Imports + \"\\n\" + ie.getImport()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// avoiding case when it's an enumerator\n\t\t\t\t\t\tif strings.Contains(dep.SourceCodeInfo().LeadingComments(), \"valueLB:\") && 
strings.Contains(dep.SourceCodeInfo().LeadingComments(), \"valueUB:\") {\n\t\t\t\t\t\t\tinstance.VariableNamePtr = \"&\" + instance.VariableName\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tif !elementaryType {\n\t\t\t\t\t\t\t\tinstance.ItemType = \"*\" + instance.ItemType\n\t\t\t\t\t\t\t\tinstance.VariableNamePtr = instance.VariableName\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t//treating special case\n\t\t\t\t\t\t\t\tif !strings.Contains(instance.ItemType, \"[]byte\") {\n\t\t\t\t\t\t\t\t\tinstance.VariableNamePtr = \"&\" + instance.VariableName\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\tinstance.VariableNamePtr = instance.VariableName\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t// treating the case of the list\n\t\t\t\t\t\t\tif strings.Contains(strings.ToLower(instance.ItemName), \"list\") && !strings.Contains(strings.ToLower(instance.ItemType), \"list\") {\n\t\t\t\t\t\t\t\tinstance.ItemType = \"[]\" + instance.ItemType\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// linting some of the fields\n\t\t\t\t\t\tinstance.doLinting()\n\t\t\t\t\t\tbldr.Instances = append(bldr.Instances, instance)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t_, err = fmt.Fprintf(buf, \"We're about to start generating builder for Protobuf\\nObtained structure is %v\\n\", bldr)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\t// looking for a proto path to locate where to store builder file\n\t\tprotoFilePath := lookUpProtoFilePath(dir, f.File().InputPath())\n\t\t_, err = fmt.Fprintf(buf, \"Protobuf file path is %v\\nFile's Input path is %v\\n\", protoFilePath, f.File().InputPath().Dir().String())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\t// composing builder's output path to be the same as the generated Go Protobuf is located\n\t\tindex := strings.Index(protoFilePath, f.File().InputPath().Dir().String())\n\t\tif index == -1 {\n\t\t\t_, err = fmt.Fprintf(buf, \"Something went wrong in searching for the output path to store generated builder file..\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tprotoFilePath = protoFilePath[index:]\n\t\tindex = strings.Index(protoFilePath, \"/\")\n\t\tif index == -1 {\n\t\t\t_, err = fmt.Fprintf(buf, \"Something went wrong in searching for the output path to store generated builder file..\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\toutputPath := protoFilePath[index+1:]\n\t\t_, err = fmt.Fprintf(buf, \"Output file path is %v\\nFile's Input path is %v\\n\", protoFilePath, f.File().InputPath().Dir().String())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\t//Generating new .go file\n\t\tm.OverwriteGeneratorTemplateFile(outputPath+\"/builder.go\", templateBuilder.Lookup(\"builder.tpl\"), bldr)\n\t}\n\n\t// gathering data for pdubuilder\n\t_, err = fmt.Fprintf(buf, \"Gathering data for pdubuilder package generation\\n\")\n\tif err != nil {\n\t\treturn nil\n\t}\n\tpdubldr := pdubuilder{\n\t\tImports: \"\",\n\t\tOneOfs: make([]oneOf, 0),\n\t\tEnums: make([]enum, 0),\n\t\tMessages: make([]message, 0),\n\t\t//Lists: make([]list, 0),\n\t}\n\tfor _, f := range targets { // Input .proto files\n\t\tm.Push(f.Name().String()).Debug(\"reporting\")\n\t\t// adding target file to import (all Protobuf files are linked to each other, so should be present in import)\n\t\t// if the dependency is not required, GoFmt() post-processor will take care of it\n\t\t// looking for a proto path here\n\t\tprotoFilePath := lookUpProtoFilePath(dir, f.File().InputPath())\n\t\tpackageName := 
adjustPackageName(adjustProtoFileName(extractProtoFileName(f.Name().Split()[0])), f.File().InputPath().Dir().String())\n\t\tpdubldr.Imports = pdubldr.Imports + \"\\n\" + packageName + \" \\\"\" + protoFilePath + \"\\\"\" + \"\\n\"\n\n\t\tif len(f.AllMessages()) > 0 {\n\t\t\tpdubldr.MessagePresence = true\n\t\t}\n\t\tif len(f.AllEnums()) > 0 {\n\t\t\tpdubldr.EnumPresence = true\n\t\t}\n\n\t\tfor _, msg := range f.AllMessages() {\n\t\t\t_, err = fmt.Fprintf(buf, \"Message name is %v\\n\", msg.Name().String())\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// avoiding parsing constants\n\t\t\tif !strings.Contains(msg.SourceCodeInfo().LeadingComments(), \"constant\") {\n\t\t\t\tif len(msg.OneOfs()) > 0 {\n\t\t\t\t\t// handling the OneOf case\n\t\t\t\t\tfor _, oneOfField := range msg.OneOfFields() {\n\t\t\t\t\t\t// avoiding OPTIONAL case\n\t\t\t\t\t\tif !strings.Contains(oneOfField.SourceCodeInfo().LeadingComments(), \"optional\") {\n\t\t\t\t\t\t\t_, err = fmt.Fprintf(buf, \"CHOICE message is %v\\n\", adjustFieldName(msg.Name().String()))\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tpdubldr.OneOfPresence = true\n\t\t\t\t\t\t\titemName := adjustFieldName(composeItemName(oneOfField.Name().String()))\n\t\t\t\t\t\t\titemType := adjustFieldName(extractItemMessageType(oneOfField))\n\t\t\t\t\t\t\tms := oneOf{\n\t\t\t\t\t\t\t\tPackageName: packageName,\n\t\t\t\t\t\t\t\tFunctionName: doLinting(msg.Name().String() + itemName),\n\t\t\t\t\t\t\t\tFunctionOutputType: adjustFieldName(msg.Name().String()),\n\t\t\t\t\t\t\t\tVariableType: itemType,\n\t\t\t\t\t\t\t\tVariableName: doLinting(strings.ToLower(itemName[:1]) + itemName[1:]),\n\t\t\t\t\t\t\t\tItemName: itemName,\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// getting information about the item from the own Protobuf tree\n\t\t\t\t\t\t\tif isElementaryType(itemType) {\n\t\t\t\t\t\t\t\t// treating special case (BitString) here\n\t\t\t\t\t\t\t\tif strings.Contains(strings.ToLower(itemType), \"bitstring\") {\n\t\t\t\t\t\t\t\t\tms.VariableType = \"*\" + ms.VariableType\n\t\t\t\t\t\t\t\t\tif !strings.Contains(pdubldr.Imports, \"/asn1/v1/asn1\") {\n\t\t\t\t\t\t\t\t\t\tpdubldr.Imports = pdubldr.Imports + \"\\n\\\"github.com/onosproject/onos-lib-go/api/asn1/v1/asn1\\\"\\n\"\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tie, ok := tree[itemType]\n\t\t\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Couldn't find the message %v in the Protobuf tree\\n\", itemName)\n\t\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t// checking if the message is defined in the other Protobuf file\n\t\t\t\t\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Current package name is %v, item %v is from package %v\\n\", packageName, adjustFieldName(itemType), ie.PackageName)\n\t\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tif ie.fromOtherProto(adjustPackageName(adjustProtoFileName(extractProtoFileName(f.Name().Split()[0])), f.File().InputPath().Dir().String())) && !isElementaryType(itemType) {\n\t\t\t\t\t\t\t\t\t\tif ie.IsEnum {\n\t\t\t\t\t\t\t\t\t\t\tms.VariableType = ie.PackageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\t\tif strings.Contains(oneOfField.SourceCodeInfo().LeadingComments(), \"valueLB:\") && strings.Contains(oneOfField.SourceCodeInfo().LeadingComments(), \"valueUB:\") {\n\t\t\t\t\t\t\t\t\t\t\t\tms.VariableType = 
ie.PackageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\t\t\tms.VariableType = \"*\" + ie.PackageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t} else if !isElementaryType(itemType) {\n\t\t\t\t\t\t\t\t\t\tif strings.Contains(oneOfField.SourceCodeInfo().LeadingComments(), \"valueLB:\") && strings.Contains(oneOfField.SourceCodeInfo().LeadingComments(), \"valueUB:\") {\n\t\t\t\t\t\t\t\t\t\t\tms.VariableType = packageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\t\tms.VariableType = \"*\" + packageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif oneOfField.Type().IsRepeated() {\n\t\t\t\t\t\t\t\t\tms.VariableType = \"[]\" + ms.VariableType\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tpdubldr.OneOfs = append(pdubldr.OneOfs, ms)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tms := message{\n\t\t\t\t\t\tPackageName: packageName,\n\t\t\t\t\t\tFunctionName: doLinting(msg.Name().String()),\n\t\t\t\t\t\tFunctionOutputType: adjustFieldName(msg.Name().String()),\n\t\t\t\t\t\tItems: make([]item, 0),\n\t\t\t\t\t}\n\t\t\t\t\tif len(msg.Fields()) == 1 {\n\t\t\t\t\t\tms.SingleItem = true\n\t\t\t\t\t}\n\t\t\t\t\t// iterating over fields of the message\n\t\t\t\t\tfor _, dep := range msg.Fields() {\n\t\t\t\t\t\t// don't want to include optional items in the message\n\t\t\t\t\t\tif !strings.Contains(dep.SourceCodeInfo().LeadingComments(), \"optional\") {\n\t\t\t\t\t\t\titemName := adjustFieldName(composeItemName(dep.Name().String()))\n\t\t\t\t\t\t\titemType := adjustFieldName(extractItemMessageType(dep))\n\n\t\t\t\t\t\t\ti := item{\n\t\t\t\t\t\t\t\tVariableName: doLinting(strings.ToLower(itemName[:1]) + itemName[1:]),\n\t\t\t\t\t\t\t\tFieldName: itemName,\n\t\t\t\t\t\t\t\tItemType: itemType,\n\t\t\t\t\t\t\t\tIsChoice: false,\n\t\t\t\t\t\t\t\tIsEnum: false,\n\t\t\t\t\t\t\t\tIsList: false,\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// getting information about the item from the own Protobuf tree\n\t\t\t\t\t\t\tif isElementaryType(itemType) {\n\t\t\t\t\t\t\t\t// treating special case (BitString) here\n\t\t\t\t\t\t\t\tif strings.Contains(strings.ToLower(itemType), \"bitstring\") {\n\t\t\t\t\t\t\t\t\ti.ItemType = \"*\" + i.ItemType\n\t\t\t\t\t\t\t\t\tif !strings.Contains(pdubldr.Imports, \"/asn1/v1/asn1\") {\n\t\t\t\t\t\t\t\t\t\tpdubldr.Imports = pdubldr.Imports + \"\\n\\\"github.com/onosproject/onos-lib-go/api/asn1/v1/asn1\\\"\\n\"\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t// getting information about the item from the own Protobuf tree\n\t\t\t\t\t\t\t\tie, ok := tree[itemType]\n\t\t\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Couldn't find the message %v in the Protobuf tree\\n\", itemName)\n\t\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t// checking if the message is defined in the other Protobuf file\n\t\t\t\t\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Current package name is %v, item %v is from package %v. 
It is of elementary type %v and from other proto %v\\n\",\n\t\t\t\t\t\t\t\t\t\tpackageName, adjustFieldName(itemType), ie.PackageName, isElementaryType(itemType), ie.fromOtherProto(packageName))\n\t\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t// avoiding the case when it's an enumerator\n\t\t\t\t\t\t\t\t\tif !isElementaryType(itemType) && !strings.Contains(dep.SourceCodeInfo().LeadingComments(), \"valueLB:\") && !strings.Contains(dep.SourceCodeInfo().LeadingComments(), \"valueUB:\") {\n\t\t\t\t\t\t\t\t\t\tif ie.fromOtherProto(packageName) && !isElementaryType(itemType) {\n\t\t\t\t\t\t\t\t\t\t\ti.ItemType = \"*\" + ie.PackageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t} else if !isElementaryType(itemType) {\n\t\t\t\t\t\t\t\t\t\t\ti.ItemType = \"*\" + packageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\tif ie.fromOtherProto(packageName) && !isElementaryType(itemType) {\n\t\t\t\t\t\t\t\t\t\t\ti.ItemType = ie.PackageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t} else if !isElementaryType(itemType) {\n\t\t\t\t\t\t\t\t\t\t\ti.ItemType = packageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\tif dep.Type().IsRepeated() {\n\t\t\t\t\t\t\t\t\t\ti.ItemType = \"[]\" + i.ItemType\n\t\t\t\t\t\t\t\t\t\ti.IsList = true\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\tif ie.IsChoice {\n\t\t\t\t\t\t\t\t\t\ti.IsChoice = true\n\t\t\t\t\t\t\t\t\t} else if ie.IsList {\n\t\t\t\t\t\t\t\t\t\ti.IsList = true\n\t\t\t\t\t\t\t\t\t} else if ie.IsEnum {\n\t\t\t\t\t\t\t\t\t\ti.IsEnum = true\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t// treating the special case when it is a list\n\t\t\t\t\t\t\tif ms.SingleItem && dep.Type().IsRepeated() && !strings.Contains(i.ItemType, \"[]\") {\n\t\t\t\t\t\t\t\ti.ItemType = \"[]\" + i.ItemType\n\t\t\t\t\t\t\t\ti.IsList = true\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tms.Items = append(ms.Items, i)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tpdubldr.Messages = append(pdubldr.Messages, ms)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, en := range f.AllEnums() {\n\t\t\t_, err = fmt.Fprintf(buf, \"Enumerator is %v\\n\", en.Name().String())\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tfor _, ie := range en.Values() {\n\t\t\t\t_, err = fmt.Fprintf(buf, \"Enumerator value is %v\\n\", composeItemName(strings.ToLower(ie.Name().String())))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\te := enum{\n\t\t\t\t\tPackageName: packageName,\n\t\t\t\t\tFunctionName: composeItemName(strings.ToLower(ie.Name().String())),\n\t\t\t\t\tFunctionOutputType: packageName + \".\" + adjustFieldName(en.Name().String()),\n\t\t\t\t\tItem: adjustFieldName(en.Name().String()) + \"_\" + ie.Name().String(),\n\t\t\t\t}\n\t\t\t\tpdubldr.Enums = append(pdubldr.Enums, e)\n\t\t\t}\n\t\t}\n\t}\n\n\t_, err = fmt.Fprintf(buf, \"We're about to start generating encoder package\\nObtained structure is %v\\n\", enc)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\t//printing encoder package\n\tfor _, e := range enc {\n\t\t_, err = fmt.Fprintf(buf, \"Generating template for %v with name %v\\n\", e.MessageName, e.MessageNameInLogging)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\t//Generating new .go file\n\t\tm.OverwriteGeneratorTemplateFile(\"encoder/\"+e.MessageNameInLogging+\".go\", templateEncoder.Lookup(\"encoder.tpl\"), e)\n\t}\n\n\t// printing pdubuilder package\n\t//Generating new .go 
file\n\tm.OverwriteGeneratorTemplateFile(\"pdubuilder/pdubuilder.go\", templatePdubuilder.Lookup(\"pdubuilder.tpl\"), pdubldr)\n\n\tif sm {\n\t\t_, err = fmt.Fprintf(buf, \"We're about to start generating servicemodel package\\nObtained structure is %v\\n\", smodel)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\t//Generating new .go file\n\t\tm.OverwriteGeneratorTemplateFile(\"servicemodel/servicemodel.go\", templateServicemodel.Lookup(\"servicemodel.tpl\"), smodel)\n\t}\n\n\tout := m.OutputPath()\n\t_, err = fmt.Fprintf(buf, \"Output path is\\n%v\\n\", out)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tm.OverwriteCustomFile(\n\t\t\"/tmp/report.txt\",\n\t\tbuf.String(),\n\t\t0644,\n\t)\n\n\treturn m.Artifacts()\n}",
"func (r RawPackageGenerator) Preprocess() (pg PackageGenerator, err error) {\n\tpg.SrcPath = r.SrcPath\n\tpg.OneShell = r.OneShell\n\tnpg := PackageGenerator{}\n\tfor _, name := range r.Tools {\n\t\ttool := tools[name]\n\t\tif tool == nil {\n\t\t\treturn npg, fmt.Errorf(\"Tool %q not found\", name)\n\t\t}\n\t}\n\ttf := pg.toolfuncs()\n\ttf[\"make\"] = func(dir string, args ...string) string {\n\t\tlines := make([]string, len(args))\n\t\tfor i, a := range args {\n\t\t\tlines[i] = fmt.Sprintf(\"$(MAKE) -C %s %s\", dir, a)\n\t\t}\n\t\treturn strings.Join(lines, \"\\n\")\n\t}\n\ttf[\"extract\"] = func(name string, ext string) string {\n\t\treturn strings.Join(\n\t\t\t[]string{\n\t\t\t\tfmt.Sprintf(\"tar -xf src/%s-%s.tar.%s\", name, r.Version, ext),\n\t\t\t\tfmt.Sprintf(\"mv %s-%s %s\", name, r.Version, name),\n\t\t\t},\n\t\t\t\"\\n\")\n\t}\n\ttf[\"pkmv\"] = func(file string, srcpkg string, destpkg string) string {\n\t\tif strings.HasSuffix(file, \"/\") { //cut off trailing /\n\t\t\tfile = file[:len(file)-2]\n\t\t}\n\t\tdir, _ := filepath.Split(file)\n\t\tmv := fmt.Sprintf(\"mv %s %s\",\n\t\t\tfilepath.Join(\"out\", srcpkg, file),\n\t\t\tfilepath.Join(\"out\", destpkg, dir),\n\t\t)\n\t\tif dir != \"\" {\n\t\t\treturn strings.Join([]string{\n\t\t\t\tfmt.Sprintf(\"mkdir -p %s\", filepath.Join(\"out\", destpkg, dir)),\n\t\t\t\tmv,\n\t\t\t}, \"\\n\")\n\t\t}\n\t\treturn mv\n\t}\n\ttf[\"mvman\"] = func(pkg string) string {\n\t\treturn fmt.Sprintf(\"mkdir -p out/%s-man/usr/share\\nmv out/%s/usr/share/man out/%s-man/usr/share/man\", pkg, pkg, pkg)\n\t}\n\ttf[\"mvhdr\"] = func(pkg string) string {\n\t\treturn fmt.Sprintf(\"mkdir -p out/%s-headers/usr\\nmv out/%s/usr/include out/%s-include/usr/include\", pkg, pkg, pkg)\n\t}\n\ttf[\"configure\"] = func(dir string) string {\n\t\tif r.Data[\"configure\"] == nil {\n\t\t\tr.Data[\"configure\"] = []interface{}{}\n\t\t}\n\t\tcar := r.Data[\"configure\"].([]interface{})\n\t\tca := make([]string, len(car))\n\t\tfor i, v := range car {\n\t\t\tca[i] = v.(string)\n\t\t}\n\t\treturn fmt.Sprintf(\"(cd %s && ./configure %s)\", dir, strings.Join(ca, \" \"))\n\t}\n\ttf[\"confarch\"] = func() string {\n\t\treturn map[string]string{\n\t\t\t\"x86_64\": \"x86_64\",\n\t\t\t\"x86\": \"i686\",\n\t\t}[r.Arch]\n\t}\n\tpg.Version, err = version.NewVersion(r.Version)\n\tif err != nil {\n\t\treturn npg, err\n\t}\n\tpg.Sources = make([]*url.URL, len(r.Sources))\n\tfor i, v := range r.Sources {\n\t\ttmpl, err := template.New(\"sources\").Funcs(tf).Parse(v)\n\t\tif err != nil {\n\t\t\treturn npg, err\n\t\t}\n\t\tbuf := bytes.NewBuffer(nil)\n\t\terr = tmpl.Execute(buf, r)\n\t\tif err != nil {\n\t\t\treturn npg, err\n\t\t}\n\t\tsstr := buf.String()\n\t\tsrc, err := url.Parse(sstr)\n\t\tif err != nil {\n\t\t\treturn npg, err\n\t\t}\n\t\tpg.Sources[i] = src\n\t}\n\tpg.BuildDependencies = make([]string, len(r.BuildDependencies))\n\tfor i, v := range r.BuildDependencies {\n\t\ttmpl, err := template.New(\"build_dependencies\").Funcs(tf).Parse(v)\n\t\tif err != nil {\n\t\t\treturn npg, err\n\t\t}\n\t\tbuf := bytes.NewBuffer(nil)\n\t\terr = tmpl.Execute(buf, r)\n\t\tif err != nil {\n\t\t\treturn npg, err\n\t\t}\n\t\tpg.BuildDependencies[i] = buf.String()\n\t}\n\tfor _, v := range pg.Tools {\n\t\tif v.Dependencies != nil {\n\t\t\tpg.BuildDependencies = append(pg.BuildDependencies, v.Dependencies...)\n\t\t}\n\t}\n\tnval := []string{}\n\tpg.Pkgs = make(pkgmap)\n\tfor x, y := range r.Packages {\n\t\tpg.Pkgs[x] = new(pkg)\n\t\tif y != nil && y.Dependencies != nil {\n\t\t\tpg.Pkgs[x].Dependencies = 
make([]string, len(y.Dependencies))\n\t\t\tfor i, v := range y.Dependencies {\n\t\t\t\ttmpl, err := template.New(\"dependencies\").Funcs(tf).Parse(v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn npg, err\n\t\t\t\t}\n\t\t\t\tbuf := bytes.NewBuffer(nil)\n\t\t\t\terr = tmpl.Execute(buf, r)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn npg, err\n\t\t\t\t}\n\t\t\t\tpg.Pkgs[x].Dependencies[i] = buf.String()\n\t\t\t}\n\t\t} else {\n\t\t\tpg.Pkgs[x].Dependencies = nval\n\t\t}\n\t}\n\tstmpl, err := template.New(\"script\").Funcs(tf).Parse(strings.Join(r.Script, \"\\n\"))\n\tif err != nil {\n\t\treturn npg, err\n\t}\n\tbuf := bytes.NewBuffer(nil)\n\terr = stmpl.Execute(buf, r)\n\tif err != nil {\n\t\treturn npg, err\n\t}\n\tpg.Script = buf.String()\n\treturn\n}",
"func initPackage(file string, args []string,\n\tin io.Reader, out io.Writer) error {\n\n\tvar p pack.Pack\n\tvar err error\n\ts := bufio.NewScanner(in)\n\n\tvar wd string\n\twd, err = os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = os.Stat(file)\n\tif err == nil {\n\t\treturn err\n\t}\n\n\tfmt.Fprintf(out, \"Creating initial packfile...\")\n\n\t// Get package name\n\tp.Name = filepath.Base(wd)\n\tgetInput(s, out, \"Name\", &p.Name)\n\n\t// Get import path\n\tp.ImportPath, err = getImportPath(wd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgetInput(s, out, \"Import Path\", &p.ImportPath)\n\n\t// Misc Details\n\tgetInput(s, out, \"Summary\", &p.Summary)\n\tgetInput(s, out, \"Description\", &p.Description)\n\tgetInput(s, out, \"Homepage\", &p.Homepage)\n\tp.License = \"MIT\"\n\tgetInput(s, out, \"License\", &p.License)\n\n\terr = p.WritePackFile(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func repackage(config Config, outputDir string) error {\n\tif outputDirInfo, err := os.Stat(outputDir); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to stat output directory: %s\", outputDir)\n\t} else if !outputDirInfo.IsDir() {\n\t\treturn errors.Wrapf(err, \"not a directory: %s\", outputDir)\n\t}\n\n\tvendorDir := path.Join(outputDir, internalDir)\n\t// remove output directory if it already exists\n\tif err := os.RemoveAll(vendorDir); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to remove directory: %s\", vendorDir)\n\t}\n\n\tif err := os.Mkdir(vendorDir, 0755); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to create %s directory at %s\", internalDir, vendorDir)\n\t}\n\n\tprocessedPkgs := make(map[SrcPkg]bool, len(config.Pkgs))\n\tfor _, currName := range sortedKeys(config.Pkgs) {\n\t\tcurrPkg := config.Pkgs[currName]\n\n\t\t// if multiple keys specify the exact same source package, only process once\n\t\tif processedPkgs[currPkg] {\n\t\t\tcontinue\n\t\t}\n\n\t\tmainPkg, err := build.Import(currPkg.MainPkg, outputDir, build.FindOnly)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to get information for package %s for output directory %s\", currPkg.MainPkg, outputDir)\n\t\t}\n\n\t\t// get location of main package on disk\n\t\tmainDir := mainPkg.Dir\n\n\t\t// get project import path and location of project package directory\n\t\tprojectRootDir := mainDir\n\t\tprojectImportPath := currPkg.MainPkg\n\t\tfor i := 0; i < currPkg.DistanceToProjectPkg; i++ {\n\t\t\tprojectRootDir = path.Dir(projectRootDir)\n\t\t\tprojectImportPath = path.Dir(projectImportPath)\n\t\t}\n\n\t\t// copy project package into vendor directory in output dir if it does not already exist\n\t\tprojectDstDir := path.Join(vendorDir, projectImportPath)\n\n\t\tif _, err := os.Stat(projectDstDir); os.IsNotExist(err) {\n\t\t\tif err := shutil.CopyTree(projectRootDir, projectDstDir, vendorCopyOptions(currPkg.OmitVendorDirs)); err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"failed to copy directory %s to %s\", projectRootDir, projectDstDir)\n\t\t\t}\n\t\t\tif _, err := removeEmptyDirs(projectDstDir); err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"failed to remove empty directories in destination %s\", projectDstDir)\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to stat %s\", projectDstDir)\n\t\t}\n\n\t\tprojectDstDirImport, err := build.ImportDir(projectDstDir, build.FindOnly)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"unable to import project destination directory %s\", projectDstDir)\n\t\t}\n\t\tprojectDstDirImportPath := projectDstDirImport.ImportPath\n\n\t\t// rewrite imports for all files in copied directory\n\t\tfileSet := token.NewFileSet()\n\t\tfoundMain := false\n\t\tgoFiles := make(map[string]*ast.File)\n\n\t\tflagPkgImported := false\n\t\tif err := filepath.Walk(projectDstDir, func(currPath string, currInfo os.FileInfo, err error) error {\n\t\t\tif !currInfo.IsDir() && strings.HasSuffix(currInfo.Name(), \".go\") {\n\t\t\t\tfileNode, err := parser.ParseFile(fileSet, currPath, nil, parser.ParseComments)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.Wrapf(err, \"failed to parse file %s\", currPath)\n\t\t\t\t}\n\t\t\t\tgoFiles[currPath] = fileNode\n\n\t\t\t\tfor _, currImport := range fileNode.Imports {\n\t\t\t\t\tcurrImportPathUnquoted, err := strconv.Unquote(currImport.Path.Value)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn errors.Wrapf(err, \"unable to unquote import %s\", 
currImport.Path.Value)\n\t\t\t\t\t}\n\n\t\t\t\t\tupdatedImport := \"\"\n\t\t\t\t\tif currImportPathUnquoted == \"flag\" {\n\t\t\t\t\t\tflagPkgImported = true\n\t\t\t\t\t\tupdatedImport = path.Join(projectDstDirImportPath, \"amalgomated_flag\")\n\t\t\t\t\t} else if strings.HasPrefix(currImportPathUnquoted, projectImportPath) {\n\t\t\t\t\t\tupdatedImport = strings.Replace(currImportPathUnquoted, projectImportPath, projectDstDirImportPath, -1)\n\t\t\t\t\t}\n\n\t\t\t\t\tif updatedImport != \"\" {\n\t\t\t\t\t\tif !astutil.RewriteImport(fileSet, fileNode, currImportPathUnquoted, updatedImport) {\n\t\t\t\t\t\t\treturn errors.Errorf(\"failed to rewrite import from %s to %s\", currImportPathUnquoted, updatedImport)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tremoveImportPathChecking(fileNode)\n\n\t\t\t\t// change package name for main packages\n\t\t\t\tif fileNode.Name.Name == \"main\" {\n\t\t\t\t\tfileNode.Name = ast.NewIdent(amalgomatedPackage)\n\n\t\t\t\t\t// find the main function\n\t\t\t\t\tmainFunc := findFunction(fileNode, \"main\")\n\t\t\t\t\tif mainFunc != nil {\n\t\t\t\t\t\terr = renameFunction(fileNode, \"main\", amalgomatedMain)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn errors.Wrapf(err, \"failed to rename function in file %s\", currPath)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfoundMain = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !foundMain {\n\t\t\treturn errors.Errorf(\"main method not found in package %s\", currPkg.MainPkg)\n\t\t}\n\n\t\tif flagPkgImported {\n\t\t\t// if \"flag\" package is imported, add \"flag\" as a rewritten vendored dependency. This is done\n\t\t\t// because flag.CommandLine is a global variable that is often used by programs and problems can\n\t\t\t// arise if multiple amalgomated programs use it. A custom rewritten import is used rather than\n\t\t\t// vendoring so that the amalgomated program can itself be vendored.\n\t\t\tgoRoot, err := dirs.GoRoot()\n\t\t\tif err != nil {\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\t\t\tfmtSrcDir := path.Join(goRoot, \"src\", \"flag\")\n\t\t\tfmtDstDir := path.Join(projectDstDir, \"amalgomated_flag\")\n\t\t\tif err := shutil.CopyTree(fmtSrcDir, fmtDstDir, vendorCopyOptions(currPkg.OmitVendorDirs)); err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"failed to copy directory %s to %s\", fmtSrcDir, fmtDstDir)\n\t\t\t}\n\t\t\tif _, err := removeEmptyDirs(fmtDstDir); err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"failed to remove empty directories in destination %s\", fmtDstDir)\n\t\t\t}\n\t\t}\n\n\t\tfor currGoFile, currNode := range goFiles {\n\t\t\tif err = writeAstToFile(currGoFile, currNode, fileSet); err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"failed to write rewritten file %s\", config)\n\t\t\t}\n\t\t}\n\n\t\tprocessedPkgs[currPkg] = true\n\t}\n\treturn nil\n}",
"func (s *srkServer) Package(chunks srkproto.FunctionService_PackageServer) error {\n\n\tmeta, ok := metadata.FromIncomingContext(chunks.Context())\n\tif !ok {\n\t\treturn errors.New(\"Failed to parse metadata\")\n\t}\n\n\trawName, ok := meta[\"name\"]\n\tif !ok {\n\t\treturn errors.New(\"Metadata option \\\"name\\\" is required\")\n\t}\n\tname := rawName[0]\n\n\tincludes := meta[\"includes\"]\n\n\t// Unpack the uploaded file to a temporary location\n\ttdir, err := ioutil.TempDir(\"\", \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.RemoveAll(tdir)\n\n\tfuncReader := &pbReader{chunks: chunks}\n\t_, err = srk.UntarStream(funcReader, tdir)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not unpack received tar file\")\n\t}\n\n\t// Package the function\n\trawDir := s.mgr.GetRawPath(name)\n\n\tif err := s.mgr.CreateRaw(tdir, name, includes, nil); err != nil {\n\t\treturn errors.Wrap(err, \"Packaging function failed\")\n\t}\n\ts.mgr.Logger.Info(\"Created raw function: \" + rawDir)\n\n\tpkgPath, err := s.mgr.Provider.Faas.Package(rawDir)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Packaing failed\")\n\t}\n\ts.mgr.Logger.Info(\"Package created at: \" + pkgPath)\n\n\treturn nil\n}",
"func (ArchLinux) Package(info *nfpm.Info, w io.Writer) error {\n\tif err := info.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tif !nameIsValid(info.Name) {\n\t\treturn ErrInvalidPkgName\n\t}\n\n\tzw, err := zstd.NewWriter(w)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer zw.Close()\n\n\ttw := tar.NewWriter(zw)\n\tdefer tw.Close()\n\n\tentries, totalSize, err := createFilesInTar(info, tw)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpkginfoEntry, err := createPkginfo(info, tw, totalSize)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// .PKGINFO must be the first entry in .MTREE\n\tentries = append([]MtreeEntry{*pkginfoEntry}, entries...)\n\n\terr = createMtree(info, tw, entries)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn createScripts(info, tw)\n}",
"func doPackage(files []*File, astFiles []*ast.File, fset *token.FileSet) *Package {\n\n\tif len(astFiles) == 0 {\n\t\treturn nil\n\t}\n\tpkg := new(Package)\n\tpkg.path = astFiles[0].Name.Name\n\tpkg.files = files\n\tpkg.types = info.Types\n\tpkg.defs = info.Defs\n\tpkg.selectors = info.Selections\n\tpkg.uses = info.Uses\n\tpkg.collFieldTypes = make(map[string]string)\n\t// Type check the package.\n\tconf := &types.Config{\n\t\tError: func(e error) {\n\t\t\tfmt.Println(\"failed to typecheck: \", e)\n\t\t\tos.Exit(1)\n\t\t},\n\t\tImporter: importer.Default(),\n\t}\n\t_, err := conf.Check(pkg.path, fset, astFiles, &info)\n\tif err != nil {\n\t\tfmt.Printf(\"33 ========== %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tfor _, file := range files {\n\t\tfile.pkg = pkg\n\t\tif file.file != nil {\n\t\t\tfile.walkFile(file.name, file.file)\n\t\t}\n\t}\n\tfmt.Println(\"results:\")\n\tfor k, v := range pkg.collFieldTypes {\n\t\tfmt.Printf(\"k: %s, v: %s\\n\", k, v)\n\t}\n\n\treturn pkg\n}",
"func envParamsFileProcess(importPath, paramsPath, importEnvironment string) error {\n\tapiParams, err := params.LoadApiParamsFromFile(paramsPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// check whether import environment is included in params configuration\n\tenvParams := apiParams.GetEnv(importEnvironment)\n\tif envParams == nil {\n\t\treturn errors.New(\"Environment '\" + importEnvironment + \"' does not exist in \" + paramsPath)\n\t} else {\n\n\t\t// Create a source directory and add source content to it and then zip it\n\t\tsourceFilePath := filepath.Join(importPath, \"SourceArchive\")\n\t\terr = utils.MoveDirectoryContentsToNewDirectory(importPath, sourceFilePath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr, cleanupFunc := utils.CreateZipFile(sourceFilePath, false)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t//cleanup the temporary artifacts once consuming the zip file\n\t\tif cleanupFunc != nil {\n\t\t\tdefer cleanupFunc()\n\t\t}\n\t\t//If environment parameters are present in parameter file\n\t\terr = handleEnvParams(importPath, importPath, envParams)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func ComposePackage(repo *util.Repo, extraDependencies []string, imageSize int64, updatePackage, verbose, pullMissing bool,\n\tpackageDir, appName string, bootOpts *BootOptions, filesystem string, loaderImage string) error {\n\n\t// Package content should be collected in a subdirectory called mpm-pkg.\n\ttargetPath := filepath.Join(packageDir, \"mpm-pkg\")\n\t// Remove collected directory afterwards.\n\tdefer os.RemoveAll(targetPath)\n\n\t// Construct final bootcmd for the image.\n\tcommandLine, err := bootOpts.GetCmd()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// First, collect the contents of the package.\n\tif err := CollectPackage(repo, packageDir, extraDependencies, pullMissing, false, verbose); err != nil {\n\t\treturn err\n\t}\n\n\t// If all is well, we have to start preparing the files for upload.\n\tpaths, err := CollectDirectoryContents(targetPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Get the path of imported image.\n\timagePath := repo.ImagePath(\"qemu\", appName)\n\t// Check whether the image already exists.\n\timageExists := false\n\tif _, err = os.Stat(imagePath); !os.IsNotExist(err) {\n\t\timageExists = true\n\t}\n\n\tif filesystem == \"zfs\" {\n\t\timageCachePath := repo.ImageCachePath(\"qemu\", appName)\n\t\tvar imageCache core.HashCache\n\t\tzfsBuilderPath := \"\"\n\n\t\t// If the user requested new image or requested to update a non-existent image,\n\t\t// initialize it first.\n\t\tif !updatePackage || !imageExists {\n\t\t\tzfsBuilderPath, err = repo.GetZfsBuilderImagePath()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to find ZFS builder path.\\nError was: %s\", err)\n\t\t\t}\n\t\t\t// Initialize an empty image based on the provided loader image. imageSize is used to\n\t\t\t// determine the size of the user partition. Use default loader image.\n\t\t\tif err := repo.InitializeZfsImage(loaderImage, appName, imageSize); err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to initialize empty image named %s.\\nError was: %s\", appName, err)\n\t\t\t}\n\t\t} else {\n\t\t\t// We are updating an existing image so try to parse the cache\n\t\t\t// config file. Note that we are not interested in any errors as\n\t\t\t// no-cache or invalid cache means that all files will be uploaded.\n\t\t\timageCache, _ = core.ParseHashCache(imageCachePath)\n\t\t}\n\n\t\t// Upload the specified path onto virtual image.\n\t\timageCache, err = UploadPackageContents(repo, imagePath, paths, imageCache, verbose, zfsBuilderPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Save the new image cache\n\t\timageCache.WriteToFile(imageCachePath)\n\t} else {\n\t\t// Create ROFS\n\t\t// Create temporary folder in which the image will be composed.\n\t\ttmp, _ := ioutil.TempDir(\"\", \"capstan\")\n\t\t// Once this function is finished, remove temporary file.\n\t\tdefer os.RemoveAll(tmp)\n\t\trofs_image_path := path.Join(tmp, \"rofs.img\")\n\n\t\tif err := util.WriteRofsImage(rofs_image_path, paths, targetPath, verbose); err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to write ROFS image named %s.\\nError was: %s\", rofs_image_path, err)\n\t\t}\n\n\t\tif err = repo.CreateRofsImage(loaderImage, appName, rofs_image_path); err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to create ROFS image named %s.\\nError was: %s\", appName, err)\n\t\t}\n\t}\n\n\t// Set the command line.\n\tif err = util.SetCmdLine(imagePath, commandLine); err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(\"Command line set to: '%s'\\n\", commandLine)\n\n\treturn nil\n}",
"func genJavaPackageFile(pkg *compile.Package, env *compile.Env) *JavaFileInfo {\n\tgenerated := false\n\tfor _, file := range pkg.Files {\n\t\tif file.PackageDef.Doc != \"\" {\n\t\t\tif generated {\n\t\t\t\tlog.Printf(\"WARNING: Multiple vdl files with package documentation. One will be overwritten.\")\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tdata := struct {\n\t\t\t\tDoc string\n\t\t\t\tFileDoc string\n\t\t\t\tPackagePath string\n\t\t\t\tSource string\n\t\t\t}{\n\t\t\t\tDoc: javaDoc(file.PackageDef.Doc, file.PackageDef.DocSuffix),\n\t\t\t\tFileDoc: pkg.FileDoc,\n\t\t\t\tPackagePath: javaPath(javaGenPkgPath(pkg.GenPath)),\n\t\t\t\tSource: javaFileNames(pkg.Files),\n\t\t\t}\n\t\t\tvar buf bytes.Buffer\n\t\t\terr := parseTmpl(\"package\", packageTmpl).Execute(&buf, data)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"vdl: couldn't execute package template: %v\", err)\n\t\t\t}\n\t\t\treturn &JavaFileInfo{\n\t\t\t\tName: \"package-info.java\",\n\t\t\t\tData: buf.Bytes(),\n\t\t\t}\n\t\t}\n\t\tgenerated = true\n\t}\n\treturn nil\n}",
"func (b Builder) CreatePackage() error {\n\tvar (\n\t\tpfile = PackagePath(b.Config, b.Plan)\n\t)\n\n\tb.Config.Repo.Ensure()\n\n\tfd, err := os.Create(pfile)\n\tif err != nil {\n\t\telog.Println(err)\n\t\treturn err\n\t}\n\tdefer fd.Close()\n\tgz := gzip.NewWriter(fd)\n\tdefer gz.Close()\n\treturn b.Tarball(gz)\n}",
"func GenerateWailsFrontendPackage() (*ParserReport, error) {\n\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp := NewParser()\n\n\terr = p.ParseProject(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = p.generateModule()\n\n\treturn p.parserReport(), err\n}",
"func (module *SdkLibrary) createXmlFile(mctx android.DefaultableHookContext) {\n\tprops := struct {\n\t\tName *string\n\t\tLib_name *string\n\t\tSoc_specific *bool\n\t\tDevice_specific *bool\n\t\tProduct_specific *bool\n\t\tSystem_ext_specific *bool\n\t\tApex_available []string\n\t}{\n\t\tName: proptools.StringPtr(module.xmlFileName()),\n\t\tLib_name: proptools.StringPtr(module.BaseModuleName()),\n\t\tApex_available: module.ApexProperties.Apex_available,\n\t}\n\n\tif module.SocSpecific() {\n\t\tprops.Soc_specific = proptools.BoolPtr(true)\n\t} else if module.DeviceSpecific() {\n\t\tprops.Device_specific = proptools.BoolPtr(true)\n\t} else if module.ProductSpecific() {\n\t\tprops.Product_specific = proptools.BoolPtr(true)\n\t} else if module.SystemExtSpecific() {\n\t\tprops.System_ext_specific = proptools.BoolPtr(true)\n\t}\n\n\tmctx.CreateModule(sdkLibraryXmlFactory, &props)\n}",
"func (pkgCtx *PackageContext) ReadPackage() {\n\t// Read & Normalize pkg files\n\tfiles, err := ioutil.ReadDir(pkgCtx.Filepath)\n\tif err != nil {\n\t\tpkgCtx.Error = err\n\t\treturn\n\t}\n\tfor _, finfo := range files {\n\t\tpkgCtx.Wg.Add(1)\n\t\tgo func(finfo os.FileInfo) {\n\t\t\tdefer pkgCtx.Wg.Done()\n\n\t\t\tif finfo.IsDir() {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !IsGoFile(finfo) {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tpath := filepath.Join(pkgCtx.Filepath, finfo.Name())\n\t\t\tpathFromPkgDir, err := filepath.Rel(pkgCtx.Filepath, path)\n\t\t\tif err != nil {\n\t\t\t\tpkgCtx.Error = err\n\t\t\t\treturn\n\t\t\t}\n\t\t\toutpath := filepath.Join(pkgCtx.SrcDir, pkgCtx.Importpath, pathFromPkgDir)\n\n\t\t\tin, err := os.Open(path)\n\t\t\tif err != nil {\n\t\t\t\tpkgCtx.Error = err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer in.Close()\n\n\t\t\tfile, err := parser.ParseFile(pkgCtx.OriginalFset, path, in, 0)\n\t\t\tif err != nil {\n\t\t\t\tpkgCtx.Error = err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tout := bytes.NewBuffer([]byte{})\n\t\t\tdefer func() {\n\t\t\t\tpkgCtx.GoFiles = append(pkgCtx.GoFiles, &GoFile{\n\t\t\t\t\tPath: outpath,\n\t\t\t\t\tOriginal: file,\n\t\t\t\t\tNormalized: MustParse(parser.ParseFile(pkgCtx.NormalizedFset, outpath, out, 0)),\n\t\t\t\t\tMode: finfo.Mode(),\n\t\t\t\t\tData: out,\n\t\t\t\t})\n\t\t\t}()\n\t\t\tif !IsTestGoFileName(path) {\n\t\t\t\tin.Seek(0, SeekStart)\n\t\t\t\t_, err := io.Copy(out, in)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpkgCtx.Error = err\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = NormalizeFile(file, in, out)\n\t\t\tif err != nil {\n\t\t\t\tpkgCtx.Error = err\n\t\t\t}\n\t\t\treturn\n\t\t}(finfo)\n\t}\n}",
"func (system *ModuleSystem) Build(packageRoot string, baseDirectory string, physicalGenDir string,\n\tinstance *ModuleInstance, options Options) error {\n\tclassGenerators := system.classes[instance.ClassName]\n\tgenerator := classGenerators.types[instance.ClassType]\n\n\tif generator == nil || instance.PackageInfo.CustomInitialisation {\n\t\tfmt.Fprintf(\n\t\t\tos.Stderr,\n\t\t\t\"Skipping generation of %q %q class of type %q \"+\n\t\t\t\t\"as generator is not defined\\n\",\n\t\t\tinstance.InstanceName,\n\t\t\tinstance.ClassName,\n\t\t\tinstance.ClassType,\n\t\t)\n\t\treturn nil\n\t}\n\tbuildResult, err := generator.Generate(instance)\n\n\tif err != nil {\n\t\tfmt.Fprintf(\n\t\t\tos.Stderr,\n\t\t\t\"Error generating %q %q of type %q:\\n%s\\n\",\n\t\t\tinstance.InstanceName,\n\t\t\tinstance.ClassName,\n\t\t\tinstance.ClassType,\n\t\t\terr.Error(),\n\t\t)\n\t\treturn err\n\t}\n\n\tif buildResult == nil {\n\t\treturn nil\n\t}\n\tinstance.mu.Lock()\n\tinstance.genSpec = buildResult.Spec\n\tinstance.mu.Unlock()\n\tif !options.CommitChange {\n\t\treturn nil\n\t}\n\trunner := parallelize.NewUnboundedRunner(len(buildResult.Files))\n\tfor filePath, content := range buildResult.Files {\n\t\tf := func(filePathInf interface{}, contentInf interface{}) (interface{}, error) {\n\n\t\t\tfilePath := filePathInf.(string)\n\t\t\tcontent := contentInf.([]byte)\n\n\t\t\tfilePath = filepath.Clean(filePath)\n\n\t\t\tresolvedPath := filepath.Join(\n\t\t\t\tphysicalGenDir,\n\t\t\t\tfilePath,\n\t\t\t)\n\n\t\t\tif err := writeFile(resolvedPath, content); err != nil {\n\t\t\t\treturn nil, errors.Wrapf(\n\t\t\t\t\terr,\n\t\t\t\t\t\"Error writing to file %q\",\n\t\t\t\t\tresolvedPath,\n\t\t\t\t)\n\t\t\t}\n\n\t\t\t// HACK: The module system writer shouldn't\n\t\t\t// assume that we want to format the files in\n\t\t\t// this way, but we don't have these formatters\n\t\t\t// as a library or a custom post build script\n\t\t\t// for the generators yet.\n\t\t\tif filepath.Ext(filePath) == \".go\" {\n\t\t\t\tif err := FormatGoFile(resolvedPath); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil, nil\n\t\t}\n\n\t\twrk := ¶llelize.TwoParamWork{Data1: filePath, Data2: content, Func: f}\n\t\trunner.SubmitWork(wrk)\n\t}\n\n\t_, err = runner.GetResult()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func ProcessMetaFiles(metadataFiles []string, targetDir string, stepHelperData StepHelperData) error {\n\n\tallSteps := struct{ Steps []string }{}\n\tfor key := range metadataFiles {\n\n\t\tvar stepData config.StepData\n\n\t\tconfigFilePath := metadataFiles[key]\n\n\t\tmetadataFile, err := stepHelperData.OpenFile(configFilePath)\n\t\tcheckError(err)\n\t\tdefer metadataFile.Close()\n\n\t\tfmt.Printf(\"Reading file %v\\n\", configFilePath)\n\n\t\terr = stepData.ReadPipelineStepData(metadataFile)\n\t\tcheckError(err)\n\n\t\tstepName := stepData.Metadata.Name\n\t\tfmt.Printf(\"Step name: %v\\n\", stepName)\n\t\tif stepName+\".yaml\" != filepath.Base(configFilePath) {\n\t\t\tfmt.Printf(\"Expected file %s to have name %s.yaml (<stepName>.yaml)\\n\", configFilePath, filepath.Join(filepath.Dir(configFilePath), stepName))\n\t\t\tos.Exit(1)\n\t\t}\n\t\tallSteps.Steps = append(allSteps.Steps, stepName)\n\n\t\tfor _, parameter := range stepData.Spec.Inputs.Parameters {\n\t\t\tfor _, mandatoryIfCase := range parameter.MandatoryIf {\n\t\t\t\tif mandatoryIfCase.Name == \"\" || mandatoryIfCase.Value == \"\" {\n\t\t\t\t\treturn errors.New(\"invalid mandatoryIf option\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tosImport := false\n\t\tosImport, err = setDefaultParameters(&stepData)\n\t\tcheckError(err)\n\n\t\tmyStepInfo, err := getStepInfo(&stepData, osImport, stepHelperData.ExportPrefix)\n\t\tcheckError(err)\n\n\t\tstep := stepTemplate(myStepInfo, \"step\", stepGoTemplate)\n\t\terr = stepHelperData.WriteFile(filepath.Join(targetDir, fmt.Sprintf(\"%v_generated.go\", stepName)), step, 0644)\n\t\tcheckError(err)\n\n\t\ttest := stepTemplate(myStepInfo, \"stepTest\", stepTestGoTemplate)\n\t\terr = stepHelperData.WriteFile(filepath.Join(targetDir, fmt.Sprintf(\"%v_generated_test.go\", stepName)), test, 0644)\n\t\tcheckError(err)\n\n\t\texists, _ := piperutils.FileExists(filepath.Join(targetDir, fmt.Sprintf(\"%v.go\", stepName)))\n\t\tif !exists {\n\t\t\timpl := stepImplementation(myStepInfo, \"impl\", stepGoImplementationTemplate)\n\t\t\terr = stepHelperData.WriteFile(filepath.Join(targetDir, fmt.Sprintf(\"%v.go\", stepName)), impl, 0644)\n\t\t\tcheckError(err)\n\t\t}\n\n\t\texists, _ = piperutils.FileExists(filepath.Join(targetDir, fmt.Sprintf(\"%v_test.go\", stepName)))\n\t\tif !exists {\n\t\t\timpl := stepImplementation(myStepInfo, \"implTest\", stepGoImplementationTestTemplate)\n\t\t\terr = stepHelperData.WriteFile(filepath.Join(targetDir, fmt.Sprintf(\"%v_test.go\", stepName)), impl, 0644)\n\t\t\tcheckError(err)\n\t\t}\n\t}\n\n\t// expose metadata functions\n\tcode := generateCode(allSteps, \"metadata\", metadataGeneratedTemplate, sprig.HermeticTxtFuncMap())\n\terr := stepHelperData.WriteFile(filepath.Join(targetDir, metadataGeneratedFileName), code, 0644)\n\tcheckError(err)\n\n\treturn nil\n}",
"func (o *Data) PostProcess(dir, fn string, erasefiles bool) {\n\to.FnameDir = os.ExpandEnv(dir)\n\to.FnameKey = io.FnKey(fn)\n\tif o.DirOut == \"\" {\n\t\to.DirOut = \"/tmp/gofem/\" + o.FnameKey\n\t}\n\tif o.Encoder != \"gob\" && o.Encoder != \"json\" {\n\t\to.Encoder = \"gob\"\n\t}\n\terr := os.MkdirAll(o.DirOut, 0777)\n\tif err != nil {\n\t\tchk.Panic(\"cannot create directory for output results (%s): %v\", o.DirOut, err)\n\t}\n\tif erasefiles {\n\t\tio.RemoveAll(io.Sf(\"%s/%s_*.vtu\", o.DirOut, o.FnameKey))\n\t\tio.RemoveAll(io.Sf(\"%s/%s_*.log\", o.DirOut, o.FnameKey))\n\t\tio.RemoveAll(io.Sf(\"%s/%s_*.gob\", o.DirOut, o.FnameKey))\n\t\tio.RemoveAll(io.Sf(\"%s/%s_*.json\", o.DirOut, o.FnameKey))\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
InstallPackageFile executes the SOAP call to install the driver and returns the output
|
func InstallPackageFile(file *model.DriverFile, environments *model.Environments, soapFunc util.Soap) model.Output {
    output := model.Output{Code: constant.OutputSuccess, Debug: constant.Undefined}
    // Make sure the debug folder for this file type exists before writing to it.
    util.ValidateFolder(constant.FolderDebug + file.Type + util.GetPathFolder(file.Path))
    file.InitXML(constant.Write, constant.FolderWrite)
    // Wrap the configured tags in CDATA sections when the driver file defines them.
    iniTagRegexpStr, endTagRegexpStr := file.TagCDATA()
    if iniTagRegexpStr != constant.Undefined && endTagRegexpStr != constant.Undefined {
        responseString := transform.IncludeCDATA(file.GetXML(), iniTagRegexpStr, endTagRegexpStr)
        file.SetXML(responseString)
    }
    // Execute the write XOG (SOAP) call; the response replaces the file's XML.
    // The error was previously discarded; surface it instead of validating a
    // response that never arrived. (constant.OutputError is assumed to exist
    // alongside constant.OutputSuccess.)
    if err := file.RunXML(constant.Write, constant.FolderWrite, environments, soapFunc); err != nil {
        output.Code = constant.OutputError
        output.Debug = err.Error()
        return output
    }
    // Parse and validate the XOG response before persisting the debug copy.
    xogResponse := etree.NewDocument()
    if err := xogResponse.ReadFromString(file.GetXML()); err != nil {
        output.Code = constant.OutputError
        output.Debug = err.Error()
        return output
    }
    output, err := validate.Check(xogResponse)
    if err != nil {
        return output
    }
    file.Write(constant.FolderDebug)
    return output
}
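
For orientation, a minimal usage sketch of InstallPackageFile follows. It is hypothetical: Path and Type are the only DriverFile fields the function above actually references, the literal values given to them are invented, and soapFunc stands in for whatever util.Soap implementation the real tool wires up.

// Hypothetical caller for InstallPackageFile (a sketch, not project code).
var soapFunc util.Soap // the project's real SOAP client function would be assigned here
file := &model.DriverFile{Path: "package_driver.xml", Type: "packages"} // assumed values
envs := &model.Environments{} // normally loaded from the tool's configuration (assumed)

out := InstallPackageFile(file, envs, soapFunc)
if out.Code != constant.OutputSuccess {
    fmt.Printf("install failed: %s\n", out.Debug)
}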
|
[
"func InstallMSIPackage(_ context.Context, _ string, _ []string) error {\n\treturn nil\n}",
"func (s *Step) Execute() error {\n\tinstallPackage := s.PackageName\n\n\tif s.source != nil {\n\t\tdeb, err := s.findDEB()\n\t\tif err != nil {\n\t\t\ts.logger.Errorf(\"failed to discover DEB package\")\n\t\t\treturn err\n\t\t}\n\t\tinstallPackage = filepath.Join(s.downloadPath, deb)\n\t}\n\n\tinstalled := false\n\tif s.Version != \"\" {\n\t\tif err := s.checkInstalled(); err != nil {\n\t\t\tswitch err.(type) {\n\t\t\tcase *exec.ExitError:\n\t\t\t\ts.logger.Infof(\"dpkg-query exited with code %d\", err.(*exec.ExitError).ExitCode())\n\t\t\t\tinstalled = false\n\t\t\tcase *go2chef.ErrChefAlreadyInstalled:\n\t\t\t\ts.logger.Infof(\"%s\", err)\n\t\t\t\tinstalled = true\n\t\t\t}\n\t\t}\n\t}\n\n\tif !installed {\n\t\treturn s.installChef(installPackage)\n\t}\n s.logger.Infof(\"%s specified is already installed, not reinstalling\", installPackage)\n\treturn nil\n}",
"func (s *Step) Execute() error {\n\t// If this is a DMG, go down the rabbit hole. Mount it and\n\t// then set downloadPath to its mount point.\n\tif s.IsDMG {\n\t\ts.logger.WriteEvent(go2chef.NewEvent(\"INSTALL_PKG_DMG_MOUNT\", s.Name(), \"mounting DMG\"))\n\t\tdmg, err := s.findDMG()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := s.mountDMG(filepath.Join(s.downloadPath, dmg)); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// unmount and emit events\n\t\tdefer func() {\n\t\t\tif err := s.unmountDMG(); err != nil {\n\t\t\t\ts.logger.WriteEvent(go2chef.NewEvent(\"INSTALL_PKG_DMG_UNMOUNT_FAILED\", s.Name(), \"unmounting DMG failed!\"))\n\n\t\t\t}\n\t\t\ts.logger.WriteEvent(go2chef.NewEvent(\"INSTALL_PKG_DMG_UNMOUNT\", s.Name(), \"unmounting DMG\"))\n\t\t}()\n\t}\n\n\tpkg, err := s.findPKG()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinstCtx, cancel := context.WithTimeout(context.Background(), time.Duration(s.InstallerTimeoutSeconds)*time.Second)\n\tdefer cancel()\n\n\tcmd := exec.CommandContext(instCtx, \"installer\", \"-verbose\", \"-pkg\", filepath.Join(s.downloadPath, pkg), \"-target\", \"/\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\tif err := cmd.Run(); err != nil {\n\t\t// preserve exit error\n\t\txerr := err\n\t\tif exit, ok := xerr.(*exec.ExitError); ok {\n\t\t\ts.logger.Errorf(\"pkg installer exited with code %d\", exit.ExitCode())\n\t\t}\n\t\treturn xerr\n\t}\n\treturn nil\n}",
"func executeConfigurePackage(\n\ttracer trace.Tracer,\n\tcontext context.T,\n\trepository localpackages.Repository,\n\tinst installer.Installer,\n\tuninst installer.Installer,\n\tinitialInstallState localpackages.InstallState,\n\toutput contracts.PluginOutputter) {\n\n\ttrace := tracer.BeginSection(fmt.Sprintf(\"execute configure - state: %s\", initialInstallState))\n\tdefer trace.End()\n\n\tswitch initialInstallState {\n\tcase localpackages.Installing:\n\t\t// This could be picking up an install after reboot or an upgrade that rebooted during install (after a successful uninstall)\n\t\texecuteInstall(tracer, context, repository, inst, uninst, false, output)\n\tcase localpackages.RollbackInstall:\n\t\texecuteInstall(tracer, context, repository, uninst, inst, true, output)\n\tcase localpackages.RollbackUninstall:\n\t\texecuteUninstall(tracer, context, repository, uninst, inst, true, output)\n\tdefault:\n\t\tif uninst != nil {\n\t\t\texecuteUninstall(tracer, context, repository, inst, uninst, false, output)\n\t\t} else {\n\t\t\texecuteInstall(tracer, context, repository, inst, uninst, false, output)\n\t\t}\n\t}\n}",
"func executeInstall(\n\ttracer trace.Tracer,\n\tcontext context.T,\n\trepository localpackages.Repository,\n\tinst installer.Installer,\n\tuninst installer.Installer,\n\tisRollback bool,\n\toutput contracts.PluginOutputter) {\n\n\tinstalltrace := tracer.BeginSection(fmt.Sprintf(\"install %s/%s - rollback: %t\", inst.PackageName(), inst.Version(), isRollback))\n\tdefer installtrace.End()\n\n\tif isRollback {\n\t\tsetNewInstallState(tracer, repository, inst, localpackages.RollbackInstall)\n\t} else {\n\t\tsetNewInstallState(tracer, repository, inst, localpackages.Installing)\n\t}\n\n\tresult := inst.Install(tracer, context)\n\n\tinstalltrace.WithExitcode(int64(result.GetExitCode()))\n\n\tif result.GetStatus() == contracts.ResultStatusSuccess {\n\t\tvalidatetrace := tracer.BeginSection(fmt.Sprintf(\"validate %s/%s - rollback: %t\", inst.PackageName(), inst.Version(), isRollback))\n\t\tresult = inst.Validate(tracer, context)\n\t\tvalidatetrace.WithExitcode(int64(result.GetExitCode()))\n\t}\n\tif result.GetStatus().IsReboot() {\n\t\ttracer.BeginSection(fmt.Sprintf(\"Rebooting to finish installation of %v %v - rollback: %t\", inst.PackageName(), inst.Version(), isRollback))\n\t\toutput.MarkAsSuccessWithReboot()\n\t\treturn\n\t}\n\tif !result.GetStatus().IsSuccess() {\n\t\tinstalltrace.AppendErrorf(\"Failed to install package; install status %v\", result.GetStatus())\n\t\tif isRollback || uninst == nil {\n\t\t\toutput.MarkAsFailed(nil, nil)\n\t\t\t// TODO: Remove from repository if this isn't the last successfully installed version? Run uninstall to clean up?\n\t\t\tsetNewInstallState(tracer, repository, inst, localpackages.Failed)\n\t\t\treturn\n\t\t}\n\t\t// Execute rollback\n\t\texecuteUninstall(tracer, context, repository, uninst, inst, true, output)\n\t\treturn\n\t}\n\tif uninst != nil {\n\t\tcleanupAfterUninstall(tracer, repository, uninst, output)\n\t}\n\tif isRollback {\n\t\tinstalltrace.AppendInfof(\"Failed to install %v %v, successfully rolled back to %v %v\", uninst.PackageName(), uninst.Version(), inst.PackageName(), inst.Version())\n\t\tsetNewInstallState(tracer, repository, inst, localpackages.Installed)\n\t\toutput.MarkAsFailed(nil, nil)\n\t\treturn\n\t}\n\tinstalltrace.AppendInfof(\"Successfully installed %v %v\", inst.PackageName(), inst.Version())\n\tsetNewInstallState(tracer, repository, inst, localpackages.Installed)\n\toutput.MarkAsSucceeded()\n\treturn\n}",
"func ProcessPackageFile(file *model.DriverFile, selectedVersion *model.Version, packageFolder, writeFolder string, environments *model.Environments, soapFunc util.Soap) model.Output {\n\tif file.PackageTransform && file.NeedPackageTransform() {\n\t\tfile.InitXML(constant.Read, constant.Undefined)\n\t\tfile.RunAuxXML(environments.Target, soapFunc)\n\t}\n\n\treturn transform.ProcessPackageFile(file, packageFolder, writeFolder, selectedVersion.Definitions)\n}",
"func (gc *GatewayClient) UploadPackages(filePaths []string) (*types.GatewayResponse, error) {\n\tvar gatewayResponse types.GatewayResponse\n\n\tbody := &bytes.Buffer{}\n\twriter := multipart.NewWriter(body)\n\n\tfor _, filePath := range filePaths {\n\n\t\tinfo, err := os.Stat(filePath)\n\n\t\tif err != nil {\n\t\t\treturn &gatewayResponse, err\n\t\t}\n\n\t\tif !info.IsDir() && (strings.HasSuffix(filePath, \".tar\") || strings.HasSuffix(filePath, \".rpm\")) {\n\n\t\t\tfile, filePathError := os.Open(path.Clean(filePath))\n\t\t\tif filePathError != nil {\n\t\t\t\treturn &gatewayResponse, filePathError\n\t\t\t}\n\n\t\t\tpart, fileReaderError := writer.CreateFormFile(\"files\", path.Base(filePath))\n\t\t\tif fileReaderError != nil {\n\t\t\t\treturn &gatewayResponse, fileReaderError\n\t\t\t}\n\t\t\t_, fileContentError := io.Copy(part, file)\n\t\t\tif fileContentError != nil {\n\t\t\t\treturn &gatewayResponse, fileContentError\n\t\t\t}\n\t\t} else {\n\t\t\treturn &gatewayResponse, fmt.Errorf(\"invalid file type, please provide valid file type\")\n\t\t}\n\t}\n\n\tfileWriterError := writer.Close()\n\tif fileWriterError != nil {\n\t\treturn &gatewayResponse, fileWriterError\n\t}\n\n\treq, httpError := http.NewRequest(\"POST\", gc.host+\"/im/types/installationPackages/instances/actions/uploadPackages\", body)\n\tif httpError != nil {\n\t\treturn &gatewayResponse, httpError\n\t}\n\treq.Header.Set(\"Content-Type\", writer.FormDataContentType())\n\treq.Header.Set(\"Authorization\", \"Basic \"+base64.StdEncoding.EncodeToString([]byte(gc.username+\":\"+gc.password)))\n\tclient := gc.http\n\tresponse, httpRespError := client.Do(req)\n\n\tif httpRespError != nil {\n\t\treturn &gatewayResponse, httpRespError\n\t}\n\n\tif response.StatusCode != 200 {\n\t\tresponseString, _ := extractString(response)\n\n\t\terr := json.Unmarshal([]byte(responseString), &gatewayResponse)\n\n\t\tif err != nil {\n\t\t\treturn &gatewayResponse, fmt.Errorf(\"Error For Uploading Package: %s\", err)\n\t\t}\n\n\t\treturn &gatewayResponse, fmt.Errorf(\"Error For Uploading Package: %s\", gatewayResponse.Message)\n\t}\n\n\tgatewayResponse.StatusCode = 200\n\n\treturn &gatewayResponse, nil\n}",
"func (ctl *Controller) Install(handler string, pkgs ...string) error {\n\tklog.V(2).Infof(\"Installing package(s) %v on handler %v\", pkgs, handler)\n\treturn ctl.handlerDo(PackageHandler.Install, handler, pkgs...)\n}",
"func (m *reportModule) Execute(targets map[string]pgs.File, pkgs map[string]pgs.Package) []pgs.Artifact {\n\tbuf := &bytes.Buffer{}\n\n\t// firstly, understanding what to generate - servicemodel (E2SM) or E2AP\n\tsm, _ := m.Parameters().Bool(\"sm\")\n\n\tdir, err := os.Getwd()\n\t// handle err\n\tif err != nil {\n\t\treturn nil\n\t}\n\t//printFiles(path)\n\t_, err = fmt.Fprintf(buf, \"Working directory is %v\\n\", dir)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tfiles, err := os.ReadDir(dir)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tfor _, file := range files {\n\t\t_, err = fmt.Fprintf(buf, \"Found file %v\\n\", file.Name())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// creating structure to generate encoder package\n\tenc := make([]encoder, 0)\n\tloggerPresence := true\n\n\t//creating structure to generate servicemodel package\n\tsmodel := servicemodel{}\n\n\tif sm {\n\t\t_, err = fmt.Fprintf(buf, \"Gathering data for encoder and servicemodel packages generation\\n\")\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tsmBasicInfoFilled := false\n\t\tfor _, f := range targets { // Input .proto files\n\t\t\tm.Push(f.Name().String()).Debug(\"reporting\")\n\t\t\t_, err = fmt.Fprintf(buf, \"Leading target comments were found, they are:\\n%v\\n\", f.SourceCodeInfo().LeadingDetachedComments())\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// understanding if canonical choice ordering is present\n\t\t\tcanonicalChoice := canonicalOrderingIsPresent(f.AllMessages())\n\n\t\t\t// looking for a proto path here\n\t\t\tprotoFilePath := lookUpProtoFilePath(dir, f.File().InputPath())\n\n\t\t\tfor _, msg := range f.AllMessages() {\n\t\t\t\t_, err = fmt.Fprintf(buf, \"Message name is %v\\n\", msg.Name().String())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\t// This indicates us that we've found top-level message\n\t\t\t\tif strings.Contains(msg.Name().String(), \"E2Sm\") &&\n\t\t\t\t\t!strings.Contains(msg.Name().String(), \"Format\") && !strings.Contains(msg.Name().String(), \"Item\") {\n\n\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Top-level message was found!! 
It is %v\\n\", msg.Name().String())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\n\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Comments were found, they are:\\n%v\\n\", msg.SourceCodeInfo().LeadingComments())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\n\t\t\t\t\tpdu := encoder{\n\t\t\t\t\t\tProtoName: adjustPackageName(adjustProtoFileName(extractProtoFileName(f.Name().Split()[0])), f.File().InputPath().Dir().String()),\n\t\t\t\t\t\tMessageName: msg.Name().String(),\n\t\t\t\t\t\tMessageNameInLogging: adjustMessageNameForLogging(msg.Name().String()),\n\t\t\t\t\t\tChoiceMapName: adjustMapVariableName(extractPackageName(f.Name().Split()[0])) + \"Choicemap\",\n\t\t\t\t\t\tCanonicalChoiceMapName: adjustMapVariableName(extractPackageName(f.Name().Split()[0])) + \"CanonicalChoicemap\",\n\t\t\t\t\t\tCanonicalChoicePresence: canonicalChoice,\n\t\t\t\t\t\tParameters: lookUpMessageParameters(msg.SourceCodeInfo().LeadingComments()),\n\t\t\t\t\t}\n\t\t\t\t\tif loggerPresence {\n\t\t\t\t\t\tpdu.Logger = true\n\t\t\t\t\t\tloggerPresence = false\n\t\t\t\t\t} else {\n\t\t\t\t\t\tpdu.Logger = false\n\t\t\t\t\t}\n\t\t\t\t\tpdu.Imports = pdu.ProtoName + \" \\\"\" + protoFilePath + \"\\\"\" + \"\\n\"\n\t\t\t\t\tenc = append(enc, pdu)\n\n\t\t\t\t\t// filling in some information about SM\n\t\t\t\t\tsmodel.ParsePdu(msg.Name().String())\n\t\t\t\t\tif !smBasicInfoFilled {\n\t\t\t\t\t\tsmodel.ParseSmData(f.SourceCodeInfo().LeadingDetachedComments())\n\t\t\t\t\t\tsmodel.Imports = smodel.SmName + \" \\\"\" + protoFilePath + \"\\\"\" + \"\\n\"\n\t\t\t\t\t\tsmodel.AddEncoderImport(protoFilePath)\n\t\t\t\t\t\tsmBasicInfoFilled = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// There is only single top-level PDU so far, but leaving it here for future\n\t\tfor _, f := range targets { // Input .proto files\n\t\t\tm.Push(f.Name().String()).Debug(\"reporting\")\n\t\t\t// understanding if canonical choice ordering is present\n\t\t\tcanonicalChoice := canonicalOrderingIsPresent(f.AllMessages())\n\n\t\t\t// looking for a proto path here\n\t\t\tprotoFilePath := lookUpProtoFilePath(dir, f.File().InputPath())\n\n\t\t\tfor _, msg := range f.AllMessages() {\n\t\t\t\t_, err = fmt.Fprintf(buf, \"Message name is %v\\n\", msg.Name().String())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\t// This indicates us that we've found top-level message\n\t\t\t\tif strings.Contains(msg.Name().String(), \"E2ApPdu\") &&\n\t\t\t\t\t!strings.Contains(msg.Name().String(), \"Format\") && !strings.Contains(msg.Name().String(), \"Item\") {\n\n\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Top-level message was found!! 
It is %v\\n\", msg.Name().String())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\t// Stick to the E2AP message\n\t\t\t\t\tpdu := encoder{\n\t\t\t\t\t\tLogger: true,\n\t\t\t\t\t\tProtoName: adjustPackageName(adjustProtoFileName(extractProtoFileName(f.Name().Split()[0])), f.File().InputPath().Dir().String()),\n\t\t\t\t\t\tMessageName: msg.Name().String(),\n\t\t\t\t\t\tMessageNameInLogging: \"E2AP\",\n\t\t\t\t\t\tChoiceMapName: adjustMapVariableName(extractPackageName(f.Name().Split()[0])) + \"Choicemap\",\n\t\t\t\t\t\tCanonicalChoiceMapName: adjustMapVariableName(extractPackageName(f.Name().Split()[0])) + \"CanonicalChoicemap\",\n\t\t\t\t\t\tCanonicalChoicePresence: canonicalChoice,\n\t\t\t\t\t\tParameters: lookUpMessageParameters(msg.SourceCodeInfo().LeadingComments()),\n\t\t\t\t\t}\n\t\t\t\t\tpdu.Imports = pdu.ProtoName + \" \\\"\" + protoFilePath + \"\\\"\" + \"\\n\"\n\t\t\t\t\tenc = append(enc, pdu)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// creating list of all messages and their correspondence to certain .proto file/package\n\ttree := map[string]protoItem{}\n\t_, err = fmt.Fprintf(buf, \"There are multiple .proto files passed at input, building a simple tree of the messages\")\n\tif err != nil {\n\t\treturn nil\n\t}\n\tfor _, f := range targets { // Input .proto files\n\t\t_, err = fmt.Fprintf(buf, \"Proto package name is %v\\n\", f.Package().ProtoName().String())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tpackageName := adjustPackageName(adjustProtoFileName(extractProtoFileName(f.Name().Split()[0])), f.File().InputPath().Dir().String())\n\n\t\t_, err = fmt.Fprintf(buf, \"Iterating over the messages in %v\\n\", f.Package().ProtoName().String())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tfor _, msg := range f.AllMessages() {\n\t\t\t_, err = fmt.Fprintf(buf, \"Message name is %v\\n\", msg.Name().String())\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\titemName := adjustFieldName(msg.Name().String())\n\t\t\tleaf := protoItem{\n\t\t\t\tPackageName: packageName,\n\t\t\t\tProtoFilePath: lookUpProtoFilePath(dir, f.File().InputPath()),\n\t\t\t}\n\n\t\t\t_, err = fmt.Fprintf(buf, \"Message name is %v, oneOf items are %v, non-oneOf items are %v\\n\", msg.Name().String(), len(msg.OneOfs()), len(msg.NonOneOfFields()))\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// avoiding parsing constants && checking if this is a list\n\t\t\tif !strings.Contains(msg.SourceCodeInfo().LeadingComments(), \"constant\") &&\n\t\t\t\tlen(msg.Fields()) == 1 && strings.Contains(strings.ToLower(msg.Name().String()), \"list\") {\n\t\t\t\tleaf.IsList = msg.Fields()[0].Type().IsRepeated()\n\t\t\t}\n\t\t\t// avoiding parsing constants && checking if this is a oneOf (CHOICE)\n\t\t\tif !strings.Contains(msg.SourceCodeInfo().LeadingComments(), \"constant\") &&\n\t\t\t\tlen(msg.OneOfs()) > 0 && len(msg.NonOneOfFields()) == 0 { // it also excludes the case when optional items is interpreted as a oneOf\n\t\t\t\tleaf.IsChoice = true\n\t\t\t}\n\n\t\t\ttree[itemName] = leaf\n\t\t}\n\n\t\t// processing enumerators and adding to the tree\n\t\tfor _, en := range f.AllEnums() { // Constants\n\t\t\tleaf := protoItem{\n\t\t\t\tPackageName: packageName,\n\t\t\t\tProtoFilePath: lookUpProtoFilePath(dir, f.File().InputPath()),\n\t\t\t\tIsEnum: true,\n\t\t\t}\n\t\t\ttree[en.Name().String()] = leaf\n\t\t}\n\t}\n\t_, err = fmt.Fprintf(buf, \"Obtained Protobuf tree:\\n%v\\n\", tree)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\t// gathering data for builder\n\t_, err = 
fmt.Fprintf(buf, \"Gathering data for builder package generation\\n\")\n\tif err != nil {\n\t\treturn nil\n\t}\n\tfor _, f := range targets { // Input .proto files\n\t\tm.Push(f.Name().String()).Debug(\"reporting\")\n\n\t\tpackageName := adjustPackageName(adjustProtoFileName(extractProtoFileName(f.Name().Split()[0])), f.File().InputPath().Dir().String())\n\t\t// this package should be located in the same directory as .pb.go\n\t\tbldr := builder{\n\t\t\tPackageName: packageName,\n\t\t\tImports: \"\",\n\t\t\tInstances: make([]builderInstance, 0),\n\t\t}\n\n\t\t_, err = fmt.Fprintf(buf, \"Processing file %v\\n\", f.Name().String())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\t// iterating over messages and collecting set of OPTIONAL items\n\t\tfor _, msg := range f.AllMessages() {\n\t\t\t_, err = fmt.Fprintf(buf, \"Message name is %v\\n\", msg.Name().String())\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// avoiding parsing constants\n\t\t\tif !strings.Contains(msg.SourceCodeInfo().LeadingComments(), \"constant\") {\n\t\t\t\t// iterating over fields of the message\n\t\t\t\tfor _, dep := range msg.Fields() {\n\t\t\t\t\t// This indicates us that we've found OPTIONAL item in the message\n\t\t\t\t\tif strings.Contains(dep.SourceCodeInfo().LeadingComments(), \"optional\") {\n\t\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Hooray! Found OPTIONAL item - %v\\n\", dep.Name().String())\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\tinstanceName := msg.Name().String()\n\t\t\t\t\t\titemType := adjustFieldName(extractItemMessageType(dep))\n\t\t\t\t\t\t// checking if the message is of type BitString (special case)\n\t\t\t\t\t\tif strings.Contains(itemType, \"BitString\") {\n\t\t\t\t\t\t\tbldr.Imports = bldr.Imports + \"\\n\\\"github.com/onosproject/onos-lib-go/api/asn1/v1/asn1\\\"\\n\"\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// checking if the message is of the elementary type\n\t\t\t\t\t\telementaryType := false\n\t\t\t\t\t\telementaryType = isElementaryType(itemType)\n\n\t\t\t\t\t\titemName := adjustFieldName(composeItemName(dep.Name().String()))\n\t\t\t\t\t\tinstance := builderInstance{\n\t\t\t\t\t\t\tInstance: instanceName,\n\t\t\t\t\t\t\tFunctionName: itemName,\n\t\t\t\t\t\t\tItemName: itemName,\n\t\t\t\t\t\t\tItemType: itemType,\n\t\t\t\t\t\t\tVariableName: doLinting(strings.ToLower(itemName[:1]) + itemName[1:]),\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// getting information about the item from the own Protobuf tree\n\t\t\t\t\t\tie, ok := tree[itemType]\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Couldn't find the message %v in the Protobuf tree\\n\", itemName)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// checking if the message is defined in the other Protobuf file\n\t\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Current package name is %v, item %v is from package %v\\n\", packageName, adjustFieldName(itemType), ie.PackageName)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif ie.fromOtherProto(packageName) && !elementaryType {\n\t\t\t\t\t\t\ttmp := instance.ItemType\n\t\t\t\t\t\t\tinstance.ItemType = ie.PackageName + \".\" + tmp\n\t\t\t\t\t\t\tif !strings.Contains(bldr.Imports, ie.PackageName) {\n\t\t\t\t\t\t\t\tbldr.Imports = bldr.Imports + \"\\n\" + ie.getImport()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// avoiding case when it's an enumerator\n\t\t\t\t\t\tif strings.Contains(dep.SourceCodeInfo().LeadingComments(), \"valueLB:\") && 
strings.Contains(dep.SourceCodeInfo().LeadingComments(), \"valueUB:\") {\n\t\t\t\t\t\t\tinstance.VariableNamePtr = \"&\" + instance.VariableName\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tif !elementaryType {\n\t\t\t\t\t\t\t\tinstance.ItemType = \"*\" + instance.ItemType\n\t\t\t\t\t\t\t\tinstance.VariableNamePtr = instance.VariableName\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t//treating special case\n\t\t\t\t\t\t\t\tif !strings.Contains(instance.ItemType, \"[]byte\") {\n\t\t\t\t\t\t\t\t\tinstance.VariableNamePtr = \"&\" + instance.VariableName\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\tinstance.VariableNamePtr = instance.VariableName\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t// treating the case of the list\n\t\t\t\t\t\t\tif strings.Contains(strings.ToLower(instance.ItemName), \"list\") && !strings.Contains(strings.ToLower(instance.ItemType), \"list\") {\n\t\t\t\t\t\t\t\tinstance.ItemType = \"[]\" + instance.ItemType\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// linting some of the fields\n\t\t\t\t\t\tinstance.doLinting()\n\t\t\t\t\t\tbldr.Instances = append(bldr.Instances, instance)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t_, err = fmt.Fprintf(buf, \"We're about to start generating builder for Protobuf\\nObtained structure is %v\\n\", bldr)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\t// looking for a proto path to locate where to store builder file\n\t\tprotoFilePath := lookUpProtoFilePath(dir, f.File().InputPath())\n\t\t_, err = fmt.Fprintf(buf, \"Protobuf file path is %v\\nFile's Input path is %v\\n\", protoFilePath, f.File().InputPath().Dir().String())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\t// composing builder's output path to be the same as the generated Go Protobuf is located\n\t\tindex := strings.Index(protoFilePath, f.File().InputPath().Dir().String())\n\t\tif index == -1 {\n\t\t\t_, err = fmt.Fprintf(buf, \"Something went wrong in searching for the output path to store generated builder file..\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tprotoFilePath = protoFilePath[index:]\n\t\tindex = strings.Index(protoFilePath, \"/\")\n\t\tif index == -1 {\n\t\t\t_, err = fmt.Fprintf(buf, \"Something went wrong in searching for the output path to store generated builder file..\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\toutputPath := protoFilePath[index+1:]\n\t\t_, err = fmt.Fprintf(buf, \"Output file path is %v\\nFile's Input path is %v\\n\", protoFilePath, f.File().InputPath().Dir().String())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\t//Generating new .go file\n\t\tm.OverwriteGeneratorTemplateFile(outputPath+\"/builder.go\", templateBuilder.Lookup(\"builder.tpl\"), bldr)\n\t}\n\n\t// gathering data for pdubuilder\n\t_, err = fmt.Fprintf(buf, \"Gathering data for pdubuilder package generation\\n\")\n\tif err != nil {\n\t\treturn nil\n\t}\n\tpdubldr := pdubuilder{\n\t\tImports: \"\",\n\t\tOneOfs: make([]oneOf, 0),\n\t\tEnums: make([]enum, 0),\n\t\tMessages: make([]message, 0),\n\t\t//Lists: make([]list, 0),\n\t}\n\tfor _, f := range targets { // Input .proto files\n\t\tm.Push(f.Name().String()).Debug(\"reporting\")\n\t\t// adding target file to import (all Protobuf files are linked to each other, so should be present in import)\n\t\t// if the dependency is not required, GoFmt() post-processor will take care of it\n\t\t// looking for a proto path here\n\t\tprotoFilePath := lookUpProtoFilePath(dir, f.File().InputPath())\n\t\tpackageName := 
adjustPackageName(adjustProtoFileName(extractProtoFileName(f.Name().Split()[0])), f.File().InputPath().Dir().String())\n\t\tpdubldr.Imports = pdubldr.Imports + \"\\n\" + packageName + \" \\\"\" + protoFilePath + \"\\\"\" + \"\\n\"\n\n\t\tif len(f.AllMessages()) > 0 {\n\t\t\tpdubldr.MessagePresence = true\n\t\t}\n\t\tif len(f.AllEnums()) > 0 {\n\t\t\tpdubldr.EnumPresence = true\n\t\t}\n\n\t\tfor _, msg := range f.AllMessages() {\n\t\t\t_, err = fmt.Fprintf(buf, \"Message name is %v\\n\", msg.Name().String())\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// avoiding parsing constants\n\t\t\tif !strings.Contains(msg.SourceCodeInfo().LeadingComments(), \"constant\") {\n\t\t\t\tif len(msg.OneOfs()) > 0 {\n\t\t\t\t\t// handling the OneOf case\n\t\t\t\t\tfor _, oneOfField := range msg.OneOfFields() {\n\t\t\t\t\t\t// avoiding OPTIONAL case\n\t\t\t\t\t\tif !strings.Contains(oneOfField.SourceCodeInfo().LeadingComments(), \"optional\") {\n\t\t\t\t\t\t\t_, err = fmt.Fprintf(buf, \"CHOICE message is %v\\n\", adjustFieldName(msg.Name().String()))\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tpdubldr.OneOfPresence = true\n\t\t\t\t\t\t\titemName := adjustFieldName(composeItemName(oneOfField.Name().String()))\n\t\t\t\t\t\t\titemType := adjustFieldName(extractItemMessageType(oneOfField))\n\t\t\t\t\t\t\tms := oneOf{\n\t\t\t\t\t\t\t\tPackageName: packageName,\n\t\t\t\t\t\t\t\tFunctionName: doLinting(msg.Name().String() + itemName),\n\t\t\t\t\t\t\t\tFunctionOutputType: adjustFieldName(msg.Name().String()),\n\t\t\t\t\t\t\t\tVariableType: itemType,\n\t\t\t\t\t\t\t\tVariableName: doLinting(strings.ToLower(itemName[:1]) + itemName[1:]),\n\t\t\t\t\t\t\t\tItemName: itemName,\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// getting information about the item from the own Protobuf tree\n\t\t\t\t\t\t\tif isElementaryType(itemType) {\n\t\t\t\t\t\t\t\t// treating special case (BitString) here\n\t\t\t\t\t\t\t\tif strings.Contains(strings.ToLower(itemType), \"bitstring\") {\n\t\t\t\t\t\t\t\t\tms.VariableType = \"*\" + ms.VariableType\n\t\t\t\t\t\t\t\t\tif !strings.Contains(pdubldr.Imports, \"/asn1/v1/asn1\") {\n\t\t\t\t\t\t\t\t\t\tpdubldr.Imports = pdubldr.Imports + \"\\n\\\"github.com/onosproject/onos-lib-go/api/asn1/v1/asn1\\\"\\n\"\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tie, ok := tree[itemType]\n\t\t\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Couldn't find the message %v in the Protobuf tree\\n\", itemName)\n\t\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t// checking if the message is defined in the other Protobuf file\n\t\t\t\t\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Current package name is %v, item %v is from package %v\\n\", packageName, adjustFieldName(itemType), ie.PackageName)\n\t\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tif ie.fromOtherProto(adjustPackageName(adjustProtoFileName(extractProtoFileName(f.Name().Split()[0])), f.File().InputPath().Dir().String())) && !isElementaryType(itemType) {\n\t\t\t\t\t\t\t\t\t\tif ie.IsEnum {\n\t\t\t\t\t\t\t\t\t\t\tms.VariableType = ie.PackageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\t\tif strings.Contains(oneOfField.SourceCodeInfo().LeadingComments(), \"valueLB:\") && strings.Contains(oneOfField.SourceCodeInfo().LeadingComments(), \"valueUB:\") {\n\t\t\t\t\t\t\t\t\t\t\t\tms.VariableType = 
ie.PackageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\t\t\tms.VariableType = \"*\" + ie.PackageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t} else if !isElementaryType(itemType) {\n\t\t\t\t\t\t\t\t\t\tif strings.Contains(oneOfField.SourceCodeInfo().LeadingComments(), \"valueLB:\") && strings.Contains(oneOfField.SourceCodeInfo().LeadingComments(), \"valueUB:\") {\n\t\t\t\t\t\t\t\t\t\t\tms.VariableType = packageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\t\tms.VariableType = \"*\" + packageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif oneOfField.Type().IsRepeated() {\n\t\t\t\t\t\t\t\t\tms.VariableType = \"[]\" + ms.VariableType\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tpdubldr.OneOfs = append(pdubldr.OneOfs, ms)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tms := message{\n\t\t\t\t\t\tPackageName: packageName,\n\t\t\t\t\t\tFunctionName: doLinting(msg.Name().String()),\n\t\t\t\t\t\tFunctionOutputType: adjustFieldName(msg.Name().String()),\n\t\t\t\t\t\tItems: make([]item, 0),\n\t\t\t\t\t}\n\t\t\t\t\tif len(msg.Fields()) == 1 {\n\t\t\t\t\t\tms.SingleItem = true\n\t\t\t\t\t}\n\t\t\t\t\t// iterating over fields of the message\n\t\t\t\t\tfor _, dep := range msg.Fields() {\n\t\t\t\t\t\t// don't want to include optional items in the message\n\t\t\t\t\t\tif !strings.Contains(dep.SourceCodeInfo().LeadingComments(), \"optional\") {\n\t\t\t\t\t\t\titemName := adjustFieldName(composeItemName(dep.Name().String()))\n\t\t\t\t\t\t\titemType := adjustFieldName(extractItemMessageType(dep))\n\n\t\t\t\t\t\t\ti := item{\n\t\t\t\t\t\t\t\tVariableName: doLinting(strings.ToLower(itemName[:1]) + itemName[1:]),\n\t\t\t\t\t\t\t\tFieldName: itemName,\n\t\t\t\t\t\t\t\tItemType: itemType,\n\t\t\t\t\t\t\t\tIsChoice: false,\n\t\t\t\t\t\t\t\tIsEnum: false,\n\t\t\t\t\t\t\t\tIsList: false,\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// getting information about the item from the own Protobuf tree\n\t\t\t\t\t\t\tif isElementaryType(itemType) {\n\t\t\t\t\t\t\t\t// treating special case (BitString) here\n\t\t\t\t\t\t\t\tif strings.Contains(strings.ToLower(itemType), \"bitstring\") {\n\t\t\t\t\t\t\t\t\ti.ItemType = \"*\" + i.ItemType\n\t\t\t\t\t\t\t\t\tif !strings.Contains(pdubldr.Imports, \"/asn1/v1/asn1\") {\n\t\t\t\t\t\t\t\t\t\tpdubldr.Imports = pdubldr.Imports + \"\\n\\\"github.com/onosproject/onos-lib-go/api/asn1/v1/asn1\\\"\\n\"\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t// getting information about the item from the own Protobuf tree\n\t\t\t\t\t\t\t\tie, ok := tree[itemType]\n\t\t\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Couldn't find the message %v in the Protobuf tree\\n\", itemName)\n\t\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t// checking if the message is defined in the other Protobuf file\n\t\t\t\t\t\t\t\t\t_, err = fmt.Fprintf(buf, \"Current package name is %v, item %v is from package %v. 
It is of elementary type %v and from other proto %v\\n\",\n\t\t\t\t\t\t\t\t\t\tpackageName, adjustFieldName(itemType), ie.PackageName, isElementaryType(itemType), ie.fromOtherProto(packageName))\n\t\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t// avoiding the case when it's an enumerator\n\t\t\t\t\t\t\t\t\tif !isElementaryType(itemType) && !strings.Contains(dep.SourceCodeInfo().LeadingComments(), \"valueLB:\") && !strings.Contains(dep.SourceCodeInfo().LeadingComments(), \"valueUB:\") {\n\t\t\t\t\t\t\t\t\t\tif ie.fromOtherProto(packageName) && !isElementaryType(itemType) {\n\t\t\t\t\t\t\t\t\t\t\ti.ItemType = \"*\" + ie.PackageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t} else if !isElementaryType(itemType) {\n\t\t\t\t\t\t\t\t\t\t\ti.ItemType = \"*\" + packageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\tif ie.fromOtherProto(packageName) && !isElementaryType(itemType) {\n\t\t\t\t\t\t\t\t\t\t\ti.ItemType = ie.PackageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t} else if !isElementaryType(itemType) {\n\t\t\t\t\t\t\t\t\t\t\ti.ItemType = packageName + \".\" + itemType\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\tif dep.Type().IsRepeated() {\n\t\t\t\t\t\t\t\t\t\ti.ItemType = \"[]\" + i.ItemType\n\t\t\t\t\t\t\t\t\t\ti.IsList = true\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\tif ie.IsChoice {\n\t\t\t\t\t\t\t\t\t\ti.IsChoice = true\n\t\t\t\t\t\t\t\t\t} else if ie.IsList {\n\t\t\t\t\t\t\t\t\t\ti.IsList = true\n\t\t\t\t\t\t\t\t\t} else if ie.IsEnum {\n\t\t\t\t\t\t\t\t\t\ti.IsEnum = true\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t// treating the special case when it is a list\n\t\t\t\t\t\t\tif ms.SingleItem && dep.Type().IsRepeated() && !strings.Contains(i.ItemType, \"[]\") {\n\t\t\t\t\t\t\t\ti.ItemType = \"[]\" + i.ItemType\n\t\t\t\t\t\t\t\ti.IsList = true\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tms.Items = append(ms.Items, i)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tpdubldr.Messages = append(pdubldr.Messages, ms)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, en := range f.AllEnums() {\n\t\t\t_, err = fmt.Fprintf(buf, \"Enumerator is %v\\n\", en.Name().String())\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tfor _, ie := range en.Values() {\n\t\t\t\t_, err = fmt.Fprintf(buf, \"Enumerator value is %v\\n\", composeItemName(strings.ToLower(ie.Name().String())))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\te := enum{\n\t\t\t\t\tPackageName: packageName,\n\t\t\t\t\tFunctionName: composeItemName(strings.ToLower(ie.Name().String())),\n\t\t\t\t\tFunctionOutputType: packageName + \".\" + adjustFieldName(en.Name().String()),\n\t\t\t\t\tItem: adjustFieldName(en.Name().String()) + \"_\" + ie.Name().String(),\n\t\t\t\t}\n\t\t\t\tpdubldr.Enums = append(pdubldr.Enums, e)\n\t\t\t}\n\t\t}\n\t}\n\n\t_, err = fmt.Fprintf(buf, \"We're about to start generating encoder package\\nObtained structure is %v\\n\", enc)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\t//printing encoder package\n\tfor _, e := range enc {\n\t\t_, err = fmt.Fprintf(buf, \"Generating template for %v with name %v\\n\", e.MessageName, e.MessageNameInLogging)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\t//Generating new .go file\n\t\tm.OverwriteGeneratorTemplateFile(\"encoder/\"+e.MessageNameInLogging+\".go\", templateEncoder.Lookup(\"encoder.tpl\"), e)\n\t}\n\n\t// printing pdubuilder package\n\t//Generating new .go 
file\n\tm.OverwriteGeneratorTemplateFile(\"pdubuilder/pdubuilder.go\", templatePdubuilder.Lookup(\"pdubuilder.tpl\"), pdubldr)\n\n\tif sm {\n\t\t_, err = fmt.Fprintf(buf, \"We're about to start generating servicemodel package\\nObtained structure is %v\\n\", smodel)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\t//Generating new .go file\n\t\tm.OverwriteGeneratorTemplateFile(\"servicemodel/servicemodel.go\", templateServicemodel.Lookup(\"servicemodel.tpl\"), smodel)\n\t}\n\n\tout := m.OutputPath()\n\t_, err = fmt.Fprintf(buf, \"Output path is\\n%v\\n\", out)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tm.OverwriteCustomFile(\n\t\t\"/tmp/report.txt\",\n\t\tbuf.String(),\n\t\t0644,\n\t)\n\n\treturn m.Artifacts()\n}",
"func installFileWatcher(ctx *gcp.Context) error {\n\twxl, err := ctx.Layer(watchexecLayer, gcp.CacheLayer, gcp.LaunchLayer)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating %v layer: %w\", watchexecLayer, err)\n\t}\n\n\t// Check metadata layer to see if correct version of watchexec is already installed.\n\tmetaWatchexecVersion := ctx.GetMetadata(wxl, versionKey)\n\tif metaWatchexecVersion == watchexecVersion {\n\t\tctx.CacheHit(watchexecLayer)\n\t} else {\n\t\tctx.CacheMiss(watchexecLayer)\n\t\t// Clear layer data to avoid files from multiple versions of watchexec.\n\t\tif err := ctx.ClearLayer(wxl); err != nil {\n\t\t\treturn fmt.Errorf(\"clearing layer %q: %w\", wxl.Name, err)\n\t\t}\n\n\t\tbinDir := filepath.Join(wxl.Path, \"bin\")\n\t\tif err := ctx.MkdirAll(binDir, 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Download and install watchexec in layer.\n\t\tctx.Logf(\"Installing watchexec v%s\", watchexecVersion)\n\t\tarchiveURL := fmt.Sprintf(watchexecURL, watchexecVersion)\n\t\tcommand := fmt.Sprintf(\"curl --fail --show-error --silent --location --retry 3 %s | tar xJ --directory %s --strip-components=1 --wildcards \\\"*watchexec\\\"\", archiveURL, binDir)\n\t\tif _, err := ctx.Exec([]string{\"bash\", \"-c\", command}, gcp.WithUserAttribution); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.SetMetadata(wxl, versionKey, watchexecVersion)\n\t}\n\treturn nil\n}",
"func DownloadPackage(installPackageURI string) (string, error) {\n\tlog.Infoln(\"downloadPackage ENTER\")\n\tlog.Infoln(\"installPackageURI=\", installPackageURI)\n\n\tpath, err := util.GetFullPath()\n\tif err != nil {\n\t\tlog.Errorln(\"GetFullPath Failed:\", err)\n\t\tlog.Infoln(\"downloadPackage LEAVE\")\n\t\treturn \"\", err\n\t}\n\n\tfilename := util.GetFilenameFromURIOrFullPath(installPackageURI)\n\tlog.Infoln(\"Filename:\", filename)\n\n\tfullpath := util.AppendSlash(path) + filename\n\tlog.Infoln(\"Fullpath:\", fullpath)\n\n\t//create a downloaded file\n\toutput, err := os.Create(fullpath)\n\tif err != nil {\n\t\tlog.Errorln(\"Create File Failed:\", err)\n\t\tlog.Infoln(\"downloadPackage LEAVE\")\n\t\treturn \"\", err\n\t}\n\n\t//get the \"executor\" file\n\tresp, err := http.Get(installPackageURI)\n\tif err != nil {\n\t\tlog.Errorln(\"HTTP GET Failed:\", err)\n\t\tlog.Infoln(\"downloadPackage LEAVE\")\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\t_, err = io.Copy(output, resp.Body)\n\tif err != nil {\n\t\tlog.Errorln(\"IO Copy Failed:\", err)\n\t\tlog.Infoln(\"downloadPackage LEAVE\")\n\t\treturn \"\", err\n\t}\n\toutput.Close()\n\n\tlog.Infoln(\"downloadPackage Succeeded:\", fullpath)\n\tlog.Infoln(\"downloadPackage LEAVE\")\n\treturn fullpath, nil\n}",
"func (y *YumManager) InstallPackage(pkg string) (string, error) {\n\tif _, isInstalled := y.InstalledVersion(pkg); isInstalled {\n\t\treturn \"already installed\", nil\n\t}\n\tres, err := y.Sys.Run(fmt.Sprintf(\"yum install -y %s\", pkg))\n\treturn string(res), err\n}",
"func (f BuildTemplate) Install() error {\n\treturn pkg.Kubectl(f.Data.(string), os.Stdout)\n}",
"func (t *BinTool) Install() error {\n\tif t.goInstall {\n\t\treturn t.installGo()\n\t}\n\treturn t.installBinary()\n}",
"func (s Service) Install() error {\n\tprefix := \"messagedb/database/\"\n\tloadFiles := []string{\n\n\t\t// Main install\n\t\t\"roles/message-store.sql\",\n\t\t\"schema/message-store.sql\",\n\t\t\"extensions/pgcrypto.sql\",\n\t\t\"tables/messages.sql\",\n\n\t\t// Functions\n\t\t\"types/message.sql\",\n\t\t\"functions/message-store-version.sql\",\n\t\t\"functions/hash-64.sql\",\n\t\t\"functions/acquire-lock.sql\",\n\t\t\"functions/category.sql\",\n\t\t\"functions/is-category.sql\",\n\t\t\"functions/id.sql\",\n\t\t\"functions/cardinal-id.sql\",\n\t\t\"functions/stream-version.sql\",\n\t\t\"functions/write-message.sql\",\n\t\t\"functions/get-stream-messages.sql\",\n\t\t\"functions/get-category-messages.sql\",\n\t\t\"functions/get-last-stream-message.sql\",\n\n\t\t// Indexes\n\t\t\"indexes/messages-id.sql\",\n\t\t\"indexes/messages-stream.sql\",\n\t\t\"indexes/messages-category.sql\",\n\n\t\t// Privileges\n\t\t\"privileges/schema.sql\",\n\t\t\"privileges/table.sql\",\n\t\t\"privileges/sequence.sql\",\n\t\t\"privileges/functions.sql\",\n\t\t\"privileges/views.sql\",\n\n\t\t// Views\n\t\t\"views/stream-summary.sql\",\n\t\t\"views/type-summary.sql\",\n\t\t\"views/stream-type-summary.sql\",\n\t\t\"views/type-stream-summary.sql\",\n\t\t\"views/category-type-summary.sql\",\n\t\t\"views/type-category-summary.sql\",\n\t}\n\n\tfor _, file := range loadFiles {\n\t\t_, err := sqlx.LoadFile(s.db, prefix+file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func Install(ctx context.Context, client *http.Client, store *storage.Service, name string) error {\n\tsklog.Infof(\"Installing: %s\", name)\n\tobj, err := store.Objects.Get(bucketName, \"debs/\"+name).Do()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to retrieve Google Storage metadata about debian package: %s\", err)\n\t}\n\treq, err := gcs.RequestForStorageURL(obj.MediaLink)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to construct request object for media: %s\", err)\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to retrieve packages file: %s\", err)\n\t}\n\tdefer util.Close(resp.Body)\n\tf, err := ioutil.TempFile(\"\", \"skia-pull\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to create tmp file: %s\", err)\n\t}\n\t_, copyErr := io.Copy(f, resp.Body)\n\tif err := f.Close(); err != nil {\n\t\treturn fmt.Errorf(\"Failed to close temporary file: %v\", err)\n\t}\n\tif copyErr != nil {\n\t\treturn fmt.Errorf(\"Failed to download file: %s\", copyErr)\n\t}\n\n\tif err := installDependencies(ctx, f.Name()); err != nil {\n\t\treturn fmt.Errorf(\"Error installing dependencies: %s\", err)\n\t}\n\n\tcmd := exec.Command(\"sudo\", \"dpkg\", \"-i\", f.Name())\n\tvar out bytes.Buffer\n\tcmd.Stdout = &out\n\tif err := cmd.Run(); err != nil {\n\t\tsklog.Errorf(\"Install package stdout: %s\", out.String())\n\t\treturn fmt.Errorf(\"Failed to install package: %s\", err)\n\t}\n\tsklog.Infof(\"Install package stdout: %s\", out.String())\n\treturn nil\n}",
"func (h *Habitat) install() (err error) {\n\tpkgInstallArgs := []string{\"pkg\", \"install\", h.Spec.Package}\n\n\treturn execCommand(habPath, pkgInstallArgs)\n}",
"func initPackage(file string, args []string,\n\tin io.Reader, out io.Writer) error {\n\n\tvar p pack.Pack\n\tvar err error\n\ts := bufio.NewScanner(in)\n\n\tvar wd string\n\twd, err = os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = os.Stat(file)\n\tif err == nil {\n\t\treturn err\n\t}\n\n\tfmt.Fprintf(out, \"Creating initial packfile...\")\n\n\t// Get package name\n\tp.Name = filepath.Base(wd)\n\tgetInput(s, out, \"Name\", &p.Name)\n\n\t// Get import path\n\tp.ImportPath, err = getImportPath(wd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgetInput(s, out, \"Import Path\", &p.ImportPath)\n\n\t// Misc Details\n\tgetInput(s, out, \"Summary\", &p.Summary)\n\tgetInput(s, out, \"Description\", &p.Description)\n\tgetInput(s, out, \"Homepage\", &p.Homepage)\n\tp.License = \"MIT\"\n\tgetInput(s, out, \"License\", &p.License)\n\n\terr = p.WritePackFile(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (s *srkServer) Package(chunks srkproto.FunctionService_PackageServer) error {\n\n\tmeta, ok := metadata.FromIncomingContext(chunks.Context())\n\tif !ok {\n\t\treturn errors.New(\"Failed to parse metadata\")\n\t}\n\n\trawName, ok := meta[\"name\"]\n\tif !ok {\n\t\treturn errors.New(\"Metadata option \\\"name\\\" is required\")\n\t}\n\tname := rawName[0]\n\n\tincludes := meta[\"includes\"]\n\n\t// Unpack the uploaded file to a temporary location\n\ttdir, err := ioutil.TempDir(\"\", \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.RemoveAll(tdir)\n\n\tfuncReader := &pbReader{chunks: chunks}\n\t_, err = srk.UntarStream(funcReader, tdir)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not unpack received tar file\")\n\t}\n\n\t// Package the function\n\trawDir := s.mgr.GetRawPath(name)\n\n\tif err := s.mgr.CreateRaw(tdir, name, includes, nil); err != nil {\n\t\treturn errors.Wrap(err, \"Packaging function failed\")\n\t}\n\ts.mgr.Logger.Info(\"Created raw function: \" + rawDir)\n\n\tpkgPath, err := s.mgr.Provider.Faas.Package(rawDir)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Packaing failed\")\n\t}\n\ts.mgr.Logger.Info(\"Package created at: \" + pkgPath)\n\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewSpaceRepo returns a new SpaceRepo which internally uses the specified coreconfig.Repository and net.Gateway.
|
func NewSpaceRepo(config coreconfig.Repository, gateway net.Gateway) (repo SpaceRepo) {
repo.config = config
repo.gateway = gateway
return
}
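
A minimal usage sketch, assuming config and gateway are built by the surrounding application (neither construction is shown in the record above); the constructor simply stores its two dependencies on the returned value:

func buildSpaceRepo(config coreconfig.Repository, gateway net.Gateway) SpaceRepo {
	// NewSpaceRepo returns a value, not a pointer; it only captures both dependencies.
	return NewSpaceRepo(config, gateway)
}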
|
[
"func (b *Backend) NewRepo(p core.NewRepoP) core.NewRepoR {\n\tfields := make(map[string]string)\n\tfields[\"namespace\"] = p.Namespace\n\tfields[\"storage-limit\"] = string(p.StorageLimit)\n\n\tfor k, v := range p.Labels {\n\t\tkey := join(\"label\", k)\n\t\tfields[key] = v\n\t}\n\n\tfn := func(tx *buntdb.Tx) error {\n\t\tfor k, v := range fields {\n\t\t\tfullKey := join(\"repo\", p.Name, k)\n\t\t\ttx.Set(fullKey, v, nil)\n\t\t}\n\t\treturn nil\n\t}\n\n\tb.db.Update(fn)\n\treturn core.NewRepoR{Error: nil}\n}",
"func New(owner, repo string) Interface {\n\treturn NewWithHost(owner, repo, glinstance.OverridableDefault())\n}",
"func New(cfg *config.Config, log logger.Logger) (Repository, error) {\n\t// create new in-memory cache bridge\n\tcaBridge, dbBridge, rpcBridge, err := connect(cfg, log)\n\tif err != nil {\n\t\tlog.Criticalf(\"repository init failed\")\n\t\treturn nil, err\n\t}\n\n\t// construct the proxy instance\n\tp := proxy{\n\t\tcache: caBridge,\n\t\tdb: dbBridge,\n\t\trpc: rpcBridge,\n\t\tlog: log,\n\t\tcfg: cfg,\n\n\t\tgovContracts: governanceContractsMap(&cfg.Governance),\n\n\t\t// keep reference to the SOL compiler\n\t\tsolCompiler: cfg.Compiler.DefaultSolCompilerPath,\n\n\t\t// keep the ballot sources ref\n\t\tballotSources: cfg.Voting.Sources,\n\t}\n\n\t// make the service orchestrator and start it's job\n\tp.orc = newOrchestrator(&p, log, &cfg.Repository)\n\tp.orc.run()\n\n\t// return the proxy\n\treturn &p, nil\n}",
"func newDBRepository(db storage.Repository) repository {\n\treturn &repo{db: db}\n}",
"func New() Repository {\n\treturn Repository{}\n}",
"func NewRepo(cryptoService signed.CryptoService) *Repo {\n\treturn &Repo{\n\t\tTargets: make(map[data.RoleName]*data.SignedTargets),\n\t\tcryptoService: cryptoService,\n\t}\n}",
"func NewRepository() Repository {\n\n\tclient := redis.NewClient(&redis.Options{\n\t\tAddr: viper.GetString(\"redis.address\"),\n\t\tPassword: viper.GetString(\"redis.password\"),\n\t\tDB: viper.GetInt(\"redis.db\"),\n\t})\n\n\treturn &db{client: client}\n}",
"func newGroupRepo() repo.Group {\n\treturn &groupRepo{}\n}",
"func newRepo(m *repoManager) (*repoT, dvid.VersionID, error) {\n\trepoID, err := m.NewRepoID()\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\tuuid, versionID, err := m.NewUUID()\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\tt := time.Now()\n\trepo := &repoT{\n\t\trepoID: repoID,\n\t\trootID: uuid,\n\t\tlog: []string{},\n\t\tproperties: make(map[string]interface{}),\n\t\tdata: make(map[dvid.DataString]DataService),\n\t\tmanager: m,\n\t\tcreated: t,\n\t\tupdated: t,\n\t}\n\trepo.dag = repo.newDAG(uuid, versionID)\n\n\tm.repos[uuid] = repo\n\tm.repoToUUID[repoID] = uuid\n\n\treturn repo, versionID, err\n}",
"func NewRepository(conv EntityConverter) *repository {\n\treturn &repository{\n\t\tconditionTreeLister: repo.NewConditionTreeListerGlobal(tableName, tableColumns),\n\t\tlister: repo.NewListerGlobal(resource.FormationConstraint, tableName, tableColumns),\n\t\tcreator: repo.NewCreatorGlobal(resource.FormationConstraint, tableName, tableColumns),\n\t\tsingleGetter: repo.NewSingleGetterGlobal(resource.FormationConstraint, tableName, tableColumns),\n\t\tdeleter: repo.NewDeleterGlobal(resource.FormationConstraint, tableName),\n\t\tupdater: repo.NewUpdaterGlobal(resource.FormationConstraint, tableName, updatableColumns, idColumns),\n\t\tconv: conv,\n\t}\n}",
"func NewRepoWorkSpace(dir string) (*RepoWorkSpace, error) {\n\tvar (\n\t\terr error\n\t)\n\n\ttopDir, err := path.FindTopDir(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newRepoWorkSpace(topDir, \"\")\n}",
"func NewRepository(awsSession *session.Session, stage string) Repository {\n\treturn Repository{\n\t\tstage: stage,\n\t\tdynamoDBClient: dynamodb.New(awsSession),\n\t\tgithubOrgTableName: fmt.Sprintf(\"cla-%s-github-orgs\", stage),\n\t}\n}",
"func NewRepository(settings *Settings) (*Repository, error) {\n\trepo := connect(settings)\n\n\trepo.jobsCollection = repo.db.Collection(settings.JobsCollectionName)\n\trepo.profileCollection = repo.db.Collection(settings.ProfileCollectionName)\n\trepo.companiesCollection = repo.db.Collection(settings.CompaniesCollectionName)\n\trepo.jobReportsCollection = repo.db.Collection(settings.JobReportsCollectionName)\n\trepo.candidateReportsCollection = repo.db.Collection(settings.CandidateReportsCollectionName)\n\trepo.jobFiltersCollection = repo.db.Collection(settings.JobFiltersCollectionName)\n\trepo.candidateFiltersCollection = repo.db.Collection(settings.CandidateFiltersCollectionName)\n\trepo.pricesCollection = repo.db.Collection(settings.PricesCollectionCollectionName)\n\n\treturn repo, nil\n}",
"func New(name string, users, readOnlyUsers []string, isPublic bool) (*Repository, error) {\n\tlog.Debugf(\"Creating repository %q\", name)\n\tr := &Repository{Name: name, Users: users, ReadOnlyUsers: readOnlyUsers, IsPublic: isPublic}\n\tif v, err := r.isValid(); !v {\n\t\tlog.Errorf(\"repository.New: Invalid repository %q: %s\", name, err)\n\t\treturn r, err\n\t}\n\tif err := newBare(name); err != nil {\n\t\tlog.Errorf(\"repository.New: Error creating bare repository for %q: %s\", name, err)\n\t\treturn r, err\n\t}\n\tbarePath := barePath(name)\n\tif barePath != \"\" && isPublic {\n\t\tioutil.WriteFile(barePath+\"/git-daemon-export-ok\", []byte(\"\"), 0644)\n\t\tif f, err := fs.Filesystem().Create(barePath + \"/git-daemon-export-ok\"); err == nil {\n\t\t\tf.Close()\n\t\t}\n\t}\n\tconn, err := db.Conn()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.Close()\n\terr = conn.Repository().Insert(&r)\n\tif mgo.IsDup(err) {\n\t\tlog.Errorf(\"repository.New: Duplicate repository %q\", name)\n\t\treturn r, fmt.Errorf(\"A repository with this name already exists.\")\n\t}\n\treturn r, err\n}",
"func NewRepository(server string) (repository.Repository, error) {\n\ttravis := os.Getenv(\"TRAVIS\")\n\tif travis == \"true\" {\n\t\terr := fmt.Errorf(\"cannot use MongoDB in Travis CI due to gopkg.in/mgo.v2 issue #218\")\n\t\tlog.Println(err.Error())\n\t\treturn nil, err\n\t}\n\n\tu, err := url.Parse(server)\n\tif err != nil {\n\t\terr2 := fmt.Errorf(\"cannot parse url '%s': %s\\n\", server, err)\n\t\tlog.Println(err2.Error())\n\t\treturn nil, err2\n\t}\n\n\tu2 := &url.URL{Scheme: \"mongodb\", User: u.User, Host: u.Host}\n\tserver = u2.String()\n\n\tsession, err := mgo.Dial(server)\n\tif err != nil {\n\t\terr2 := fmt.Errorf(\"cannot connect to MongoDB at %s: %s\\n\", server, err)\n\t\tlog.Println(err2.Error())\n\t\treturn nil, err2\n\t}\n\n\tsession.SetMode(mgo.Strong, false)\n\tsession.SetSafe(&mgo.Safe{WMode: \"majority\"})\n\tdatabase := session.DB(DatabaseName)\n\tdeployments, err := createCollection(database, DeploymentsCollectionName, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmanifests, err := createCollection(database, ManifestsCollectionName,\n\t\t[][]string{{\"manifest.deployment\"}})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinstances, err := createCollection(database, InstancesCollectionName,\n\t\t[][]string{{\"typeinstance.type\"}, {\"typeinstance.deployment\"}})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpr := &pRepository{\n\t\tSession: session,\n\t\tDeployments: deployments,\n\t\tManifests: manifests,\n\t\tInstances: instances,\n\t}\n\n\treturn pr, nil\n}",
"func New() *repository {\n\treturn &repository{}\n}",
"func (af *AftoRepo) newRepo() {\n\tvar body string\n\taf.checkReqs()\n\tlog.Println(\"generating repo: \\\"\" + af.Name + \"\\\"\")\n\tos.Mkdir(af.Name, 0755)\n\t// Execute dpkg script.\n\tdirerr := af.executeDpkgScript()\n\tif direrr != nil {\n\t\tlog.Fatalln(direrr)\n\t}\n\tlog.Println(\"generated Packages file.\")\n\t// Execute bzip command.\n\tbzerr := afutil.BzipPackages()\n\tif bzerr != nil {\n\t\tlog.Fatalln(bzerr)\n\t}\n\tlog.Println(\"bzipped Packages file.\")\n\t// Create Release file.\n\trfile, rfilerr := afutil.ReleaseFile(\"afto beta repo\", \"apt.afto.repo\", \"A default repo generated by afto\", \"afto\", \"beta\")\n\tif rfilerr != nil {\n\t\tlog.Fatalln(rfilerr)\n\t}\n\trf, rferr := os.Create(af.Name + \"/Release\")\n\tif rferr != nil {\n\t\tlog.Fatalln(rferr)\n\t}\n\trf.WriteString(rfile)\n\tlog.Println(\"created Release file.\")\n\n\thtmlFile, hterr := os.Create(\"index.html\")\n\tif hterr != nil {\n\t\tlog.Println(hterr)\n\t}\n\n\t// Restore the icons too.\n\tcyiconerr := RestoreAsset(\".\", \"CydiaIcon.png\")\n\tif cyiconerr != nil {\n\t\tlog.Fatalln(cyiconerr)\n\t}\n\tcyicon2err := RestoreAsset(\".\", \"[email protected]\")\n\tif cyicon2err != nil {\n\t\tlog.Fatalln(cyicon2err)\n\t}\n\tcyicon3err := RestoreAsset(\".\", \"[email protected]\")\n\tif cyicon3err != nil {\n\t\tlog.Fatalln(cyicon3err)\n\t}\n\n\t// Move files to repo & generate HTML file.\n\tos.Rename(\"Packages\", af.Name+\"/Packages\")\n\tos.Rename(\"Packages.bz2\", af.Name+\"/Packages.bz2\")\n\tos.Rename(\"CydiaIcon.png\", af.Name+\"/CydiaIcon.png\")\n\tos.Rename(\"[email protected]\", af.Name+\"/[email protected]\")\n\tos.Rename(\"[email protected]\", af.Name+\"/[email protected]\")\n\tfor _, deb := range af.Debs {\n\t\tos.Rename(deb, af.Name+\"/\"+deb)\n\t\tbody += fmt.Sprintln(`<pre><a href=\"` + deb + `\">` + deb + `</a></pre>`)\n\t}\n\thtmlFile.WriteString(header + body + footer)\n\tos.Rename(\"index.html\", af.Name+\"/index.html\")\n}",
"func NewRepository(variables templateUtils.TemplateVariables) Template {\n\trawTemplate, err := template.New(\"repository\").Parse(RepositoryTemplate)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn New(resource.New(geography.RepositoryDir, variables.Entity.EntityFU()+\"Repository.php\"), rawTemplate, variables)\n}",
"func NewRepo(db *sql.DB, logger log.Logger) Repository {\n\t// Apparently if you implement the methods defined on an interface on the\n\t// underlying value (this case a struct), you can return an instance of that\n\t// interface like this (here returning a pointer to the struct) where the return\n\t// value specified on the func is the Interface but the function returns a pointer\n\t// to the underlying implementation of the struct... cool!\n\treturn &repo{\n\t\tdb: db,\n\t\tlogger: log.With(logger, \"repo\", \"sql\"),\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ListSpaces lists all spaces in the organisation. For each space, it calls the provided callback function.
|
func (sr SpaceRepo) ListSpaces(callback func(models.Space) bool) error {
return sr.gateway.ListPaginatedResources(
sr.config.APIEndpoint(),
"/v2/spaces",
resources.SpaceResource{},
func(resource interface{}) bool {
return callback(resource.(resources.SpaceResource).ToModel())
})
}
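
A minimal sketch of driving this callback API, assuming models.Space exposes a Name field (not shown in the record); returning false from the callback stops the paginated listing early:

func collectSpaceNames(sr SpaceRepo) ([]string, error) {
	var names []string
	err := sr.ListSpaces(func(s models.Space) bool {
		names = append(names, s.Name) // Name is assumed on models.Space
		return len(names) < 10        // false halts pagination after ten spaces
	})
	return names, err
}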
|
[
"func (s *SpacesService) List() ([]*Space, *http.Response, error) {\n\tvar spaces []*Space\n\treq, err := s.client.NewRequest(\"GET\", \"spaces\", nil)\n\n\tif err != nil {\n\t\treturn spaces, nil, err\n\t}\n\n\tvar spacesResponse SpacesResponse\n\tresp, err := s.client.Do(req, &spacesResponse)\n\n\tif err != nil {\n\t\treturn spaces, resp, err\n\t}\n\n\tspaces = spacesResponse.Spaces\n\treturn spaces, resp, nil\n}",
"func ListSpaces() ([]*Space, error) {\n\tbkmConf := config.GlobalConf.Bkmonitor\n\t// 使用网关访问\n\treqURL := fmt.Sprintf(\"%s%s\", bkmConf.GatewayHost, listSpacesPath)\n\tspaces := make([]*Space, 0)\n\tvar page, pageSize = 1, 1000\n\tfor {\n\t\treq := gorequest.New().Get(reqURL)\n\t\treq.QueryData.Set(\"space_type_id\", \"bkci\")\n\t\treq.QueryData.Set(\"page\", strconv.Itoa(page))\n\t\treq.QueryData.Set(\"page_size\", strconv.Itoa(pageSize))\n\t\t// 请求API\n\t\tproxy := \"\"\n\t\tbody, err := component.Request(*req, timeout, proxy, getAuthHeader())\n\t\tif err != nil {\n\t\t\tlogging.Error(\"request list bkmonitor bcs spaces failed, %s\", err.Error())\n\t\t\treturn nil, errorx.NewRequestBkMonitorErr(err.Error())\n\t\t}\n\t\t// 解析返回的body\n\t\tresp := &ListSpacesResp{}\n\t\tif err := json.Unmarshal([]byte(body), resp); err != nil {\n\t\t\tlogging.Error(\"parse bkmonitor body error, body: %v\", body)\n\t\t\treturn nil, err\n\t\t}\n\t\tif resp.Code != 200 {\n\t\t\tlogging.Error(\"request list bkmonitor spaces failed, msg: %s\", resp.Message)\n\t\t\treturn nil, errors.New(resp.Message)\n\t\t}\n\t\tfor _, space := range resp.Data.List {\n\t\t\tif space.IsBcsValid {\n\t\t\t\tspaces = append(spaces, space)\n\t\t\t}\n\t\t}\n\t\tif resp.Data.Count <= page*pageSize {\n\t\t\tbreak\n\t\t}\n\t\tpage++\n\t}\n\n\treturn spaces, nil\n}",
"func (s *SpacesService) List(opt *SpaceListOptions) ([]Space, *http.Response, error) {\n\tu, err := urlWithOptions(\"spaces\", opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar spacesResp listSpacesResponse\n\tresp, err := s.client.Do(req, &spacesResp)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn spacesResp.Spaces, resp, nil\n}",
"func (k *Kf) Spaces(ctx context.Context) []string {\n\tLogf(k.t, \"listing spaces...\")\n\tdefer Logf(k.t, \"done listing spaces.\")\n\tk.t.Helper()\n\toutput, errs := k.kf(ctx, k.t, KfTestConfig{\n\t\tArgs: []string{\n\t\t\t\"spaces\",\n\t\t},\n\t})\n\tPanicOnError(ctx, k.t, \"spaces\", errs)\n\treturn CombineOutputStr(ctx, k.t, output)\n}",
"func (c *Cache) ListSpace(ctx context.Context, storageID, spaceID string) (*Shares, error) {\n\tctx, span := tracer.Start(ctx, \"ListSpace\")\n\tdefer span.End()\n\tspan.SetAttributes(attribute.String(\"cs3.storageid\", storageID), attribute.String(\"cs3.spaceid\", spaceID))\n\n\tunlock := c.LockSpace(spaceID)\n\tdefer unlock()\n\tspan.AddEvent(\"got lock\")\n\n\t// sync cache, maybe our data is outdated\n\terr := c.syncWithLock(ctx, storageID, spaceID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tspaces, ok := c.Providers.Load(storageID)\n\tif !ok {\n\t\treturn &Shares{}, nil\n\t}\n\n\tspace, ok := spaces.Spaces.Load(spaceID)\n\tif !ok {\n\t\treturn &Shares{}, nil\n\t}\n\n\tshares := &Shares{\n\t\tShares: maps.Clone(space.Shares),\n\t\tEtag: space.Etag,\n\t}\n\treturn shares, nil\n}",
"func (c *SearchController) Spaces(ctx *app.SpacesSearchContext) error {\n\tq := ctx.Q\n\tif q == \"\" {\n\t\treturn jsonapi.JSONErrorResponse(ctx, goa.ErrBadRequest(\"empty search query not allowed\"))\n\t} else if q == \"*\" {\n\t\tq = \"\" // Allow empty query if * specified\n\t}\n\n\tvar result []space.Space\n\tvar count int\n\tvar err error\n\n\toffset, limit := computePagingLimits(ctx.PageOffset, ctx.PageLimit)\n\n\treturn application.Transactional(c.db, func(appl application.Application) error {\n\t\tvar resultCount uint64\n\t\tresult, resultCount, err = appl.Spaces().Search(ctx, &q, &offset, &limit)\n\t\tcount = int(resultCount)\n\t\tif err != nil {\n\t\t\tcause := errs.Cause(err)\n\t\t\tswitch cause.(type) {\n\t\t\tcase errors.BadParameterError:\n\t\t\t\tlog.Error(ctx, map[string]interface{}{\n\t\t\t\t\t\"query\": q,\n\t\t\t\t\t\"offset\": offset,\n\t\t\t\t\t\"limit\": limit,\n\t\t\t\t\t\"err\": err,\n\t\t\t\t}, \"unable to list spaces\")\n\t\t\t\treturn jsonapi.JSONErrorResponse(ctx, goa.ErrBadRequest(fmt.Sprintf(\"error listing spaces for expression: %s: %s\", q, err)))\n\t\t\tdefault:\n\t\t\t\tlog.Error(ctx, map[string]interface{}{\n\t\t\t\t\t\"query\": q,\n\t\t\t\t\t\"offset\": offset,\n\t\t\t\t\t\"limit\": limit,\n\t\t\t\t\t\"err\": err,\n\t\t\t\t}, \"unable to list spaces\")\n\t\t\t\treturn jsonapi.JSONErrorResponse(ctx, goa.ErrInternal(fmt.Sprintf(\"unable to list spaces for expression: %s: %s\", q, err)))\n\t\t\t}\n\t\t}\n\n\t\tspaceData, err := ConvertSpacesFromModel(ctx.Context, c.db, ctx.Request, result)\n\t\tif err != nil {\n\t\t\treturn jsonapi.JSONErrorResponse(ctx, err)\n\t\t}\n\t\tresponse := app.SearchSpaceList{\n\t\t\tLinks: &app.PagingLinks{},\n\t\t\tMeta: &app.SpaceListMeta{TotalCount: count},\n\t\t\tData: spaceData,\n\t\t}\n\t\tsetPagingLinks(response.Links, buildAbsoluteURL(ctx.Request), len(result), offset, limit, count, \"q=\"+q)\n\n\t\treturn ctx.OK(&response)\n\t})\n}",
"func (c *SecurityGroupClient) ListStagingForSpace(ctx context.Context, spaceGUID string, opts *SecurityGroupSpaceListOptions) ([]*resource.SecurityGroup, *Pager, error) {\n\tif opts == nil {\n\t\topts = NewSecurityGroupSpaceListOptions()\n\t}\n\tvar res resource.SecurityGroupList\n\terr := c.client.get(ctx, path.Format(\"/v3/spaces/%s/staging_security_groups?%s\", spaceGUID, opts.ToQueryString()), &res)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpager := NewPager(res.Pagination)\n\treturn res.Resources, pager, nil\n}",
"func (a *Client) ListAllTeamsSpaces(params *ListAllTeamsSpacesParams, authInfo runtime.ClientAuthInfoWriter) (*ListAllTeamsSpacesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListAllTeamsSpacesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"listAllTeams_Spaces\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/{baseSpaceId}/teams/all\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &ListAllTeamsSpacesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*ListAllTeamsSpacesOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for listAllTeams_Spaces: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (c *SecurityGroupClient) ListRunningForSpaceAll(ctx context.Context, spaceGUID string, opts *SecurityGroupSpaceListOptions) ([]*resource.SecurityGroup, error) {\n\tif opts == nil {\n\t\topts = NewSecurityGroupSpaceListOptions()\n\t}\n\treturn AutoPage[*SecurityGroupSpaceListOptions, *resource.SecurityGroup](opts, func(opts *SecurityGroupSpaceListOptions) ([]*resource.SecurityGroup, *Pager, error) {\n\t\treturn c.ListRunningForSpace(ctx, spaceGUID, opts)\n\t})\n}",
"func (c *SecurityGroupClient) ListStagingForSpaceAll(ctx context.Context, spaceGUID string, opts *SecurityGroupSpaceListOptions) ([]*resource.SecurityGroup, error) {\n\tif opts == nil {\n\t\topts = NewSecurityGroupSpaceListOptions()\n\t}\n\treturn AutoPage[*SecurityGroupSpaceListOptions, *resource.SecurityGroup](opts, func(opts *SecurityGroupSpaceListOptions) ([]*resource.SecurityGroup, *Pager, error) {\n\t\treturn c.ListStagingForSpace(ctx, spaceGUID, opts)\n\t})\n}",
"func (c *SecurityGroupClient) ListRunningForSpace(ctx context.Context, spaceGUID string, opts *SecurityGroupSpaceListOptions) ([]*resource.SecurityGroup, *Pager, error) {\n\tif opts == nil {\n\t\topts = NewSecurityGroupSpaceListOptions()\n\t}\n\tvar res resource.SecurityGroupList\n\terr := c.client.get(ctx, path.Format(\"/v3/spaces/%s/running_security_groups?%s\", spaceGUID, opts.ToQueryString()), &res)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpager := NewPager(res.Pagination)\n\treturn res.Resources, pager, nil\n}",
"func (a *Client) ListAllRunbooksSpaces(params *ListAllRunbooksSpacesParams, authInfo runtime.ClientAuthInfoWriter) (*ListAllRunbooksSpacesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListAllRunbooksSpacesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"listAllRunbooks_Spaces\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/{baseSpaceId}/runbooks/all\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &ListAllRunbooksSpacesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*ListAllRunbooksSpacesOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for listAllRunbooks_Spaces: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (c *CloudClient) GetSpaces() ([]*latest.Space, error) {\n\treturn c.Spaces, nil\n}",
"func (client *CassandraResourcesClient) ListCassandraKeyspaces(ctx context.Context, resourceGroupName string, accountName string, options *CassandraResourcesListCassandraKeyspacesOptions) (CassandraResourcesListCassandraKeyspacesResponse, error) {\n\treq, err := client.listCassandraKeyspacesCreateRequest(ctx, resourceGroupName, accountName, options)\n\tif err != nil {\n\t\treturn CassandraResourcesListCassandraKeyspacesResponse{}, err\n\t}\n\tresp, err := client.pl.Do(req)\n\tif err != nil {\n\t\treturn CassandraResourcesListCassandraKeyspacesResponse{}, err\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\treturn CassandraResourcesListCassandraKeyspacesResponse{}, client.listCassandraKeyspacesHandleError(resp)\n\t}\n\treturn client.listCassandraKeyspacesHandleResponse(resp)\n}",
"func (s *API) ListNamespaces(req *ListNamespacesRequest, opts ...scw.RequestOption) (*ListNamespacesResponse, error) {\n\tvar err error\n\n\tif req.Region == \"\" {\n\t\tdefaultRegion, _ := s.client.GetDefaultRegion()\n\t\treq.Region = defaultRegion\n\t}\n\n\tdefaultPageSize, exist := s.client.GetDefaultPageSize()\n\tif (req.PageSize == nil || *req.PageSize == 0) && exist {\n\t\treq.PageSize = &defaultPageSize\n\t}\n\n\tquery := url.Values{}\n\tparameter.AddToQuery(query, \"page\", req.Page)\n\tparameter.AddToQuery(query, \"page_size\", req.PageSize)\n\tparameter.AddToQuery(query, \"order_by\", req.OrderBy)\n\tparameter.AddToQuery(query, \"name\", req.Name)\n\tparameter.AddToQuery(query, \"organization_id\", req.OrganizationID)\n\tparameter.AddToQuery(query, \"project_id\", req.ProjectID)\n\n\tif fmt.Sprint(req.Region) == \"\" {\n\t\treturn nil, errors.New(\"field Region cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/functions/v1beta1/regions/\" + fmt.Sprint(req.Region) + \"/namespaces\",\n\t\tQuery: query,\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ListNamespacesResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}",
"func (s *API) ListNamespaces(req *ListNamespacesRequest, opts ...scw.RequestOption) (*ListNamespacesResponse, error) {\n\tvar err error\n\n\tif req.Region == \"\" {\n\t\tdefaultRegion, _ := s.client.GetDefaultRegion()\n\t\treq.Region = defaultRegion\n\t}\n\n\tdefaultPageSize, exist := s.client.GetDefaultPageSize()\n\tif (req.PageSize == nil || *req.PageSize == 0) && exist {\n\t\treq.PageSize = &defaultPageSize\n\t}\n\n\tquery := url.Values{}\n\tparameter.AddToQuery(query, \"page\", req.Page)\n\tparameter.AddToQuery(query, \"page_size\", req.PageSize)\n\tparameter.AddToQuery(query, \"order_by\", req.OrderBy)\n\tparameter.AddToQuery(query, \"name\", req.Name)\n\tparameter.AddToQuery(query, \"organization_id\", req.OrganizationID)\n\tparameter.AddToQuery(query, \"project_id\", req.ProjectID)\n\n\tif fmt.Sprint(req.Region) == \"\" {\n\t\treturn nil, errors.New(\"field Region cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/containers/v1beta1/regions/\" + fmt.Sprint(req.Region) + \"/namespaces\",\n\t\tQuery: query,\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ListNamespacesResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}",
"func (s *SpacesService) ListCharts(spaceID uint) ([]SpaceChart, *http.Response, error) {\n\tu := fmt.Sprintf(\"spaces/%d/charts\", spaceID)\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcharts := new([]SpaceChart)\n\tresp, err := s.client.Do(req, charts)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn *charts, resp, err\n}",
"func listFunc(cmd *cobra.Command, args []string) {\n\tkeyword := cmd.Flag(\"keyword\").Value.String()\n\tpage, _ := cmd.Flags().GetInt(\"page\")\n\tsize, _ := cmd.Flags().GetInt(\"size\")\n\n\tresp, err := app.Client.GetDomainGroups(\n\t\tkeyword, page, size,\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// data, _ := convert.StringToJSONWithIndent(string(resp))\n\t// fmt.Println(data)\n\n\t// Parse Response\n\tvar groups *DomainGroups\n\terr = json.Unmarshal(resp, &groups)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// data, _ := convert.StructToJSONWithIndent(groups)\n\t// fmt.Println(data)\n\n\trows := make([][]string, 0)\n\tfor idx, group := range groups.DomainGroups.DomainGroup {\n\t\trow := make([]string, 0)\n\t\trow = append(row, strconv.Itoa(idx))\n\t\trow = append(row, group.GroupID)\n\t\trow = append(row, group.GroupName)\n\t\trow = append(row, strconv.FormatInt(group.DomainCount, 10))\n\n\t\trows = append(rows, row)\n\t}\n\n\ttable := tablewriter.NewWriter(os.Stdout)\n\ttableHeader := []string{\n\t\t\"序号\", \"域名分组ID\", \"域名分组名称\", \"域名分组数量\",\n\t}\n\ttableHeaderColor := make([]tablewriter.Colors, 0)\n\tfor i := 0; i < len(tableHeader); i++ {\n\t\ttableHeaderColor = append(tableHeaderColor, tablewriter.Colors{tablewriter.Bold})\n\t}\n\ttable.SetHeader(tableHeader)\n\ttable.SetHeaderColor(tableHeaderColor...)\n\ttable.SetAlignment(tablewriter.ALIGN_CENTER)\n\ttable.SetRowLine(true)\n\ttable.AppendBulk(rows)\n\n\ttable.Render()\n}",
"func (ms *NameSpaceStore) List() ([]*domain.Namespace, error) {\n\tnamespaces := []*domain.Namespace{}\n\tfor _, ns := range ms.namespaces {\n\t\tif ns != nil {\n\t\t\tnamespaces = append(namespaces, ns)\n\t\t}\n\t}\n\n\treturn namespaces, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Del provides a mock function with given fields: keys
|
func (_m *MockCacheStore) Del(keys ...string) {
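	// Flatten the variadic keys into a []interface{} slice so that
	// _m.Called records each key as a separate call argument.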
_va := make([]interface{}, len(keys))
for _i := range keys {
_va[_i] = keys[_i]
}
var _ca []interface{}
_ca = append(_ca, _va...)
_m.Called(_ca...)
}
|
[
"func (m *MockPipeline) HDel(key string, fields ...string) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{key}\n\tfor _, a := range fields {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"HDel\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}",
"func (_e *FavorService_Expecter) Del(_a0 interface{}, _a1 interface{}, _a2 interface{}, _a3 interface{}) *FavorService_Del_Call {\n\treturn &FavorService_Del_Call{Call: _e.mock.On(\"Del\", _a0, _a1, _a2, _a3)}\n}",
"func TestDeleteMetaByKey(t *testing.T) {\n\t//Initialize Global Variables (Mocks)\n\tmockCtrl := gomock.NewController(t)\n\tdefer mockCtrl.Finish()\n\tormerMock := beego.NewMockOrmer(mockCtrl)\n\tquerySeterMock := beego.NewMockQuerySeter(mockCtrl)\n\tdbm.DBAccess = ormerMock\n\n\tcases := []struct {\n\t\t// name is name of the testcase\n\t\tname string\n\t\t// filterReturn is the return of mock interface querySeterMock's filter function\n\t\tfilterReturn orm.QuerySeter\n\t\t// deleteReturnInt is the first return of mock interface querySeterMock's delete function\n\t\tdeleteReturnInt int64\n\t\t// deleteReturnErr is the second return of mock interface querySeterMocks's delete function also expected error\n\t\tdeleteReturnErr error\n\t\t// queryTableReturn is the return of mock interface ormerMock's QueryTable function\n\t\tqueryTableReturn orm.QuerySeter\n\t}{{\n\t\t// Success Case\n\t\tname: \"SuccessCase\",\n\t\tfilterReturn: querySeterMock,\n\t\tdeleteReturnInt: int64(1),\n\t\tdeleteReturnErr: nil,\n\t\tqueryTableReturn: querySeterMock,\n\t}, {\n\t\t// Failure Case\n\t\tname: \"FailureCase\",\n\t\tfilterReturn: querySeterMock,\n\t\tdeleteReturnInt: int64(0),\n\t\tdeleteReturnErr: errFailedDBOperation,\n\t\tqueryTableReturn: querySeterMock,\n\t},\n\t}\n\n\t// run the test cases\n\tfor _, test := range cases {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tquerySeterMock.EXPECT().Filter(gomock.Any(), gomock.Any()).Return(test.filterReturn).Times(1)\n\t\t\tquerySeterMock.EXPECT().Delete().Return(test.deleteReturnInt, test.deleteReturnErr).Times(1)\n\t\t\tormerMock.EXPECT().QueryTable(gomock.Any()).Return(test.queryTableReturn).Times(1)\n\t\t\terr := DeleteMetaByKey(\"test\")\n\t\t\tif test.deleteReturnErr != err {\n\t\t\t\tt.Errorf(\"Delete Meta By Key Case failed : wanted %v and got %v\", test.deleteReturnErr, err)\n\t\t\t}\n\t\t})\n\t}\n}",
"func (m *MockRedis) Del(key string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Del\", key)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}",
"func (m *MockKeyValue) Del(v0 string) error {\n\tr0 := m.DelFunc.nextHook()(v0)\n\tm.DelFunc.appendCall(KeyValueDelFuncCall{v0, r0})\n\treturn r0\n}",
"func (_m *MockRedisClient) Unlink(keys ...string) *redis.IntCmd {\n\t_va := make([]interface{}, len(keys))\n\tfor _i := range keys {\n\t\t_va[_i] = keys[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 *redis.IntCmd\n\tif rf, ok := ret.Get(0).(func(...string) *redis.IntCmd); ok {\n\t\tr0 = rf(keys...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*redis.IntCmd)\n\t\t}\n\t}\n\n\treturn r0\n}",
"func (m *MockCache) Del(arg0 context.Context, arg1 structs.Request) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Del\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}",
"func (m *MockAPI) KeysDelete(arg0 string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"KeysDelete\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}",
"func TestDeleteMetaByKeyAndPodUID(t *testing.T) {\n\t//Initialize Global Variables (Mocks)\n\tmockCtrl := gomock.NewController(t)\n\tdefer mockCtrl.Finish()\n\tormerMock := beego.NewMockOrmer(mockCtrl)\n\tdbm.DBAccess = ormerMock\n\trawSeterMock := beego.NewMockRawSeter(mockCtrl)\n\tdeleteRes := beego.NewMockDriverRes(mockCtrl)\n\tdeleteRes.EXPECT().RowsAffected().Return(int64(1), nil).Times(1)\n\n\tcases := []struct {\n\t\t// name is name of the testcase\n\t\tname string\n\t\t// deleteReturnRes is first return of mock interface rawSeterMock's Exec function\n\t\tdeleteReturnRes sql.Result\n\t\t// deleteReturnErr is second return of mock interface rawSeterMock's Exec function which is also expected error\n\t\tdeleteReturnErr error\n\t\t// deleteReturnRaw is the return of mock interface ormerMock's Raw function\n\t\tdeleteReturnRaw orm.RawSeter\n\t}{{\n\t\t// Success Case\n\t\tname: \"SuccessCase\",\n\t\tdeleteReturnRes: deleteRes,\n\t\tdeleteReturnErr: nil,\n\t\tdeleteReturnRaw: rawSeterMock,\n\t}, {\n\t\t// Failure Case\n\t\tname: \"FailureCase\",\n\t\tdeleteReturnRes: nil,\n\t\tdeleteReturnErr: errFailedDBOperation,\n\t\tdeleteReturnRaw: rawSeterMock,\n\t},\n\t}\n\n\t// run the test cases\n\tfor _, test := range cases {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\trawSeterMock.EXPECT().Exec().Return(test.deleteReturnRes, test.deleteReturnErr).Times(1)\n\t\t\tormerMock.EXPECT().Raw(gomock.Any(), gomock.Any()).Return(test.deleteReturnRaw).Times(1)\n\t\t\t_, err := DeleteMetaByKeyAndPodUID(\"test\", \"testUID\")\n\t\t\tif test.deleteReturnErr != err {\n\t\t\t\tt.Errorf(\"Delete Meta By Key Case failed : wanted %v and got %v\", test.deleteReturnErr, err)\n\t\t\t}\n\t\t})\n\t}\n}",
"func (DummyStore) DeleteMap(ctx context.Context, key string, fields ...string) error { return nil }",
"func (m *MockCache) DeleteMulti(ctx context.Context, projectID string, keys []*datastore.Key) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteMulti\", ctx, projectID, keys)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}",
"func (_m *ObjectStore) Delete(key string, store string) error {\n\tret := _m.Called(key, store)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string, string) error); ok {\n\t\tr0 = rf(key, store)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func KVDelete(\n\tinit func(KVStoreFields, *testing.T) (kv.Store, func()),\n\tt *testing.T,\n) {\n\ttype args struct {\n\t\tbucket []byte\n\t\tkey []byte\n\t}\n\ttype wants struct {\n\t\terr error\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tfields KVStoreFields\n\t\targs args\n\t\twants wants\n\t}{\n\t\t{\n\t\t\tname: \"delete key\",\n\t\t\tfields: KVStoreFields{\n\t\t\t\tBucket: []byte(\"bucket\"),\n\t\t\t\tPairs: []kv.Pair{\n\t\t\t\t\t{\n\t\t\t\t\t\tKey: []byte(\"hello\"),\n\t\t\t\t\t\tValue: []byte(\"world\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tbucket: []byte(\"bucket\"),\n\t\t\t\tkey: []byte(\"hello\"),\n\t\t\t},\n\t\t\twants: wants{},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\ts, close := init(tt.fields, t)\n\t\t\tdefer close()\n\n\t\t\terr := s.Update(func(tx kv.Tx) error {\n\t\t\t\tb, err := tx.Bucket(tt.args.bucket)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"unexpected error retrieving bucket: %v\", err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t{\n\t\t\t\t\terr := b.Delete(tt.args.key)\n\t\t\t\t\tif (err != nil) != (tt.wants.err != nil) {\n\t\t\t\t\t\tt.Errorf(\"expected error '%v' got '%v'\", tt.wants.err, err)\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\tif err != nil && tt.wants.err != nil {\n\t\t\t\t\t\tif err.Error() != tt.wants.err.Error() {\n\t\t\t\t\t\t\tt.Errorf(\"expected error messages to match '%v' got '%v'\", tt.wants.err, err.Error())\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif _, err := b.Get(tt.args.key); err != kv.ErrKeyNotFound {\n\t\t\t\t\t\tt.Errorf(\"expected key not found error got %v\", err)\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t})\n\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"error during view transaction: %v\", err)\n\t\t\t}\n\t\t})\n\t}\n}",
"func (m *MockServiceEngine) DeleteExpiredKeys(arg0 context.Context) (int, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteExpiredKeys\", arg0)\n\tret0, _ := ret[0].(int)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (m *MockUpClient) DelUpPassedCacheByStaff(ctx context.Context, in *UpCacheReq, opts ...grpc.CallOption) (*NoReply, error) {\n\tvarargs := []interface{}{ctx, in}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"DelUpPassedCacheByStaff\", varargs...)\n\tret0, _ := ret[0].(*NoReply)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (_m *KV) Delete(_a0 string) error {\n\tret := _m.Called(_a0)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (c *command) Hdel(key interface{}, field []interface{}) Result {\n\tr := newResult()\n\tif field == nil {\n\t\tr.setErr(newInvalidValueError(\"field\", nil))\n\t\treturn r\n\t}\n\tr.request.cmd = append(r.request.cmd, \"HDEL\", key)\n\tfor _, v := range field {\n\t\tr.request.cmd = append(r.request.cmd, v)\n\t}\n\tc.send(CmdHdel, r)\n\treturn r\n}",
"func (b *Batch) Del(keys ...interface{}) *Batch {\n\tvar calls []Call\n\tfor _, key := range keys {\n\t\tk, err := marshalKey(key)\n\t\tif err != nil {\n\t\t\tb.initResult(0, len(keys), err)\n\t\t\treturn b\n\t\t}\n\t\tcalls = append(calls, Delete(proto.Key(k)))\n\t}\n\tb.calls = append(b.calls, calls...)\n\tb.initResult(len(calls), len(calls), nil)\n\treturn b\n}",
"func (_m *Interface) ObjectDelete(app string, key string) error {\n\tret := _m.Called(app, key)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string, string) error); ok {\n\t\tr0 = rf(app, key)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set provides a mock function with given fields: key, value, expiration
|
func (_m *MockCacheStore) Set(key string, value interface{}, expiration time.Duration) {
_m.Called(key, value, expiration)
}
|
[
"func (_m *KV) Set(_a0 string, _a1 []byte, _a2 time.Duration) error {\n\tret := _m.Called(_a0, _a1, _a2)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string, []byte, time.Duration) error); ok {\n\t\tr0 = rf(_a0, _a1, _a2)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (m *MockIRedis) MSet(items map[string]interface{}, expire ...time.Duration) bool {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{items}\n\tfor _, a := range expire {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"MSet\", varargs...)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}",
"func (mmget *mCacheMockget) Set(f func(ref insolar.Reference, getter func() (val interface{}, err error)) (val interface{}, err error)) *CacheMock {\n\tif mmget.defaultExpectation != nil {\n\t\tmmget.mock.t.Fatalf(\"Default expectation is already set for the cache.get method\")\n\t}\n\n\tif len(mmget.expectations) > 0 {\n\t\tmmget.mock.t.Fatalf(\"Some expectations are already set for the cache.get method\")\n\t}\n\n\tmmget.mock.funcget = f\n\treturn mmget.mock\n}",
"func (m *mcacheMockget) Set(f func(p insolar.Reference, p1 func() (r interface{}, r1 error)) (r interface{}, r1 error)) *cacheMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.getFunc = f\n\treturn m.mock\n}",
"func (s *SessionMock) Set(cacheKey string, v interface{}, timeInSeconds int) {\n\ts.Calls.Set++\n}",
"func Set(key string, value interface{}, expires int) error {\n\tCacheInstance.Set(key, wrapValue(value), expires)\n\treturn nil\n}",
"func (m *mLocalStorageMockGet) Set(f func(p context.Context, p1 core.PulseNumber, p2 []byte) (r []byte, r1 error)) *LocalStorageMock {\n\tm.mock.GetFunc = f\n\tm.mockExpectations = nil\n\treturn m.mock\n}",
"func (s *CacheStore) Set(key string, value string) {\n now := time.Now()\n duration := time.Millisecond * time.Duration(s.GlobalExpiry)\n expiry := int64(now.Add(duration).UnixNano())\n\n cv := &CacheValue{\n Value: value,\n Expiry: expiry,\n }\n\n s.mutex.Lock()\n s.Cache.Add(key, cv)\n s.mutex.Unlock()\n}",
"func (m *mLocalStorageMockGet) Set(f func(p context.Context, p1 core.PulseNumber, p2 []byte) (r []byte, r1 error)) *LocalStorageMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.GetFunc = f\n\treturn m.mock\n}",
"func (_m *SharedStore) Set(new *reacji.SharedPost, days int) error {\n\tret := _m.Called(new, days)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(*reacji.SharedPost, int) error); ok {\n\t\tr0 = rf(new, days)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (DummyStore) SetWithExpiration(ctx context.Context, key string, value interface{}, expiration time.Duration) error {\n\treturn nil\n}",
"func Set(key string, value []byte, d time.Duration) error {\n\tif d == 0 {\n\t\td = defaultDuration\n\t}\n\tif d == -1 {\n\t\treturn nil\n\t}\n\n\tfmt.Printf(\"%v cached\\n\", key)\n\treturn cache.Set([]byte(key), value, int(d.Seconds()))\n}",
"func (m *mLocalStorageMockSet) Set(f func(p context.Context, p1 core.PulseNumber, p2 []byte, p3 []byte) (r error)) *LocalStorageMock {\n\tm.mock.SetFunc = f\n\tm.mockExpectations = nil\n\treturn m.mock\n}",
"func (m *mMemberAnnouncementSignatureMockCopyOfSignature) Set(f func() (r cryptkit.Signature)) *MemberAnnouncementSignatureMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.CopyOfSignatureFunc = f\n\treturn m.mock\n}",
"func (m *mMemberAnnouncementSignatureMockEquals) Set(f func(p cryptkit.SignatureHolder) (r bool)) *MemberAnnouncementSignatureMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.EqualsFunc = f\n\treturn m.mock\n}",
"func (r *RadixDriver) Set(key string, value interface{}, secondsLifetime int64) error {\n\tvar cmd radix.CmdAction\n\t// if has expiration, then use the \"EX\" to delete the key automatically.\n\tif secondsLifetime > 0 {\n\t\tcmd = radix.FlatCmd(nil, \"SETEX\", key, secondsLifetime, value)\n\t} else {\n\t\tcmd = radix.FlatCmd(nil, \"SET\", key, value) // MSET same performance...\n\t}\n\n\treturn r.pool.Do(cmd)\n}",
"func (h *Handler) setCache(key []byte, val []byte) {\n expire := 600 // expire in 600 seconds (10 min)\n err := h.Cache.Set(key, val, expire)\n if err != nil {\n fmt.Println(err)\n }\n}",
"func (c *SimpleExpCache) Set(data interface{}) {\n\tc.mu.Lock()\n\tc.data = data\n\tc.expiredAt = Clock.GetUTCNow().Add(c.ttl)\n\tc.mu.Unlock()\n}",
"func (m *mPendingStorageMockAddPendingRequest) Set(f func(p context.Context, p1 insolar.ID, p2 insolar.ID)) *PendingStorageMock {\n\tm.mainExpectation = nil\n\tm.expectationSeries = nil\n\n\tm.mock.AddPendingRequestFunc = f\n\treturn m.mock\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewSet returns a set of table buffers from the given query.
|
func NewSet(ctx context.Context, q Queryer, sql string, params ...interface{}) (Set, error) {
rows, err := q.QueryContext(ctx, sql, params...)
if err != nil {
return nil, err
}
defer rows.Close()
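	// FillSet drains every result set before the deferred Close runs,
	// so the rows stay open for the whole buffering pass.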
return FillSet(ctx, rows)
}
|
[
"func NewBuffer(ctx context.Context, q Queryer, sql string, params ...interface{}) (table *Buffer, err error) {\n\tset, err := NewSet(ctx, q, sql, params...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(set) == 0 {\n\t\treturn nil, &IndexError{subject: indexErrorColumn, length: len(set), requested: 0}\n\t}\n\treturn set[0], nil\n}",
"func UA_QueryDataSet_new() []UA_QueryDataSet {\n\treturn UA_new((*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[52]))[:]).([]UA_QueryDataSet)\n}",
"func newQuery(rs ...*RecordSet) Query {\n\tvar rset *RecordSet\n\tif len(rs) > 0 {\n\t\trset = rs[0]\n\t}\n\treturn Query{\n\t\tcond: NewCondition(),\n\t\trecordSet: rset,\n\t}\n}",
"func FillSet(ctx context.Context, rows *sql.Rows) (Set, error) {\n\tvar out []interface{}\n\tvar dest []interface{}\n\tvar err error\n\n\tvar set Set = make([]*Buffer, 0, 3)\n\ttable := &Buffer{\n\t\tRows: make([]Row, 0, 10),\n\t}\n\n\tfor {\n\t\tfirst := true\n\t\tcolCount := 0\n\t\tfor rows.Next() {\n\t\t\t// Some initialization depends on knowing the column names\n\t\t\t// which isn't available until the first row is fetched.\n\t\t\tif first {\n\t\t\t\tfirst = false\n\n\t\t\t\t// Get the column names.\n\t\t\t\ttable.Columns, err = rows.Columns()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn set, err\n\t\t\t\t}\n\t\t\t\tcolCount = len(table.Columns)\n\n\t\t\t\t// Create an easy lookup that should be more efficent then\n\t\t\t\t// always looping to lookup an index from a column name.\n\t\t\t\ttable.columnNameIndex = make(map[string]int, colCount)\n\t\t\t\tfor i, n := range table.Columns {\n\t\t\t\t\ttable.columnNameIndex[n] = i\n\t\t\t\t}\n\n\t\t\t\t// Create a sized pointer slice.\n\t\t\t\tdest = make([]interface{}, colCount)\n\t\t\t}\n\t\t\t// Create a new data slice that will be appended on to the table.\n\t\t\tout = make([]interface{}, colCount)\n\n\t\t\t// Scanning requires having a pointer to the data slice,\n\t\t\t// so first make a pointer slice to each element of the data slice.\n\t\t\tfor i, _ := range dest {\n\t\t\t\tdest[i] = &out[i]\n\t\t\t}\n\n\t\t\t// Then scan into the pointer slice.\n\t\t\terr = rows.Scan(dest...)\n\t\t\tif err != nil {\n\t\t\t\treturn set, err\n\t\t\t}\n\t\t\ttable.Rows = append(table.Rows, Row{\n\t\t\t\tcolumnNameIndex: table.columnNameIndex,\n\t\t\t\tField: out,\n\t\t\t})\n\t\t}\n\t\tset = append(set, table)\n\t\tif !rows.NextResultSet() {\n\t\t\tbreak\n\t\t}\n\t\tfirst = false\n\t\ttable = &Buffer{\n\t\t\tRows: make([]Row, 0, 10),\n\t\t}\n\t}\n\treturn set, nil\n}",
"func newTableBuffer() *tableBuffer {\n\tbuffers := make(map[int][]*rowBuffer)\n\treturn &tableBuffer{\n\t\tbuffers: buffers,\n\t}\n}",
"func NewReviewSetQuery()(*ReviewSetQuery) {\n m := &ReviewSetQuery{\n Entity: *ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.NewEntity(),\n }\n return m\n}",
"func NewKeyboardQuerySet(db *gorm.DB) KeyboardQuerySet {\n\treturn KeyboardQuerySet{\n\t\tdb: db.Model(&Keyboard{}),\n\t}\n}",
"func NewTokenQuerySet(db *gorm.DB) TokenQuerySet {\n\treturn TokenQuerySet{\n\t\tdb: db.Model(&Token{}),\n\t}\n}",
"func newRows(qr *mproto.QueryResult) driver.Rows {\n\treturn &rows{qr: qr}\n}",
"func TableSetFromIds(tids ...int) (ts TableSet) {\n\treturn TableSet(bitset.Build(tids...))\n}",
"func NewQuery(table string, sf map[string]bool) *Query {\n\treturn &Query{\n\t\ttable: table,\n\t\tsfields: sf,\n\t\tsel: make([]string, 0, 16),\n\t\torderBy: \"id\",\n\t\torderDir: OrderDirection(\"ASC\"),\n\t\tlimit: 0,\n\t\tstartAfter: \"\",\n\t\tendBefore: \"\",\n\t\terr: nil,\n\t}\n}",
"func newQuery(db *Reindexer, namespace string) *Query {\n\tvar q *Query\n\tobj := queryPool.Get()\n\tif obj != nil {\n\t\tq = obj.(*Query)\n\t}\n\tif q == nil {\n\t\tq = &Query{}\n\t\tq.ser = cjson.NewSerializer(q.initBuf[:0])\n\t} else {\n\t\tq.nextOp = 0\n\t\tq.root = nil\n\t\tq.joinType = 0\n\t\tq.context = nil\n\t\tq.joinToFields = q.joinToFields[:0]\n\t\tq.joinQueries = q.joinQueries[:0]\n\t\tq.joinHandlers = q.joinHandlers[:0]\n\t\tq.mergedQueries = q.mergedQueries[:0]\n\t\tq.ptVersions = q.ptVersions[:0]\n\t\tq.ser = cjson.NewSerializer(q.ser.Bytes()[:0])\n\t\tq.closed = false\n\t\tq.totalName = \"\"\n\t\tq.executed = false\n\t\tq.nsArray = q.nsArray[:0]\n\t}\n\n\tq.Namespace = namespace\n\tq.db = db\n\tq.nextOp = opAND\n\tq.fetchCount = defaultFetchCount\n\n\tq.ser.PutVString(namespace)\n\treturn q\n}",
"func newTransactionQuery(query *query, tx *Transaction) *TransactionQuery {\n\treturn &TransactionQuery{\n\t\tquery: query,\n\t\ttx: tx,\n\t}\n}",
"func NewSetFrom(I ...*Automata) *Set {\n\ts := NewSet()\n\tfor _, i := range I {\n\t\ts.Add(i)\n\t}\n\treturn s\n}",
"func NewGetBuffer(data []byte) (get *GetBuffer) {\n\tget = new(GetBuffer)\n\t_, get.err = get.buf.Write(data)\n\treturn\n}",
"func (tm *TypeManager) NewSet(name string) *Set {\n\ts := &Set{\n\t\tName: name,\n\t\ttm: tm,\n\t}\n\n\ttm.contains.RLock()\n\tif abstract, ok := tm.contains.abstracts[name]; ok && abstract.dtype == LWWSet {\n\t\ts.d = abstract\n\t\ttm.contains.RUnlock()\n\t\treturn s\n\t}\n\ttm.contains.RUnlock()\n\n\tas := &crdt{\n\t\tid: name + aSetIDSuffix,\n\t\tdtype: aSet,\n\t\tsval: setValue{},\n\t}\n\trs := &crdt{\n\t\tid: name + rSetIDSuffix,\n\t\tdtype: rSet,\n\t\tsval: setValue{},\n\t}\n\tabstract := &abstractCRDT{\n\t\tid: name,\n\t\tdtype: LWWSet,\n\t\tcomponents: map[DataType]*crdt{\n\t\t\taSet: as,\n\t\t\trSet: rs,\n\t\t},\n\t}\n\n\ttm.contains.Lock()\n\ttm.contains.values[as.id] = as\n\ttm.contains.values[rs.id] = rs\n\ttm.contains.abstracts[name] = abstract\n\ttm.contains.Unlock()\n\n\ts.d = abstract\n\treturn s\n}",
"func (d *CockroachDBDriver) newQuery(mods ...qm.QueryMod) *queries.Query {\n\tq := new(queries.Query)\n\tqueries.SetDialect(q, &d.dialect)\n\tqm.Apply(q, mods...)\n\n\treturn q\n}",
"func newWorkingSet(\n\theight uint64,\n\tkv db.KVStore,\n\troot []byte,\n\topts ...db.KVStoreFlusherOption,\n) (WorkingSet, error) {\n\tflusher, err := db.NewKVStoreFlusher(kv, batch.NewCachedBatch(), opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdbForTrie, err := db.NewKVStoreForTrie(AccountTrieNamespace, flusher.KVStoreWithBuffer())\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to generate state tire db\")\n\t}\n\ttr, err := trie.NewTrie(trie.KVStoreOption(dbForTrie), trie.RootHashOption(root[:]))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to generate state trie from config\")\n\t}\n\n\treturn &workingSet{\n\t\taccountTrie: tr,\n\t\tfinalized: false,\n\t\tblockHeight: height,\n\t\ttrieRoots: make(map[int][]byte),\n\t\tflusher: flusher,\n\t}, tr.Start(context.Background())\n}",
"func NewSet(size int) *Set {\n\tif size <= 0 {\n\t\tsize = 10\n\t}\n\ts := &Set{\n\t\titems: make([]interface{}, 0, size),\n\t}\n\treturn s\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewBuffer returns a new single table buffer.
|
func NewBuffer(ctx context.Context, q Queryer, sql string, params ...interface{}) (table *Buffer, err error) {
set, err := NewSet(ctx, q, sql, params...)
if err != nil {
return nil, err
}
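	// An empty set means the statement produced no result sets at all.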
if len(set) == 0 {
return nil, &IndexError{subject: indexErrorColumn, length: len(set), requested: 0}
}
return set[0], nil
}
|
[
"func newTableBuffer() *tableBuffer {\n\tbuffers := make(map[int][]*rowBuffer)\n\treturn &tableBuffer{\n\t\tbuffers: buffers,\n\t}\n}",
"func SourceBufferNew(table *gtk.TextTagTable) (*SourceBuffer, error) {\n\tc := C.gtk_source_buffer_new(nativeTextTagTable(table))\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\n\te := wrapSourceBuffer(glib.Take(unsafe.Pointer(c)))\n\treturn e, nil\n}",
"func (t *Type) NewBuffer(conf buffer.Config) (buffer.Type, error) {\n\treturn t.env.BufferInit(conf, t)\n}",
"func NewBuffer(buf []byte) *Buffer {\n\treturn &Buffer{buf: buf}\n}",
"func (c *Context) NewBuffer(t gfx.BufferType) gfx.Buffer {\n\treturn &Buffer{\n\t\tctx: c,\n\t\ttyp: t,\n\t\to: c.O.Call(\"createBuffer\"),\n\t}\n}",
"func NewBuffer(handler types.IHandler, bufferLimit, level int, bubble, flushOnOverflow bool) *Buffer {\n\tb := &Buffer{\n\t\tFlushOnOverflow: flushOnOverflow,\n\t\tbufferLimit: bufferLimit,\n\t\th: handler,\n\t\tbuffer: make([]*types.Record, 0, bufferLimit),\n\t}\n\tb.SetLevel(level)\n\tb.SetBubble(bubble)\n\treturn b\n}",
"func New() *Buffer {\n\treturn &Buffer{nextCh: make(chan interface{}, 1)}\n}",
"func NewBuffer(prompt string, out *os.File, echo bool) *Buffer {\n\treturn &Buffer{\n\t\tOut: out,\n\t\tPrompt: prompt,\n\t\tEcho: echo,\n\t}\n}",
"func NewBuffer(target bufferAllocator) *Buffer {\n\treturn &Buffer{buf: *NewBufferWithError(target)}\n}",
"func NewGetBuffer(data []byte) (get *GetBuffer) {\n\tget = new(GetBuffer)\n\t_, get.err = get.buf.Write(data)\n\treturn\n}",
"func (c *Context) NewBuffer(t gfx.BufferType) gfx.Buffer {\n\tb := &Buffer{\n\t\tctx: c,\n\t\ttyp: t,\n\t}\n\tgl.GenBuffers(1, &b.o)\n\treturn b\n}",
"func (a *Avg) NewBuffer() (sql.AggregationBuffer, error) {\n\tconst (\n\t\tsum = float64(0)\n\t\trows = int64(0)\n\t)\n\n\tbufferChild, err := expression.Clone(a.UnaryExpression.Child)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &avgBuffer{sum, rows, bufferChild}, nil\n}",
"func NewBuffer(capacity int, flushInt time.Duration, onflush BufferFlush) Buffer {\n\tbuffer := &TimedBuffer{\n\t\tarray: make([]interface{}, 0, capacity),\n\t\tqueue: make(chan interface{}),\n\t\tonflush: onflush,\n\t}\n\n\tgo func() {\n\t\t//autostart buffer flusher.\n\t\ttimeout := time.After(flushInt)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase msg := <-buffer.queue:\n\t\t\t\tif len(buffer.array) < capacity {\n\t\t\t\t\tbuffer.array = append(buffer.array, msg)\n\t\t\t\t}\n\n\t\t\t\tif len(buffer.array) >= capacity {\n\t\t\t\t\t//no more buffer space.\n\t\t\t\t\tbuffer.flush()\n\t\t\t\t}\n\t\t\tcase <-timeout:\n\t\t\t\ttimeout = time.After(flushInt)\n\t\t\t\tbuffer.flush()\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn buffer\n}",
"func NewBuffer(domain, path string) *Buffer {\n\treturn &Buffer{\n\t\tdomain: domain,\n\t\tcachePath: path,\n\t}\n}",
"func NewBuffer() *Buffer {\n\tp := bufPool.Get().(*Buffer)\n\tp.Reset()\n\treturn p\n}",
"func NewBuffer(glTarget gl.Enum) (me *Buffer) {\r\n\tme = &Buffer{GlTarget: glTarget}\r\n\treturn\r\n}",
"func newBuffer(max int) *buffer {\n\treturn &buffer{\n\t\tmax: max,\n\t\tnotify: nil,\n\t\tstop: make(chan struct{}),\n\t}\n}",
"func TestNewBuffer(t *testing.T) {\n\tcontext, err := NewContext(nil)\n\trequire.NoError(t, err)\n\n\tbuffer, err := NewBuffer(context)\n\trequire.NoError(t, err)\n\tassert.NotNil(t, buffer)\n\n\tbytes, err := buffer.dataCopy()\n\trequire.NoError(t, err)\n\tassert.Nil(t, bytes)\n\n\tdatatype, err := buffer.Type()\n\trequire.NoError(t, err)\n\tassert.Equal(t, datatype, TILEDB_UINT8)\n}",
"func CloneBuffer(obj flatbuffers.FlatBuffer) []byte {\n\tt := obj.Table()\n\tb := make([]byte, len(t.Bytes))\n\tcopy(b, t.Bytes)\n\tobj.Init(b, t.Pos)\n\treturn b\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewScaler returns the first field in the first row.
|
func NewScaler(ctx context.Context, q Queryer, sql string, params ...interface{}) (interface{}, error) {
t, err := NewBuffer(ctx, q, sql, params...)
if err != nil {
return nil, err
}
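	// Check both dimensions: the buffer may hold zero rows, and the first
	// row may hold zero fields; either case is reported as an IndexError.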
if len(t.Rows) == 0 {
return nil, &IndexError{subject: indexErrorRow, length: len(t.Rows), requested: 0}
}
row := t.Rows[0]
if len(row.Field) == 0 {
return nil, &IndexError{subject: indexErrorColumn, length: len(row.Field), requested: 0}
}
return row.Field[0], nil
}
|
[
"func NewScaler() Scaler {\n\treturn &machineSetScaler{}\n}",
"func (s *StyleWindow) GetScaler() *StyleItem {\n\tvar ret *StyleItem\n\tret = (*StyleItem)(unsafe.Pointer(&s.scaler))\n\treturn ret\n}",
"func Scale(a Tuple, scalar float64) Tuple {\n\treturn New(scalar*a.X, scalar*a.Y, scalar*a.Z, scalar*a.W)\n}",
"func (mat *T) Scaling() vec3.T {\n\treturn vec3.T{mat[0][0], mat[1][1], mat[2][2]}\n}",
"func newScales(c *v1beta1.ExtensionsV1beta1Client, namespace string) *k8sscales {\n\treturn &k8sscales{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}",
"func NewNormalizingRow() *NormalizingRow {\n\treturn &NormalizingRow{\n\t\tcount: 0,\n\t\tdata: make(map[State]uint32),\n\t\tsize: uint64(4),\n\t\tdirty: true,\n\t}\n}",
"func (a Vector) Scale(s float64) Vector {\n\treturn Vector{X: s * a.X, Y: s * a.Y, Z: s * a.Z}\n}",
"func (s CurrencySet) First() m.CurrencyData {\n\treturn &CurrencyData{\n\t\ts.RecordCollection.First(),\n\t}\n}",
"func (d *idf) firstVal(tag uint16) uint {\n\treturn d.features[tag].firstVal()\n}",
"func (c *Context) ReadPrescale() uint8 {\n\treturn c.read8(PreScale)\n}",
"func PreprocessScale(input *mat.Dense, param *PreprocessScaleOptionalParam) (*mat.Dense, scalingModel) {\n params := getParams(\"preprocess_scale\")\n timers := getTimers()\n\n disableBacktrace()\n disableVerbose()\n // Detect if the parameter was passed; set if so.\n gonumToArmaMat(params, \"input\", input)\n setPassed(params, \"input\")\n\n // Detect if the parameter was passed; set if so.\n if param.Epsilon != 1e-06 {\n setParamDouble(params, \"epsilon\", param.Epsilon)\n setPassed(params, \"epsilon\")\n }\n\n // Detect if the parameter was passed; set if so.\n if param.InputModel != nil {\n setScalingModel(params, \"input_model\", param.InputModel)\n setPassed(params, \"input_model\")\n }\n\n // Detect if the parameter was passed; set if so.\n if param.InverseScaling != false {\n setParamBool(params, \"inverse_scaling\", param.InverseScaling)\n setPassed(params, \"inverse_scaling\")\n }\n\n // Detect if the parameter was passed; set if so.\n if param.MaxValue != 1 {\n setParamInt(params, \"max_value\", param.MaxValue)\n setPassed(params, \"max_value\")\n }\n\n // Detect if the parameter was passed; set if so.\n if param.MinValue != 0 {\n setParamInt(params, \"min_value\", param.MinValue)\n setPassed(params, \"min_value\")\n }\n\n // Detect if the parameter was passed; set if so.\n if param.ScalerMethod != \"standard_scaler\" {\n setParamString(params, \"scaler_method\", param.ScalerMethod)\n setPassed(params, \"scaler_method\")\n }\n\n // Detect if the parameter was passed; set if so.\n if param.Seed != 0 {\n setParamInt(params, \"seed\", param.Seed)\n setPassed(params, \"seed\")\n }\n\n // Detect if the parameter was passed; set if so.\n if param.Verbose != false {\n setParamBool(params, \"verbose\", param.Verbose)\n setPassed(params, \"verbose\")\n enableVerbose()\n }\n\n // Mark all output options as passed.\n setPassed(params, \"output\")\n setPassed(params, \"output_model\")\n\n // Call the mlpack program.\n C.mlpackPreprocessScale(params.mem, timers.mem)\n\n // Initialize result variable and get output.\n var outputPtr mlpackArma\n output := outputPtr.armaToGonumMat(params, \"output\")\n var outputModel scalingModel\n outputModel.getScalingModel(params, \"output_model\")\n // Clean memory.\n cleanParams(params)\n cleanTimers(timers)\n // Return output(s).\n return output, outputModel\n}",
"func getScaleOfLeftmostValue(ctx *sql.Context, row sql.Row, e sql.Expression, d, dScale int32) int32 {\n\tif e == nil {\n\t\treturn 0\n\t}\n\n\tif a, ok := e.(*Div); ok {\n\t\td = d + 1\n\t\tif d == dScale {\n\t\t\tlval, err := a.Left.Eval(ctx, row)\n\t\t\tif err != nil {\n\t\t\t\treturn 0\n\t\t\t}\n\t\t\t_, s := GetPrecisionAndScale(lval)\n\t\t\t// the leftmost value can be row value of decimal type column\n\t\t\t// the evaluated value does not always match the scale of column type definition\n\t\t\ttyp := a.Left.Type()\n\t\t\tif dt, dok := typ.(sql.DecimalType); dok {\n\t\t\t\tts := dt.Scale()\n\t\t\t\tif ts > s {\n\t\t\t\t\ts = ts\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn int32(s)\n\t\t} else {\n\t\t\treturn getScaleOfLeftmostValue(ctx, row, a.Left, d, dScale)\n\t\t}\n\t}\n\n\treturn 0\n}",
"func (s Series) Min() float64 {\n\ts.Retain()\n\tdefer s.Release()\n\n\tswitch s.field.Type {\n\tcase arrow.PrimitiveTypes.Int32:\n\t\tv := s.Interface.(*array.Int32)\n\t\treturn int32Min(v)\n\tcase arrow.PrimitiveTypes.Int64:\n\t\tv := s.Interface.(*array.Int64)\n\t\treturn int64Min(v)\n\tcase arrow.PrimitiveTypes.Float32:\n\t\tv := s.Interface.(*array.Float32)\n\t\treturn float32Min(v)\n\tcase arrow.PrimitiveTypes.Float64:\n\t\tv := s.Interface.(*array.Float64)\n\t\treturn float64Min(v)\n\tdefault:\n\t\tpanic(\"series: min: unsupported type\")\n\t}\n}",
"func (p *primitive) Transform() []*mat.Dense {\n\treturn p.T\n}",
"func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }",
"func (p Polynomial) Scale(c float64) Polynomial {\n\tres := make(Polynomial, len(p))\n\tfor i, x := range p {\n\t\tres[i] = x * c\n\t}\n\treturn res\n}",
"func (A *SparseArray) Scale(val float64) {\n\tfor i, _ := range A.Elements {\n\t\tA.Elements[i] *= val\n\t}\n}",
"func (c *Csv) CutOne(dlcol int) *Csv {\n\tced := new(Csv)\n\tfor _, slice := range c.Records {\n\t\ttmp := make([]string, len(c.Records)-1)\n\t\ttmp = append(slice[:dlcol], slice[dlcol+1:]...)\n\t\tced.Records = append(ced.Records, tmp)\n\t}\n\treturn ced\n}",
"func (mat *T) SetScaling(s *vec3.T) *T {\n\tmat[0][0] = s[0]\n\tmat[1][1] = s[1]\n\tmat[2][2] = s[2]\n\treturn mat\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
FillSet will take a sql query result and fill the buffer with the entire result set.
|
func FillSet(ctx context.Context, rows *sql.Rows) (Set, error) {
var out []interface{}
var dest []interface{}
var err error
var set Set = make([]*Buffer, 0, 3)
table := &Buffer{
Rows: make([]Row, 0, 10),
}
for {
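		// Each pass of this outer loop buffers one result set; NextResultSet
		// below decides whether another set follows.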
first := true
colCount := 0
for rows.Next() {
			// Some initialization depends on knowing the column names,
			// which aren't available until the first row is fetched.
if first {
first = false
// Get the column names.
table.Columns, err = rows.Columns()
if err != nil {
return set, err
}
colCount = len(table.Columns)
				// Create an easy lookup that should be more efficient than
				// always looping to look up an index from a column name.
table.columnNameIndex = make(map[string]int, colCount)
for i, n := range table.Columns {
table.columnNameIndex[n] = i
}
// Create a sized pointer slice.
dest = make([]interface{}, colCount)
}
// Create a new data slice that will be appended on to the table.
out = make([]interface{}, colCount)
// Scanning requires having a pointer to the data slice,
// so first make a pointer slice to each element of the data slice.
			for i := range dest {
dest[i] = &out[i]
}
// Then scan into the pointer slice.
err = rows.Scan(dest...)
if err != nil {
return set, err
}
table.Rows = append(table.Rows, Row{
columnNameIndex: table.columnNameIndex,
Field: out,
})
}
set = append(set, table)
if !rows.NextResultSet() {
break
}
table = &Buffer{
Rows: make([]Row, 0, 10),
}
}
return set, nil
}
|
[
"func NewSet(ctx context.Context, q Queryer, sql string, params ...interface{}) (Set, error) {\n\trows, err := q.QueryContext(ctx, sql, params...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\treturn FillSet(ctx, rows)\n}",
"func NewBuffer(ctx context.Context, q Queryer, sql string, params ...interface{}) (table *Buffer, err error) {\n\tset, err := NewSet(ctx, q, sql, params...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(set) == 0 {\n\t\treturn nil, &IndexError{subject: indexErrorColumn, length: len(set), requested: 0}\n\t}\n\treturn set[0], nil\n}",
"func (o *OracleDatabase) BulkSet(req []state.SetRequest) error {\n\treturn o.dbaccess.ExecuteMulti(req, nil)\n}",
"func (q *Query) Clear() {\n q.Results = make([]*blob.Blob, 0)\n}",
"func (rset *Rset) beginRow() (err error) {\n\trset.log(_drv.Cfg().Log.Rset.BeginRow)\n\trset.Lock()\n\tdefer rset.Unlock()\n\n\tfetched, offset, finished := rset.fetched, rset.offset, rset.finished\n\tocistmt := rset.ocistmt\n\n\trset.logF(_drv.Cfg().Log.Rset.BeginRow, \"fetched=%d offset=%d finished=%t\", fetched, offset, finished)\n\tif fetched > 0 && fetched > offset {\n\t\tatomic.AddInt32(&rset.index, 1)\n\t\treturn nil\n\t}\n\tif finished {\n\t\trset.log(_drv.Cfg().Log.Rset.BeginRow, \"finished\")\n\t\treturn io.EOF\n\t}\n\t// check is open\n\tif ocistmt == nil {\n\t\trset.log(_drv.Cfg().Log.Rset.BeginRow, \"Rset is closed\")\n\t\treturn io.EOF\n\t}\n\t// allocate define descriptor handles\n\tif rset.env == nil {\n\t\treturn errF(\"Rset env is closed\")\n\t}\n\tenv := rset.env\n\tfor _, define := range rset.defs {\n\t\t//rset.logF(_drv.Cfg().Log.Rset.BeginRow, \"defs[%d]=%#v\", i, define)\n\t\tif define == nil {\n\t\t\tcontinue\n\t\t}\n\t\terr := define.alloc()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\trset.finished = false\n\t// fetch rset.fetchLen rows\n\tr := C.OCIStmtFetch2(\n\t\trset.ocistmt, //OCIStmt *stmthp,\n\t\tenv.ocierr, //OCIError *errhp,\n\t\tC.ub4(rset.fetchLen), //ub4 nrows,\n\t\tC.OCI_FETCH_NEXT, //ub2 orientation,\n\t\tC.sb4(0), //sb4 fetchOffset,\n\t\tC.OCI_DEFAULT) //ub4 mode );\n\tif r == C.OCI_ERROR {\n\t\terr := env.ociError()\n\t\treturn err\n\t} else if r == C.OCI_NO_DATA {\n\t\trset.log(_drv.Cfg().Log.Rset.BeginRow, \"OCI_NO_DATA\")\n\t\trset.finished = true\n\t\tfetchLen := rset.fetchLen\n\t\tif fetchLen == 1 {\n\t\t\t// return io.EOF to conform with database/sql/driver\n\t\t\treturn io.EOF\n\t\t}\n\t\t// If OCIStmtFetch2 returns OCI_NO_DATA this does not mean that no data fetched,\n\t\t// this means that the number of fetched rows is less than the array size,\n\t\t// they are all fetched by this OCIStmtFetch2 call, and you do not need to\n\t\t// call OCIStmtFetch2 anymore.\n\t\t//\n\t}\n\tvar rowsFetched C.ub4\n\tif err := rset.attr(unsafe.Pointer(&rowsFetched), 4, C.OCI_ATTR_ROWS_FETCHED); err != nil {\n\t\treturn err\n\t}\n\n\trset.fetched = int64(rowsFetched)\n\trset.offset = 0\n\terr = nil\n\tif rset.fetched == 0 {\n\t\trset.finished = true\n\t\terr = io.EOF\n\t} else {\n\t\tatomic.AddInt32(&rset.index, 1)\n\t}\n\n\treturn err\n}",
"func (mc *mergeCursor) fill() {\n\tif !mc.done {\n\t\tmc.peeked = mc.underlying.Next()\n\t\tmc.done = (mc.peeked == nil)\n\t}\n}",
"func (q *sqlQuery) All() (base.RecordDataSet, error) {\n\twhereClause := q.parseWhere()\n\toptionClause := q.parseOptions()\n\tvar query string\n\n\tif whereClause != \"\" {\n\t\tquery = strings.TrimRight(fmt.Sprintf(\n\t\t\t\"SELECT * FROM %s WHERE %s %s\", q.table, whereClause, optionClause,\n\t\t), \" \")\n\t} else {\n\t\tquery = strings.TrimRight(fmt.Sprintf(\n\t\t\t\"SELECT * FROM %s %s\", q.table, optionClause,\n\t\t), \" \")\n\t}\n\n\trows, err := queryDB(q.session, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn fetchResults(rows)\n}",
"func (b *SQLBuilder) Set(sets []set) {\n\tif len(sets) == 0 {\n\t\treturn\n\t}\n\tif len(sets) > 1 {\n\t\tb.w.WriteLine(`SET`)\n\t\tb.w.AddIndent()\n\t\tdefer b.w.SubIndent()\n\t} else {\n\t\tb.w.WriteString(`SET `)\n\t}\n\n\tcField := *b.Context\n\tcField.alias = NoAlias()\n\n\tfor k, v := range sets {\n\t\tcomma := `,`\n\t\tif k == len(sets)-1 {\n\t\t\tcomma = ``\n\t\t}\n\t\tb.w.WriteLine(v.Field.QueryString(&cField) + ` = ` + v.Value.QueryString(b.Context) + comma)\n\t}\n}",
"func (g *BaseGenerator) fillInQuery(qi query.Query, humanLabel, humanDesc, sql string) {\n\tv := url.Values{}\n\tv.Set(\"count\", \"false\")\n\tv.Set(\"query\", sql)\n\tq := qi.(*query.HTTP)\n\tq.HumanLabel = []byte(humanLabel)\n\tq.RawQuery = []byte(sql)\n\tq.HumanDescription = []byte(humanDesc)\n\tq.Method = []byte(\"GET\")\n\tq.Path = []byte(fmt.Sprintf(\"/exec?%s\", v.Encode()))\n\tq.Body = nil\n}",
"func (b *Batch) fillResults() error {\n\toffset := 0\n\tfor i := range b.Results {\n\t\tresult := &b.Results[i]\n\n\t\tfor k := 0; k < result.calls; k++ {\n\t\t\targs := b.reqs[offset+k].GetInner()\n\n\t\t\tvar reply roachpb.Response\n\t\t\t// It's possible that result.Err was populated early, for example\n\t\t\t// when PutProto is called and the proto marshaling errored out.\n\t\t\t// In that case, we don't want to mutate this result's error\n\t\t\t// further.\n\t\t\tif result.Err == nil {\n\t\t\t\t// The outcome of each result is that of the batch as a whole.\n\t\t\t\tresult.Err = b.pErr.GoError()\n\t\t\t\tif result.Err == nil {\n\t\t\t\t\t// For a successful request, load the reply to populate in\n\t\t\t\t\t// this pass.\n\t\t\t\t\tif b.response != nil && offset+k < len(b.response.Responses) {\n\t\t\t\t\t\treply = b.response.Responses[offset+k].GetInner()\n\t\t\t\t\t} else if args.Method() != roachpb.EndTransaction {\n\t\t\t\t\t\t// TODO(tschottdorf): EndTransaction is special-cased\n\t\t\t\t\t\t// here because it may be elided (r/o txns). Might\n\t\t\t\t\t\t// prefer to simulate an EndTransaction response\n\t\t\t\t\t\t// instead; this effectively just leaks here.\n\t\t\t\t\t\t// TODO(tschottdorf): returning an error here seems\n\t\t\t\t\t\t// to get swallowed.\n\t\t\t\t\t\tpanic(errors.Errorf(\"not enough responses for calls: %+v, %+v\",\n\t\t\t\t\t\t\tb.reqs, b.response))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tswitch req := args.(type) {\n\t\t\tcase *roachpb.GetRequest:\n\t\t\t\trow := &result.Rows[k]\n\t\t\t\trow.Key = []byte(req.Key)\n\t\t\t\tif result.Err == nil {\n\t\t\t\t\trow.Value = reply.(*roachpb.GetResponse).Value\n\t\t\t\t}\n\t\t\tcase *roachpb.PutRequest:\n\t\t\t\trow := &result.Rows[k]\n\t\t\t\trow.Key = []byte(req.Key)\n\t\t\t\tif result.Err == nil {\n\t\t\t\t\trow.Value = &req.Value\n\t\t\t\t}\n\t\t\tcase *roachpb.ConditionalPutRequest:\n\t\t\t\trow := &result.Rows[k]\n\t\t\t\trow.Key = []byte(req.Key)\n\t\t\t\tif result.Err == nil {\n\t\t\t\t\trow.Value = &req.Value\n\t\t\t\t}\n\t\t\tcase *roachpb.InitPutRequest:\n\t\t\t\trow := &result.Rows[k]\n\t\t\t\trow.Key = []byte(req.Key)\n\t\t\t\tif result.Err == nil {\n\t\t\t\t\trow.Value = &req.Value\n\t\t\t\t}\n\t\t\tcase *roachpb.IncrementRequest:\n\t\t\t\trow := &result.Rows[k]\n\t\t\t\trow.Key = []byte(req.Key)\n\t\t\t\tif result.Err == nil {\n\t\t\t\t\tt := reply.(*roachpb.IncrementResponse)\n\t\t\t\t\trow.Value = &roachpb.Value{}\n\t\t\t\t\trow.Value.SetInt(t.NewValue)\n\t\t\t\t}\n\t\t\tcase *roachpb.ScanRequest:\n\t\t\t\tif result.Err == nil {\n\t\t\t\t\tt := reply.(*roachpb.ScanResponse)\n\t\t\t\t\tresult.Rows = make([]KeyValue, len(t.Rows))\n\t\t\t\t\tfor j := range t.Rows {\n\t\t\t\t\t\tsrc := &t.Rows[j]\n\t\t\t\t\t\tdst := &result.Rows[j]\n\t\t\t\t\t\tdst.Key = src.Key\n\t\t\t\t\t\tdst.Value = &src.Value\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase *roachpb.ReverseScanRequest:\n\t\t\t\tif result.Err == nil {\n\t\t\t\t\tt := reply.(*roachpb.ReverseScanResponse)\n\t\t\t\t\tresult.Rows = make([]KeyValue, len(t.Rows))\n\t\t\t\t\tfor j := range t.Rows {\n\t\t\t\t\t\tsrc := &t.Rows[j]\n\t\t\t\t\t\tdst := &result.Rows[j]\n\t\t\t\t\t\tdst.Key = src.Key\n\t\t\t\t\t\tdst.Value = &src.Value\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase *roachpb.DeleteRequest:\n\t\t\t\trow := &result.Rows[k]\n\t\t\t\trow.Key = []byte(args.(*roachpb.DeleteRequest).Key)\n\n\t\t\tcase *roachpb.DeleteRangeRequest:\n\t\t\t\tif result.Err == nil {\n\t\t\t\t\tresult.Keys = reply.(*roachpb.DeleteRangeResponse).Keys\n\t\t\t\t}\n\n\t\t\tdefault:\n\t\t\t\tif result.Err 
== nil {\n\t\t\t\t\tresult.Err = errors.Errorf(\"unsupported reply: %T for %T\",\n\t\t\t\t\t\treply, args)\n\t\t\t\t}\n\n\t\t\t\t// Nothing to do for all methods below as they do not generate\n\t\t\t\t// any rows.\n\t\t\tcase *roachpb.BeginTransactionRequest:\n\t\t\tcase *roachpb.EndTransactionRequest:\n\t\t\tcase *roachpb.AdminMergeRequest:\n\t\t\tcase *roachpb.AdminSplitRequest:\n\t\t\tcase *roachpb.AdminTransferLeaseRequest:\n\t\t\tcase *roachpb.HeartbeatTxnRequest:\n\t\t\tcase *roachpb.GCRequest:\n\t\t\tcase *roachpb.PushTxnRequest:\n\t\t\tcase *roachpb.RangeLookupRequest:\n\t\t\tcase *roachpb.ResolveIntentRequest:\n\t\t\tcase *roachpb.ResolveIntentRangeRequest:\n\t\t\tcase *roachpb.MergeRequest:\n\t\t\tcase *roachpb.TruncateLogRequest:\n\t\t\tcase *roachpb.RequestLeaseRequest:\n\t\t\tcase *roachpb.CheckConsistencyRequest:\n\t\t\tcase *roachpb.ChangeFrozenRequest:\n\t\t\t}\n\t\t\t// Fill up the resume span.\n\t\t\tif result.Err == nil && reply != nil && reply.Header().ResumeSpan != nil {\n\t\t\t\tresult.ResumeSpan = *reply.Header().ResumeSpan\n\t\t\t}\n\t\t}\n\t\toffset += result.calls\n\t}\n\n\tfor i := range b.Results {\n\t\tresult := &b.Results[i]\n\t\tif result.Err != nil {\n\t\t\treturn result.Err\n\t\t}\n\t}\n\treturn nil\n}",
"func (r *resultGroup) Flush(ctx context.Context) error {\n\tif err := r.conn.flush(true /* forceSend */); err != nil {\n\t\treturn err\n\t}\n\t// hasSentResults is relative to the Flush() point, so we reset it here.\n\tr.state.hasSentResults = false\n\treturn nil\n}",
"func (m *MapReduceJob) processFill(results [][]interface{}) [][]interface{} {\n\t// don't do anything if we're supposed to leave the nulls\n\tif m.stmt.Fill == NullFill {\n\t\treturn results\n\t}\n\n\tif m.stmt.Fill == NoFill {\n\t\t// remove any rows that have even one nil value. This one is tricky because they could have multiple\n\t\t// aggregates, but this option means that any row that has even one nil gets purged.\n\t\tnewResults := make([][]interface{}, 0, len(results))\n\t\tfor _, vals := range results {\n\t\t\thasNil := false\n\t\t\t// start at 1 because the first value is always time\n\t\t\tfor j := 1; j < len(vals); j++ {\n\t\t\t\tif vals[j] == nil {\n\t\t\t\t\thasNil = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !hasNil {\n\t\t\t\tnewResults = append(newResults, vals)\n\t\t\t}\n\t\t}\n\t\treturn newResults\n\t}\n\n\t// they're either filling with previous values or a specific number\n\tfor i, vals := range results {\n\t\t// start at 1 because the first value is always time\n\t\tfor j := 1; j < len(vals); j++ {\n\t\t\tif vals[j] == nil {\n\t\t\t\tswitch m.stmt.Fill {\n\t\t\t\tcase PreviousFill:\n\t\t\t\t\tif i != 0 {\n\t\t\t\t\t\tvals[j] = results[i-1][j]\n\t\t\t\t\t}\n\t\t\t\tcase NumberFill:\n\t\t\t\t\tvals[j] = m.stmt.FillValue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn results\n}",
"func (rset *Rset) Exhaust() {\n\tif rset == nil {\n\t\treturn\n\t}\n\tif !rset.IsOpen() {\n\t\treturn\n\t}\n\tfor {\n\t\terr := rset.beginRow()\n\t\trset.endRow()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n}",
"func fetchRows(iter *iterator, dst interface{}) error {\n\tvar err error\n\trows := iter.cursor\n\tdefer rows.Close()\n\n\t// Destination.\n\tdstv := reflect.ValueOf(dst)\n\n\tif dstv.IsNil() || dstv.Kind() != reflect.Ptr {\n\t\treturn ErrExpectingPointer\n\t}\n\n\tif dstv.Elem().Kind() != reflect.Slice {\n\t\treturn ErrExpectingSlicePointer\n\t}\n\n\tif dstv.Kind() != reflect.Ptr || dstv.Elem().Kind() != reflect.Slice || dstv.IsNil() {\n\t\treturn ErrExpectingSliceMapStruct\n\t}\n\n\tvar columns []string\n\tif columns, err = rows.Columns(); err != nil {\n\t\treturn err\n\t}\n\n\tslicev := dstv.Elem()\n\titemT := slicev.Type().Elem()\n\n\treset(dst)\n\n\tfor rows.Next() {\n\t\titem, err := fetchResult(iter, itemT, columns)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif itemT.Kind() == reflect.Ptr {\n\t\t\tslicev = reflect.Append(slicev, item)\n\t\t} else {\n\t\t\tslicev = reflect.Append(slicev, reflect.Indirect(item))\n\t\t}\n\t}\n\n\tdstv.Elem().Set(slicev)\n\n\treturn rows.Err()\n}",
"func (m *mSqlDBMockQuery) Set(f func(ctx context.Context, query string, args ...interface{}) (s1 sqlRows, err error)) *SqlDBMock {\n\tif m.defaultExpectation != nil {\n\t\tm.mock.t.Fatalf(\"Default expectation is already set for the sqlDB.Query method\")\n\t}\n\n\tif len(m.expectations) > 0 {\n\t\tm.mock.t.Fatalf(\"Some expectations are already set for the sqlDB.Query method\")\n\t}\n\n\tm.mock.funcQuery = f\n\treturn m.mock\n}",
"func UA_QueryDataSet_init(p []UA_QueryDataSet) {\n\tnoarch.Memset(p, byte(0), 304)\n}",
"func (d *DB) QueryPrimitive(ctx context.Context, statement string, _ string, args ...interface{}) (connection.ResultFetch, error) {\n\tvar rows pgx.Rows\n\tvar err error\n\tvar connQ func(context.Context, string, ...interface{}) (pgx.Rows, error)\n\tif d.tx != nil {\n\t\tconnQ = d.tx.Query\n\t} else if d.conn != nil {\n\t\tconnQ = d.conn.Query\n\t} else {\n\t\treturn nil, gaumErrors.NoDB\n\t}\n\n\tif len(args) != 0 {\n\t\trows, err = connQ(ctx, statement, args...)\n\t} else {\n\t\trows, err = connQ(ctx, statement)\n\t}\n\tif err != nil {\n\t\treturn func(interface{}) error { return nil },\n\t\t\terrors.Wrap(err, \"querying database\")\n\t}\n\treturn func(destination interface{}) error {\n\t\tif reflect.TypeOf(destination).Kind() != reflect.Ptr {\n\t\t\treturn errors.Errorf(\"the passed receiver is not a pointer, connection is still open\")\n\t\t}\n\t\t// TODO add a timer that closes rows if nothing is done.\n\t\tdefer rows.Close()\n\t\tvar err error\n\t\treflect.ValueOf(destination).Elem().Set(reflect.MakeSlice(reflect.TypeOf(destination).Elem(), 0, 0))\n\n\t\t// Obtain the actual slice\n\t\tdestinationSlice := reflect.ValueOf(destination).Elem()\n\n\t\t// If this is not Ptr->Slice->Type it would have failed already.\n\t\ttod := reflect.TypeOf(destination).Elem().Elem()\n\n\t\tfor rows.Next() {\n\t\t\t// Get a New ptr to the object of the type of the slice.\n\t\t\tnewElemPtr := reflect.New(tod)\n\n\t\t\t// Try to fetch the data\n\t\t\terr = rows.Scan(newElemPtr.Interface())\n\t\t\tif err != nil {\n\t\t\t\trows.Close()\n\t\t\t\treturn errors.Wrap(err, \"scanning values into recipient, connection was closed\")\n\t\t\t}\n\t\t\t// Add to the passed slice, this will actually add to an already populated slice if one\n\t\t\t// passed, how cool is that?\n\t\t\tdestinationSlice.Set(reflect.Append(destinationSlice, newElemPtr.Elem()))\n\t\t}\n\t\treturn rows.Err()\n\t}, nil\n}",
"func (cc *Conn) FlushSet(s *Set) {\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\tdata := cc.marshalAttr([]netlink.Attribute{\n\t\t{Type: unix.NFTA_SET_TABLE, Data: []byte(s.Table.Name + \"\\x00\")},\n\t\t{Type: unix.NFTA_SET_NAME, Data: []byte(s.Name + \"\\x00\")},\n\t})\n\tcc.messages = append(cc.messages, netlink.Message{\n\t\tHeader: netlink.Header{\n\t\t\tType: netlink.HeaderType((unix.NFNL_SUBSYS_NFTABLES << 8) | unix.NFT_MSG_DELSETELEM),\n\t\t\tFlags: netlink.Request | netlink.Acknowledge,\n\t\t},\n\t\tData: append(extraHeader(uint8(s.Table.Family), 0), data...),\n\t})\n}",
"func (c *Cursor) All(result interface{}) error {\n\tresultv := reflect.ValueOf(result)\n\tif resultv.Kind() != reflect.Ptr || resultv.Elem().Kind() != reflect.Slice {\n\t\tpanic(\"result argument must be a slice address\")\n\t}\n\tslicev := resultv.Elem()\n\tslicev = slicev.Slice(0, slicev.Cap())\n\telemt := slicev.Type().Elem()\n\ti := 0\n\tfor {\n\t\tif slicev.Len() == i {\n\t\t\telemp := reflect.New(elemt)\n\t\t\tif !c.Next(elemp.Interface()) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tslicev = reflect.Append(slicev, elemp.Elem())\n\t\t\tslicev = slicev.Slice(0, slicev.Cap())\n\t\t} else {\n\t\t\tif !c.Next(slicev.Index(i).Addr().Interface()) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\ti++\n\t}\n\tresultv.Elem().Set(slicev.Slice(0, i))\n\n\tif err := c.Err(); err != nil {\n\t\tc.Close()\n\t\treturn err\n\t}\n\n\tif err := c.Close(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the field from the row index and named column.
|
func (t *Buffer) Get(rowIndex int, columnName string) interface{} {
i, ok := t.columnNameIndex[columnName]
if !ok {
panic(&IndexError{subject: indexErrorName, notFoundName: columnName})
}
if len(t.Rows) <= rowIndex {
panic(&IndexError{subject: indexErrorRow, length: len(t.Rows), requested: rowIndex})
}
return t.Rows[rowIndex].Field[i]
}
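A minimal usage sketch, assuming a Buffer already populated with rows and a column-name index; the Row and Buffer definitions below are simplified hypothetical stand-ins (the real method panics with IndexError values), included only so the example compiles on its own.

package main

import "fmt"

// Hypothetical, pared-down stand-ins for the package's Buffer and Row
// types, defined here only so the sketch is self-contained.
type Row struct{ Field []interface{} }

type Buffer struct {
	columnNameIndex map[string]int
	Rows            []Row
}

// Get mirrors the method above, with plain panic messages standing in
// for the package's IndexError values.
func (t *Buffer) Get(rowIndex int, columnName string) interface{} {
	i, ok := t.columnNameIndex[columnName]
	if !ok {
		panic(fmt.Sprintf("no such column: %q", columnName))
	}
	if len(t.Rows) <= rowIndex {
		panic(fmt.Sprintf("row %d out of range, have %d rows", rowIndex, len(t.Rows)))
	}
	return t.Rows[rowIndex].Field[i]
}

func main() {
	b := &Buffer{
		columnNameIndex: map[string]int{"id": 0, "name": 1},
		Rows:            []Row{{Field: []interface{}{int64(7), "alice"}}},
	}
	fmt.Println(b.Get(0, "name")) // alice
}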
|
[
"func (r *Row) FieldByIndex(index int) *Field {\n\tif index > len(r.fieldList) {\n\t\treturn nil\n\t}\n\treturn r.fieldList[index]\n}",
"func (r *Row) FieldByName(name string) *Field {\n\tif f, ok := r.fieldMap[name]; ok {\n\t\treturn f\n\t}\n\treturn nil\n}",
"func (r RowCollection) fieldIndex(fieldName string) (int, error) {\n\tif len(r) == 0 {\n\t\treturn -1, errors.New(errEmptyDataSet)\n\t}\n\tfld := r[0].FieldByName(fieldName)\n\tif fld == nil {\n\t\treturn -1, errors.New(errNoSuchField)\n\t}\n\treturn fld.Index(), nil\n}",
"func (v *VuFindBibliographicIndex) Field(name string) (*dynamicField, error) {\n\tfor _, field := range v.fields {\n\t\tif field.name == name {\n\t\t\treturn field, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"no such field: %s\", name)\n}",
"func (t *Table) FieldName(name string) (string, error) {\n\tif t.columnsByName == nil {\n\t\tt.columnsByName = make(map[string]Column)\n\t}\n\tif column, ok := t.columnsByName[name]; ok {\n\t\treturn column.Field(), nil\n\t}\n\treturn \"\", fmt.Errorf(\"no mapping for column %q\", name)\n}",
"func (f FieldPath) GetColumn(batch arrow.Record) (arrow.Array, error) {\n\treturn f.getArray(batch.Columns())\n}",
"func (tp *Template) GetFieldAt(i int) (fld interface{}) {\n\tif tp != nil {\n\t\tfld = (*tp).Flds[i]\n\t}\n\n\treturn fld\n}",
"func (sv *structValue) GetField(name string) (*Field, bool) {\n\tfield, found := sv.fieldMap[name]\n\treturn field, found\n}",
"func (t Table) GetFieldID(idx int) string {\n\treturn reflect.ValueOf(t.Elems).Elem().Index(idx).FieldByName(\"Fields\").FieldByName(\"ID\").String()\n}",
"func (h Helper) GetFieldByIndex(index int, spec *ast.TypeSpec) *ast.Field {\n\tif !h.IsStruct(spec.Type) {\n\t\treturn nil\n\t}\n\n\tsType := h.GetStructType(spec.Type)\n\tif index < 0 || index >= len(sType.Fields.List) {\n\t\treturn nil\n\t}\n\n\treturn sType.Fields.List[index]\n}",
"func (s *StructType) Field(i int) (field *StructField) {\n\tif i < 0 || i >= len(s.fields) {\n\t\tpanic(\"aster: Field index out of bounds\")\n\t}\n\treturn s.fields[i]\n}",
"func GetFieldValue(validatedFieldPosition int, board *Board) string {\n\trow, col := GetBoardRowAndCol(validatedFieldPosition, board)\n\n\treturn board.fields[row][col]\n}",
"func (v Value) Column() int { return int(^v.columnIndex) }",
"func (txt *Txt) GetField(fieldID) (storage.Field, bool) {\n\n}",
"func (t *StructValue) FieldByName(name string) Value {\n\tif f, ok := t.Type().(*StructType).FieldByName(name); ok {\n\t\treturn t.FieldByIndex(f.Index)\n\t}\n\treturn nil;\n}",
"func (strct *Struct) GetField(name string) *Field {\n\tfor _, f := range strct.fields {\n\t\tif f.name == name {\n\t\t\treturn f\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (m *Matrix) GetColumn(index int) (array []float64) {\n\tarray = make([]float64, m.Column)\n\tfor i, r := range m.raw {\n\t\tarray[i] = r[index]\n\t}\n\treturn\n}",
"func (ci *Column[T]) Get(entry *T) reflect.Value {\n\tif entry == nil {\n\t\treturn reflect.Zero(ci.Type())\n\t}\n\tv := reflect.ValueOf(entry)\n\tif ci.Extractor != nil {\n\t\treturn reflect.ValueOf(ci.Extractor(v.Interface().(*T)))\n\t}\n\treturn ci.getRawField(v)\n}",
"func getFieldNameValueFromRawLineWithIndexMap(fieldName, line string, indexMap map[string]int) string {\n\tkey := strings.ToLower(fieldName)\n\tbegin, ok := indexMap[key]\n\tif !ok || begin == -1 {\n\t\treturn \"\"\n\t}\n\n\tif begin >= len(line) {\n\t\treturn \"\"\n\t}\n\n\treturn strings.Fields(line[begin:])[0]\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
UseTPM12LengthPrefixSize makes Pack/Unpack use TPM 1.2 encoding for byte arrays.
|
func UseTPM12LengthPrefixSize() {
lengthPrefixSize = tpm12PrefixSize
}
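The effect on the wire is easiest to see side by side. A minimal sketch, assuming tpm12PrefixSize is 4 bytes and tpm20PrefixSize is 2 bytes (matching the uint32/uint16 writes in packType below): the same payload is packed with a different-width big-endian length prefix.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// writePrefixed emulates how packType emits a []byte under a given
// length-prefix width: 4 bytes for TPM 1.2, 2 bytes for TPM 2.0.
func writePrefixed(prefixSize int, payload []byte) []byte {
	var buf bytes.Buffer
	switch prefixSize {
	case 4:
		_ = binary.Write(&buf, binary.BigEndian, uint32(len(payload)))
	case 2:
		_ = binary.Write(&buf, binary.BigEndian, uint16(len(payload)))
	}
	buf.Write(payload)
	return buf.Bytes()
}

func main() {
	payload := []byte{0xaa, 0xbb}
	fmt.Printf("TPM 1.2: % x\n", writePrefixed(4, payload)) // 00 00 00 02 aa bb
	fmt.Printf("TPM 2.0: % x\n", writePrefixed(2, payload)) // 00 02 aa bb
}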
|
[
"func UseTPM20LengthPrefixSize() {\n\tlengthPrefixSize = tpm20PrefixSize\n}",
"func (b *U32Bytes) TPMUnmarshal(in io.Reader) error {\n\tvar tmpSize uint32\n\tif err := binary.Read(in, binary.BigEndian, &tmpSize); err != nil {\n\t\treturn err\n\t}\n\n\tif tmpSize > maxBytesBufferSize {\n\t\treturn bytes.ErrTooLarge\n\t}\n\t// We can now safely cast to an int on 32-bit or 64-bit machines\n\tsize := int(tmpSize)\n\n\tif len(*b) >= size {\n\t\t*b = (*b)[:size]\n\t} else {\n\t\t*b = append(*b, make([]byte, size-len(*b))...)\n\t}\n\n\tn, err := in.Read(*b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif n != size {\n\t\treturn fmt.Errorf(\"unable to read all contents in to U32Bytes\")\n\t}\n\treturn nil\n}",
"func (b *U16Bytes) TPMUnmarshal(in io.Reader) error {\n\tvar tmpSize uint16\n\tif err := binary.Read(in, binary.BigEndian, &tmpSize); err != nil {\n\t\treturn err\n\t}\n\tsize := int(tmpSize)\n\n\tif len(*b) >= size {\n\t\t*b = (*b)[:size]\n\t} else {\n\t\t*b = append(*b, make([]byte, size-len(*b))...)\n\t}\n\n\tn, err := in.Read(*b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif n != size {\n\t\treturn io.ErrUnexpectedEOF\n\t}\n\treturn nil\n}",
"func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte)",
"func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]byte)",
"func TestPasswordGenerationWithCustomLength(t *testing.T) {\n\tlength := 15\n\tstringifyLength := strconv.Itoa(length)\n\tresponse := decodeResponse(Handle([]byte(\"{\\\"length\\\":\" + stringifyLength + \"}\")))\n\n\tvalidateResponseCode(t, http.StatusOK, response.Code)\n\tvalidatePassword(t, response.Password, length)\n}",
"func TestEnsurePrivateKeySizePadsLessThanRequiredSizeArrays(t *testing.T) {\n\tshortByteArray := make([]byte, ecdsaPrivateKeySize/2)\n\tfor i := range shortByteArray {\n\t\tshortByteArray[i] = byte(1)\n\t}\n\n\texpected := append(\n\t\tmake([]byte, ecdsaPrivateKeySize-ecdsaPrivateKeySize/2),\n\t\tshortByteArray...)\n\n\tresult := EnsurePrivateKeySize(shortByteArray)\n\trequire.True(t, reflect.DeepEqual(expected, result))\n}",
"func NewPMT(b []byte) (PMT, error) {\n\tminsize := 16\n\tif len(b) < minsize {\n\t\treturn nil, ErrTooShort\n\t}\n\treturn PMT(b), nil\n}",
"func UnpackBuf(buf io.Reader, elts ...interface{}) error {\n\tfor _, e := range elts {\n\t\tv := reflect.ValueOf(e)\n\t\tk := v.Kind()\n\t\tif k != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"all values passed to Unpack must be pointers, got %v\", k)\n\t\t}\n\n\t\tif v.IsNil() {\n\t\t\treturn errors.New(\"can't fill a nil pointer\")\n\t\t}\n\n\t\tiv := reflect.Indirect(v)\n\t\tswitch iv.Kind() {\n\t\tcase reflect.Struct:\n\t\t\t// Decompose the struct and copy over the values.\n\t\t\tfor i := 0; i < iv.NumField(); i++ {\n\t\t\t\tif err := UnpackBuf(buf, iv.Field(i).Addr().Interface()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Slice:\n\t\t\tvar size int\n\t\t\t_, isHandles := e.(*[]Handle)\n\n\t\t\tswitch {\n\t\t\t// []Handle always uses 2-byte length, even with TPM 1.2.\n\t\t\tcase isHandles:\n\t\t\t\tvar tmpSize uint16\n\t\t\t\tif err := binary.Read(buf, binary.BigEndian, &tmpSize); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsize = int(tmpSize)\n\t\t\t// TPM 2.0\n\t\t\tcase lengthPrefixSize == tpm20PrefixSize:\n\t\t\t\tvar tmpSize uint16\n\t\t\t\tif err := binary.Read(buf, binary.BigEndian, &tmpSize); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsize = int(tmpSize)\n\t\t\t// TPM 1.2\n\t\t\tcase lengthPrefixSize == tpm12PrefixSize:\n\t\t\t\tvar tmpSize uint32\n\t\t\t\tif err := binary.Read(buf, binary.BigEndian, &tmpSize); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsize = int(tmpSize)\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"lengthPrefixSize is %d, must be either 2 or 4\", lengthPrefixSize)\n\t\t\t}\n\n\t\t\t// A zero size is used by the TPM to signal that certain elements\n\t\t\t// are not present.\n\t\t\tif size == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Make len(e) match size exactly.\n\t\t\tswitch b := e.(type) {\n\t\t\tcase *[]byte:\n\t\t\t\tif len(*b) >= size {\n\t\t\t\t\t*b = (*b)[:size]\n\t\t\t\t} else {\n\t\t\t\t\t*b = append(*b, make([]byte, size-len(*b))...)\n\t\t\t\t}\n\t\t\tcase *[]Handle:\n\t\t\t\tif len(*b) >= size {\n\t\t\t\t\t*b = (*b)[:size]\n\t\t\t\t} else {\n\t\t\t\t\t*b = append(*b, make([]Handle, size-len(*b))...)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"can't fill pointer to %T, only []byte or []Handle slices\", e)\n\t\t\t}\n\n\t\t\tif err := binary.Read(buf, binary.BigEndian, e); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\tif err := binary.Read(buf, binary.BigEndian, e); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn nil\n}",
"func Test16TPMPresent() (bool, error) {\n\tif tpm12Connection != nil {\n\t\tvid, err := tpm1.GetManufacturer(*tpm12Connection)\n\n\t\treturn vid != nil && err == nil, nil\n\t} else if tpm20Connection != nil {\n\t\tca, _, err := tpm2.GetCapability(*tpm20Connection, tpm2.CapabilityTPMProperties, 1, uint32(tpm2.Manufacturer))\n\n\t\treturn ca != nil && err == nil, nil\n\t} else {\n\t\treturn false, fmt.Errorf(\"No TPM connection\")\n\t}\n}",
"func TestGetBlockTxns(t *testing.T) {\n\tpver := ProtocolVersion\n\tenc := BaseEncoding\n\n\t// Ensure the command is expected value.\n\twantCmd := \"getblocktxn\"\n\tmsg := NewMsgGetBlockTxns(chainhash.Hash{}, nil)\n\tif cmd := msg.Command(); cmd != wantCmd {\n\t\tt.Errorf(\"NewMsgGetBlockTxns: wrong command - got %v want %v\",\n\t\t\tcmd, wantCmd)\n\t}\n\n\t// Ensure max payload is expected value.\n\twantPayload := maxMessagePayload()\n\tmaxPayload := msg.MaxPayloadLength(pver)\n\tif maxPayload != wantPayload {\n\t\tt.Errorf(\"MaxPayloadLength: wrong max payload length for \"+\n\t\t\t\"protocol version %d - got %v, want %v\", pver,\n\t\t\tmaxPayload, wantPayload)\n\t}\n\n\t// Test encode with latest protocol version.\n\tvar buf bytes.Buffer\n\terr := msg.BchEncode(&buf, pver, enc)\n\tif err != nil {\n\t\tt.Errorf(\"encode of MsgGetBlockTxns failed %v err <%v>\", msg,\n\t\t\terr)\n\t}\n\n\t// Older protocol versions should fail encode since message didn't\n\t// exist yet.\n\toldPver := BIP0152Version - 1\n\terr = msg.BchEncode(&buf, oldPver, enc)\n\tif err == nil {\n\t\ts := \"encode of MsgGetBlockTxns passed for old protocol \" +\n\t\t\t\"version %v err <%v>\"\n\t\tt.Errorf(s, msg, err)\n\t}\n\n\t// Test decode with latest protocol version.\n\treadmsg := MsgGetBlockTxns{}\n\terr = readmsg.BchDecode(&buf, pver, enc)\n\tif err != nil {\n\t\tt.Errorf(\"decode of MsgGetBlockTxns failed [%v] err <%v>\", buf,\n\t\t\terr)\n\t}\n\n\t// Older protocol versions should fail decode since message didn't\n\t// exist yet.\n\terr = readmsg.BchDecode(&buf, oldPver, enc)\n\tif err == nil {\n\t\ts := \"decode of MsgGetBlockTxns passed for old protocol \" +\n\t\t\t\"version %v err <%v>\"\n\t\tt.Errorf(s, msg, err)\n\t}\n}",
"func TestRequiredPackets(t *testing.T) {\n\tt.Parallel()\n\t// Initialize the cipher.\n\tkey := fastrand.Bytes(X25519KeyLen)\n\taead, err := chacha20poly1305.New(key[:])\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Declare some helper vars.\n\tpacketSize := uint32(1440)\n\toverhead := marshaledFrameHeaderSize + uint32(2*(aead.NonceSize()+aead.Overhead()))\n\n\t// Prepare tests.\n\ttests := []struct {\n\t\tpayloadLen uint32\n\t\trequiredPackets uint32\n\t\tleftoverBytes uint32\n\t}{\n\t\t// empty payload\n\t\t{\n\t\t\tpayloadLen: 0,\n\t\t\trequiredPackets: 1,\n\t\t\tleftoverBytes: 1374,\n\t\t},\n\t\t// full packet\n\t\t{\n\t\t\tpayloadLen: packetSize - overhead,\n\t\t\trequiredPackets: 1,\n\t\t\tleftoverBytes: 0,\n\t\t},\n\t\t// full packet + 1\n\t\t{\n\t\t\tpayloadLen: packetSize - overhead + 1,\n\t\t\trequiredPackets: 2,\n\t\t\tleftoverBytes: 1439,\n\t\t},\n\t\t// 2 * full payload\n\t\t{\n\t\t\tpayloadLen: 2*packetSize - overhead,\n\t\t\trequiredPackets: 2,\n\t\t\tleftoverBytes: 0,\n\t\t},\n\t\t// 2 * full payload + 1\n\t\t{\n\t\t\tpayloadLen: 2*packetSize - overhead + 1,\n\t\t\trequiredPackets: 3,\n\t\t\tleftoverBytes: 1439,\n\t\t},\n\t}\n\n\t// Run tests.\n\tfor _, test := range tests {\n\t\trp, lb := requiredPackets(test.payloadLen, packetSize, aead)\n\t\tif rp != test.requiredPackets {\n\t\t\tt.Errorf(\"%v != %v\", rp, test.requiredPackets)\n\t\t}\n\t\tif lb != test.leftoverBytes {\n\t\t\tt.Errorf(\"%v != %v\", lb, test.leftoverBytes)\n\t\t}\n\t}\n}",
"func fiat_poly1305_msat(out1 *[6]uint32) {\n out1[0] = 0xfffffffb\n out1[1] = 0xffffffff\n out1[2] = 0xffffffff\n out1[3] = 0xffffffff\n out1[4] = 0x3\n out1[5] = uint32(0x0)\n}",
"func binaryTpFromContext(ctx context.Context) []byte {\n\tsc := trace.SpanContextFromContext(ctx)\n\ttpBytes := make([]byte, 0, 26)\n\n\t// the otel spec says 16 bytes for trace id and 8 for spans are good enough\n\t// for everyone copy them into a []byte that we can deliver over option43\n\ttid := [16]byte(sc.TraceID()) // type TraceID [16]byte\n\tsid := [8]byte(sc.SpanID()) // type SpanID [8]byte\n\n\ttpBytes = append(tpBytes, 0x00) // traceparent version\n\ttpBytes = append(tpBytes, tid[:]...) // trace id\n\ttpBytes = append(tpBytes, sid[:]...) // span id\n\tif sc.IsSampled() {\n\t\ttpBytes = append(tpBytes, 0x01) // trace flags\n\t} else {\n\t\ttpBytes = append(tpBytes, 0x00)\n\t}\n\n\treturn tpBytes\n}",
"func packType(buf io.Writer, elts ...interface{}) error {\n\tfor _, e := range elts {\n\t\tv := reflect.ValueOf(e)\n\t\tswitch v.Kind() {\n\t\tcase reflect.Ptr:\n\t\t\tif err := packType(buf, reflect.Indirect(v).Interface()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase reflect.Struct:\n\t\t\tfor i := 0; i < v.NumField(); i++ {\n\t\t\t\tif err := packType(buf, v.Field(i).Interface()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Slice:\n\t\t\tswitch s := e.(type) {\n\t\t\tcase []byte:\n\t\t\t\tswitch lengthPrefixSize {\n\t\t\t\tcase tpm20PrefixSize:\n\t\t\t\t\tif err := binary.Write(buf, binary.BigEndian, uint16(len(s))); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\tcase tpm12PrefixSize:\n\t\t\t\t\tif err := binary.Write(buf, binary.BigEndian, uint32(len(s))); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\treturn fmt.Errorf(\"lengthPrefixSize is %d, must be either 2 or 4\", lengthPrefixSize)\n\t\t\t\t}\n\t\t\t\tif err := binary.Write(buf, binary.BigEndian, s); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tcase RawBytes:\n\t\t\t\tif err := binary.Write(buf, binary.BigEndian, s); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"only []byte and RawBytes slices are supported, got %T\", e)\n\t\t\t}\n\t\tdefault:\n\t\t\tif err := binary.Write(buf, binary.BigEndian, e); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn nil\n}",
"func GetSizePrefixedBufferIdentifier(buf []byte) string {\n\treturn string(buf[SizeUOffsetT+sizePrefixLength:][:fileIdentifierLength])\n}",
"func FixedLengthRandomPasswordBytes() []byte {\n\treturn RandomBytes(13)\n}",
"func (t T) FixedLength() int {\n\tswitch t {\n\tcase T_any:\n\t\treturn 0\n\tcase T_int8, T_uint8, T_bool:\n\t\treturn 1\n\tcase T_int16, T_uint16:\n\t\treturn 2\n\tcase T_int32, T_uint32, T_date, T_float32:\n\t\treturn 4\n\tcase T_int64, T_uint64, T_datetime, T_time, T_float64, T_timestamp:\n\t\treturn 8\n\tcase T_decimal64:\n\t\treturn 8\n\tcase T_decimal128:\n\t\treturn 16\n\tcase T_decimal256:\n\t\treturn 32\n\tcase T_uuid:\n\t\treturn 16\n\tcase T_TS:\n\t\treturn TxnTsSize\n\tcase T_Rowid:\n\t\treturn RowidSize\n\tcase T_Blockid:\n\t\treturn BlockidSize\n\tcase T_char, T_varchar, T_blob, T_json, T_text, T_binary, T_varbinary, T_array_float32, T_array_float64:\n\t\treturn -24\n\tcase T_enum:\n\t\treturn 2\n\t}\n\tpanic(moerr.NewInternalErrorNoCtx(fmt.Sprintf(\"unknown type %d\", t)))\n}",
"func TestEncodeMsgpackWorks(t *testing.T) {\n\tenc, err := encodeMsgpack(map[string]int{\n\t\t\"foo\": 1,\n\t})\n\tassert.NoError(t, err)\n\n\t// `fixmap` of one item, `fixstr` of three characters, `fixnum` of 1.\n\tassert.Equal(t, enc, []byte{0x81, 0xa3, 0x66, 0x6f, 0x6f, 1})\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
UseTPM20LengthPrefixSize makes Pack/Unpack use TPM 2.0 encoding for byte arrays.
|
func UseTPM20LengthPrefixSize() {
lengthPrefixSize = tpm20PrefixSize
}
|
[
"func UseTPM12LengthPrefixSize() {\n\tlengthPrefixSize = tpm12PrefixSize\n}",
"func TestEnsurePrivateKeySizePadsLessThanRequiredSizeArrays(t *testing.T) {\n\tshortByteArray := make([]byte, ecdsaPrivateKeySize/2)\n\tfor i := range shortByteArray {\n\t\tshortByteArray[i] = byte(1)\n\t}\n\n\texpected := append(\n\t\tmake([]byte, ecdsaPrivateKeySize-ecdsaPrivateKeySize/2),\n\t\tshortByteArray...)\n\n\tresult := EnsurePrivateKeySize(shortByteArray)\n\trequire.True(t, reflect.DeepEqual(expected, result))\n}",
"func (b *U16Bytes) TPMUnmarshal(in io.Reader) error {\n\tvar tmpSize uint16\n\tif err := binary.Read(in, binary.BigEndian, &tmpSize); err != nil {\n\t\treturn err\n\t}\n\tsize := int(tmpSize)\n\n\tif len(*b) >= size {\n\t\t*b = (*b)[:size]\n\t} else {\n\t\t*b = append(*b, make([]byte, size-len(*b))...)\n\t}\n\n\tn, err := in.Read(*b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif n != size {\n\t\treturn io.ErrUnexpectedEOF\n\t}\n\treturn nil\n}",
"func (b *U32Bytes) TPMUnmarshal(in io.Reader) error {\n\tvar tmpSize uint32\n\tif err := binary.Read(in, binary.BigEndian, &tmpSize); err != nil {\n\t\treturn err\n\t}\n\n\tif tmpSize > maxBytesBufferSize {\n\t\treturn bytes.ErrTooLarge\n\t}\n\t// We can now safely cast to an int on 32-bit or 64-bit machines\n\tsize := int(tmpSize)\n\n\tif len(*b) >= size {\n\t\t*b = (*b)[:size]\n\t} else {\n\t\t*b = append(*b, make([]byte, size-len(*b))...)\n\t}\n\n\tn, err := in.Read(*b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif n != size {\n\t\treturn fmt.Errorf(\"unable to read all contents in to U32Bytes\")\n\t}\n\treturn nil\n}",
"func TestPasswordGenerationWithCustomLength(t *testing.T) {\n\tlength := 15\n\tstringifyLength := strconv.Itoa(length)\n\tresponse := decodeResponse(Handle([]byte(\"{\\\"length\\\":\" + stringifyLength + \"}\")))\n\n\tvalidateResponseCode(t, http.StatusOK, response.Code)\n\tvalidatePassword(t, response.Password, length)\n}",
"func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte)",
"func TestGetBlockTxns(t *testing.T) {\n\tpver := ProtocolVersion\n\tenc := BaseEncoding\n\n\t// Ensure the command is expected value.\n\twantCmd := \"getblocktxn\"\n\tmsg := NewMsgGetBlockTxns(chainhash.Hash{}, nil)\n\tif cmd := msg.Command(); cmd != wantCmd {\n\t\tt.Errorf(\"NewMsgGetBlockTxns: wrong command - got %v want %v\",\n\t\t\tcmd, wantCmd)\n\t}\n\n\t// Ensure max payload is expected value.\n\twantPayload := maxMessagePayload()\n\tmaxPayload := msg.MaxPayloadLength(pver)\n\tif maxPayload != wantPayload {\n\t\tt.Errorf(\"MaxPayloadLength: wrong max payload length for \"+\n\t\t\t\"protocol version %d - got %v, want %v\", pver,\n\t\t\tmaxPayload, wantPayload)\n\t}\n\n\t// Test encode with latest protocol version.\n\tvar buf bytes.Buffer\n\terr := msg.BchEncode(&buf, pver, enc)\n\tif err != nil {\n\t\tt.Errorf(\"encode of MsgGetBlockTxns failed %v err <%v>\", msg,\n\t\t\terr)\n\t}\n\n\t// Older protocol versions should fail encode since message didn't\n\t// exist yet.\n\toldPver := BIP0152Version - 1\n\terr = msg.BchEncode(&buf, oldPver, enc)\n\tif err == nil {\n\t\ts := \"encode of MsgGetBlockTxns passed for old protocol \" +\n\t\t\t\"version %v err <%v>\"\n\t\tt.Errorf(s, msg, err)\n\t}\n\n\t// Test decode with latest protocol version.\n\treadmsg := MsgGetBlockTxns{}\n\terr = readmsg.BchDecode(&buf, pver, enc)\n\tif err != nil {\n\t\tt.Errorf(\"decode of MsgGetBlockTxns failed [%v] err <%v>\", buf,\n\t\t\terr)\n\t}\n\n\t// Older protocol versions should fail decode since message didn't\n\t// exist yet.\n\terr = readmsg.BchDecode(&buf, oldPver, enc)\n\tif err == nil {\n\t\ts := \"decode of MsgGetBlockTxns passed for old protocol \" +\n\t\t\t\"version %v err <%v>\"\n\t\tt.Errorf(s, msg, err)\n\t}\n}",
"func Test16TPMPresent() (bool, error) {\n\tif tpm12Connection != nil {\n\t\tvid, err := tpm1.GetManufacturer(*tpm12Connection)\n\n\t\treturn vid != nil && err == nil, nil\n\t} else if tpm20Connection != nil {\n\t\tca, _, err := tpm2.GetCapability(*tpm20Connection, tpm2.CapabilityTPMProperties, 1, uint32(tpm2.Manufacturer))\n\n\t\treturn ca != nil && err == nil, nil\n\t} else {\n\t\treturn false, fmt.Errorf(\"No TPM connection\")\n\t}\n}",
"func PrefixUnAppend000Length(b []byte) (under, remains []byte, err error) {\n\t// parse keyVersion\n\tprefixLen, err := strconv.ParseInt(string(b[:3]), 10, 64)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn b[3 : 3+prefixLen], b[3+prefixLen:], nil\n}",
"func UnpackBuf(buf io.Reader, elts ...interface{}) error {\n\tfor _, e := range elts {\n\t\tv := reflect.ValueOf(e)\n\t\tk := v.Kind()\n\t\tif k != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"all values passed to Unpack must be pointers, got %v\", k)\n\t\t}\n\n\t\tif v.IsNil() {\n\t\t\treturn errors.New(\"can't fill a nil pointer\")\n\t\t}\n\n\t\tiv := reflect.Indirect(v)\n\t\tswitch iv.Kind() {\n\t\tcase reflect.Struct:\n\t\t\t// Decompose the struct and copy over the values.\n\t\t\tfor i := 0; i < iv.NumField(); i++ {\n\t\t\t\tif err := UnpackBuf(buf, iv.Field(i).Addr().Interface()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Slice:\n\t\t\tvar size int\n\t\t\t_, isHandles := e.(*[]Handle)\n\n\t\t\tswitch {\n\t\t\t// []Handle always uses 2-byte length, even with TPM 1.2.\n\t\t\tcase isHandles:\n\t\t\t\tvar tmpSize uint16\n\t\t\t\tif err := binary.Read(buf, binary.BigEndian, &tmpSize); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsize = int(tmpSize)\n\t\t\t// TPM 2.0\n\t\t\tcase lengthPrefixSize == tpm20PrefixSize:\n\t\t\t\tvar tmpSize uint16\n\t\t\t\tif err := binary.Read(buf, binary.BigEndian, &tmpSize); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsize = int(tmpSize)\n\t\t\t// TPM 1.2\n\t\t\tcase lengthPrefixSize == tpm12PrefixSize:\n\t\t\t\tvar tmpSize uint32\n\t\t\t\tif err := binary.Read(buf, binary.BigEndian, &tmpSize); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsize = int(tmpSize)\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"lengthPrefixSize is %d, must be either 2 or 4\", lengthPrefixSize)\n\t\t\t}\n\n\t\t\t// A zero size is used by the TPM to signal that certain elements\n\t\t\t// are not present.\n\t\t\tif size == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Make len(e) match size exactly.\n\t\t\tswitch b := e.(type) {\n\t\t\tcase *[]byte:\n\t\t\t\tif len(*b) >= size {\n\t\t\t\t\t*b = (*b)[:size]\n\t\t\t\t} else {\n\t\t\t\t\t*b = append(*b, make([]byte, size-len(*b))...)\n\t\t\t\t}\n\t\t\tcase *[]Handle:\n\t\t\t\tif len(*b) >= size {\n\t\t\t\t\t*b = (*b)[:size]\n\t\t\t\t} else {\n\t\t\t\t\t*b = append(*b, make([]Handle, size-len(*b))...)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"can't fill pointer to %T, only []byte or []Handle slices\", e)\n\t\t\t}\n\n\t\t\tif err := binary.Read(buf, binary.BigEndian, e); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\tif err := binary.Read(buf, binary.BigEndian, e); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn nil\n}",
"func TestRequiredPackets(t *testing.T) {\n\tt.Parallel()\n\t// Initialize the cipher.\n\tkey := fastrand.Bytes(X25519KeyLen)\n\taead, err := chacha20poly1305.New(key[:])\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Declare some helper vars.\n\tpacketSize := uint32(1440)\n\toverhead := marshaledFrameHeaderSize + uint32(2*(aead.NonceSize()+aead.Overhead()))\n\n\t// Prepare tests.\n\ttests := []struct {\n\t\tpayloadLen uint32\n\t\trequiredPackets uint32\n\t\tleftoverBytes uint32\n\t}{\n\t\t// empty payload\n\t\t{\n\t\t\tpayloadLen: 0,\n\t\t\trequiredPackets: 1,\n\t\t\tleftoverBytes: 1374,\n\t\t},\n\t\t// full packet\n\t\t{\n\t\t\tpayloadLen: packetSize - overhead,\n\t\t\trequiredPackets: 1,\n\t\t\tleftoverBytes: 0,\n\t\t},\n\t\t// full packet + 1\n\t\t{\n\t\t\tpayloadLen: packetSize - overhead + 1,\n\t\t\trequiredPackets: 2,\n\t\t\tleftoverBytes: 1439,\n\t\t},\n\t\t// 2 * full payload\n\t\t{\n\t\t\tpayloadLen: 2*packetSize - overhead,\n\t\t\trequiredPackets: 2,\n\t\t\tleftoverBytes: 0,\n\t\t},\n\t\t// 2 * full payload + 1\n\t\t{\n\t\t\tpayloadLen: 2*packetSize - overhead + 1,\n\t\t\trequiredPackets: 3,\n\t\t\tleftoverBytes: 1439,\n\t\t},\n\t}\n\n\t// Run tests.\n\tfor _, test := range tests {\n\t\trp, lb := requiredPackets(test.payloadLen, packetSize, aead)\n\t\tif rp != test.requiredPackets {\n\t\t\tt.Errorf(\"%v != %v\", rp, test.requiredPackets)\n\t\t}\n\t\tif lb != test.leftoverBytes {\n\t\t\tt.Errorf(\"%v != %v\", lb, test.leftoverBytes)\n\t\t}\n\t}\n}",
"func NewPMT(b []byte) (PMT, error) {\n\tminsize := 16\n\tif len(b) < minsize {\n\t\treturn nil, ErrTooShort\n\t}\n\treturn PMT(b), nil\n}",
"func GetSizePrefixedRootAs(buf []byte, offset UOffsetT, fb FlatBuffer) {\n\tn := GetUOffsetT(buf[offset+sizePrefixLength:])\n\tfb.Init(buf, n+offset+sizePrefixLength)\n}",
"func TestAll() {\n log.Println(\"Begin Crypto test\")\n \n bufflen := 128\n\n b := RandBytes(bufflen)\n if len(b) != bufflen {\n log.Fatal(\"nacl.RandBytes() failed length test\")\n }\n \n for n := 1 ; n < 16 ; n++ {\n key := GenSignKeypair()\n defer key.Free()\n testSign(n * 1024, key)\n }\n \n for n := 1 ; n < 16 ; n++ {\n key := GenSignKeypair()\n defer key.Free()\n testFucky(n * 1024, key)\n }\n \n for n := 1 ; n < 16 ; n++ {\n tokey := GenBoxKeypair()\n fromkey := GenBoxKeypair()\n defer tokey.Free()\n defer fromkey.Free()\n testBox(n * 1024, tokey, fromkey)\n }\n \n \n log.Println(\"Crypto Test Done\")\n}",
"func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]byte)",
"func TestEncryptPrivateMetadata(t *testing.T) {\n\tc := MakeCryptoCommon(kbfscodec.NewMsgpack())\n\n\t_, tlfPrivateKey, _, _, cryptKey, err := c.MakeRandomTLFKeys()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tprivateMetadata := PrivateMetadata{\n\t\tTLFPrivateKey: tlfPrivateKey,\n\t}\n\texpectedEncodedPrivateMetadata, err := c.codec.Encode(privateMetadata)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tencryptedPrivateMetadata, err := c.EncryptPrivateMetadata(privateMetadata, cryptKey)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tencodedPrivateMetadata := checkSecretboxOpen(t, encryptedData(encryptedPrivateMetadata), cryptKey.Data())\n\n\tif string(encodedPrivateMetadata) != string(expectedEncodedPrivateMetadata) {\n\t\tt.Fatalf(\"Expected encoded data %v, got %v\", expectedEncodedPrivateMetadata, encodedPrivateMetadata)\n\t}\n}",
"func TestSecretboxEncryptedLen(t *testing.T) {\n\tc := MakeCryptoCommon(kbfscodec.NewMsgpack())\n\n\tconst startSize = 100\n\tconst endSize = 100000\n\tconst iterations = 5\n\n\t// Generating random data is slow, so do it all up-front and\n\t// index into it. Note that we're intentionally re-using most\n\t// of the data between iterations intentionally.\n\trandomData := make([]byte, endSize+iterations)\n\tif err := kbfscrypto.RandRead(randomData); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcryptKeys := make([]kbfscrypto.BlockCryptKey, iterations)\n\tfor j := 0; j < iterations; j++ {\n\t\tcryptKeys[j] = makeFakeBlockCryptKey(t)\n\t}\n\n\tfor i := startSize; i < endSize; i += 1000 {\n\t\tvar enclen int\n\t\tfor j := 0; j < iterations; j++ {\n\t\t\tdata := randomData[j : j+i]\n\t\t\tenc := secretboxSealEncoded(t, &c, data, cryptKeys[j].Data())\n\t\t\tif j == 0 {\n\t\t\t\tenclen = len(enc.EncryptedData)\n\t\t\t} else if len(enc.EncryptedData) != enclen {\n\t\t\t\tt.Errorf(\"encrypted data len: %d, expected %d\", len(enc.EncryptedData), enclen)\n\t\t\t}\n\t\t}\n\t}\n}",
"func (t T) FixedLength() int {\n\tswitch t {\n\tcase T_any:\n\t\treturn 0\n\tcase T_int8, T_uint8, T_bool:\n\t\treturn 1\n\tcase T_int16, T_uint16:\n\t\treturn 2\n\tcase T_int32, T_uint32, T_date, T_float32:\n\t\treturn 4\n\tcase T_int64, T_uint64, T_datetime, T_time, T_float64, T_timestamp:\n\t\treturn 8\n\tcase T_decimal64:\n\t\treturn 8\n\tcase T_decimal128:\n\t\treturn 16\n\tcase T_decimal256:\n\t\treturn 32\n\tcase T_uuid:\n\t\treturn 16\n\tcase T_TS:\n\t\treturn TxnTsSize\n\tcase T_Rowid:\n\t\treturn RowidSize\n\tcase T_Blockid:\n\t\treturn BlockidSize\n\tcase T_char, T_varchar, T_blob, T_json, T_text, T_binary, T_varbinary, T_array_float32, T_array_float64:\n\t\treturn -24\n\tcase T_enum:\n\t\treturn 2\n\t}\n\tpanic(moerr.NewInternalErrorNoCtx(fmt.Sprintf(\"unknown type %d\", t)))\n}",
"func CryptoBoxPrivKeySize() int {\n return int(C.crypto_box_secretkeybytes())\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
packedSize computes the size of a sequence of types that can be passed to binary.Read or binary.Write.
|
func packedSize(elts ...interface{}) (int, error) {
var size int
for _, e := range elts {
v := reflect.ValueOf(e)
switch v.Kind() {
case reflect.Ptr:
s, err := packedSize(reflect.Indirect(v).Interface())
if err != nil {
return 0, err
}
size += s
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
s, err := packedSize(v.Field(i).Interface())
if err != nil {
return 0, err
}
size += s
}
case reflect.Slice:
switch s := e.(type) {
case []byte:
size += lengthPrefixSize + len(s)
case RawBytes:
size += len(s)
default:
return 0, fmt.Errorf("encoding of %T is not supported, only []byte and RawBytes slices are", e)
}
default:
s := binary.Size(e)
if s < 0 {
return 0, fmt.Errorf("can't calculate size of type %T", e)
}
size += s
}
}
return size, nil
}
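The arithmetic is easy to check by hand. A minimal sketch, assuming the 4-byte TPM 1.2 prefix width: a uint32 field plus a 3-byte slice packs to 4 + (4 + 3) = 11 bytes, even though binary.Size alone cannot size the slice.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	type msg struct {
		A uint32
		B []byte
	}
	m := msg{A: 1, B: []byte{0x01, 0x02, 0x03}}

	// binary.Size(m) would return -1 because of the slice field, which is
	// exactly the case packedSize special-cases with a length prefix.
	const lengthPrefixSize = 4 // assumed TPM 1.2 prefix width
	size := binary.Size(m.A) + lengthPrefixSize + len(m.B)
	fmt.Println(size) // 4 + (4 + 3) = 11
}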
|
[
"func (i *InfoStruct) GetPackedLength() uint32 { return i.PackedLength }",
"func (p *Printer) sizeof(typ dwarf.Type) (uint64, bool) {\n\tsize := typ.Size() // Will be -1 if ByteSize is not set.\n\tif size >= 0 {\n\t\treturn uint64(size), true\n\t}\n\tswitch typ.(type) {\n\tcase *dwarf.PtrType:\n\t\t// This is the only one we know of, but more may arise.\n\t\treturn uint64(p.arch.PointerSize), true\n\t}\n\treturn 0, false\n}",
"func packType(buf io.Writer, elts ...interface{}) error {\n\tfor _, e := range elts {\n\t\tv := reflect.ValueOf(e)\n\t\tswitch v.Kind() {\n\t\tcase reflect.Ptr:\n\t\t\tif err := packType(buf, reflect.Indirect(v).Interface()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase reflect.Struct:\n\t\t\tfor i := 0; i < v.NumField(); i++ {\n\t\t\t\tif err := packType(buf, v.Field(i).Interface()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Slice:\n\t\t\tswitch s := e.(type) {\n\t\t\tcase []byte:\n\t\t\t\tswitch lengthPrefixSize {\n\t\t\t\tcase tpm20PrefixSize:\n\t\t\t\t\tif err := binary.Write(buf, binary.BigEndian, uint16(len(s))); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\tcase tpm12PrefixSize:\n\t\t\t\t\tif err := binary.Write(buf, binary.BigEndian, uint32(len(s))); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\treturn fmt.Errorf(\"lengthPrefixSize is %d, must be either 2 or 4\", lengthPrefixSize)\n\t\t\t\t}\n\t\t\t\tif err := binary.Write(buf, binary.BigEndian, s); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tcase RawBytes:\n\t\t\t\tif err := binary.Write(buf, binary.BigEndian, s); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"only []byte and RawBytes slices are supported, got %T\", e)\n\t\t\t}\n\t\tdefault:\n\t\t\tif err := binary.Write(buf, binary.BigEndian, e); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn nil\n}",
"func calcSizeBinaryMemSize(p interface{}, type_ []UA_DataType) uint {\n\treturn uint(UA_UInt16(type_[0].memSize))\n}",
"func (manager *TypeManager) Size(offset dwarf.Offset) int {\n\tt := manager.types[offset]\n\tif t != nil {\n\t\treturn t.Size()\n\t}\n\treturn 0\n}",
"func rtype_Size(*Type) uintptr",
"func bitpackingByteConsumed(bitWidth int, numValues int) int {\n\treturn ((bitWidth * numValues) + 7) / 8\n}",
"func Sizeof(x ArbitraryType) uintptr",
"func String_calcSizeBinary(src []UA_String, __ []UA_DataType) uint {\n\treturn uint((4 + uint32((uint(src[0].length)))))\n}",
"func (f *field) SizeOfBits(val reflect.Value) (size int) {\n\tskipBits := f.Skip * 8\n\n\tif f.Name != \"_\" {\n\t\tif s, ok := f.bitSizeUsingInterface(val); ok {\n\t\t\treturn s\n\t\t}\n\t} else {\n\t\t// Non-trivial, unnamed fields do not make sense. You can't set a field\n\t\t// with no name, so the elements can't possibly differ.\n\t\t// N.B.: Though skip will still work, use struct{} instead for skip.\n\t\tif !isTypeTrivial(val.Type()) {\n\t\t\treturn skipBits\n\t\t}\n\t}\n\n\talen := 1\n\tswitch f.BinaryType.Kind() {\n\tcase reflect.Int8, reflect.Uint8, reflect.Bool:\n\t\treturn 8 + skipBits\n\tcase reflect.Int16, reflect.Uint16:\n\t\treturn 16 + skipBits\n\tcase reflect.Int, reflect.Int32,\n\t\treflect.Uint, reflect.Uint32,\n\t\treflect.Float32:\n\t\treturn 32 + skipBits\n\tcase reflect.Int64, reflect.Uint64,\n\t\treflect.Float64, reflect.Complex64:\n\t\treturn 64 + skipBits\n\tcase reflect.Complex128:\n\t\treturn 128 + skipBits\n\tcase reflect.Slice, reflect.String:\n\t\tswitch f.NativeType.Kind() {\n\t\tcase reflect.Slice, reflect.String, reflect.Array, reflect.Ptr:\n\t\t\talen = val.Len()\n\t\tdefault:\n\t\t\treturn 0\n\t\t}\n\t\tfallthrough\n\tcase reflect.Array, reflect.Ptr:\n\t\tsize += skipBits\n\n\t\t// If array type, get length from type.\n\t\tif f.BinaryType.Kind() == reflect.Array {\n\t\t\talen = f.BinaryType.Len()\n\t\t}\n\n\t\t// Optimization: if the array/slice is empty, bail now.\n\t\tif alen == 0 {\n\t\t\treturn size\n\t\t}\n\n\t\t// Optimization: if the type is trivial, we only need to check the\n\t\t// first element.\n\t\tswitch f.NativeType.Kind() {\n\t\tcase reflect.Slice, reflect.String, reflect.Array, reflect.Ptr:\n\t\t\telem := f.Elem()\n\t\t\tif f.Trivial {\n\t\t\t\tsize += elem.SizeOfBits(reflect.Zero(f.BinaryType.Elem())) * alen\n\t\t\t} else {\n\t\t\t\tfor i := 0; i < alen; i++ {\n\t\t\t\t\tsize += elem.SizeOfBits(val.Index(i))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn size\n\tcase reflect.Struct:\n\t\tsize += skipBits\n\t\tfor _, field := range cachedFieldsFromStruct(f.BinaryType) {\n\t\t\tif field.BitSize != 0 {\n\t\t\t\tsize += int(field.BitSize)\n\t\t\t} else {\n\t\t\t\tsize += field.SizeOfBits(val.Field(field.Index))\n\t\t\t}\n\t\t}\n\t\treturn size\n\tdefault:\n\t\treturn 0\n\t}\n}",
"func (i *IE) MarshalLen() int {\n\tif l, ok := tvLengthMap[int(i.Type)]; ok {\n\t\treturn l + 1\n\t}\n\n\tif i.Type < 128 {\n\t\treturn 1 + len(i.Payload)\n\t}\n\n\tif i.Type == ExtensionHeaderTypeList {\n\t\treturn 2 + len(i.Payload)\n\t}\n\n\treturn 3 + len(i.Payload)\n}",
"func TypeSize(any interface{}) uintptr {\n\tt := reflect.TypeOf(any)\n\treturn t.Size()\n}",
"func ExtensionObject_calcSizeBinary(src []UA_ExtensionObject, __ []UA_DataType) uint {\n\tvar s uint = 1\n\tif uint32(src[0].encoding) > uint32(UA_EXTENSIONOBJECT_ENCODED_XML) {\n\t\tif (*src[0].content.decoded()).type_ == nil || (*src[0].content.decoded()).data == nil {\n\t\t\t// encoding byte\n\t\t\treturn 0\n\t\t}\n\t\tif uint32(int(((*src[0].content.decoded()).type_[0].typeId.identifierType))) != uint32(int((UA_NODEIDTYPE_NUMERIC))) {\n\t\t\treturn 0\n\t\t}\n\t\ts += NodeId_calcSizeBinary((*[100000000]UA_NodeId)(unsafe.Pointer(&(*src[0].content.decoded()).type_[0].typeId))[:], nil)\n\t\t// length\n\t\ts += uint((4))\n\t\tvar type_ []UA_DataType = (*src[0].content.decoded()).type_\n\t\tvar encode_index uint = uint((func() uint32 {\n\t\t\tif int((int((UA_Boolean(type_[0].builtin))))) != 0 {\n\t\t\t\treturn uint32(uint16((uint16((uint16_t((UA_UInt16(type_[0].typeIndex))))))))\n\t\t\t}\n\t\t\treturn uint32(25)\n\t\t}()))\n\t\ts += calcSizeBinaryJumpTable[encode_index]((*src[0].content.decoded()).data, type_)\n\t} else {\n\t\ts += NodeId_calcSizeBinary((*[100000000]UA_NodeId)(unsafe.Pointer(&(*src[0].content.encoded()).typeId))[:], nil)\n\t\tswitch uint32(src[0].encoding) {\n\t\tcase uint32(UA_EXTENSIONOBJECT_ENCODED_NOBODY):\n\t\tcase uint32(UA_EXTENSIONOBJECT_ENCODED_BYTESTRING):\n\t\t\tfallthrough\n\t\tcase uint32(UA_EXTENSIONOBJECT_ENCODED_XML):\n\t\t\ts += String_calcSizeBinary((*[100000000]UA_ByteString)(unsafe.Pointer(&(*src[0].content.encoded()).body))[:], nil)\n\t\tdefault:\n\t\t\t{\n\t\t\t\treturn 0\n\t\t\t}\n\t\t}\n\t}\n\treturn uint(s)\n}",
"func DiagnosticInfo_calcSizeBinary(src []UA_DiagnosticInfo, __ []UA_DataType) uint {\n\tvar s uint = 1\n\tif int((int((UA_Boolean(src[0].hasSymbolicId))))) != 0 {\n\t\t// encoding byte\n\t\ts += uint((4))\n\t}\n\tif int((int((UA_Boolean(src[0].hasNamespaceUri))))) != 0 {\n\t\ts += uint((4))\n\t}\n\tif int((int((UA_Boolean(src[0].hasLocalizedText))))) != 0 {\n\t\ts += uint((4))\n\t}\n\tif int((int((UA_Boolean(src[0].hasLocale))))) != 0 {\n\t\ts += uint((4))\n\t}\n\tif int((int((UA_Boolean(src[0].hasAdditionalInfo))))) != 0 {\n\t\ts += String_calcSizeBinary((*[100000000]UA_String)(unsafe.Pointer(&src[0].additionalInfo))[:], nil)\n\t}\n\tif int((int((UA_Boolean(src[0].hasInnerStatusCode))))) != 0 {\n\t\ts += uint((4))\n\t}\n\tif int((int((UA_Boolean(src[0].hasInnerDiagnosticInfo))))) != 0 {\n\t\ts += DiagnosticInfo_calcSizeBinary(src[0].innerDiagnosticInfo, nil)\n\t}\n\treturn uint(s)\n}",
"func (t Type) Size() uintptr {\n\treturn t(z{}).Size()\n}",
"func (e Entry) MarshalBinaryLen() int {\n\textIDTotalSize := len(e.ExtIDs) * 2 // Two byte len(ExtID) per ExtID\n\tfor _, extID := range e.ExtIDs {\n\t\textIDTotalSize += len(extID)\n\t}\n\treturn EntryHeaderSize + extIDTotalSize + len(e.Content)\n}",
"func epSizeBin(point *C.ep_st, pack int32) int32 {\n\tp := C.int(pack)\n\tsize := C.ep_size_bin(point, p)\n\treturn int32(size)\n}",
"func Guid_calcSizeBinary(src []UA_Guid, __ []UA_DataType) uint {\n\treturn 16\n}",
"func (ud *UData) SerializeSize() int {\n\tvar ldsize int\n\tvar b bytes.Buffer\n\n\t// Grab the size of all the stxos\n\tfor _, l := range ud.Stxos {\n\t\tldsize += l.SerializeSize()\n\t}\n\n\tud.AccProof.Serialize(&b)\n\tif b.Len() != ud.AccProof.SerializeSize() {\n\t\tfmt.Printf(\" b.Len() %d, AccProof.SerializeSize() %d\\n\",\n\t\t\tb.Len(), ud.AccProof.SerializeSize())\n\t}\n\n\tguess := 8 + (4 * len(ud.TxoTTLs)) + ud.AccProof.SerializeSize() + ldsize\n\n\t// 8B height & numTTLs, 4B per TTL, accProof size, leaf sizes\n\treturn guess\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
packWithHeader takes a header and a sequence of elements that are either of fixed length or slices of fixed-length types and packs them into a single byte array using binary.Write. It updates the commandHeader to have the right length.
|
func packWithHeader(ch commandHeader, cmd ...interface{}) ([]byte, error) {
hdrSize := binary.Size(ch)
bodySize, err := packedSize(cmd...)
if err != nil {
return nil, fmt.Errorf("couldn't compute packed size for message body: %v", err)
}
ch.Size = uint32(hdrSize + bodySize)
in := []interface{}{ch}
in = append(in, cmd...)
return Pack(in...)
}
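The length bookkeeping can be shown in isolation. A minimal sketch, using a hypothetical 10-byte header layout and the assumed 4-byte TPM 1.2 body prefix; the real commandHeader and Pack are package internals and are not reproduced here.

package main

import (
	"encoding/binary"
	"fmt"
)

// header is a hypothetical 10-byte stand-in for the package's
// commandHeader (tag, total size, command ordinal).
type header struct {
	Tag  uint16
	Size uint32
	Cmd  uint32
}

func main() {
	body := []byte{0xde, 0xad, 0xbe, 0xef}
	h := header{Tag: 0x00c1, Cmd: 0x01}

	hdrSize := binary.Size(h) // 2 + 4 + 4 = 10
	bodySize := 4 + len(body) // assumed 4-byte TPM 1.2 length prefix + payload
	h.Size = uint32(hdrSize + bodySize)

	fmt.Println(h.Size) // 18
}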
|
[
"func (h *FileHeader) Pack() []byte {\n\tif len(h.FileID) != headerIDLen || h.Version != CurrentVersion {\n\t\tlog.Panic(\"FileHeader object not properly initialized\")\n\t}\n\tbuf := make([]byte, HeaderLen)\n\tp := 0\n\tbinary.BigEndian.PutUint16(buf[p:], h.Version)\n\tp += headerVersionLen\n\tcopy(buf[p:], h.FileID)\n\tp += headerIDLen\n\tbinary.BigEndian.PutUint32(buf[p:], h.Mode)\n\tp += headerPropertiesLen\n\tmac := hmac.New(md5.New, h.FileID)\n\tmac.Write(buf[:p])\n\tsign := mac.Sum(nil)\n\tcopy(buf[p:], sign)\n\treturn buf\n}",
"func (fh *Head) Pack(buf *bytes.Buffer) (err error) {\n\tif buf == nil {\n\t\terr = proto.ErrFrameHeadBufNil\n\t\treturn\n\t}\n\tif buf.Cap()-buf.Len() < proto.MLFrameHeadLen {\n\t\terr = proto.ErrFameHeadBufLen\n\t\treturn\n\t}\n\tbuf.WriteByte(fh.Magic)\n\tbuf.WriteByte(fh.Version)\n\tbinary.Write(buf, binary.LittleEndian, fh.CMD)\n\tbinary.Write(buf, binary.LittleEndian, fh.Seq)\n\tbinary.Write(buf, binary.LittleEndian, fh.Length)\n\treturn\n}",
"func (h2Conn *Http2Conn) EncodeHeader(header []hpack.HeaderField) []byte {\n\th2Conn.HeaderWriteBuf.Reset()\n\n\tfor _, hf := range header {\n\t\t_ = h2Conn.HpackEncoder.WriteField(hf)\n\t}\n\n\tdst := make([]byte, h2Conn.HeaderWriteBuf.Len())\n\tcopy(dst, h2Conn.HeaderWriteBuf.Bytes())\n\n\treturn dst\n}",
"func (header *OIC_Header) Pack() []byte {\n\tvar buf []byte\n\tbuf = append(buf, util.UIntToBytesBE(32, uint64(header.Kind))...)\n\tbuf = append(buf, util.UIntToBytesLE(32, uint64(header.Type))...)\n\tbuf = append(buf, util.UIntToBytesLE(32, uint64(header.DataSize))...)\n\tbuf = append(buf, util.UIntToBytesLE(32, uint64(header.ProcStatus))...)\n\tbuf = append(buf, util.UIntToBytesLE(8, uint64(header.ClientSize))...)\n\tbuf = append(buf, util.UIntToBytesLE(8, uint64(header.FishStatus))...)\n\tbuf = append(buf, util.UIntToBytesLE(8, uint64(header.NavUsed))...)\n\tbuf = append(buf, util.UIntToBytesLE(8, uint64(header.NavType))...)\n\tbuf = append(buf, util.UIntToBytesBE(32, uint64(header.UTMZone))...)\n\tbuf = append(buf, util.Float64ToByteLE(header.ShipX)...)\n\tbuf = append(buf, util.Float64ToByteLE(header.ShipY)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.ShipCourse)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.ShipSpeed)...)\n\tbuf = append(buf, util.UIntToBytesLE(32, uint64(header.Sec))...)\n\tbuf = append(buf, util.UIntToBytesLE(32, uint64(header.USec))...)\n\tbuf = append(buf, util.Float32ToByteLE(header.SpareGain)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.FishHeading)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.FishDepth)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.FishRange)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.FishPulseWidth)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.GainC0)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.GainC1)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.GainC2)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.FishPitch)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.FishRoll)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.FishYaw)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.Temperature)...)\n\tbuf = append(buf, util.Float64ToByteLE(header.FishX)...)\n\tbuf = append(buf, util.Float64ToByteLE(header.FishY)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.FishLayback)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.FishAltitude)...)\n\tbuf = append(buf, util.UIntToBytesLE(32, uint64(header.FishAltitudeSamples))...)\n\tbuf = append(buf, util.Float32ToByteLE(header.FishPingPeriod)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.SoundVelocity)...)\n\tbuf = append(buf, util.UIntToBytesLE(32, uint64(header.Reserved1))...)\n\tbuf = append(buf, util.UIntToBytesLE(32, uint64(header.ChanNum))...)\n\tfor i := 0; i < 8; i++ {\n\t\tbuf = append(buf, util.UIntToBytesLE(32, uint64(header.ChanOffset[i]))...)\n\t}\n\tfor i := 0; i < 8; i++ {\n\t\tbuf = append(buf, header.Channel[i].Pack()...)\n\t}\n\tfor i := 0; i < 5; i++ {\n\t\tbuf = append(buf, util.UIntToBytesLE(32, uint64(header.Reserved2[i]))...)\n\t}\n\tbuf = append(buf, util.Float64ToByteLE(header.NavFixLatitude)...)\n\tbuf = append(buf, util.Float64ToByteLE(header.NavFixLongtitude)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.HDOP)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.EllipsoidElevation)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.VesselHeading)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.Pitch)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.Roll)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.Heave)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.Draft)...)\n\tbuf = append(buf, util.Float32ToByteLE(header.Tide)...)\n\tbuf = append(buf, util.UIntToBytesLE(32, uint64(header.Reserved3))...)\n\tbuf = append(buf, 
util.Float32ToByteLE(header.Pressure)...)\n\tfor i := 0; i < 13; i++ {\n\t\tbuf = append(buf, util.UIntToBytesLE(32, uint64(header.Reserved4[i]))...)\n\t}\n\tbuf = append(buf, util.Float32ToByteLE(header.AuxFloat4)...)\n\tfor i := 0; i < 4; i++ {\n\t\tbuf = append(buf, util.UIntToBytesLE(32, uint64(header.Reserved5[i]))...)\n\t}\n\tbuf = append(buf, util.Float64ToByteLE(header.Aux3)...)\n\tbuf = append(buf, util.Float64ToByteLE(header.Aux4)...)\n\tbuf = append(buf, util.UIntToBytesLE(32, uint64(header.Reserved6[0]))...)\n\tbuf = append(buf, util.UIntToBytesLE(32, uint64(header.Reserved6[1]))...)\n\tbuf = append(buf, util.Float64ToByteLE(header.PingTime)...)\n\tfor i := 0; i < 18; i++ {\n\t\tbuf = append(buf, util.UIntToBytesBE(32, uint64(header.Reserved7[i]))...)\n\t}\n\treturn buf\n}",
"func (p *payload) updateHeader() {\n\tn := uint64(atomic.LoadUint32(&p.count))\n\tswitch {\n\tcase n <= 15:\n\t\tp.header[7] = msgpackArrayFix + byte(n)\n\t\tp.off = 7\n\tcase n <= 1<<16-1:\n\t\tbinary.BigEndian.PutUint64(p.header, n) // writes 2 bytes\n\t\tp.header[5] = msgpackArray16\n\t\tp.off = 5\n\tdefault: // n <= 1<<32-1\n\t\tbinary.BigEndian.PutUint64(p.header, n) // writes 4 bytes\n\t\tp.header[3] = msgpackArray32\n\t\tp.off = 3\n\t}\n}",
"func (h *Header) MarshalBinary(buf *bytes.Buffer) error {\n\tif _, err := MsgTypeToString(h.MsgType); err != nil {\n\t\treturn errors.New(\"unknown message type\")\n\t}\n\n\tsenderNonce := make([]byte, NonceLen)\n\tbyteOrder.PutUint32(senderNonce, h.RemotePeerNonce)\n\n\t// Verify Nonce-PoW\n\tif err := verifyIDNonce(h.RemotePeerID, senderNonce); err != nil {\n\t\treturn err\n\t}\n\n\tport := make([]byte, 2)\n\tbyteOrder.PutUint16(port, h.RemotePeerPort)\n\n\tif err := buf.WriteByte(h.MsgType); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := buf.Write(h.RemotePeerID[:]); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := buf.Write(senderNonce); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := buf.Write(port); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := buf.Write(h.Reserved[:]); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (c *LeweiCmd) headerSet(index uint, value uint32) {\n\tbinary.LittleEndian.PutUint32(c.header[10+index*4:], value)\n}",
"func EncodeBinaryHeader(data []byte) string {\n\t// gRPC specification says that implementations should emit unpadded values.\n\treturn base64.RawStdEncoding.EncodeToString(data)\n}",
"func (w Writer) BytesWithHeader() []byte {\n\tcontentLength := uint16(w.idx)\n\n\tif (w.idx < 2) {\n\t\treturn w.byteSlice[:w.idx]\n\t}\n\n\tw.byteSlice[0] = byte(contentLength >> 0)\n\tw.byteSlice[1] = byte(contentLength >> 8)\n\n\treturn w.byteSlice[:w.idx]\n}",
"func EncodeHeader(b Box, w io.Writer) error {\n\tboxType, boxSize := b.Type(), b.Size()\n\tlog.Debugf(\"Writing %v size %d\\n\", boxType, boxSize)\n\tbuf := make([]byte, boxHeaderSize)\n\tif boxSize < 1<<32 {\n\t\tbinary.BigEndian.PutUint32(buf, uint32(boxSize))\n\t} else {\n\t\t// largesize will be sent as uint64 after header\n\t\tbinary.BigEndian.PutUint32(buf, 1)\n\t}\n\n\tstrtobuf(buf[4:], b.Type(), 4)\n\t_, err := w.Write(buf)\n\treturn err\n}",
"func (e *Encoder) EncodeHeader(h Header) error {\n\te.Header = h\n\n\terr := writeByte(e, h.Version)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = writeMbUint32(e, h.PublicID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif h.PublicID == 0 {\n\t\terr = writeMbUint32(e, h.PublicID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = writeMbUint32(e, h.Charset)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = writeMbUint32(e, uint32(len(h.StringTable)))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn writeSlice(e, h.StringTable)\n}",
"func EncodeHeader(b Box, w io.Writer) error {\n\tbuf := make([]byte, BoxHeaderSize)\n\tbinary.BigEndian.PutUint32(buf, uint32(b.Size()))\n\tstrtobuf(buf[4:], b.Type(), 4)\n\t_, err := w.Write(buf)\n\treturn err\n}",
"func writeFrameHeader(h header, w *bufio.Writer, buf []byte) (err error) {\n\tdefer errd.Wrap(&err, \"failed to write frame header\")\n\n\tvar b byte\n\tif h.fin {\n\t\tb |= 1 << 7\n\t}\n\tif h.rsv1 {\n\t\tb |= 1 << 6\n\t}\n\tif h.rsv2 {\n\t\tb |= 1 << 5\n\t}\n\tif h.rsv3 {\n\t\tb |= 1 << 4\n\t}\n\n\tb |= byte(h.opcode)\n\n\terr = w.WriteByte(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlengthByte := byte(0)\n\tif h.masked {\n\t\tlengthByte |= 1 << 7\n\t}\n\n\tswitch {\n\tcase h.payloadLength > math.MaxUint16:\n\t\tlengthByte |= 127\n\tcase h.payloadLength > 125:\n\t\tlengthByte |= 126\n\tcase h.payloadLength >= 0:\n\t\tlengthByte |= byte(h.payloadLength)\n\t}\n\terr = w.WriteByte(lengthByte)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch {\n\tcase h.payloadLength > math.MaxUint16:\n\t\tbinary.BigEndian.PutUint64(buf, uint64(h.payloadLength))\n\t\t_, err = w.Write(buf)\n\tcase h.payloadLength > 125:\n\t\tbinary.BigEndian.PutUint16(buf, uint16(h.payloadLength))\n\t\t_, err = w.Write(buf[:2])\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif h.masked {\n\t\tbinary.LittleEndian.PutUint32(buf, h.maskKey)\n\t\t_, err = w.Write(buf[:4])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func EncodeHeader(w io.Writer, dataLen int32) error {\n\tif _, err := w.Write(FileIdentifier); err != nil {\n\t\treturn fmt.Errorf(\"unable to write identifier, %v\", err)\n\t}\n\tif err := binary.Write(w, binary.BigEndian, dataLen); err != nil {\n\t\treturn fmt.Errorf(\"unable to write the body size, %v\", err)\n\t}\n\treturn nil\n}",
"func (ish *IBCStubHelper) pack(params ...interface{}) []types.HexBytes {\n\tparamsRlp := make([]types.HexBytes, len(params))\n\tfor i, param := range params {\n\n\t\tparamRlp, err := rlp.EncodeToBytes(param)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tparamsRlp[i] = paramRlp\n\t}\n\n\treturn paramsRlp\n}",
"func Pack(p ProtocolType, b []byte) ([]byte, error) {\n\tout := new(bytes.Buffer)\n\t// magic code head\n\tout.WriteByte(0x0f)\n\t// protocol type\n\tout.WriteByte(byte(p))\n\t// data length\n\tif err := binary.Write(out, binary.BigEndian, uint16(len(b))); err != nil {\n\t\treturn nil, err\n\t}\n\t// data\n\tout.Write(b)\n\t// valid (not implemented)\n\tout.WriteByte(0x00)\n\t// magic code tail\n\tout.WriteByte(0xf0)\n\treturn out.Bytes(), nil\n}",
"func (e *Encoder) PackBinary(v []byte) error {\n\tn := uint64(len(v))\n\n\tif n > math.MaxUint32 {\n\t\treturn ErrLongStringOrBinary\n\t}\n\n\tif _, err := e.w.Write(e.encodeNum(binaryLenEncodings, n)); err != nil {\n\t\treturn err\n\t}\n\n\t_, err := e.w.Write(v)\n\treturn err\n}",
"func HeaderInfo(b []byte, dcidLength int, header *Header) error {\n\tvar ty C.uint8_t\n\tvar version C.uint32_t\n\tscidLen := clen(header.SCID)\n\tdcidLen := clen(header.DCID)\n\ttokenLen := clen(header.Token)\n\tn := C.quiche_header_info(cbytes(b), clen(b),\n\t\tC.size_t(dcidLength),\n\t\t&version, &ty,\n\t\tcbytes(header.SCID), &scidLen,\n\t\tcbytes(header.DCID), &dcidLen,\n\t\tcbytes(header.Token), &tokenLen)\n\tif n < 0 {\n\t\tif n == -1 {\n\t\t\treturn ErrBufferTooShort\n\t\t}\n\t\treturn Error(n)\n\t}\n\theader.Type = uint8(ty)\n\theader.Version = uint32(version)\n\theader.SCID = header.SCID[:scidLen]\n\theader.DCID = header.DCID[:dcidLen]\n\theader.Token = header.Token[:tokenLen]\n\treturn nil\n}",
"func encodeMessageHeader(header *MessageHeader) ([]byte, error) {\n\tbuf := make([]byte, 12)\n\tbinary.LittleEndian.PutUint32(buf, header.Magic)\n\tbinary.LittleEndian.PutUint32(buf[4:], uint32(header.MessageType))\n\tbinary.LittleEndian.PutUint32(buf[8:], header.Length)\n\treturn buf, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
packType recursively packs types the same way that encoding/binary does under binary.BigEndian, but with one difference: it packs a byte slice as a length prefix of lengthPrefixSize bytes followed by the raw bytes. The function unpackType performs the inverse operation, unpacking slices stored in this manner and using encoding/binary for everything else.
|
func packType(buf io.Writer, elts ...interface{}) error {
for _, e := range elts {
v := reflect.ValueOf(e)
switch v.Kind() {
case reflect.Ptr:
if err := packType(buf, reflect.Indirect(v).Interface()); err != nil {
return err
}
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
if err := packType(buf, v.Field(i).Interface()); err != nil {
return err
}
}
case reflect.Slice:
switch s := e.(type) {
case []byte:
switch lengthPrefixSize {
case tpm20PrefixSize:
if err := binary.Write(buf, binary.BigEndian, uint16(len(s))); err != nil {
return err
}
case tpm12PrefixSize:
if err := binary.Write(buf, binary.BigEndian, uint32(len(s))); err != nil {
return err
}
default:
return fmt.Errorf("lengthPrefixSize is %d, must be either 2 or 4", lengthPrefixSize)
}
if err := binary.Write(buf, binary.BigEndian, s); err != nil {
return err
}
case RawBytes:
if err := binary.Write(buf, binary.BigEndian, s); err != nil {
return err
}
default:
return fmt.Errorf("only []byte and RawBytes slices are supported, got %T", e)
}
default:
if err := binary.Write(buf, binary.BigEndian, e); err != nil {
return err
}
}
}
return nil
}
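A minimal sketch of the wire-level difference described above, assuming the 2-byte TPM 2.0 prefix: a []byte gains a big-endian length prefix, while a RawBytes slice is written verbatim.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	payload := []byte("ok")
	var prefixed, raw bytes.Buffer

	// []byte form: a big-endian length prefix (assumed 2-byte TPM 2.0
	// width) followed by the bytes, as packType writes it.
	_ = binary.Write(&prefixed, binary.BigEndian, uint16(len(payload)))
	_ = binary.Write(&prefixed, binary.BigEndian, payload)

	// RawBytes form: the bytes alone, with no prefix.
	_ = binary.Write(&raw, binary.BigEndian, payload)

	fmt.Printf("[]byte:   % x\n", prefixed.Bytes()) // 00 02 6f 6b
	fmt.Printf("RawBytes: % x\n", raw.Bytes())      // 6f 6b
}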
|
[
"func packedSize(elts ...interface{}) (int, error) {\n\tvar size int\n\tfor _, e := range elts {\n\t\tv := reflect.ValueOf(e)\n\t\tswitch v.Kind() {\n\t\tcase reflect.Ptr:\n\t\t\ts, err := packedSize(reflect.Indirect(v).Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\n\t\t\tsize += s\n\t\tcase reflect.Struct:\n\t\t\tfor i := 0; i < v.NumField(); i++ {\n\t\t\t\ts, err := packedSize(v.Field(i).Interface())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 0, err\n\t\t\t\t}\n\n\t\t\t\tsize += s\n\t\t\t}\n\t\tcase reflect.Slice:\n\t\t\tswitch s := e.(type) {\n\t\t\tcase []byte:\n\t\t\t\tsize += lengthPrefixSize + len(s)\n\t\t\tcase RawBytes:\n\t\t\t\tsize += len(s)\n\t\t\tdefault:\n\t\t\t\treturn 0, fmt.Errorf(\"encoding of %T is not supported, only []byte and RawBytes slices are\", e)\n\t\t\t}\n\t\tdefault:\n\t\t\ts := binary.Size(e)\n\t\t\tif s < 0 {\n\t\t\t\treturn 0, fmt.Errorf(\"can't calculate size of type %T\", e)\n\t\t\t}\n\n\t\t\tsize += s\n\t\t}\n\t}\n\n\treturn size, nil\n}",
"func Pack(p ProtocolType, b []byte) ([]byte, error) {\n\tout := new(bytes.Buffer)\n\t// magic code head\n\tout.WriteByte(0x0f)\n\t// protocol type\n\tout.WriteByte(byte(p))\n\t// data length\n\tif err := binary.Write(out, binary.BigEndian, uint16(len(b))); err != nil {\n\t\treturn nil, err\n\t}\n\t// data\n\tout.Write(b)\n\t// valid (not implemented)\n\tout.WriteByte(0x00)\n\t// magic code tail\n\tout.WriteByte(0xf0)\n\treturn out.Bytes(), nil\n}",
"func Unpack(p ProtocolType, b []byte) ([]byte, error) {\n\tif len(b) < 6 {\n\t\treturn nil, errors.New(\"invalid data\")\n\t}\n\n\t// find magic code head and valid\n\tidx := bytes.LastIndex(b, []byte{0x0f, byte(p)})\n\tif idx == -1 {\n\t\treturn nil, errors.New(\"magic code head is not valid\")\n\t}\n\n\t// find magic code tail and valid\n\tif !bytes.Equal(b[len(b)-1:], []byte{0xf0}) {\n\t\treturn nil, errors.New(\"magic code tail is not valid\")\n\t}\n\n\t// got data\n\tdata := b[idx:]\n\n\t// valid data length\n\tvar shouldLen uint16\n\tif err := binary.Read(bytes.NewReader(data[2:4]), binary.BigEndian, &shouldLen); err != nil {\n\t\treturn nil, err\n\t}\n\tif len(data) != int(shouldLen+4+2) {\n\t\treturn nil, errors.New(\"data length is not valid\")\n\t}\n\n\treturn data[4 : shouldLen+4], nil\n}",
"func Unpack(i interface{}) T {\n\tp := *(*uintptr)(unsafe.Pointer(&i))\n\tv := T{\n\t\ttypeID: *(*int32)((unsafe.Pointer(p + rtypeStrOffset))),\n\t\ti: i,\n\t}\n\treturn v\n}",
"func (p *Packer) UnpackBytes() []byte {\n\tsize := p.UnpackInt()\n\treturn p.UnpackFixedBytes(int(size))\n}",
"func Unpack(dst, src []byte) ([]byte, error) {\n\tfor len(src) > 0 {\n\t\ttag := src[0]\n\t\tsrc = src[1:]\n\n\t\tpstart := len(dst)\n\t\tdst = allocWords(dst, 1)\n\t\tp := dst[pstart : pstart+wordSize]\n\t\tif len(src) >= wordSize {\n\t\t\ti := 0\n\t\t\tnz := tag & 1\n\t\t\tp[0] = src[i] & -nz\n\t\t\ti += int(nz)\n\t\t\tnz = tag >> 1 & 1\n\t\t\tp[1] = src[i] & -nz\n\t\t\ti += int(nz)\n\t\t\tnz = tag >> 2 & 1\n\t\t\tp[2] = src[i] & -nz\n\t\t\ti += int(nz)\n\t\t\tnz = tag >> 3 & 1\n\t\t\tp[3] = src[i] & -nz\n\t\t\ti += int(nz)\n\t\t\tnz = tag >> 4 & 1\n\t\t\tp[4] = src[i] & -nz\n\t\t\ti += int(nz)\n\t\t\tnz = tag >> 5 & 1\n\t\t\tp[5] = src[i] & -nz\n\t\t\ti += int(nz)\n\t\t\tnz = tag >> 6 & 1\n\t\t\tp[6] = src[i] & -nz\n\t\t\ti += int(nz)\n\t\t\tnz = tag >> 7 & 1\n\t\t\tp[7] = src[i] & -nz\n\t\t\ti += int(nz)\n\t\t\tsrc = src[i:]\n\t\t} else {\n\t\t\tfor i := uint(0); i < wordSize; i++ {\n\t\t\t\tif tag&(1<<i) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif len(src) == 0 {\n\t\t\t\t\treturn dst, io.ErrUnexpectedEOF\n\t\t\t\t}\n\t\t\t\tp[i] = src[0]\n\t\t\t\tsrc = src[1:]\n\t\t\t}\n\t\t}\n\t\tswitch tag {\n\t\tcase zeroTag:\n\t\t\tif len(src) == 0 {\n\t\t\t\treturn dst, io.ErrUnexpectedEOF\n\t\t\t}\n\t\t\tdst = allocWords(dst, int(src[0]))\n\t\t\tsrc = src[1:]\n\t\tcase unpackedTag:\n\t\t\tif len(src) == 0 {\n\t\t\t\treturn dst, io.ErrUnexpectedEOF\n\t\t\t}\n\t\t\tstart := len(dst)\n\t\t\tdst = allocWords(dst, int(src[0]))\n\t\t\tsrc = src[1:]\n\t\t\tn := copy(dst[start:], src)\n\t\t\tif n < len(dst)-start {\n\t\t\t\treturn dst, io.ErrUnexpectedEOF\n\t\t\t}\n\t\t\tsrc = src[n:]\n\t\t}\n\t}\n\treturn dst, nil\n}",
"func (b *BloomFilter) Unpack(rawbytes []byte) error {\n\t/*\n\t\trawbytes, err := base64.StdEncoding.DecodeString(packed)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}*/\n\n\ttmp := [3]uint64{0, 0, 0}\n\tgr, err := gzip.NewReader(bytes.NewReader(rawbytes))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = binary.Read(gr, binary.LittleEndian, &tmp)\n\tif err != nil {\n\t\treturn err\n\t}\n\tb.advised = 0\n\tb.estError = 0.0\n\tb.size = tmp[0]\n\tb.keys = tmp[1]\n\tb.nadded = tmp[2]\n\n\tb.parts = make([]uint64, 1+(b.size/64))\n\n\t// force reload on next hash\n\tb.h0State = b.h0State[:0]\n\tb.h1State = b.h1State[:0]\n\n\treturn binary.Read(gr, binary.LittleEndian, b.parts)\n}",
"func UnpackBuf(buf io.Reader, elts ...interface{}) error {\n\tfor _, e := range elts {\n\t\tv := reflect.ValueOf(e)\n\t\tk := v.Kind()\n\t\tif k != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"all values passed to Unpack must be pointers, got %v\", k)\n\t\t}\n\n\t\tif v.IsNil() {\n\t\t\treturn errors.New(\"can't fill a nil pointer\")\n\t\t}\n\n\t\tiv := reflect.Indirect(v)\n\t\tswitch iv.Kind() {\n\t\tcase reflect.Struct:\n\t\t\t// Decompose the struct and copy over the values.\n\t\t\tfor i := 0; i < iv.NumField(); i++ {\n\t\t\t\tif err := UnpackBuf(buf, iv.Field(i).Addr().Interface()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Slice:\n\t\t\tvar size int\n\t\t\t_, isHandles := e.(*[]Handle)\n\n\t\t\tswitch {\n\t\t\t// []Handle always uses 2-byte length, even with TPM 1.2.\n\t\t\tcase isHandles:\n\t\t\t\tvar tmpSize uint16\n\t\t\t\tif err := binary.Read(buf, binary.BigEndian, &tmpSize); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsize = int(tmpSize)\n\t\t\t// TPM 2.0\n\t\t\tcase lengthPrefixSize == tpm20PrefixSize:\n\t\t\t\tvar tmpSize uint16\n\t\t\t\tif err := binary.Read(buf, binary.BigEndian, &tmpSize); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsize = int(tmpSize)\n\t\t\t// TPM 1.2\n\t\t\tcase lengthPrefixSize == tpm12PrefixSize:\n\t\t\t\tvar tmpSize uint32\n\t\t\t\tif err := binary.Read(buf, binary.BigEndian, &tmpSize); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsize = int(tmpSize)\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"lengthPrefixSize is %d, must be either 2 or 4\", lengthPrefixSize)\n\t\t\t}\n\n\t\t\t// A zero size is used by the TPM to signal that certain elements\n\t\t\t// are not present.\n\t\t\tif size == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Make len(e) match size exactly.\n\t\t\tswitch b := e.(type) {\n\t\t\tcase *[]byte:\n\t\t\t\tif len(*b) >= size {\n\t\t\t\t\t*b = (*b)[:size]\n\t\t\t\t} else {\n\t\t\t\t\t*b = append(*b, make([]byte, size-len(*b))...)\n\t\t\t\t}\n\t\t\tcase *[]Handle:\n\t\t\t\tif len(*b) >= size {\n\t\t\t\t\t*b = (*b)[:size]\n\t\t\t\t} else {\n\t\t\t\t\t*b = append(*b, make([]Handle, size-len(*b))...)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"can't fill pointer to %T, only []byte or []Handle slices\", e)\n\t\t\t}\n\n\t\t\tif err := binary.Read(buf, binary.BigEndian, e); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\tif err := binary.Read(buf, binary.BigEndian, e); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn nil\n}",
"func packStruct(any dnsStruct, msg []byte, off int) (off1 int, ok bool) {\n\tok = any.Walk(func(field interface{}, name, tag string) bool {\n\t\tswitch fv := field.(type) {\n\t\tdefault:\n\t\t\tprintln(\"net: dns: unknown packing type\")\n\t\t\treturn false\n\t\tcase *uint16:\n\t\t\ti := *fv\n\t\t\tif off+2 > len(msg) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tmsg[off] = byte(i >> 8)\n\t\t\tmsg[off+1] = byte(i)\n\t\t\toff += 2\n\t\tcase *uint32:\n\t\t\ti := *fv\n\t\t\tmsg[off] = byte(i >> 24)\n\t\t\tmsg[off+1] = byte(i >> 16)\n\t\t\tmsg[off+2] = byte(i >> 8)\n\t\t\tmsg[off+3] = byte(i)\n\t\t\toff += 4\n\t\tcase []byte:\n\t\t\tn := len(fv)\n\t\t\tif off+n > len(msg) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tcopy(msg[off:off+n], fv)\n\t\t\toff += n\n\t\tcase *string:\n\t\t\ts := *fv\n\t\t\tswitch tag {\n\t\t\tdefault:\n\t\t\t\tprintln(\"net: dns: unknown string tag\", tag)\n\t\t\t\treturn false\n\t\t\tcase \"domain\":\n\t\t\t\toff, ok = packDomainName(s, msg, off)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\tcase \"\":\n\t\t\t\t// Counted string: 1 byte length.\n\t\t\t\tif len(s) > 255 || off+1+len(s) > len(msg) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tmsg[off] = byte(len(s))\n\t\t\t\toff++\n\t\t\t\toff += copy(msg[off:], s)\n\t\t\t}\n\t\tcase *[]string:\n\t\t\t// Pack the strings back to back.\n\t\t\tif *fv == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tfor _, s := range *fv {\n\t\t\t\t// Counted string: 1 byte length.\n\t\t\t\tif len(s) > 255 || off+1+len(s) > len(msg) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tmsg[off] = byte(len(s))\n\t\t\t\toff++\n\t\t\t\toff += copy(msg[off:], s)\n\t\t\t}\n\t\t}\n\t\treturn true\n\t})\n\tif !ok {\n\t\treturn len(msg), false\n\t}\n\treturn off, true\n}",
"func (bp *BinaryPack) Pack(format []string, msg []interface{}) ([]byte, error) {\n\tif len(format) > len(msg) {\n\t\treturn nil, errors.New(\"Format is longer than values to pack\")\n\t}\n\n\tres := []byte{}\n\n\tfor i, f := range format {\n\t\tswitch f {\n\t\tcase \"?\":\n\t\t\tcasted_value, ok := msg[i].(bool)\n\t\t\tif !ok {\n\t\t\t\treturn nil, errors.New(\"Type of passed value doesn't match to expected '\" + f + \"' (bool)\")\n\t\t\t}\n\t\t\tres = append(res, boolToBytes(casted_value)...)\n\t\tcase \"h\", \"H\":\n\t\t\tcasted_value, ok := msg[i].(int64)\n\t\t\tif !ok {\n\t\t\t\treturn nil, errors.New(\"Type of passed value doesn't match to expected '\" + f + \"' (int, 2 bytes)\")\n\t\t\t}\n\t\t\tres = append(res, intToBytes(casted_value, 2)...)\n\t\tcase \"i\", \"I\", \"l\", \"L\":\n\t\t\tcasted_value, ok := msg[i].(int64)\n\t\t\tif !ok {\n\t\t\t\treturn nil, errors.New(\"Type of passed value doesn't match to expected '\" + f + \"' (int, 4 bytes)\")\n\t\t\t}\n\t\t\tres = append(res, intToBytes(casted_value, 4)...)\n\t\tcase \"q\", \"Q\":\n\t\t\tcasted_value, ok := msg[i].(int64)\n\t\t\tif !ok {\n fmt.Printf(\"T %T\\n\", msg[i])\n\t\t\t\treturn nil, errors.New(\"Type of passed value doesn't match to expected '\" + f + \"' (int, 8 bytes)\")\n\t\t\t}\n\t\t\tres = append(res, intToBytes(casted_value, 8)...)\n\t\tcase \"f\":\n\t\t\tcasted_value, ok := msg[i].(float32)\n\t\t\tif !ok {\n\t\t\t\treturn nil, errors.New(\"Type of passed value doesn't match to expected '\" + f + \"' (float32)\")\n\t\t\t}\n\t\t\tres = append(res, float32ToBytes(casted_value, 4)...)\n\t\tcase \"d\":\n\t\t\tcasted_value, ok := msg[i].(float64)\n\t\t\tif !ok {\n\t\t\t\treturn nil, errors.New(\"Type of passed value doesn't match to expected '\" + f + \"' (float64)\")\n\t\t\t}\n\t\t\tres = append(res, float64ToBytes(casted_value, 8)...)\n\t\tdefault:\n\t\t\tif strings.Contains(f, \"s\") {\n\t\t\t\tcasted_value, ok := msg[i].(string)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, errors.New(\"Type of passed value doesn't match to expected '\" + f + \"' (string)\")\n\t\t\t\t}\n\t\t\t\tn, _ := strconv.Atoi(strings.TrimRight(f, \"s\"))\n\t\t\t\tres = append(res, []byte(fmt.Sprintf(\"%s%s\",\n\t\t\t\t\tcasted_value, strings.Repeat(\"\\x00\", n - len(casted_value))))...)\n\t\t\t} else {\n\t\t\t\treturn nil, errors.New(\"Unexpected format token: '\" + f + \"'\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn res, nil\n}",
"func PackBytes(h, l byte) uint16 {\n\treturn (uint16(h) << 8) | uint16(l)\n}",
"func UnpackSlice(r io.Reader, ptr interface{}) error {\n\tvar err error\n\tvar head byte\n\n\tsliceTyp := reflect.TypeOf(ptr).Elem()\n\tsliceVal := reflect.ValueOf(ptr).Elem()\n\n\tif err = binary.Read(r, binary.BigEndian, &head); err != nil {\n\t\treturn err\n\t}\n\n\tif head == 0xc0 { // nil\n\t\treflect.ValueOf(ptr).Elem().Set(reflect.Zero(reflect.TypeOf(ptr).Elem()))\n\t\treturn nil\n\t}\n\n\t// handle bin format family\n\tif head == 0xc4 || head == 0xc5 || head == 0xc6 {\n\t\tif sliceTyp.Elem().Kind() == reflect.Uint8 {\n\t\t\tvar byteSlice []byte\n\n\t\t\tswitch head {\n\t\t\tcase 0xc4:\n\t\t\t\tbyteSlice, err = unpackBin8(r)\n\t\t\tcase 0xc5:\n\t\t\t\tbyteSlice, err = unpackBin16(r)\n\t\t\tcase 0xc6:\n\t\t\t\tbyteSlice, err = unpackBin32(r)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tsliceVal.Set(reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf(byteSlice).Elem()), len(byteSlice), len(byteSlice)))\n\t\t\treflect.Copy(sliceVal, reflect.ValueOf(byteSlice))\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"msgp: byte array can't be assigned to other type[%v] slice\", sliceTyp.Elem().Kind())\n\t}\n\n\t// handle array format family\n\tvar srcLen = 0\n\tif head&0xf0 == 0x90 { // array\n\t\tsrcLen = int(head & 0x0f)\n\t} else if head == 0xdc {\n\t\tvar temp uint16\n\t\tif err = binary.Read(r, binary.BigEndian, &temp); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsrcLen = int(temp)\n\t} else if head == 0xdd {\n\t\tvar temp uint32\n\t\tif err = binary.Read(r, binary.BigEndian, &temp); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsrcLen = int(temp) // maybe overflow.\n\t} else {\n\t\treturn fmt.Errorf(\"msgp: unpacked value is not an array\")\n\t}\n\n\tsliceVal.Set(reflect.MakeSlice(reflect.SliceOf(sliceTyp.Elem()), srcLen, srcLen)) // slice 생성.\n\tfor inx := 0; inx < srcLen; inx++ {\n\t\tif err = Unpack(r, sliceVal.Index(inx).Addr().Interface()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func TestPackInterfaceSlice(t *testing.T) {\n\tb, err := Pack([]interface{}{\n\t\t1, \"two\", 3.1, \"four\", []interface{}{\"five\"},\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\terr = assertBytes([]byte(\"\\x83l\\x00\\x00\\x00\\x05a\\x01m\\x00\\x00\\x00\\x03twoF\\x40\\x08\\xcc\\xcc\\xcc\\xcc\\xcc\\xcdm\\x00\\x00\\x00\\x04fourl\\x00\\x00\\x00\\x01m\\x00\\x00\\x00\\x04fivejj\"), b)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}",
"func AppendType(dst []byte, t Type) []byte { return append(dst, byte(t)) }",
"func TestUnpackGenericArray(t *testing.T) {\n\tpacked := []byte(\"\\x83l\\x00\\x00\\x00\\x05a\\x01m\\x00\\x00\\x00\\x03twoF\\x40\\x08\\xcc\\xcc\\xcc\\xcc\\xcc\\xcdm\\x00\\x00\\x00\\x04fourl\\x00\\x00\\x00\\x01m\\x00\\x00\\x00\\x04fivejj\")\n\tvar a []interface{}\n\terr := Unpack(packed, &a)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\texpected := []interface{}{\n\t\tuint8(1), []byte(\"two\"), 3.1, []byte(\"four\"), []interface{}{[]byte(\"five\")},\n\t}\n\tif len(a) != len(expected) {\n\t\tt.Fatal(\"length is different\")\n\t}\n\tfor i, v := range expected {\n\t\tif !reflect.DeepEqual(v, a[i]) {\n\t\t\tt.Fatal(\"unexpected result:\", a[i], reflect.TypeOf(a[i]), \"!=\", v, reflect.TypeOf(v))\n\t\t}\n\t}\n}",
"func Variant_calcSizeBinary(src []UA_Variant, __ []UA_DataType) uint {\n\tvar s uint = 1\n\tif src[0].type_ == nil {\n\t\t// encoding byte\n\t\treturn uint(s)\n\t}\n\tvar isArray int = int((map[bool]int{false: 0, true: 1}[uint(src[0].arrayLength) > uint((0)) || src[0].data <= 1]))\n\tvar hasDimensions int = int((map[bool]int{false: 0, true: 1}[int((isArray)) != 0 && uint(src[0].arrayDimensionsSize) > uint((0))]))\n\tvar isBuiltin int = int((int((int((UA_Boolean(src[0].type_[0].builtin)))))))\n\tvar encode_index uint = uint((uint32(uint16((uint16((uint16_t((UA_UInt16(src[0].type_[0].typeIndex))))))))))\n\tif int((noarch.NotInt(isBuiltin))) != 0 {\n\t\tencode_index = uint(25)\n\t\tif uint32(int((src[0].type_[0].typeId.identifierType))) != uint32(int((UA_NODEIDTYPE_NUMERIC))) {\n\t\t\treturn 0\n\t\t}\n\t}\n\tvar ptr uintptr_t = uintptr_t(src[0].data)\n\tvar length uint = uint((func() uint32 {\n\t\tif int((isArray)) != 0 {\n\t\t\treturn uint32((uint(src[0].arrayLength)))\n\t\t}\n\t\treturn 1\n\t}()))\n\tif int((isArray)) != 0 {\n\t\ts += Array_calcSizeBinary(uint32((uintptr_t(ptr))), uint(length), src[0].type_)\n\t} else {\n\t\ts += calcSizeBinaryJumpTable[encode_index](uint32((uintptr_t(ptr))), src[0].type_)\n\t}\n\tif int((noarch.NotInt(isBuiltin))) != 0 {\n\t\t// The type is wrapped inside an extensionobject\n\t\t// (NodeId + encoding byte + extension object length) * array length\n\t\ts += (NodeId_calcSizeBinary((*[100000000]UA_NodeId)(unsafe.Pointer(&src[0].type_[0].typeId))[:], nil)+uint((1))+uint((4)))*length\n\t}\n\tif int((hasDimensions)) != 0 {\n\t\ts += Array_calcSizeBinary(src[0].arrayDimensions, uint(src[0].arrayDimensionsSize), (*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[5]))[:])\n\t}\n\treturn uint(s)\n}",
"func (pac *PACType) Unmarshal(b []byte) (err error) {\n\tpac.Data = b\n\tzb := make([]byte, len(b), len(b))\n\tcopy(zb, b)\n\tpac.ZeroSigData = zb\n\tr := mstypes.NewReader(bytes.NewReader(b))\n\tpac.CBuffers, err = r.Uint32()\n\tif err != nil {\n\t\treturn\n\t}\n\tpac.Version, err = r.Uint32()\n\tif err != nil {\n\t\treturn\n\t}\n\tbuf := make([]InfoBuffer, pac.CBuffers, pac.CBuffers)\n\tfor i := range buf {\n\t\tbuf[i].ULType, err = r.Uint32()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tbuf[i].CBBufferSize, err = r.Uint32()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tbuf[i].Offset, err = r.Uint64()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tpac.Buffers = buf\n\treturn nil\n}",
"func (p *BBcRelation) Unpack(dat *[]byte) error {\n\tif p.IdLengthConf == nil {\n\t\tp.IdLengthConf = &BBcIdConfig{}\n\t}\n\n\tvar err error\n\tbuf := bytes.NewBuffer(*dat)\n\n\tp.AssetGroupID, p.IdLengthConf.AssetGroupIdLength, err = GetBigInt(buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnumPointers, err := Get2byte(buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := 0; i < int(numPointers); i++ {\n\t\tsize, err2 := Get2byte(buf)\n\t\tif err2 != nil {\n\t\t\treturn err2\n\t\t}\n\t\tptr, _, _ := GetBytes(buf, int(size))\n\t\tpointer := BBcPointer{}\n\t\tpointer.Unpack(&ptr)\n\t\tp.Pointers = append(p.Pointers, &pointer)\n\t}\n\n\tassetSize, err := Get4byte(buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif assetSize > 0 {\n\t\tast, _, err := GetBytes(buf, int(assetSize))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.Asset = &BBcAsset{}\n\t\tp.Asset.Unpack(&ast)\n\t\tUpdateIdLengthConfig(p.IdLengthConf, p.Asset.IdLengthConf)\n\t}\n\n\tif p.Version >= 2 {\n\t\tassetSize, err := Get4byte(buf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif assetSize > 0 {\n\t\t\tast, _, err := GetBytes(buf, int(assetSize))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tp.AssetRaw = &BBcAssetRaw{}\n\t\t\tp.AssetRaw.Unpack(&ast)\n\t\t\tUpdateIdLengthConfig(p.IdLengthConf, p.AssetRaw.IdLengthConf)\n\t\t}\n\n\t\tassetSize, err = Get4byte(buf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif assetSize > 0 {\n\t\t\tast, _, err := GetBytes(buf, int(assetSize))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tp.AssetHash = &BBcAssetHash{}\n\t\t\tp.AssetHash.Unpack(&ast)\n\t\t\tUpdateIdLengthConfig(p.IdLengthConf, p.AssetHash.IdLengthConf)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func BenchmarkUnpack(b *testing.B) {\n\ttype test struct {\n\t\tA *int `erlpack:\"a\"`\n\t}\n\tvar x test\n\t_ = Unpack([]byte(\"\\x83t\\x00\\x00\\x00\\x01m\\x00\\x00\\x00\\x01aa\\x01\"), &x)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Unpack is a convenience wrapper around UnpackBuf. Unpack returns the number of bytes read from b to fill elts, and an error, if any.
|
func Unpack(b []byte, elts ...interface{}) (int, error) {
buf := bytes.NewBuffer(b)
err := UnpackBuf(buf, elts...)
read := len(b) - buf.Len()
return read, err
}
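
A minimal caller-side sketch (the function and variable names are hypothetical, not part of the original package) showing how the byte count returned by Unpack lets a caller keep parsing a buffer that carries trailing data:

func decodeHeader(raw []byte) (tag uint16, payload, rest []byte, err error) {
    // Unpack fills the pointers in order: a fixed-size big-endian uint16,
    // then a length-prefixed byte slice. The returned count marks where
    // the unconsumed tail of raw begins.
    n, err := Unpack(raw, &tag, &payload)
    if err != nil {
        return 0, nil, nil, err
    }
    return tag, payload, raw[n:], nil
}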
|
[
"func BenchmarkUnpack(b *testing.B) {\n\ttype test struct {\n\t\tA *int `erlpack:\"a\"`\n\t}\n\tvar x test\n\t_ = Unpack([]byte(\"\\x83t\\x00\\x00\\x00\\x01m\\x00\\x00\\x00\\x01aa\\x01\"), &x)\n}",
"func (u *BinaryUnpacker) Unpack(r io.Reader, downloadInfo *binstack.DownloadInfo) (io.Reader, error) {\n\treturn r, nil\n}",
"func (p *Packer) UnpackBytes() []byte {\n\tsize := p.UnpackInt()\n\treturn p.UnpackFixedBytes(int(size))\n}",
"func (b *BloomFilter) Unpack(rawbytes []byte) error {\n\t/*\n\t\trawbytes, err := base64.StdEncoding.DecodeString(packed)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}*/\n\n\ttmp := [3]uint64{0, 0, 0}\n\tgr, err := gzip.NewReader(bytes.NewReader(rawbytes))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = binary.Read(gr, binary.LittleEndian, &tmp)\n\tif err != nil {\n\t\treturn err\n\t}\n\tb.advised = 0\n\tb.estError = 0.0\n\tb.size = tmp[0]\n\tb.keys = tmp[1]\n\tb.nadded = tmp[2]\n\n\tb.parts = make([]uint64, 1+(b.size/64))\n\n\t// force reload on next hash\n\tb.h0State = b.h0State[:0]\n\tb.h1State = b.h1State[:0]\n\n\treturn binary.Read(gr, binary.LittleEndian, b.parts)\n}",
"func Unpack(p ProtocolType, b []byte) ([]byte, error) {\n\tif len(b) < 6 {\n\t\treturn nil, errors.New(\"invalid data\")\n\t}\n\n\t// find magic code head and valid\n\tidx := bytes.LastIndex(b, []byte{0x0f, byte(p)})\n\tif idx == -1 {\n\t\treturn nil, errors.New(\"magic code head is not valid\")\n\t}\n\n\t// find magic code tail and valid\n\tif !bytes.Equal(b[len(b)-1:], []byte{0xf0}) {\n\t\treturn nil, errors.New(\"magic code tail is not valid\")\n\t}\n\n\t// got data\n\tdata := b[idx:]\n\n\t// valid data length\n\tvar shouldLen uint16\n\tif err := binary.Read(bytes.NewReader(data[2:4]), binary.BigEndian, &shouldLen); err != nil {\n\t\treturn nil, err\n\t}\n\tif len(data) != int(shouldLen+4+2) {\n\t\treturn nil, errors.New(\"data length is not valid\")\n\t}\n\n\treturn data[4 : shouldLen+4], nil\n}",
"func (b *NodeMsgBody) Unpack(buf *bytes.Buffer) error {\n\tb.Payload = buf.Bytes()\n\treturn nil\n}",
"func (p *Packer) UnpackByte() byte {\n\tp.checkSpace(ByteLen)\n\tif p.Errored() {\n\t\treturn 0\n\t}\n\n\tval := p.Bytes[p.Offset]\n\tp.Offset += ByteLen\n\treturn val\n}",
"func UnpackBuf(buf io.Reader, elts ...interface{}) error {\n\tfor _, e := range elts {\n\t\tv := reflect.ValueOf(e)\n\t\tk := v.Kind()\n\t\tif k != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"all values passed to Unpack must be pointers, got %v\", k)\n\t\t}\n\n\t\tif v.IsNil() {\n\t\t\treturn errors.New(\"can't fill a nil pointer\")\n\t\t}\n\n\t\tiv := reflect.Indirect(v)\n\t\tswitch iv.Kind() {\n\t\tcase reflect.Struct:\n\t\t\t// Decompose the struct and copy over the values.\n\t\t\tfor i := 0; i < iv.NumField(); i++ {\n\t\t\t\tif err := UnpackBuf(buf, iv.Field(i).Addr().Interface()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Slice:\n\t\t\tvar size int\n\t\t\t_, isHandles := e.(*[]Handle)\n\n\t\t\tswitch {\n\t\t\t// []Handle always uses 2-byte length, even with TPM 1.2.\n\t\t\tcase isHandles:\n\t\t\t\tvar tmpSize uint16\n\t\t\t\tif err := binary.Read(buf, binary.BigEndian, &tmpSize); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsize = int(tmpSize)\n\t\t\t// TPM 2.0\n\t\t\tcase lengthPrefixSize == tpm20PrefixSize:\n\t\t\t\tvar tmpSize uint16\n\t\t\t\tif err := binary.Read(buf, binary.BigEndian, &tmpSize); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsize = int(tmpSize)\n\t\t\t// TPM 1.2\n\t\t\tcase lengthPrefixSize == tpm12PrefixSize:\n\t\t\t\tvar tmpSize uint32\n\t\t\t\tif err := binary.Read(buf, binary.BigEndian, &tmpSize); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsize = int(tmpSize)\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"lengthPrefixSize is %d, must be either 2 or 4\", lengthPrefixSize)\n\t\t\t}\n\n\t\t\t// A zero size is used by the TPM to signal that certain elements\n\t\t\t// are not present.\n\t\t\tif size == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Make len(e) match size exactly.\n\t\t\tswitch b := e.(type) {\n\t\t\tcase *[]byte:\n\t\t\t\tif len(*b) >= size {\n\t\t\t\t\t*b = (*b)[:size]\n\t\t\t\t} else {\n\t\t\t\t\t*b = append(*b, make([]byte, size-len(*b))...)\n\t\t\t\t}\n\t\t\tcase *[]Handle:\n\t\t\t\tif len(*b) >= size {\n\t\t\t\t\t*b = (*b)[:size]\n\t\t\t\t} else {\n\t\t\t\t\t*b = append(*b, make([]Handle, size-len(*b))...)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"can't fill pointer to %T, only []byte or []Handle slices\", e)\n\t\t\t}\n\n\t\t\tif err := binary.Read(buf, binary.BigEndian, e); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\tif err := binary.Read(buf, binary.BigEndian, e); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn nil\n}",
"func UnpackUint(r io.Reader, ptr interface{}) error {\n\tvar err error\n\tvar val interface{}\n\n\tif val, err = UnpackPrimitive(r); err != nil {\n\t\treturn err\n\t}\n\n\tif val == nil {\n\t\treflect.ValueOf(ptr).Elem().Set(reflect.Zero(reflect.TypeOf(ptr).Elem()))\n\t} else {\n\t\tswitch reflect.ValueOf(val).Kind() {\n\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\treflect.ValueOf(ptr).Elem().SetUint(uint64(reflect.ValueOf(val).Int()))\n\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\treflect.ValueOf(ptr).Elem().SetUint(reflect.ValueOf(val).Uint())\n\t\tcase reflect.Float32, reflect.Float64:\n\t\t\treflect.ValueOf(ptr).Elem().SetUint(uint64(reflect.ValueOf(val).Float()))\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"msgp: unpacked value[%v] is not assignable to unsigned integer type\", val)\n\t\t}\n\t}\n\treturn nil\n}",
"func TestUnpackStruct(t *testing.T) {\n\ttype test struct {\n\t\tA *int `erlpack:\"a\"`\n\t}\n\tvar x test\n\terr := Unpack([]byte(\"\\x83t\\x00\\x00\\x00\\x01m\\x00\\x00\\x00\\x01aa\\x01\"), &x)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif *x.A != 1 {\n\t\tt.Fatal(\"not 1\")\n\t}\n}",
"func BenchmarkLargerUnpack(b *testing.B) {\n\tm := map[int]int{}\n\tfor i := 0; i < 10000; i++ {\n\t\tm[i] = 1024\n\t}\n\tdata, err := Pack(&m)\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tb.ResetTimer()\n\tm = nil\n\terr = Unpack(data, &m)\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n}",
"func TestUnpackArray(t *testing.T) {\n\tpacked := []byte(\"\\x83l\\x00\\x00\\x00\\x01a\\x01\")\n\tvar a []int\n\terr := Unpack(packed, &a)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(a) != 1 {\n\t\tt.Fatal(\"length is not 1\")\n\t}\n\tif a[0] != 1 {\n\t\tt.Fatal(\"should be 1\")\n\t}\n}",
"func UnpackInt(r io.Reader, ptr interface{}) error {\n\tvar err error\n\tvar val interface{}\n\n\tif val, err = UnpackPrimitive(r); err != nil {\n\t\treturn err\n\t}\n\n\tif val == nil {\n\t\treflect.ValueOf(ptr).Elem().Set(reflect.Zero(reflect.TypeOf(ptr).Elem()))\n\t} else {\n\t\tswitch reflect.ValueOf(val).Kind() {\n\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\treflect.ValueOf(ptr).Elem().SetInt(reflect.ValueOf(val).Int())\n\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\treflect.ValueOf(ptr).Elem().SetInt(int64(reflect.ValueOf(val).Uint()))\n\t\tcase reflect.Float32, reflect.Float64:\n\t\t\treflect.ValueOf(ptr).Elem().SetInt(int64(reflect.ValueOf(val).Float()))\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"msgp: unpacked value[%v] is not assignable to integer type\", val)\n\t\t}\n\t}\n\treturn nil\n}",
"func structUnpack(data string) (tries uint16, jobID string, err error) {\n\tbuf := []byte(data)\n\th1 := binary.LittleEndian.Uint16(buf[0:])\n\th2 := binary.LittleEndian.Uint16(buf[2:])\n\tjobID = string(buf[4:])\n\ttries = h1\n\tif len(jobID) != int(h2) {\n\t\terr = errors.New(\"corrupted data\")\n\t}\n\treturn\n}",
"func Unpack(dst, src []byte) ([]byte, error) {\n\tfor len(src) > 0 {\n\t\ttag := src[0]\n\t\tsrc = src[1:]\n\n\t\tpstart := len(dst)\n\t\tdst = allocWords(dst, 1)\n\t\tp := dst[pstart : pstart+wordSize]\n\t\tif len(src) >= wordSize {\n\t\t\ti := 0\n\t\t\tnz := tag & 1\n\t\t\tp[0] = src[i] & -nz\n\t\t\ti += int(nz)\n\t\t\tnz = tag >> 1 & 1\n\t\t\tp[1] = src[i] & -nz\n\t\t\ti += int(nz)\n\t\t\tnz = tag >> 2 & 1\n\t\t\tp[2] = src[i] & -nz\n\t\t\ti += int(nz)\n\t\t\tnz = tag >> 3 & 1\n\t\t\tp[3] = src[i] & -nz\n\t\t\ti += int(nz)\n\t\t\tnz = tag >> 4 & 1\n\t\t\tp[4] = src[i] & -nz\n\t\t\ti += int(nz)\n\t\t\tnz = tag >> 5 & 1\n\t\t\tp[5] = src[i] & -nz\n\t\t\ti += int(nz)\n\t\t\tnz = tag >> 6 & 1\n\t\t\tp[6] = src[i] & -nz\n\t\t\ti += int(nz)\n\t\t\tnz = tag >> 7 & 1\n\t\t\tp[7] = src[i] & -nz\n\t\t\ti += int(nz)\n\t\t\tsrc = src[i:]\n\t\t} else {\n\t\t\tfor i := uint(0); i < wordSize; i++ {\n\t\t\t\tif tag&(1<<i) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif len(src) == 0 {\n\t\t\t\t\treturn dst, io.ErrUnexpectedEOF\n\t\t\t\t}\n\t\t\t\tp[i] = src[0]\n\t\t\t\tsrc = src[1:]\n\t\t\t}\n\t\t}\n\t\tswitch tag {\n\t\tcase zeroTag:\n\t\t\tif len(src) == 0 {\n\t\t\t\treturn dst, io.ErrUnexpectedEOF\n\t\t\t}\n\t\t\tdst = allocWords(dst, int(src[0]))\n\t\t\tsrc = src[1:]\n\t\tcase unpackedTag:\n\t\t\tif len(src) == 0 {\n\t\t\t\treturn dst, io.ErrUnexpectedEOF\n\t\t\t}\n\t\t\tstart := len(dst)\n\t\t\tdst = allocWords(dst, int(src[0]))\n\t\t\tsrc = src[1:]\n\t\t\tn := copy(dst[start:], src)\n\t\t\tif n < len(dst)-start {\n\t\t\t\treturn dst, io.ErrUnexpectedEOF\n\t\t\t}\n\t\t\tsrc = src[n:]\n\t\t}\n\t}\n\treturn dst, nil\n}",
"func Unpack(r io.Reader, ptr interface{}) error {\n\tvar err error\n\n\twantType := reflect.TypeOf(ptr).Elem()\n\tswitch wantType.Kind() {\n\tcase reflect.Bool:\n\t\terr = UnpackBool(r, ptr)\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\terr = UnpackInt(r, ptr)\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\terr = UnpackUint(r, ptr)\n\tcase reflect.Float32, reflect.Float64:\n\t\terr = UnpackFloat(r, ptr)\n\tcase reflect.String:\n\t\terr = UnpackString(r, ptr)\n\tcase reflect.Array:\n\t\terr = UnpackArray(r, ptr)\n\tcase reflect.Slice:\n\t\terr = UnpackSlice(r, ptr)\n\tcase reflect.Map:\n\t\terr = UnpackMap(r, ptr)\n\tcase reflect.Struct:\n\t\terr = UnpackStruct(r, ptr)\n\tcase reflect.Ptr:\n\t\terr = UnpackPtr(r, ptr)\n\tcase reflect.Interface:\n\t\terr = UnpackInterface(r, ptr)\n\tdefault:\n\t\treturn fmt.Errorf(\"msgp: specified type[%v] is not supported\", wantType.Kind())\n\t}\n\n\treturn err\n}",
"func UnpackPrimitive(r io.Reader) (interface{}, error) {\n\tvar err error\n\tvar head byte\n\n\tif err = binary.Read(r, binary.BigEndian, &head); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif head == 0xc0 { // nil\n\t\treturn nil, nil\n\t} else if head == 0xc2 { // bool\n\t\treturn false, nil\n\t} else if head == 0xc3 {\n\t\treturn true, nil\n\t} else if head&0x80 == 0 { // int8\n\t\treturn int8(head), nil\n\t} else if head&0xe0 == 0xe0 {\n\t\treturn int8(head), nil\n\t} else if head == 0xd0 {\n\t\treturn unpackInt8(r)\n\t} else if head == 0xd1 {\n\t\treturn unpackInt16(r)\n\t} else if head == 0xd2 {\n\t\treturn unpackInt32(r)\n\t} else if head == 0xd3 {\n\t\treturn unpackInt64(r)\n\t} else if head == 0xcc {\n\t\treturn unpackUint8(r)\n\t} else if head == 0xcd {\n\t\treturn unpackUint16(r)\n\t} else if head == 0xce {\n\t\treturn unpackUint32(r)\n\t} else if head == 0xcf {\n\t\treturn unpackUint64(r)\n\t} else if head == 0xca {\n\t\treturn unpackFloat32(r)\n\t} else if head == 0xcb {\n\t\treturn unpackFloat64(r)\n\t} else if head&0xe0 == 0xa0 {\n\t\treturn unpackString5(r, int(head&0x1f))\n\t} else if head == 0xd9 {\n\t\treturn unpackString8(r)\n\t} else if head == 0xda {\n\t\treturn unpackString16(r)\n\t} else if head == 0xdb {\n\t\treturn unpackString32(r)\n\t} else if head == 0xc4 { // bin\n\t\treturn unpackBin8(r)\n\t} else if head == 0xc5 {\n\t\treturn unpackBin16(r)\n\t} else if head == 0xc6 {\n\t\treturn unpackBin32(r)\n\t} else if head&0xf0 == 0x90 { // array\n\t\treturn unpackArray4(r, int(head&0x0f))\n\t} else if head == 0xdc {\n\t\treturn unpackArray16(r)\n\t} else if head == 0xdd {\n\t\treturn unpackArray32(r)\n\t} else if head&0xf0 == 0x80 { // map\n\t\treturn unpackMap4(r, int(head&0x0f))\n\t} else if head == 0xde {\n\t\treturn unpackMap16(r)\n\t} else if head == 0xdf {\n\t\treturn unpackMap32(r)\n\t}\n\n\treturn nil, errors.New(\"msgp: UnpackPrimitive() reads unsupported(array, map) format family\")\n}",
"func Unpack(dst, pkt []byte, ciph Cipher) ([]byte, error) {\n\tsaltSize := ciph.SaltSize()\n\tif len(pkt) < saltSize {\n\t\treturn nil, ErrShortPacket\n\t}\n\tsalt := pkt[:saltSize]\n\taead, err := ciph.Decrypter(salt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(pkt) < saltSize+aead.Overhead() {\n\t\treturn nil, ErrShortPacket\n\t}\n\tif saltSize+len(dst)+aead.Overhead() < len(pkt) {\n\t\treturn nil, io.ErrShortBuffer\n\t}\n\tb, err := aead.Open(dst[:0], _zerononce[:aead.NonceSize()], pkt[saltSize:], nil)\n\treturn b, err\n}",
"func (m *MockUnpacker) Unpack(arg0 []byte, arg1 *wire.Header, arg2 []byte) (*unpackedPacket, error) {\n\tret := m.ctrl.Call(m, \"Unpack\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(*unpackedPacket)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func Unpack(i interface{}) T {\n\tp := *(*uintptr)(unsafe.Pointer(&i))\n\tv := T{\n\t\ttypeID: *(*int32)((unsafe.Pointer(p + rtypeStrOffset))),\n\t\ti: i,\n\t}\n\treturn v\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
UnpackBuf recursively unpacks types from a reader just as encoding/binary does under binary.BigEndian, but with one difference: it unpacks a byte slice by first reading a lengthPrefixSize-byte integer, then reading that many bytes. It assumes that incoming values are pointers to values so that, e.g., underlying slices can be resized as needed.
|
func UnpackBuf(buf io.Reader, elts ...interface{}) error {
for _, e := range elts {
v := reflect.ValueOf(e)
k := v.Kind()
if k != reflect.Ptr {
return fmt.Errorf("all values passed to Unpack must be pointers, got %v", k)
}
if v.IsNil() {
return errors.New("can't fill a nil pointer")
}
iv := reflect.Indirect(v)
switch iv.Kind() {
case reflect.Struct:
// Decompose the struct and copy over the values.
for i := 0; i < iv.NumField(); i++ {
if err := UnpackBuf(buf, iv.Field(i).Addr().Interface()); err != nil {
return err
}
}
case reflect.Slice:
var size int
_, isHandles := e.(*[]Handle)
switch {
// []Handle always uses 2-byte length, even with TPM 1.2.
case isHandles:
var tmpSize uint16
if err := binary.Read(buf, binary.BigEndian, &tmpSize); err != nil {
return err
}
size = int(tmpSize)
// TPM 2.0
case lengthPrefixSize == tpm20PrefixSize:
var tmpSize uint16
if err := binary.Read(buf, binary.BigEndian, &tmpSize); err != nil {
return err
}
size = int(tmpSize)
// TPM 1.2
case lengthPrefixSize == tpm12PrefixSize:
var tmpSize uint32
if err := binary.Read(buf, binary.BigEndian, &tmpSize); err != nil {
return err
}
size = int(tmpSize)
default:
return fmt.Errorf("lengthPrefixSize is %d, must be either 2 or 4", lengthPrefixSize)
}
// A zero size is used by the TPM to signal that certain elements
// are not present.
if size == 0 {
continue
}
// Make len(e) match size exactly.
switch b := e.(type) {
case *[]byte:
if len(*b) >= size {
*b = (*b)[:size]
} else {
*b = append(*b, make([]byte, size-len(*b))...)
}
case *[]Handle:
if len(*b) >= size {
*b = (*b)[:size]
} else {
*b = append(*b, make([]Handle, size-len(*b))...)
}
default:
return fmt.Errorf("can't fill pointer to %T, only []byte or []Handle slices", e)
}
if err := binary.Read(buf, binary.BigEndian, e); err != nil {
return err
}
default:
if err := binary.Read(buf, binary.BigEndian, e); err != nil {
return err
}
}
}
return nil
}
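
A hedged usage sketch (the struct and its fields are illustrative, not from the package): because UnpackBuf decomposes structs field by field, one call can fill a mix of fixed-size values and length-prefixed slices.

// sessionBlob is a hypothetical wire structure used only for illustration.
type sessionBlob struct {
    Counter uint32 // read directly as four big-endian bytes
    Digest  []byte // read as a lengthPrefixSize-prefixed byte slice
}

func readSessionBlob(r io.Reader) (*sessionBlob, error) {
    var b sessionBlob
    // Counter is filled first, then Digest's length prefix and body.
    if err := UnpackBuf(r, &b); err != nil {
        return nil, err
    }
    return &b, nil
}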
|
[
"func UnpackSlice(r io.Reader, ptr interface{}) error {\n\tvar err error\n\tvar head byte\n\n\tsliceTyp := reflect.TypeOf(ptr).Elem()\n\tsliceVal := reflect.ValueOf(ptr).Elem()\n\n\tif err = binary.Read(r, binary.BigEndian, &head); err != nil {\n\t\treturn err\n\t}\n\n\tif head == 0xc0 { // nil\n\t\treflect.ValueOf(ptr).Elem().Set(reflect.Zero(reflect.TypeOf(ptr).Elem()))\n\t\treturn nil\n\t}\n\n\t// handle bin format family\n\tif head == 0xc4 || head == 0xc5 || head == 0xc6 {\n\t\tif sliceTyp.Elem().Kind() == reflect.Uint8 {\n\t\t\tvar byteSlice []byte\n\n\t\t\tswitch head {\n\t\t\tcase 0xc4:\n\t\t\t\tbyteSlice, err = unpackBin8(r)\n\t\t\tcase 0xc5:\n\t\t\t\tbyteSlice, err = unpackBin16(r)\n\t\t\tcase 0xc6:\n\t\t\t\tbyteSlice, err = unpackBin32(r)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tsliceVal.Set(reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf(byteSlice).Elem()), len(byteSlice), len(byteSlice)))\n\t\t\treflect.Copy(sliceVal, reflect.ValueOf(byteSlice))\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"msgp: byte array can't be assigned to other type[%v] slice\", sliceTyp.Elem().Kind())\n\t}\n\n\t// handle array format family\n\tvar srcLen = 0\n\tif head&0xf0 == 0x90 { // array\n\t\tsrcLen = int(head & 0x0f)\n\t} else if head == 0xdc {\n\t\tvar temp uint16\n\t\tif err = binary.Read(r, binary.BigEndian, &temp); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsrcLen = int(temp)\n\t} else if head == 0xdd {\n\t\tvar temp uint32\n\t\tif err = binary.Read(r, binary.BigEndian, &temp); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsrcLen = int(temp) // maybe overflow.\n\t} else {\n\t\treturn fmt.Errorf(\"msgp: unpacked value is not an array\")\n\t}\n\n\tsliceVal.Set(reflect.MakeSlice(reflect.SliceOf(sliceTyp.Elem()), srcLen, srcLen)) // slice 생성.\n\tfor inx := 0; inx < srcLen; inx++ {\n\t\tif err = Unpack(r, sliceVal.Index(inx).Addr().Interface()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func Unpack(r io.Reader, ptr interface{}) error {\n\tvar err error\n\n\twantType := reflect.TypeOf(ptr).Elem()\n\tswitch wantType.Kind() {\n\tcase reflect.Bool:\n\t\terr = UnpackBool(r, ptr)\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\terr = UnpackInt(r, ptr)\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\terr = UnpackUint(r, ptr)\n\tcase reflect.Float32, reflect.Float64:\n\t\terr = UnpackFloat(r, ptr)\n\tcase reflect.String:\n\t\terr = UnpackString(r, ptr)\n\tcase reflect.Array:\n\t\terr = UnpackArray(r, ptr)\n\tcase reflect.Slice:\n\t\terr = UnpackSlice(r, ptr)\n\tcase reflect.Map:\n\t\terr = UnpackMap(r, ptr)\n\tcase reflect.Struct:\n\t\terr = UnpackStruct(r, ptr)\n\tcase reflect.Ptr:\n\t\terr = UnpackPtr(r, ptr)\n\tcase reflect.Interface:\n\t\terr = UnpackInterface(r, ptr)\n\tdefault:\n\t\treturn fmt.Errorf(\"msgp: specified type[%v] is not supported\", wantType.Kind())\n\t}\n\n\treturn err\n}",
"func (reply *Reply) ReadFromN(r *bufio.Reader, n int64) (Value, error) {\n\tid := reply.n\n\treply.values = reply.values[:cap(reply.values)]\n\terr := reply.readArray(r, n)\n\treply.values = reply.values[:reply.n]\n\treturn Value{id: id, reply: reply}, err\n}",
"func (d *decoder81) ReadValueBytes(fixedLen int, x *[]byte) (err error) {\n\t// StartValue. Initialize tt and lenHint, and track whether the []byte type\n\t// is already on the stack via isOnStack.\n\tisOnStack := d.flag.IgnoreNextStartValue()\n\td.flag = d.flag.Clear(decFlagIgnoreNextStartValue)\n\tvar tt *vdl.Type\n\tvar lenHint int\n\tif isOnStack {\n\t\ttop := d.top()\n\t\ttt, lenHint = top.Type, top.LenHint\n\t} else {\n\t\tif tt, err = d.dfsNextType(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar flag decStackFlag\n\t\tif tt, lenHint, flag, err = d.setupType(tt, nil); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// If tt isn't []byte or [n]byte (or a named variant of these), we need to\n\t\t// perform conversion byte-by-byte. This is complicated, and can't be\n\t\t// really fast, so we just push an entry onto the stack and handle this via\n\t\t// DecodeConvertedBytes below.\n\t\t//\n\t\t// We also need to perform the compatibility check, to make sure tt is\n\t\t// compatible with []byte. The check is fairly expensive, so skipping it\n\t\t// when tt is actually a bytes type makes the the common case faster.\n\t\tif !tt.IsBytes() {\n\t\t\tif !vdl.Compatible(tt, ttByteList) {\n\t\t\t\treturn errIncompatibleDecode(tt, \"bytes\")\n\t\t\t}\n\t\t\td.stack = append(d.stack, decStackEntry{\n\t\t\t\tType: tt,\n\t\t\t\tIndex: -1,\n\t\t\t\tLenHint: lenHint,\n\t\t\t\tFlag: flag,\n\t\t\t})\n\t\t\tisOnStack = true\n\t\t}\n\t}\n\t// Decode. The common-case fastpath reads directly from the buffer.\n\tif tt.IsBytes() {\n\t\tif err := d.decodeBytes(tt, lenHint, fixedLen, x); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tif err := vdl.DecodeConvertedBytes(d, fixedLen, x); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// FinishValue\n\tif isOnStack {\n\t\tif err := d.FinishValue(); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\td.flag = d.flag.Clear(decFlagIsParentBytes)\n\t\tif len(d.stack) == 0 {\n\t\t\tif err := d.endMessage(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func UnpackArray(r io.Reader, ptr interface{}) error {\n\tvar err error\n\tvar head byte\n\n\tarrTyp := reflect.TypeOf(ptr).Elem()\n\tarrVal := reflect.ValueOf(ptr).Elem()\n\tarrLen := arrVal.Len()\n\n\tif err = binary.Read(r, binary.BigEndian, &head); err != nil {\n\t\treturn err\n\t}\n\n\tif head == 0xc0 { // nil\n\t\treflect.ValueOf(ptr).Elem().Set(reflect.Zero(reflect.TypeOf(ptr).Elem()))\n\t\treturn nil\n\t}\n\n\t// handle bin format family\n\tif head == 0xc4 || head == 0xc5 || head == 0xc6 {\n\t\tif arrTyp.Elem().Kind() == reflect.Uint8 {\n\t\t\tvar byteSlice []byte\n\n\t\t\tswitch head {\n\t\t\tcase 0xc4:\n\t\t\t\tbyteSlice, err = unpackBin8(r)\n\t\t\tcase 0xc5:\n\t\t\t\tbyteSlice, err = unpackBin16(r)\n\t\t\tcase 0xc6:\n\t\t\t\tbyteSlice, err = unpackBin32(r)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tarrVal.Set(reflect.Zero(reflect.ArrayOf(len(byteSlice), reflect.TypeOf(byteSlice).Elem())))\n\t\t\treflect.Copy(arrVal, reflect.ValueOf(byteSlice))\n\t\t\treturn nil\n\t\t}\n\n\t\treturn fmt.Errorf(\"msgp: byte array can't be assigned to other type[%v] array\", arrTyp.Elem().Kind())\n\t}\n\n\t// handle array format family\n\tvar srcLen = 0\n\tif head&0xf0 == 0x90 { // array\n\t\tsrcLen = int(head & 0x0f)\n\t} else if head == 0xdc {\n\t\tvar temp uint16\n\t\tif err = binary.Read(r, binary.BigEndian, &temp); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsrcLen = int(temp)\n\t} else if head == 0xdd {\n\t\tvar temp uint32\n\t\tif err = binary.Read(r, binary.BigEndian, &temp); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsrcLen = int(temp) // maybe overflow.\n\t} else {\n\t\treturn fmt.Errorf(\"msgp: unpacked value is not an array\")\n\t}\n\n\tif arrLen < srcLen {\n\t\treturn fmt.Errorf(\"msgp: array size is too small\")\n\t}\n\n\tarrVal.Set(reflect.Zero(reflect.ArrayOf(arrLen, arrTyp.Elem()))) // array 생성.\n\tfor inx := 0; inx < srcLen; inx++ {\n\t\tif err = Unpack(r, arrVal.Index(inx).Addr().Interface()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (ti *varBinaryType) ReadFrom(_ *types.NomsBinFormat, reader types.CodecReader) (interface{}, error) {\n\tk := reader.PeekKind()\n\tswitch k {\n\tcase types.BlobKind:\n\t\tval, err := reader.ReadBlob()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn fromBlob(val)\n\tcase types.NullKind:\n\t\t_ = reader.ReadKind()\n\t\treturn nil, nil\n\t}\n\n\treturn nil, fmt.Errorf(`\"%v\" cannot convert NomsKind \"%v\" to a value`, ti.String(), k)\n}",
"func (reply *Reply) ReadFrom(r *bufio.Reader) (Value, error) {\n\tid := reply.n\n\treply.values = reply.values[:cap(reply.values)]\n\terr := reply.read(r)\n\treply.values = reply.values[:reply.n]\n\treturn Value{id: id, reply: reply}, err\n}",
"func UnpackPrimitive(r io.Reader) (interface{}, error) {\n\tvar err error\n\tvar head byte\n\n\tif err = binary.Read(r, binary.BigEndian, &head); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif head == 0xc0 { // nil\n\t\treturn nil, nil\n\t} else if head == 0xc2 { // bool\n\t\treturn false, nil\n\t} else if head == 0xc3 {\n\t\treturn true, nil\n\t} else if head&0x80 == 0 { // int8\n\t\treturn int8(head), nil\n\t} else if head&0xe0 == 0xe0 {\n\t\treturn int8(head), nil\n\t} else if head == 0xd0 {\n\t\treturn unpackInt8(r)\n\t} else if head == 0xd1 {\n\t\treturn unpackInt16(r)\n\t} else if head == 0xd2 {\n\t\treturn unpackInt32(r)\n\t} else if head == 0xd3 {\n\t\treturn unpackInt64(r)\n\t} else if head == 0xcc {\n\t\treturn unpackUint8(r)\n\t} else if head == 0xcd {\n\t\treturn unpackUint16(r)\n\t} else if head == 0xce {\n\t\treturn unpackUint32(r)\n\t} else if head == 0xcf {\n\t\treturn unpackUint64(r)\n\t} else if head == 0xca {\n\t\treturn unpackFloat32(r)\n\t} else if head == 0xcb {\n\t\treturn unpackFloat64(r)\n\t} else if head&0xe0 == 0xa0 {\n\t\treturn unpackString5(r, int(head&0x1f))\n\t} else if head == 0xd9 {\n\t\treturn unpackString8(r)\n\t} else if head == 0xda {\n\t\treturn unpackString16(r)\n\t} else if head == 0xdb {\n\t\treturn unpackString32(r)\n\t} else if head == 0xc4 { // bin\n\t\treturn unpackBin8(r)\n\t} else if head == 0xc5 {\n\t\treturn unpackBin16(r)\n\t} else if head == 0xc6 {\n\t\treturn unpackBin32(r)\n\t} else if head&0xf0 == 0x90 { // array\n\t\treturn unpackArray4(r, int(head&0x0f))\n\t} else if head == 0xdc {\n\t\treturn unpackArray16(r)\n\t} else if head == 0xdd {\n\t\treturn unpackArray32(r)\n\t} else if head&0xf0 == 0x80 { // map\n\t\treturn unpackMap4(r, int(head&0x0f))\n\t} else if head == 0xde {\n\t\treturn unpackMap16(r)\n\t} else if head == 0xdf {\n\t\treturn unpackMap32(r)\n\t}\n\n\treturn nil, errors.New(\"msgp: UnpackPrimitive() reads unsupported(array, map) format family\")\n}",
"func (d *Deserializer) ReadNum(dest interface{}, errProducer ErrProducer) *Deserializer {\n\tif d.err != nil {\n\t\treturn d\n\t}\n\n\tl := len(d.src)\n\n\tswitch x := dest.(type) {\n\tcase *uint8:\n\t\tif l < OneByte {\n\t\t\td.err = errProducer(ErrDeserializationNotEnoughData)\n\t\t\treturn d\n\t\t}\n\t\tl = OneByte\n\t\t*x = d.src[0]\n\n\tcase *uint16:\n\t\tif l < UInt16ByteSize {\n\t\t\td.err = errProducer(ErrDeserializationNotEnoughData)\n\t\t\treturn d\n\t\t}\n\t\tl = UInt16ByteSize\n\t\t*x = binary.LittleEndian.Uint16(d.src[:UInt16ByteSize])\n\n\tcase *uint32:\n\t\tif l < UInt32ByteSize {\n\t\t\td.err = errProducer(ErrDeserializationNotEnoughData)\n\t\t\treturn d\n\t\t}\n\t\tl = UInt32ByteSize\n\t\t*x = binary.LittleEndian.Uint32(d.src[:UInt32ByteSize])\n\tcase *uint64:\n\t\tif l < UInt64ByteSize {\n\t\t\td.err = errProducer(ErrDeserializationNotEnoughData)\n\t\t\treturn d\n\t\t}\n\t\tl = UInt64ByteSize\n\t\t*x = binary.LittleEndian.Uint64(d.src[:UInt64ByteSize])\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported ReadNum type %T\", dest))\n\t}\n\n\td.offset += l\n\td.src = d.src[l:]\n\n\treturn d\n}",
"func UnpackInt(r io.Reader, ptr interface{}) error {\n\tvar err error\n\tvar val interface{}\n\n\tif val, err = UnpackPrimitive(r); err != nil {\n\t\treturn err\n\t}\n\n\tif val == nil {\n\t\treflect.ValueOf(ptr).Elem().Set(reflect.Zero(reflect.TypeOf(ptr).Elem()))\n\t} else {\n\t\tswitch reflect.ValueOf(val).Kind() {\n\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\treflect.ValueOf(ptr).Elem().SetInt(reflect.ValueOf(val).Int())\n\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\treflect.ValueOf(ptr).Elem().SetInt(int64(reflect.ValueOf(val).Uint()))\n\t\tcase reflect.Float32, reflect.Float64:\n\t\t\treflect.ValueOf(ptr).Elem().SetInt(int64(reflect.ValueOf(val).Float()))\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"msgp: unpacked value[%v] is not assignable to integer type\", val)\n\t\t}\n\t}\n\treturn nil\n}",
"func (v *Value) FromBytes(in Reader, limit uint64, buf *[]byte) error {\n\tvlen, err := readerReadUvarint(in)\n\tif err != nil {\n\t\treturn errors.New(corruptinputdata)\n\t}\n\tklen, err := readerReadUvarint(in)\n\tif err != nil {\n\t\treturn errors.New(corruptinputdata)\n\t}\n\tif 1+vlen+1+klen+1 > limit {\n\t\treturn errors.New(oversizedInputData)\n\t}\n\ttotalLength := int(1 + vlen + 1 + klen + 1)\n\tif tryGrowByReslice(buf, totalLength) {\n\t\t*buf = make([]byte, 1+vlen+1+klen+1)\n\t}\n\tdata := (*buf)[:1+vlen+1+klen+1]\n\terr = readAtLeast(in, data)\n\tif err != nil {\n\t\treturn errors.New(corruptinputdata)\n\t}\n\n\tv.Value.Vtype = ttType(data[0])\n\tv.Value.Value = data[1 : 1+vlen]\n\tv.Key.Vtype = ttType(data[1+vlen])\n\tv.Key.Value = data[1+vlen+1 : 1+vlen+1+klen]\n\n\tif data[1+vlen+1+klen] < 0x80 {\n\t\tv.Childrenn = uint64(data[1+vlen+1+klen])\n\t} else {\n\t\tchildren, err := binary.ReadUvarint(readFirstByte{\n\t\t\tb: data[1+vlen+1+klen],\n\t\t\tr: in,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn errors.New(corruptinputdata)\n\t\t}\n\t\tv.Childrenn = children\n\t}\n\treturn nil\n}",
"func UnpackStruct(r io.Reader, ptr interface{}) error {\n\tvar err error\n\tvar head byte\n\n\tif err = binary.Read(r, binary.BigEndian, &head); err != nil {\n\t\treturn err\n\t}\n\tif head == 0xc0 { // nil\n\t\treflect.ValueOf(ptr).Elem().Set(reflect.Zero(reflect.TypeOf(ptr).Elem()))\n\t\treturn nil\n\t}\n\n\tvar srcLen = 0\n\tif head&0xf0 == 0x80 { // map\n\t\tsrcLen = int(head & 0x0f)\n\t} else if head == 0xde {\n\t\tvar temp uint16\n\t\tif err = binary.Read(r, binary.BigEndian, &temp); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsrcLen = int(temp)\n\t} else if head == 0xdf {\n\t\tvar temp uint32\n\t\tif err = binary.Read(r, binary.BigEndian, &temp); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsrcLen = int(temp)\n\t} else {\n\t\treturn fmt.Errorf(\"msgp: unpacked value is not a map\")\n\t}\n\n\ttype StructField struct {\n\t\tProps FieldProps\n\t\tVal reflect.Value\n\t}\n\tfieldMap := make(map[string]StructField)\n\n\tstructTyp := reflect.TypeOf(ptr).Elem()\n\tstructVal := reflect.ValueOf(ptr).Elem()\n\n\tstructVal.Set(reflect.Zero(structTyp)) // init with zero value\n\n\tstructNumField := structTyp.NumField()\n\tfor inx := 0; inx < structNumField; inx++ {\n\t\tvar fp FieldProps\n\n\t\tfieldTyp := structTyp.Field(inx)\n\t\tfieldVal := structVal.Field(inx)\n\t\tfp.parseTag(fieldTyp)\n\t\tif fp.Skip {\n\t\t\tcontinue\n\t\t}\n\t\tfieldMap[fp.Name] = StructField{fp, fieldVal}\n\t}\n\n\tfor inx := 0; inx < srcLen; inx++ {\n\t\tvar key string\n\t\tif err = Unpack(r, &key); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tstructField, ok := fieldMap[key]\n\t\tif ok {\n\t\t\tif structField.Props.Skip {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif structField.Props.String {\n\t\t\t\tvar str string\n\t\t\t\tif err = Unpack(r, &str); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err = assignValueFromString(structField.Val, str); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err = Unpack(r, structField.Val.Addr().Interface()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func UnpackUint(r io.Reader, ptr interface{}) error {\n\tvar err error\n\tvar val interface{}\n\n\tif val, err = UnpackPrimitive(r); err != nil {\n\t\treturn err\n\t}\n\n\tif val == nil {\n\t\treflect.ValueOf(ptr).Elem().Set(reflect.Zero(reflect.TypeOf(ptr).Elem()))\n\t} else {\n\t\tswitch reflect.ValueOf(val).Kind() {\n\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\treflect.ValueOf(ptr).Elem().SetUint(uint64(reflect.ValueOf(val).Int()))\n\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\treflect.ValueOf(ptr).Elem().SetUint(reflect.ValueOf(val).Uint())\n\t\tcase reflect.Float32, reflect.Float64:\n\t\t\treflect.ValueOf(ptr).Elem().SetUint(uint64(reflect.ValueOf(val).Float()))\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"msgp: unpacked value[%v] is not assignable to unsigned integer type\", val)\n\t\t}\n\t}\n\treturn nil\n}",
"func UnpackMap(r io.Reader, ptr interface{}) error {\n\tvar err error\n\tvar head byte\n\n\tmapTyp := reflect.TypeOf(ptr).Elem()\n\tmapVal := reflect.ValueOf(ptr).Elem()\n\n\tif err = binary.Read(r, binary.BigEndian, &head); err != nil {\n\t\treturn err\n\t}\n\n\tif head == 0xc0 { // nil\n\t\treflect.ValueOf(ptr).Elem().Set(reflect.Zero(reflect.TypeOf(ptr).Elem()))\n\t\treturn nil\n\t}\n\n\tvar srcLen = 0\n\tif head&0xf0 == 0x80 { // map\n\t\tsrcLen = int(head & 0x0f)\n\t} else if head == 0xde {\n\t\tvar temp uint16\n\t\tif err = binary.Read(r, binary.BigEndian, &temp); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsrcLen = int(temp)\n\t} else if head == 0xdf {\n\t\tvar temp uint32\n\t\tif err = binary.Read(r, binary.BigEndian, &temp); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsrcLen = int(temp)\n\t} else {\n\t\treturn fmt.Errorf(\"msgp: unpacked value is not a map\")\n\t}\n\n\tmapVal.Set(reflect.MakeMap(reflect.MapOf(mapTyp.Key(), mapTyp.Elem()))) // map 생성.\n\tfor inx := 0; inx < srcLen; inx++ {\n\t\tkeyPtr := reflect.New(mapTyp.Key())\n\t\tif err = Unpack(r, keyPtr.Interface()); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvalPtr := reflect.New(mapTyp.Elem())\n\t\tif err = Unpack(r, valPtr.Interface()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmapVal.SetMapIndex(keyPtr.Elem(), valPtr.Elem())\n\t}\n\treturn nil\n}",
"func Unpack(p ProtocolType, b []byte) ([]byte, error) {\n\tif len(b) < 6 {\n\t\treturn nil, errors.New(\"invalid data\")\n\t}\n\n\t// find magic code head and valid\n\tidx := bytes.LastIndex(b, []byte{0x0f, byte(p)})\n\tif idx == -1 {\n\t\treturn nil, errors.New(\"magic code head is not valid\")\n\t}\n\n\t// find magic code tail and valid\n\tif !bytes.Equal(b[len(b)-1:], []byte{0xf0}) {\n\t\treturn nil, errors.New(\"magic code tail is not valid\")\n\t}\n\n\t// got data\n\tdata := b[idx:]\n\n\t// valid data length\n\tvar shouldLen uint16\n\tif err := binary.Read(bytes.NewReader(data[2:4]), binary.BigEndian, &shouldLen); err != nil {\n\t\treturn nil, err\n\t}\n\tif len(data) != int(shouldLen+4+2) {\n\t\treturn nil, errors.New(\"data length is not valid\")\n\t}\n\n\treturn data[4 : shouldLen+4], nil\n}",
"func (c *Chunk) Populate(buf* []byte, ptr int) (int, error) {\n\n var err error\n c.Length, err = uInt32ToInt((*buf)[ptr:ptr+4])\n if err != nil {\n return ptr, errors.New(\"cannot convert length to int\")\n }\n\n\tc.CType = string((*buf)[ptr+4:ptr+8])\n\n\tc.Data = (*buf)[ptr+8:ptr+8+c.Length]\n\t\n\tptr += (12+c.Length)\n\n\tc.Crc32 = (*buf)[ptr-4:ptr]\n\n return ptr, nil\n}",
"func UnpackPtr(r io.Reader, ptr interface{}) error {\n\tvar err error\n\tvar peek byte\n\n\tbr := NewPeekableReader(r)\n\tif peek, err = br.Peek(); err != nil {\n\t\treturn err\n\t}\n\tif peek == 0xc0 { // nil value unpacked.\n\t\treflect.ValueOf(ptr).Elem().Set(reflect.Zero(reflect.TypeOf(ptr).Elem()))\n\t\treturn nil\n\t}\n\n\tnewVal := reflect.New(reflect.TypeOf(ptr).Elem().Elem())\n\tif err = Unpack(br, newVal.Interface()); err != nil { // peeked byte will be consumed in Unpack()\n\t\treturn err\n\t}\n\n\treflect.ValueOf(ptr).Elem().Set(newVal)\n\treturn nil\n}",
"func (u *BinaryUnpacker) Unpack(r io.Reader, downloadInfo *binstack.DownloadInfo) (io.Reader, error) {\n\treturn r, nil\n}",
"func FromUncompressedReader(reader io.Reader) (*MinecraftRawPacket, error) {\n\tpacket := new(MinecraftRawPacket)\n\n\tlength, _, err := readLength(reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpacket.packetLength = length\n\tpacket.dataLength = -1\n\n\tpacket.data = make([]byte, length)\n\t_, err = io.ReadFull(reader, packet.data)\n\n\treturn packet, err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
If debugEnabled is true, debugf formats its arguments and prints to stderr. If debugEnabled is false, it is a noop.
|
func debugf(format string, args ...interface{}) {
if !debugEnabled {
return
}
if false {
_ = fmt.Sprintf(format, args...) // encourage vet to validate format strings
}
fmt.Fprintf(os.Stderr, ">>> "+format+"\n", args...)
}
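
The dead "if false" branch is worth a note: the Fprintf below it builds its format string at run time (">>> "+format+"\n"), which vet cannot check, so the unreachable fmt.Sprintf gives vet a plain printf-style call through which to validate debugf's callers. An illustrative call site (count and ok are hypothetical locals, not from the package):

// Hypothetical call site; count and ok stand in for real state.
debugf("loaded %d packages (metadata present: %t)", count, ok)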
|
[
"func OptionalDebug(debugEnabled bool) func(string, ...interface{}) {\n\tif !debugEnabled {\n\t\t// return a noop function\n\t\treturn func(msg string, args ...interface{}) {\n\t\t\treturn\n\t\t}\n\t}\n\t// return the real debug function\n\treturn func(msg string, args ...interface{}) {\n\t\tif args == nil {\n\t\t\tlog.Print(msg)\n\t\t}\n\t\tlog.Printf(msg, args...)\n\t}\n}",
"func debug(format string, args ...interface{}) {\n\tif IsDebug {\n\t\tlog.Printf(format, args...)\n\t}\n}",
"func (mr *MockLoggingMockRecorder) Debugf(arg0 interface{}, arg1 ...interface{}) *gomock.Call {\n\tvarargs := append([]interface{}{arg0}, arg1...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Debugf\", reflect.TypeOf((*MockLogging)(nil).Debugf), varargs...)\n}",
"func Sdebugf(format string, args ...interface{}) string {\n\treturn sprint(\"debug\", fmt.Sprintf(format, args...))\n}",
"func debug(format string, a ...interface{}) {\n\tif *verbose {\n\t\tlog.Printf(format, a...)\n\t}\n}",
"func Debug(args ...interface{}) {\n\tdefaultLogger.Debug(args...)\n}",
"func (s *Loggable) DebugF(message string, params ...interface{}) {\n\tif s.Logger == nil {\n\t\tconsole.DebugF(defaultLogger, message, params...)\n\t\treturn\n\t}\n\n\ts.Logger.DebugF(message, params...)\n}",
"func (n *noop) Debug(msg ...interface{}) {}",
"func (cfg *BaseConfig) Debugqf(format string, vals ...interface{}) {\n\tif cfg.s_log != nil && cfg.Verbosity.Value != 0 && 1 <= cfg.Verbosity.Value {\n\t\tcfg.s_log.DebugStructured(fmt.Sprintf(format, vals...))\n\t}\n}",
"func (l *Logger) Debugd(msg string, details ...interface{}) {\n\tl.DebugfFunc(msg, details...)\n}",
"func debugLogf(ctx context.Context, format string, args ...interface{}) {\n\tif debugLogging.Value() == \"true\" {\n\t\ttesting.ContextLogf(ctx, format, args...)\n\t}\n}",
"func EnableDebugOutput() {\n\tenableDebugOutput = true\n}",
"func Debug(args ...string) *RunBuilder {\n\treturn withDefaults(\"debug\", args)\n}",
"func (c *ChaincodeLogger) Debug(args ...interface{}) {\n\tc.logger.Debug(args...)\n}",
"func debugLog(ctx context.Context, args ...interface{}) {\n\tif debugLogging.Value() == \"true\" {\n\t\ttesting.ContextLog(ctx, args...)\n\t}\n}",
"func (l AppLogger) DebugCtxf(context string, format string, args ...interface{}) {\n\tl.addContextFields(context).Debugf(format, args...)\n}",
"func DebugfWithFields(f Fields, m string, args ...interface{}) {\n\tlogger.WithFields(decorate(\"debug\", f)).Debugf(m, args...)\n}",
"func D(format string, args ...interface{}) {\n\tif !debugOutput {\n\t\treturn\n\t}\n\n\tfmt.Printf(format, args...)\n}",
"func (m *MockLogging) Debugf(arg0 string, arg1 ...interface{}) {\n\tvarargs := []interface{}{arg0}\n\tfor _, a := range arg1 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"Debugf\", varargs...)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
If debugEnabled is true, dumpWorkspace prints a summary of workspace packages to stderr. If debugEnabled is false, it is a noop.
|
func (s *snapshot) dumpWorkspace(context string) {
if !debugEnabled {
return
}
debugf("workspace (after %s):", context)
var ids []PackageID
for id := range s.workspacePackages {
ids = append(ids, id)
}
sort.Slice(ids, func(i, j int) bool {
return ids[i] < ids[j]
})
for _, id := range ids {
pkgPath := s.workspacePackages[id]
_, ok := s.meta.metadata[id]
debugf(" %s:%s (metadata: %t)", id, pkgPath, ok)
}
}
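
An illustrative call (hypothetical; the context string is arbitrary), typically placed right after the workspace package set is mutated so that consecutive dumps can be compared in the trace:

// Hypothetical call site inside a method that just reloaded metadata.
s.dumpWorkspace("after metadata reload")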
|
[
"func EnableDebugOutput() {\n\tenableDebugOutput = true\n}",
"func (ds *Server) IsDebugOutputEnabled() bool {\n\treturn ds.DebugOutput\n}",
"func (pvs *PVSearch) PrintDebug() {\n\ttotal := 0\n\tfor _, num := range debugNodes {\n\t\ttotal += num\n\t}\n\n\tfmt.Println(\"Total nodes searched:\\t\", IntToStr(int(total)))\n\tif len(debugNodes) != 0 {\n\t\tfmt.Println(\"Each routine:\")\n\t\tfor i, nodes := range debugNodes {\n\t\t\tfmt.Println(\" \", i, \"\\t\", IntToStr(nodes), \":\", debugResults[i])\n\t\t}\n\t} else {\n\t\tfmt.Println(\"No routine specific data\")\n\t}\n\tfmt.Printf(\"Time taken: %v\\n\", timeTaken)\n\tfmt.Printf(\"Nodes/Second: %f\\n\", float64(total)/timeTaken.Seconds())\n\tfmt.Printf(\"\\tExact Nodes: %d\\n\\tAlpha Nodes: %d\\n\\tBeta Nodes: %d\\n\", evaluatedNodes, alphaCutoffs, betaCutoffs)\n\tfmt.Printf(\"\\tNull Moves: %d\\n\", nullMoves)\n\tfmt.Println(\"Current principal variation:\")\n\tfor _, move := range pvs.PVSLine.Table[0] {\n\t\tif move.GetPiece() != 0 {\n\t\t\tfmt.Print(move.Print())\n\t\t}\n\t}\n}",
"func (runCtx *RunContext) Debug() error {\n\tartifacts, err := newDebugLocalGrader()\n\tif err != nil {\n\t\treturn err\n\t}\n\trunCtx.RunInfo.Artifacts = artifacts\n\trunCtx.RunInfo.Run.Debug = true\n\treturn nil\n}",
"func EnableDebugging() {\n\tdebuggingEnabled = true\n}",
"func (n *noop) Debug(msg ...interface{}) {}",
"func (o *PersistOptions) IsDebugMetricsEnabled() bool {\n\treturn o.GetScheduleConfig().EnableDebugMetrics\n}",
"func DisableDebug() {\n\tdebug = false\n\tclear()\n}",
"func (Backend) Debug() {\n\trun(\"mage\", \"sdk:build:debug\")\n}",
"func (layer *NeuronLayer) PrintDebugString(prefix string) {\n\tfor i := 0; i < len(layer.nodes); i++ {\n\t\tlayer.nodes[i].PrintDebugString(prefix)\n\t}\n}",
"func (stack *StackedNet) PrintDebugString(prefix string) {\n\tfor i := 0; i < len(stack.layers); i++ {\n\t\tstack.layers[i].PrintDebugString(prefix + \":layer_\" + strconv.Itoa(i))\n\t}\n}",
"func (st *SymbolTable) Debug() {\n\tfmt.Printf(\"%v\", st.table)\n}",
"func (gameTile *GameTile) PrintDebug() {\n\tfmt.Printf(\"[Tile id:%v, name:%v, con[%v, %v, %v, %v]]\", gameTile.ID(), gameTile.description, gameTile.tileUp.ID(), gameTile.tileRight.ID(), gameTile.tileDown.ID(), gameTile.tileLeft.ID())\n}",
"func Sdebug(args ...interface{}) string {\n\treturn sprint(\"debug\", args...)\n}",
"func DisableDebugging() {\n\tdebuggingEnabled = false\n}",
"func (ds *Server) PrintDebug(v ...interface{}) {\n\tif ds.DebugOutput {\n\t\tds.LogPrint(v...)\n\t}\n}",
"func (pkg *Package) SetDebugMode(debug bool) {\n\t// TODO(adonovan): do we want ast.File granularity?\n\tpkg.debug = debug\n}",
"func (ng *cherryNodeGroup) Debug() string {\n\treturn fmt.Sprintf(\"%s min=%d max=%d target=%d\", ng.id, ng.minSize, ng.maxSize, ng.targetSize)\n}",
"func Debug(ctx *app.Context) error {\n\tcli.HTTPClient = &http.Client{Timeout: 20 * time.Second}\n\tc := cli.New(ctx.App.Version)\n\n\tcfg, err := config.New(ctx.GlobalString(\"config\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tuc := config.UserConfig{\n\t\tPath: cfg.File,\n\t\tHome: paths.Home,\n\t\tWorkspace: cfg.Dir,\n\t\tToken: cfg.APIKey,\n\t}\n\n\tstatus := cli.NewStatus(c, uc)\n\tstatus.Censor = !ctx.Bool(\"full-api-key\")\n\ts, err := status.Check()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(s)\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WaitGoSchedule sleeps momentarily so that other goroutines can run. (etcd rafthttp.waitSchedule)
|
func WaitGoSchedule() { time.Sleep(1 * time.Millisecond) }
|
[
"func WaitSchedule() { time.Sleep(10 * time.Millisecond) }",
"func WaitForScheduleOnFargate(clientSet kubeclient.Interface, retryPolicy retry.Policy) error {\n\t// Clone the retry policy to ensure this method is re-entrant/thread-safe:\n\tretryPolicy = retryPolicy.Clone()\n\tfor !retryPolicy.Done() {\n\t\tisScheduled, err := IsScheduledOnFargate(clientSet)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif isScheduled {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(retryPolicy.Duration())\n\t}\n\treturn fmt.Errorf(\"timed out while waiting for %q to be scheduled on Fargate\", Name)\n}",
"func (s *Scheduler) loopSchedule() {\n\ts.inc()\n\tdefer s.dec()\n\ttick := time.Tick(s.delay)\n\tfor {\n\t\tselect {\n\t\tcase <-s.quit:\n\t\t\t// Quit signal received, return.\n\t\t\treturn\n\t\tcase t := <-tick:\n\t\t\t// Chunk the time into intervals separated by s.delay.\n\t\t\tt = t.Truncate(s.delay)\n\t\t\t// Iterate over each step previous to t.\n\t\t\tfor s.step.Before(t) {\n\t\t\t\ts.schedMutex.Lock()\n\t\t\t\t// Read any jobs for the current time step, if any.\n\t\t\t\tjobs := s.scheduled[s.step]\n\t\t\t\t// Delete the index in the map. If it didn't exist, it's a no-op.\n\t\t\t\tdelete(s.scheduled, s.step)\n\t\t\t\ts.schedMutex.Unlock()\n\n\t\t\t\t// Move any jobs to the waiting channel.\n\t\t\t\tfor _, j := range jobs {\n\t\t\t\t\ts.waiting <- j\n\t\t\t\t}\n\n\t\t\t\ts.schedMutex.Lock()\n\t\t\t\t// Increment the current step.\n\t\t\t\ts.step = s.step.Add(s.delay)\n\t\t\t\ts.schedMutex.Unlock()\n\t\t\t}\n\t\t}\n\t}\n}",
"func (n *Node) Schedule(cfg *Config) {\n\tfor {\n\t\tcons := n.Consensus\n\t\tts := time.Now()\n\t\twait := time.Duration(cfg.Node.Rounds.PaceMs) * time.Millisecond\n\t\tstartTime := ts.Truncate(wait).Add(wait)\n\t\tsyncNodeWaitBeforeRoundStart := startTime.Sub(ts)\n\t\tn.log.Infof(\"round start time: %s\", startTime.String())\n\t\tn.log.Infof(\"sync wait for: %.2f ms\", float64(syncNodeWaitBeforeRoundStart/time.Millisecond))\n\t\ttime.Sleep(syncNodeWaitBeforeRoundStart)\n\t\t// event will be dispatched every N seconds on every node if ntpd is working\n\t\tcons.GetStartChan() <- startTime.Unix()\n\t}\n}",
"func schedFunc(schedin <-chan got.GoTaskMsg, schedout chan<- got.GoTaskMsg, schedevent <-chan got.SchedEvent) {\n\tvar msg schedMsg\n\ttasks := make(map[string]bool)\n\texitCh := make(chan bool,1)\n\ttaskCount := DefaultNumberOfElevators + DefaultNumberOfFloors\n\t//run a goroutine to check scheduling events\n\tgo func() {\n\t\tschedEventLoop: for {\n\t\t\tselect {\n\t\t\tcase evt := <- schedevent:\n\t\t\t\t//just dump it, not real logic\n\t\t\t\tlog.Info(evt)\n\t\t\tcase <- exitCh:\n\t\t\t\tbreak schedEventLoop\n\t\t\t}\n\t\t}\n\t\texitCh<-true\n\t}()\n\n\t//first wait for tasks come up\nwaitloop:\n\tfor i := 0; i < taskCount; i++ {\n\t\tselect {\n\t\tcase taskMsg := <-schedin:\n\t\t\terr := msg.decode(taskMsg)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif msg.currFloor != 0 || msg.goalFloor != 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttasks[msg.taskName] = true\n\t\tcase <-time.After(TimeWaitForTasksUp):\n\t\t\tbreak waitloop\n\t\t}\n\t}\n\t//tell all tasks start\n\tmsg.currFloor = 0\n\tmsg.goalFloor = 0\n\tfor tname, _ := range tasks {\n\t\tlog.Infoln(\"notify: \", tname)\n\t\tmsg.taskName = tname\n\t\tschedout <- msg.encode()\n\t}\n\t//run scheduler\n\t//init scheduler\n\tsched := NewScheduler(DefaultNumberOfFloors, DefaultNumberOfElevators, schedin, schedout)\n\tsched.Run()\n\t//tell all tasks stop\n\tmsg.currFloor = -1\n\tmsg.goalFloor = -1\n\tfor tname, _ := range tasks {\n\t\tlog.Infoln(\"notify: \", tname)\n\t\tmsg.taskName = tname\n\t\tschedout <- msg.encode()\n\t}\n\t//wait for all tasks exit\nexitloop:\n\tfor m := range schedin {\n\t\terr := msg.decode(m)\n\t\tif err != nil {\n\t\t\tlog.Infoln(\"failed decode msg: \", err)\n\t\t\tcontinue\n\t\t}\n\t\tswitch {\n\t\tcase msg.currFloor == -1 && msg.goalFloor == -1:\n\t\t\tlog.Infoln(\"finished task: \", msg.taskName)\n\t\t\tdelete(tasks, msg.taskName)\n\t\t\tif len(tasks) == 0 {\n\t\t\t\tbreak exitloop\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.Infof(\"recv %v\\n\", msg)\n\t\t}\n\t}\n\t//tell sched event loop exit\n\texitCh <- true\n\t//wait for it\n\t<- exitCh\n}",
"func scheduleWaitSSH(vm **framework.VMInterface, ssh *framework.Executor) {\n\tBeforeAll(func() {\n\t\tEventually(\n\t\t\tfunc() error {\n\t\t\t\tvar err error\n\t\t\t\t*ssh, err = (*vm).SSH(\"cirros\", sshPrivateKey)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t_, err = framework.ExecSimple(*ssh)\n\t\t\t\treturn err\n\t\t\t}, 60*5, 3).Should(Succeed())\n\t})\n\n\tAfterAll(func() {\n\t\t(*ssh).Close()\n\t})\n}",
"func schedule(ctx context.Context, id int, endpoints []*Endpoint, minWait, maxWait time.Duration, results chan EndpointResult, wg *sync.WaitGroup) error {\n\tlog.Info().\n\t\tStr(\"component\", \"schedule\").\n\t\tInt(\"id\", id).\n\t\tMsg(\"start new schedule\")\n\n\tdefer wg.Done()\n\n\tfor {\n\t\terr := sleepBetween(minWait, maxWait)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tselect {\n\t\tdefault:\n\t\t\turl := endpoints[rand.Intn(len(endpoints))].URL\n\t\t\tttfb, code, msg, cached, err := runner.Call(ctx, url)\n\n\t\t\tif err != nil {\n\t\t\t\tif err == context.Canceled {\n\t\t\t\t\tlog.Debug().\n\t\t\t\t\t\tStr(\"component\", \"schedule\").\n\t\t\t\t\t\tInt(\"id\", id).\n\t\t\t\t\t\tMsg(\"context canceld mid request\")\n\n\t\t\t\t\treturn nil\n\t\t\t\t} else if err == context.DeadlineExceeded {\n\t\t\t\t\tlog.Warn().\n\t\t\t\t\t\tStr(\"component\", \"schedule\").\n\t\t\t\t\t\tInt(\"id\", id).\n\t\t\t\t\t\tMsg(\"request timed out\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tresults <- EndpointResult{\n\t\t\t\tURL: url,\n\t\t\t\tHTTPStatusCode: code,\n\t\t\t\tHTTPStatusMessage: msg,\n\t\t\t\tTTFB: ttfb,\n\t\t\t\tCached: cached,\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\tlog.Info().\n\t\t\t\tStr(\"component\", \"schedule\").\n\t\t\t\tInt(\"id\", id).\n\t\t\t\tMsg(\"stop schedule\")\n\n\t\t\treturn nil\n\t\t}\n\t}\n}",
"func consumerSchedule(wtm *waterTimeManager, eventer gobot.Eventer, wg *sync.WaitGroup) {\n\n\tdefer wg.Done()\n\n\tcommands := eventer.Subscribe()\n\tquit := make(chan bool)\n\n\t// This routine will wait events from commands channel\n\t// and in case a stopAndQuit signal will be received,\n\t// we close the scheduler\n\tgo func() {\n\t\tfor e := range commands {\n\t\t\tif e.Name != stopWorkers {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tstatusExit, ok := e.Data.(StopSignal)\n\t\t\tif !ok || statusExit == stopAndQuit {\n\t\t\t\tquit <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tvar timer *time.Timer\n\nWAIT_FIRST_SLOT:\n\tfor {\n\t\tselect {\n\n\t\tcase <-wtm.resetTimer: // Wait the first incoming waterTime.\n\t\tWAIT_SLOTS:\n\t\t\tfor {\n\t\t\t\tnextSlot := wtm.GetNextSlot() // Get the next waterTime.\n\t\t\t\tif nextSlot == nil {\n\t\t\t\t\t// No more work to do, empty schedule,\n\t\t\t\t\t// go to start and wait the first waterTime incoming.\n\t\t\t\t\tcontinue WAIT_FIRST_SLOT\n\t\t\t\t}\n\t\t\t\td := time.Until(nextSlot.start)\n\t\t\t\t// If a duration is negative, means that the slot received is currently active.\n\t\t\t\t// This happens when the scheduler has been reseted during an active task.\n\n\t\t\t\tif d > 0 {\n\t\t\t\t\tlog.Printf(\"Next timer will start at: %v\", d)\n\t\t\t\t\ttimer = time.AfterFunc(d, func() {\n\t\t\t\t\t\teventer.Publish(startRemoteRobots, struct{}{})\n\t\t\t\t\t})\n\t\t\t\t}\n\n\t\t\t\tselect {\n\t\t\t\tcase <-time.After(time.Until(nextSlot.end)): // Wait the end of the process.\n\t\t\t\t\teventer.Publish(stopWorkers, stopRemote)\n\t\t\t\t\tcontinue WAIT_SLOTS\n\t\t\t\tcase <-wtm.resetTimer: // Wait if the meanwhile the manager has been reset.\n\t\t\t\t\tif timer != nil {\n\t\t\t\t\t\ttimer.Stop()\n\t\t\t\t\t}\n\t\t\t\t\tlog.Println(\"reset timer!\")\n\t\t\t\t\tcontinue WAIT_SLOTS\n\t\t\t\tcase <-quit: // Quit signal. Exits\n\t\t\t\t\tif timer != nil {\n\t\t\t\t\t\ttimer.Stop()\n\t\t\t\t\t}\n\t\t\t\t\teventer.Unsubscribe(commands)\n\t\t\t\t\tlog.Printf(\"close the schedule\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase <-quit: // Quit signal. Exits\n\t\t\teventer.Unsubscribe(commands)\n\t\t\tlog.Printf(\"close the schedule\")\n\t\t\treturn\n\n\t\t}\n\t}\n\n}",
"func (w *Worker) MaybeSchedule(ctx context.Context) (bool, error) {\n\t// Get our current balance.\n\tbalance, err := w.broker.GetBalance(ctx)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tlog.Println(\"Balance=\", balance)\n\tif balance <= 0 {\n\t\treturn false, nil\n\t}\n\tequivDuration := money.UsdCentsToDuration(balance)\n\tlog.Println(\"Current balance can schedule up to\", equivDuration)\n\tlength, err := w.picker.ScheduleRandom(ctx, equivDuration)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"sheduling random lesson: %v\", err)\n\t}\n\tif length <= 0 {\n\t\tlog.Println(\"Nothing to schedule, bailing\")\n\t\treturn false, nil\n\t}\n\tlog.Println(\"New task scheduled:\", length)\n\tequivBalance := money.DurationToUsdCents(length)\n\tif err := w.broker.ChangeBalance(ctx, -equivBalance); err != nil {\n\t\treturn false, err\n\t}\n\tlog.Println(\"Decreased balance\")\n\treturn true, nil\n}",
"func (clst *amazonCluster) wait(awsIDs []awsID, boot bool) error {\nOuterLoop:\n\tfor i := 0; i < 100; i++ {\n\t\tmachines, err := clst.List()\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Warn(\"Failed to get machines.\")\n\t\t\ttime.Sleep(10 * time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\texists := make(map[awsID]struct{})\n\t\tfor _, inst := range machines {\n\t\t\tid := awsID{\n\t\t\t\tspotID: inst.ID,\n\t\t\t\tregion: inst.Region,\n\t\t\t}\n\n\t\t\texists[id] = struct{}{}\n\t\t}\n\n\t\tfor _, id := range awsIDs {\n\t\t\tif _, ok := exists[id]; ok != boot {\n\t\t\t\ttime.Sleep(10 * time.Second)\n\t\t\t\tcontinue OuterLoop\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"timed out\")\n}",
"func wait(id string, expected string) {\n status := \"\"\n start := TimeMillis()\n for status != expected && TimeMillis() - start < 60000 {\n res, _ := service.Instances.Get(project, zone, id).Do()\n status = res.Status\n }\n}",
"func (s *Scheduler) Schedule() {\n state := db.NewSchedulerStateDAO(s.App.Mongo).Get()\n\n s.schedule(state)\n}",
"func (w *TimerWheel) Schedule(node Node) {\n\tsentinel := w.findBucket(node.GetVariableTime())\n\tlink(sentinel, node)\n}",
"func Schedule(what func(), clock ...int) (chan bool, error) {\n\tspecificTime := []int(clock)\n\tlength := len(specificTime)\n\tif length > 7 || length < 6 {\n\t\treturn nil, errors.New(\"illegal parameters\")\n\t}\n\tif length == 6 {\n\t\tspecificTime = append(specificTime, 0)\n\t}\n\tstop := make(chan bool)\n\tgo func() {\n\t\tnow := time.Now()\n\t\t\n\t\tfor {\n\t\t\ttoday := time.Date(\n\t\t\t\tnow.Year(),\n\t\t\t\tnow.Month(),\n\t\t\t\tnow.Day(),\n\t\t\t\tspecificTime[3], specificTime[4],\n\t\t\t\tspecificTime[5], specificTime[6],\n\t\t\t\ttime.Local,\n\t\t\t)\n\t\t\tdelay := today.Sub(now)\n\t\t\tif delay <= 0 {\n\t\t\t\tnext := now.AddDate(specificTime[0],\n\t\t\t\t\tspecificTime[1],\n\t\t\t\t\tspecificTime[2])\n\t\t\t\ttomorrow := time.Date(\n\t\t\t\tnext.Year(),\n\t\t\t\tnext.Month(),\n\t\t\t\tnext.Day(),\n\t\t\t\tspecificTime[3], specificTime[4],\n\t\t\t\tspecificTime[5], specificTime[6],\n\t\t\t\ttime.Local)\n\t\t\t\tdelay = tomorrow.Sub(now)\n\t\t\t\tif delay <= 0 {\n\t\t\t\t\t<-stop\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tnow = tomorrow\n\t\t\t} else {\n\t\t\t\tnow = today\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-time.After(delay):\n\t\t\t\tgo what()\n\t\t\tcase <-stop:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn stop, nil\n}",
"func (s *Scheduler) schedule(state *db.SchedulerState) {\n now := time.Now()\n startMinute := state.LastScheduledMinute\n if state.LastScheduledMinute.IsZero() {\n // Schedule the compute of each minute\n startMinute = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())\n }\n\n // Creates all the task to compute\n lastScheduledMinute := s.createMinuteTaskUntilNow(&now, &startMinute)\n\n // Tasks created, we must update the scheduling state\n state.LastScheduledMinute = *lastScheduledMinute\n db.NewSchedulerStateDAO(s.App.Mongo).Save(state)\n}",
"func wait(ctx context.Context, t *testing.T, url string) {\n\tbackoff, err := retry.NewFibonacci(10 * time.Millisecond)\n\trequire.Nil(t, err)\n\tbackoff = retry.WithMaxDuration(3*time.Second, backoff)\n\terr = retry.Do(ctx, backoff, func(ctx context.Context) error {\n\t\tif err := ping(ctx, url); err != nil {\n\t\t\treturn retry.RetryableError(err)\n\t\t}\n\t\treturn nil\n\t})\n\trequire.NoError(t, err)\n}",
"func (s *Server) schedulePeriodic(stopCh chan struct{}) {\n\tevalGC := time.NewTicker(s.config.EvalGCInterval)\n\tdefer evalGC.Stop()\n\tnodeGC := time.NewTicker(s.config.NodeGCInterval)\n\tdefer nodeGC.Stop()\n\tjobGC := time.NewTicker(s.config.JobGCInterval)\n\tdefer jobGC.Stop()\n\tdeploymentGC := time.NewTicker(s.config.DeploymentGCInterval)\n\tdefer deploymentGC.Stop()\n\n\t// getLatest grabs the latest index from the state store. It returns true if\n\t// the index was retrieved successfully.\n\tgetLatest := func() (uint64, bool) {\n\t\tsnapshotIndex, err := s.fsm.State().LatestIndex()\n\t\tif err != nil {\n\t\t\ts.logger.Printf(\"[ERR] nomad: failed to determine state store's index: %v\", err)\n\t\t\treturn 0, false\n\t\t}\n\n\t\treturn snapshotIndex, true\n\t}\n\n\tfor {\n\n\t\tselect {\n\t\tcase <-evalGC.C:\n\t\t\tif index, ok := getLatest(); ok {\n\t\t\t\ts.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobEvalGC, index))\n\t\t\t}\n\t\tcase <-nodeGC.C:\n\t\t\tif index, ok := getLatest(); ok {\n\t\t\t\ts.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobNodeGC, index))\n\t\t\t}\n\t\tcase <-jobGC.C:\n\t\t\tif index, ok := getLatest(); ok {\n\t\t\t\ts.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobJobGC, index))\n\t\t\t}\n\t\tcase <-deploymentGC.C:\n\t\t\tif index, ok := getLatest(); ok {\n\t\t\t\ts.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobDeploymentGC, index))\n\t\t\t}\n\t\tcase <-stopCh:\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (s *EleScheduler) Schedule(ch check.Check) {\n\tif !ch.IsDisabled() {\n\t\tgo s.runCheckTimerLoop(ch)\n\t}\n}",
"func GenerateSchedule(now time.Time, gracePeriod time.Duration) (*RotationSchedule, error) {\n\tif gracePeriod <= 0 {\n\t\treturn nil, trace.BadParameter(\"invalid grace period %q, provide value > 0\", gracePeriod)\n\t}\n\treturn &RotationSchedule{\n\t\tUpdateClients: now.UTC().Add(gracePeriod / 3),\n\t\tUpdateServers: now.UTC().Add((gracePeriod * 2) / 3),\n\t\tStandby: now.UTC().Add(gracePeriod),\n\t}, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WaitSchedule briefly sleeps in order to invoke the go scheduler. (etcd pkg.testutil.WaitSchedule)
|
func WaitSchedule() { time.Sleep(10 * time.Millisecond) }
|
[
"func WaitGoSchedule() { time.Sleep(1 * time.Millisecond) }",
"func WaitForScheduleOnFargate(clientSet kubeclient.Interface, retryPolicy retry.Policy) error {\n\t// Clone the retry policy to ensure this method is re-entrant/thread-safe:\n\tretryPolicy = retryPolicy.Clone()\n\tfor !retryPolicy.Done() {\n\t\tisScheduled, err := IsScheduledOnFargate(clientSet)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif isScheduled {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(retryPolicy.Duration())\n\t}\n\treturn fmt.Errorf(\"timed out while waiting for %q to be scheduled on Fargate\", Name)\n}",
"func (s *Scheduler) loopSchedule() {\n\ts.inc()\n\tdefer s.dec()\n\ttick := time.Tick(s.delay)\n\tfor {\n\t\tselect {\n\t\tcase <-s.quit:\n\t\t\t// Quit signal received, return.\n\t\t\treturn\n\t\tcase t := <-tick:\n\t\t\t// Chunk the time into intervals separated by s.delay.\n\t\t\tt = t.Truncate(s.delay)\n\t\t\t// Iterate over each step previous to t.\n\t\t\tfor s.step.Before(t) {\n\t\t\t\ts.schedMutex.Lock()\n\t\t\t\t// Read any jobs for the current time step, if any.\n\t\t\t\tjobs := s.scheduled[s.step]\n\t\t\t\t// Delete the index in the map. If it didn't exist, it's a no-op.\n\t\t\t\tdelete(s.scheduled, s.step)\n\t\t\t\ts.schedMutex.Unlock()\n\n\t\t\t\t// Move any jobs to the waiting channel.\n\t\t\t\tfor _, j := range jobs {\n\t\t\t\t\ts.waiting <- j\n\t\t\t\t}\n\n\t\t\t\ts.schedMutex.Lock()\n\t\t\t\t// Increment the current step.\n\t\t\t\ts.step = s.step.Add(s.delay)\n\t\t\t\ts.schedMutex.Unlock()\n\t\t\t}\n\t\t}\n\t}\n}",
"func scheduleWaitSSH(vm **framework.VMInterface, ssh *framework.Executor) {\n\tBeforeAll(func() {\n\t\tEventually(\n\t\t\tfunc() error {\n\t\t\t\tvar err error\n\t\t\t\t*ssh, err = (*vm).SSH(\"cirros\", sshPrivateKey)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t_, err = framework.ExecSimple(*ssh)\n\t\t\t\treturn err\n\t\t\t}, 60*5, 3).Should(Succeed())\n\t})\n\n\tAfterAll(func() {\n\t\t(*ssh).Close()\n\t})\n}",
"func (n *Node) Schedule(cfg *Config) {\n\tfor {\n\t\tcons := n.Consensus\n\t\tts := time.Now()\n\t\twait := time.Duration(cfg.Node.Rounds.PaceMs) * time.Millisecond\n\t\tstartTime := ts.Truncate(wait).Add(wait)\n\t\tsyncNodeWaitBeforeRoundStart := startTime.Sub(ts)\n\t\tn.log.Infof(\"round start time: %s\", startTime.String())\n\t\tn.log.Infof(\"sync wait for: %.2f ms\", float64(syncNodeWaitBeforeRoundStart/time.Millisecond))\n\t\ttime.Sleep(syncNodeWaitBeforeRoundStart)\n\t\t// event will be dispatched every N seconds on every node if ntpd is working\n\t\tcons.GetStartChan() <- startTime.Unix()\n\t}\n}",
"func (wt *WaitTrigger) Wait(timeout time.Duration) error {\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\treturn wt.WaitContext(ctx)\n}",
"func (s *EleScheduler) Schedule(ch check.Check) {\n\tif !ch.IsDisabled() {\n\t\tgo s.runCheckTimerLoop(ch)\n\t}\n}",
"func schedFunc(schedin <-chan got.GoTaskMsg, schedout chan<- got.GoTaskMsg, schedevent <-chan got.SchedEvent) {\n\tvar msg schedMsg\n\ttasks := make(map[string]bool)\n\texitCh := make(chan bool,1)\n\ttaskCount := DefaultNumberOfElevators + DefaultNumberOfFloors\n\t//run a goroutine to check scheduling events\n\tgo func() {\n\t\tschedEventLoop: for {\n\t\t\tselect {\n\t\t\tcase evt := <- schedevent:\n\t\t\t\t//just dump it, not real logic\n\t\t\t\tlog.Info(evt)\n\t\t\tcase <- exitCh:\n\t\t\t\tbreak schedEventLoop\n\t\t\t}\n\t\t}\n\t\texitCh<-true\n\t}()\n\n\t//first wait for tasks come up\nwaitloop:\n\tfor i := 0; i < taskCount; i++ {\n\t\tselect {\n\t\tcase taskMsg := <-schedin:\n\t\t\terr := msg.decode(taskMsg)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif msg.currFloor != 0 || msg.goalFloor != 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttasks[msg.taskName] = true\n\t\tcase <-time.After(TimeWaitForTasksUp):\n\t\t\tbreak waitloop\n\t\t}\n\t}\n\t//tell all tasks start\n\tmsg.currFloor = 0\n\tmsg.goalFloor = 0\n\tfor tname, _ := range tasks {\n\t\tlog.Infoln(\"notify: \", tname)\n\t\tmsg.taskName = tname\n\t\tschedout <- msg.encode()\n\t}\n\t//run scheduler\n\t//init scheduler\n\tsched := NewScheduler(DefaultNumberOfFloors, DefaultNumberOfElevators, schedin, schedout)\n\tsched.Run()\n\t//tell all tasks stop\n\tmsg.currFloor = -1\n\tmsg.goalFloor = -1\n\tfor tname, _ := range tasks {\n\t\tlog.Infoln(\"notify: \", tname)\n\t\tmsg.taskName = tname\n\t\tschedout <- msg.encode()\n\t}\n\t//wait for all tasks exit\nexitloop:\n\tfor m := range schedin {\n\t\terr := msg.decode(m)\n\t\tif err != nil {\n\t\t\tlog.Infoln(\"failed decode msg: \", err)\n\t\t\tcontinue\n\t\t}\n\t\tswitch {\n\t\tcase msg.currFloor == -1 && msg.goalFloor == -1:\n\t\t\tlog.Infoln(\"finished task: \", msg.taskName)\n\t\t\tdelete(tasks, msg.taskName)\n\t\t\tif len(tasks) == 0 {\n\t\t\t\tbreak exitloop\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.Infof(\"recv %v\\n\", msg)\n\t\t}\n\t}\n\t//tell sched event loop exit\n\texitCh <- true\n\t//wait for it\n\t<- exitCh\n}",
"func consumerSchedule(wtm *waterTimeManager, eventer gobot.Eventer, wg *sync.WaitGroup) {\n\n\tdefer wg.Done()\n\n\tcommands := eventer.Subscribe()\n\tquit := make(chan bool)\n\n\t// This routine will wait events from commands channel\n\t// and in case a stopAndQuit signal will be received,\n\t// we close the scheduler\n\tgo func() {\n\t\tfor e := range commands {\n\t\t\tif e.Name != stopWorkers {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tstatusExit, ok := e.Data.(StopSignal)\n\t\t\tif !ok || statusExit == stopAndQuit {\n\t\t\t\tquit <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tvar timer *time.Timer\n\nWAIT_FIRST_SLOT:\n\tfor {\n\t\tselect {\n\n\t\tcase <-wtm.resetTimer: // Wait the first incoming waterTime.\n\t\tWAIT_SLOTS:\n\t\t\tfor {\n\t\t\t\tnextSlot := wtm.GetNextSlot() // Get the next waterTime.\n\t\t\t\tif nextSlot == nil {\n\t\t\t\t\t// No more work to do, empty schedule,\n\t\t\t\t\t// go to start and wait the first waterTime incoming.\n\t\t\t\t\tcontinue WAIT_FIRST_SLOT\n\t\t\t\t}\n\t\t\t\td := time.Until(nextSlot.start)\n\t\t\t\t// If a duration is negative, means that the slot received is currently active.\n\t\t\t\t// This happens when the scheduler has been reseted during an active task.\n\n\t\t\t\tif d > 0 {\n\t\t\t\t\tlog.Printf(\"Next timer will start at: %v\", d)\n\t\t\t\t\ttimer = time.AfterFunc(d, func() {\n\t\t\t\t\t\teventer.Publish(startRemoteRobots, struct{}{})\n\t\t\t\t\t})\n\t\t\t\t}\n\n\t\t\t\tselect {\n\t\t\t\tcase <-time.After(time.Until(nextSlot.end)): // Wait the end of the process.\n\t\t\t\t\teventer.Publish(stopWorkers, stopRemote)\n\t\t\t\t\tcontinue WAIT_SLOTS\n\t\t\t\tcase <-wtm.resetTimer: // Wait if the meanwhile the manager has been reset.\n\t\t\t\t\tif timer != nil {\n\t\t\t\t\t\ttimer.Stop()\n\t\t\t\t\t}\n\t\t\t\t\tlog.Println(\"reset timer!\")\n\t\t\t\t\tcontinue WAIT_SLOTS\n\t\t\t\tcase <-quit: // Quit signal. Exits\n\t\t\t\t\tif timer != nil {\n\t\t\t\t\t\ttimer.Stop()\n\t\t\t\t\t}\n\t\t\t\t\teventer.Unsubscribe(commands)\n\t\t\t\t\tlog.Printf(\"close the schedule\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase <-quit: // Quit signal. Exits\n\t\t\teventer.Unsubscribe(commands)\n\t\t\tlog.Printf(\"close the schedule\")\n\t\t\treturn\n\n\t\t}\n\t}\n\n}",
"func testBubbleScheduler_Blocking(t *testing.T) {\n\t// Initialize a bubble scheduler\n\tbs := newBubbleScheduler(&Renter{})\n\n\t// queue a bubble update request\n\tsiaPath := modules.RandomSiaPath()\n\tcompleteChan := bs.callQueueBubble(siaPath)\n\n\t// Call complete in a go routine.\n\tstart := time.Now()\n\tduration := time.Second\n\tgo func() {\n\t\ttime.Sleep(duration)\n\t\t// Call pop to prevent panic for incorrect status when complete is called\n\t\tbu := bs.managedPop()\n\t\tif bu == nil {\n\t\t\tt.Error(\"no bubble update\")\n\t\t\treturn\n\t\t}\n\t\t// calling complete should close the channel\n\t\tbs.managedCompleteBubbleUpdate(siaPath)\n\t}()\n\n\t// Should be blocking until after the duration\n\tselect {\n\tcase <-completeChan:\n\tcase <-time.After(bubbleWaitInTestTime):\n\t\tt.Fatal(\"test blocked too long for bubble\")\n\t}\n\tif time.Since(start) < duration {\n\t\tt.Error(\"complete chan closed sooner than expected\")\n\t}\n\n\t// Complete chan should not block anymore\n\tselect {\n\tcase <-completeChan:\n\tcase <-time.After(bubbleWaitInTestTime):\n\t\tt.Fatal(\"test blocked too long for bubble\")\n\t}\n\n\t// If multiple calls are made to queue the same bubble update, they should all\n\t// block on the same channel\n\tstart = time.Now()\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 5; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tcompleteChan := bs.callQueueBubble(siaPath)\n\t\t\tselect {\n\t\t\tcase <-completeChan:\n\t\t\tcase <-time.After(bubbleWaitInTestTime):\n\t\t\t\tt.Error(\"test blocked too long for bubble\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif time.Since(start) < duration {\n\t\t\t\tt.Error(\"complete chan closed before time duration\")\n\t\t\t}\n\t\t}()\n\t}\n\n\t// Sleep for the duration\n\ttime.Sleep(duration)\n\t// Call pop to prevent panic for incorrect status when complete is called\n\tbu := bs.managedPop()\n\tif bu == nil {\n\t\tt.Fatal(\"no bubble update\")\n\t}\n\t// calling complete should close the channel\n\tbs.managedCompleteBubbleUpdate(siaPath)\n\n\t// Wait for go routines to finish\n\twg.Wait()\n\n\t// Queue the bubble update request\n\tcompleteChan = bs.callQueueBubble(siaPath)\n\n\t// Pop the update\n\tbu = bs.managedPop()\n\tif bu == nil {\n\t\tt.Fatal(\"no bubble update\")\n\t}\n\n\t// Call queue again to update the status to pending\n\t//\n\t// The complete channel returned should be the same as the original channel\n\tcompleteChan2 := bs.callQueueBubble(siaPath)\n\n\t// Call complete\n\tbs.managedCompleteBubbleUpdate(siaPath)\n\n\t// Both of the original complete channels should not longer be blocking\n\tselect {\n\tcase <-completeChan:\n\tdefault:\n\t\tt.Error(\"first complete chan is still blocking\")\n\t}\n\tselect {\n\tcase <-completeChan2:\n\tdefault:\n\t\tt.Error(\"second complete chan is still blocking\")\n\t}\n\n\t// The complete chan in the bubble update should still be blocking\n\tselect {\n\tcase <-bu.complete:\n\t\tt.Error(\"bubble update complete chan is not blocking\")\n\tdefault:\n\t}\n}",
"func TestWaitWithZeroTimeout(t *testing.T) {\n\tpod := mocks.MakePod(\"fail\")\n\tresdef := client.ResourceDefinition{Pod: pod}\n\toptions := interfaces.DependencyGraphOptions{FlowName: \"test\"}\n\tgraph := &dependencyGraph{graphOptions: options}\n\tgc := &graphContext{graph: graph}\n\tresource := resources.KindToResourceTemplate[\"pod\"].New(resdef, mocks.NewClient(pod), gc)\n\tsr := newScheduledResourceFor(resource, \"\", gc, false)\n\tstopChan := make(chan struct{})\n\tdefer close(stopChan)\n\n\tnow := time.Now()\n\tres, err := sr.Wait(CheckInterval, 0, stopChan)\n\tif res {\n\t\tt.Error(\"Wait() succeded\")\n\t}\n\tif err == nil {\n\t\tt.Error(\"No error was returned\")\n\t} else {\n\t\texpectedMessage := \"test flow: timeout waiting for resource pod/fail\"\n\t\tif err.Error() != expectedMessage {\n\t\t\tt.Error(\"Got unexpected error:\", err)\n\t\t}\n\t\tif sr.Error != err {\n\t\t\tt.Error(\"ScheduledResource was not marked as permanently failed\")\n\t\t}\n\t}\n\tif time.Now().Sub(now) >= time.Second {\n\t\tt.Error(\"Wait() was running for too long\")\n\t}\n}",
"func (w *Worker) MaybeSchedule(ctx context.Context) (bool, error) {\n\t// Get our current balance.\n\tbalance, err := w.broker.GetBalance(ctx)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tlog.Println(\"Balance=\", balance)\n\tif balance <= 0 {\n\t\treturn false, nil\n\t}\n\tequivDuration := money.UsdCentsToDuration(balance)\n\tlog.Println(\"Current balance can schedule up to\", equivDuration)\n\tlength, err := w.picker.ScheduleRandom(ctx, equivDuration)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"sheduling random lesson: %v\", err)\n\t}\n\tif length <= 0 {\n\t\tlog.Println(\"Nothing to schedule, bailing\")\n\t\treturn false, nil\n\t}\n\tlog.Println(\"New task scheduled:\", length)\n\tequivBalance := money.DurationToUsdCents(length)\n\tif err := w.broker.ChangeBalance(ctx, -equivBalance); err != nil {\n\t\treturn false, err\n\t}\n\tlog.Println(\"Decreased balance\")\n\treturn true, nil\n}",
"func (w *TimerWheel) Schedule(node Node) {\n\tsentinel := w.findBucket(node.GetVariableTime())\n\tlink(sentinel, node)\n}",
"func (s *schedule) Schedule(name string, delay time.Duration, runnable func()) error {\n if s.isShutdown() {\n return ErrScheduleShutdown\n }\n s.queue.Offer(NewTask(name, delay, 0, runnable))\n return nil\n}",
"func TestWait(t *testing.T) {\n\tstates := []State{Canceled, Completed}\n\tfor _, state := range states {\n\t\ttestWait(t, state)\n\t}\n}",
"func (clst *amazonCluster) wait(awsIDs []awsID, boot bool) error {\nOuterLoop:\n\tfor i := 0; i < 100; i++ {\n\t\tmachines, err := clst.List()\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Warn(\"Failed to get machines.\")\n\t\t\ttime.Sleep(10 * time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\texists := make(map[awsID]struct{})\n\t\tfor _, inst := range machines {\n\t\t\tid := awsID{\n\t\t\t\tspotID: inst.ID,\n\t\t\t\tregion: inst.Region,\n\t\t\t}\n\n\t\t\texists[id] = struct{}{}\n\t\t}\n\n\t\tfor _, id := range awsIDs {\n\t\t\tif _, ok := exists[id]; ok != boot {\n\t\t\t\ttime.Sleep(10 * time.Second)\n\t\t\t\tcontinue OuterLoop\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"timed out\")\n}",
"func wait(ticks int, mSeconds time.Duration) {\n\tfor i := 0; i < ticks; i++ {\n\t\ttime.Sleep(mSeconds * time.Millisecond)\n\t\tfmt.Print(\".\")\n\t}\n\ttime.Sleep(mSeconds * time.Millisecond)\n\tfmt.Println()\n}",
"func (pit Pit) TriggerTimeoutSched() {\n\tC.MinSched_Trigger(pit.getPriv().timeoutSched)\n}",
"func (hc *pxlHealthCheck) Wait(ctx context.Context, clusterCtx *cluster.Context, clusterSpec *experimentpb.ClusterSpec) error {\n\tif err := hc.prepareScript(clusterSpec); err != nil {\n\t\treturn err\n\t}\n\n\texpBackoff := backoff.NewExponentialBackOff()\n\texpBackoff.InitialInterval = time.Second\n\texpBackoff.MaxElapsedTime = 10 * time.Minute\n\tbo := backoff.WithContext(expBackoff, ctx)\n\n\top := func() error {\n\t\thc.scriptSuccess = false\n\t\treturn hc.runHealthCheck(ctx)\n\t}\n\tnotify := func(err error, dur time.Duration) {\n\t\tlog.WithError(err).Tracef(\"failed pxl healthcheck, retrying in %v\", dur.Round(time.Second))\n\t}\n\terr := backoff.RetryNotify(op, bo, notify)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
CheckForPrivateKey checks whether a private key already exists; if not, one is randomly generated and saved as a private.dat file in the data directory
|
func CheckForPrivateKey() error {
privateDatPath := filepath.Join(Config.DataDir, "private.dat")
privateDatContent, err := ioutil.ReadFile(privateDatPath)
if err == nil {
privateKey = privateDatContent
} else if os.IsNotExist(err) {
randomGeneratedKey := make([]byte, 256)
if _, err = rand.Read(randomGeneratedKey); err != nil {
return errors.Wrap(err, "could not read random bytes")
}
if err = ioutil.WriteFile(privateDatPath, randomGeneratedKey, 0644); err != nil {
return errors.Wrap(err, "could not write private key")
}
privateKey = randomGeneratedKey
} else if err != nil {
return errors.Wrap(err, "could not read private key")
}
return nil
}
|
[
"func TestPrivateKey(t *testing.T) {\n\tpriv := func(p *big.Int) *big.Int {\n\t\ta := PrivateKey(p)\n\t\tif a.Cmp(_one) <= 0 || a.Cmp(p) >= 0 {\n\t\t\tt.Fatalf(\"PrivateKey(%s) = %s, out of range (1, %s)\", p.String(), a.String(), p.String())\n\t\t}\n\t\treturn a\n\t}\n\tms := map[string]bool{}\n\tmb := map[string]bool{}\n\tfor i := 0; i < 100; i++ {\n\t\tms[priv(smallTest.p).String()] = true\n\t\tmb[priv(biggerTest.p).String()] = true\n\t}\n\tif len(ms) == 1 {\n\t\tt.Fatalf(\"For prime %s same key generated every time. \"+\n\t\t\t\"Want random keys.\", smallTest.p.String())\n\t}\n\tif len(mb) < 100 {\n\t\tt.Fatalf(\"For prime %s duplicate keys generated. \"+\n\t\t\t\"Want unique keys.\", biggerTest.p.String())\n\t}\n}",
"func (c Configuration) generatePrivateKey() error {\n\tkey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := os.MkdirAll(path.Join(c.Settings.BasePath, \".sftp\"), 0755); err != nil {\n\t\treturn err\n\t}\n\n\to, err := os.OpenFile(path.Join(c.Settings.BasePath, \".sftp/id_rsa\"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer o.Close()\n\n\tpkey := &pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tBytes: x509.MarshalPKCS1PrivateKey(key),\n\t}\n\n\tif err := pem.Encode(o, pkey); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (f *OnionFile) StorePrivateKey(_ OnionType, privateKey []byte) error {\n\treturn ioutil.WriteFile(f.privateKeyPath, privateKey, f.privateKeyPerm)\n}",
"func generatePrivateKey() (string, error) {\n\tpkey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tf, err := os.CreateTemp(\"\", \"gce_pkey\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer f.Close()\n\tpemBlock := &pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tHeaders: nil,\n\t\tBytes: x509.MarshalPKCS1PrivateKey(pkey),\n\t}\n\treturn f.Name(), pem.Encode(f, pemBlock)\n}",
"func SavePrivateKey(key *PrivateKey, filename string, force bool) error {\n\tif err := canWrite(filename, force); err != nil {\n\t\treturn err\n\t}\n\t// actually do the write\n\thexKey, err := EncodePrivateKey(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(filename, []byte(hexKey), KeyPerm)\n}",
"func ValidatePrivateKey(privateKey []byte, username string, url string) (bool, error) {\n\tSSHAuth, err := ssh.NewPublicKeys(username, privateKey, \"\")\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tvar b bytes.Buffer\n\tif _, err = git.Clone(memory.NewStorage(), nil, &git.CloneOptions{\n\t\tAuth: SSHAuth,\n\t\tURL: url,\n\t\tProgress: &b,\n\t\tNoCheckout: true,\n\t\tRecurseSubmodules: git.NoRecurseSubmodules,\n\t}); err != nil {\n\t\treturn false, err\n\t}\n\tlog.Debugf(b.String())\n\treturn true, nil\n}",
"func (store *ConnectorFileSystemKeyStore) CheckIfPrivateKeyExists(id []byte) (bool, error) {\n\t_, err := ioutil.ReadFile(filepath.Join(store.directory, getConnectorKeyFilename(id)))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}",
"func (k *KeyProvider) GeneratePrivateKey() (datastruct.Key, error) {\n\tkey, err := k.GenerateKey()\n\tif err != nil {\n\t\treturn datastruct.Key{}, err\n\t}\n\n\t// Modify random bytes using algorithm described at:\n\t// https://cr.yp.to/ecdh.html.\n\tkey[0] &= 248\n\tkey[31] &= 127\n\tkey[31] |= 64\n\n\treturn key, nil\n}",
"func SavePrivateKey(privateKey *rsa.PrivateKey) (err error) {\n\tpemPriv := pem.EncodeToMemory(\n\t\t&pem.Block{\n\t\t\tType: \"RSA PRIVATE KEY\",\n\t\t\tBytes: x509.MarshalPKCS1PrivateKey(privateKey),\n\t\t})\n\terr = ioutil.WriteFile(\"private.pem\", pemPriv, 0644)\n\n\treturn\n}",
"func generatePrivateKey(bitSize int) (*rsa.PrivateKey, error) {\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, bitSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = privateKey.Validate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn privateKey, nil\n}",
"func GeneratePrivateKey() (*PrivateKey, error) {\n\t// the private key is uniformly random integer such that 0 <= pk < r\n\tpk, err := rand.Int(rand.Reader, curveOrder)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"bls12: failed to generate private key: %w\", err)\n\t}\n\treturn &PrivateKey{\n\t\tp: pk,\n\t}, nil\n}",
"func genKey() error {\n\n\t// Generate a new private key.\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create a file for the private key information in PEM form.\n\tprivateFile, err := os.Create(\"private.pem\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"creating private file\")\n\t}\n\tdefer privateFile.Close()\n\n\t// Construct a PEM block for the private key.\n\tprivateBlock := pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tBytes: x509.MarshalPKCS1PrivateKey(privateKey),\n\t}\n\n\t// Write the private key to the private key file.\n\tif err := pem.Encode(privateFile, &privateBlock); err != nil {\n\t\treturn errors.Wrap(err, \"encoding to private file\")\n\t}\n\n\t// Marshal the public key from the private key to PKIX.\n\tasn1Bytes, err := x509.MarshalPKIXPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"marshaling public key\")\n\t}\n\n\t// Create a file for the public key information in PEM form.\n\tpublicFile, err := os.Create(\"public.pem\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"creating public file\")\n\t}\n\tdefer publicFile.Close()\n\n\t// Construct a PEM block for the public key.\n\tpublicBlock := pem.Block{\n\t\tType: \"RSA PUBLIC KEY\",\n\t\tBytes: asn1Bytes,\n\t}\n\n\t// Write the public key to the private key file.\n\tif err := pem.Encode(publicFile, &publicBlock); err != nil {\n\t\treturn errors.Wrap(err, \"encoding to public file\")\n\t}\n\n\tfmt.Println(\"private and public key files generated\")\n\treturn nil\n}",
"func TestImportPrivate(t *testing.T) {\n\tprv, err := GenerateKey(rand.Reader)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\tt.Fail()\n\t}\n\n\tout := prv.ExportPrivate()\n\tprv1, err := ImportPrivate(rand.Reader, out)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\tt.Fail()\n\t} else if prv.X.Cmp(prv1.X) != 0 {\n\t\tfmt.Println(ErrInvalidPrivateKey.Error())\n\t\tt.Fail()\n\t} else if prv.PublicKey.A.Cmp(prv1.PublicKey.A) != 0 {\n\t\tfmt.Println(\"dhkam: private key import failed.\")\n\t\tt.Fail()\n\t}\n}",
"func genPrivKey(bitSize int) (*rsa.PrivateKey, error) {\n\t// Private Key generation\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, bitSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Validate Private Key\n\terr = privateKey.Validate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn privateKey, nil\n}",
"func generatePrivateKey(lenght int) (*rsa.PrivateKey, error) {\n key, err := rsa.GenerateKey(rand.Reader, lenght); if err != nil {\n // TODO: log\n return nil, err\n }\n return key, nil;\n}",
"func WritePrivateKey(privateKeyPath string, data []byte) error {\n\tdir := filepath.Dir(privateKeyPath)\n\tif _, err := os.Stat(dir); os.IsNotExist(err) {\n\t\tif err = os.MkdirAll(dir, 0700); err != nil {\n\t\t\tlog.Error().Err(err).Str(\"dir\", dir).Msg(\"can't create dir\")\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := ioutil.WriteFile(privateKeyPath, data, 0700); err != nil {\n\t\tlog.Error().Err(err).Str(\"file\", privateKeyPath).Msg(\"write ssh private key failed\")\n\t\treturn err\n\t}\n\treturn nil\n}",
"func TestFromPrivateKeyFile(t *testing.T) {\n\tfile, _ := filepath.Abs(\"../../test/test_data/verifier_private_seed\")\n\tidentity, err := FromPrivateKeyFile(file)\n\tif err != nil {\n\t\tt.Error(\"Error reading from private key file: \" + err.Error())\n\t} else {\n\t\twant := \"ec25e1a9aa7819bf-7748eeeebcae05a1-7739ab9905865351-fa2b971abb660047\"\n\t\tif identity.PublicHex != want {\n\t\t\tt.Errorf(\"Public Key Hex format incorrect, got: %s, want: %s.\", identity.PublicHex, want)\n\t\t}\n\t\twant = \"f61ef799d1b1c171-977347a70bc6e89e-de9ce9be655ffc7a-965f810c936c3ad0\"\n\t\tif identity.PrivateHex != want {\n\t\t\tt.Errorf(\"Private Key Hex format incorrect, got: %s, want: %s.\", identity.PublicHex, want)\n\t\t}\n\t\twant = \"key_8fpv.XEhJt5PCVd7GNM6Y9ZvEeD~qm_-vGqwxgQjs3IghxxMfuRm\"\n\t\tif identity.NyzoStringPrivate != want {\n\t\t\tt.Errorf(\"Private Key Nyzo String format incorrect, got: %s, want: %s.\", identity.PublicHex, want)\n\t\t}\n\t\twant = \"id__8eNCWrDHv1D_uSALZIQL1r5VerLq1pqjkwFICPHZqx17rKd-_1Gt\"\n\t\tif identity.NyzoStringPublic != want {\n\t\t\tt.Errorf(\"Public Key Nyzo String format incorrect, got: %s, want: %s.\", identity.PublicHex, want)\n\t\t}\n\t}\n\tfile, _ = filepath.Abs(\"../../test/test_data/new_private_seed\")\n\tuntested, _ := filepath.Abs(\"../../test/test_data/untested_verifier_info\")\n\t_ = os.Remove(file)\n\tnewIdentity, err := New(file, untested)\n\tif err != nil {\n\t\tt.Error(\"Error generating new identity: \" + err.Error())\n\t} else {\n\t\tnewIdentityReloaded, err := FromPrivateKeyFile(file)\n\t\tif err != nil {\n\t\t\tt.Error(\"Could not reload newly generated identity: \" + err.Error())\n\t\t} else {\n\t\t\tif newIdentity.NyzoStringPrivate != newIdentityReloaded.NyzoStringPrivate {\n\t\t\t\tt.Error(\"Newly generated identity and reloaded version of it don't match.\")\n\t\t\t} else {\n\t\t\t\tnickname, _ := filepath.Abs(\"../../test/test_data/nickname\")\n\t\t\t\tnewIdentity.LoadNicknameFromFile(nickname)\n\t\t\t\tif newIdentity.Nickname != \"go-nyzo😱\" {\n\t\t\t\t\tt.Error(\"Could not load nickname.\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func main() {\n\n reader := rand.Reader\n bitSize := 512\n key, err := rsa.GenerateKey(reader, bitSize)\n checkError(err)\n\n fmt.Println(\"Private key primes: \", key.Primes[0].String(), key.Primes[1].String())\n fmt.Println(\"Private key exponent: \", key.D.String())\n\n publicKey := key.PublicKey\n fmt.Println(\"Public key mod: \", publicKey.N.String())\n fmt.Println(\"Public key exponent: \", publicKey.E)\n\n saveGobKey(\"private.key\", key)\n saveGobKey(\"public.key\", publicKey)\n\n savePemKey(\"private.pem\", key)\n}",
"func (w *AcmeWrapper) savePrivateKey(filename string, key crypto.PrivateKey) error {\n\tvar pemType string\n\tvar keyBytes []byte\n\tswitch key := key.(type) {\n\tcase *ecdsa.PrivateKey:\n\t\tvar err error\n\t\tpemType = \"EC\"\n\t\tkeyBytes, err = x509.MarshalECPrivateKey(key)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *rsa.PrivateKey:\n\t\tpemType = \"RSA\"\n\t\tkeyBytes = x509.MarshalPKCS1PrivateKey(key)\n\t}\n\tpemKey := pem.Block{Type: pemType + \" PRIVATE KEY\", Bytes: keyBytes}\n\tpemEncoded := bytes.Buffer{}\n\tif err := pem.Encode(&pemEncoded, &pemKey); err != nil {\n\t\treturn err\n\t}\n\treturn w.saveFile(filename, pemEncoded.Bytes())\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetPrivateKey returns the private key
|
func GetPrivateKey() []byte {
switch backend := GetConfig().Backend; backend {
case "redis":
return []byte(GetConfig().Redis.SharedKey)
default:
return privateKey
}
}
|
[
"func GetPrivateKey(secret kubeTypes.NamespacedName, c client.Client) ([]byte, error) {\n\tprivateKeySecret := &core.Secret{}\n\tif err := c.Get(context.TODO(), secret, privateKeySecret); err != nil {\n\t\t// Error reading the object - requeue the request.\n\t\treturn []byte{}, err\n\t}\n\tprivateKey, ok := privateKeySecret.Data[PrivateKeySecretKey]\n\tif !ok {\n\t\treturn []byte{}, errors.New(\"cloud-private-key missing 'private-key.pem' secret\")\n\t}\n\treturn privateKey, nil\n}",
"func (a *RSA) GetPrivateKey() ([]byte, error) {\n\tkey := x509.MarshalPKCS1PrivateKey(a.MyPrivateKey)\n\treturn key, nil\n}",
"func (cfg Config) GetPrivateKey() string {\n\treturn cfg.privateKey\n}",
"func GetPrivateKey(key *bls.SecretKey) *PrivateKey {\n\treturn &PrivateKey{PrivateKey: []*bls.SecretKey{key}}\n}",
"func (ed *Ed25519) GetPrivateKey() []byte {\n\treturn ed.privateKey.Seed()\n}",
"func (store *ConnectorFileSystemKeyStore) GetPrivateKey(id []byte) (*keys.PrivateKey, error) {\n\tkeyData, err := ioutil.ReadFile(filepath.Join(store.directory, getConnectorKeyFilename(id)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar privateKey []byte\n\tif privateKey, err = store.encryptor.Decrypt(keyData, id); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &keys.PrivateKey{Value: privateKey}, nil\n}",
"func (ln *Client) GetPrivateKey() (sk *btcec.PrivateKey, err error) {\n\treturn ln.GetCustomKey(0, \"nodeid\")\n}",
"func (s *Server) GetPrivateKey() *rsa.PrivateKey {\n\treturn s.rsaPrivateKey\n}",
"func (k *Account) GetPrivateKey(password string) (*wallet.PrivateKey, error) {\n\tkey, err := crypto.GenerateEncryptionKeyFromPassword(password)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error generating key from password,error :%s\", err.Error())\n\t}\n\tdecrypted, err := crypto.Decrypt(k.Secret, key)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to decrypt,error :%s\", err.Error())\n\t}\n\tprivateKey := wallet.PrivateKey{\n\t\tSecret: decrypted,\n\t}\n\n\treturn &privateKey, nil\n}",
"func (x *RawPrivateKey) GetPrivateKey() (interface{}, error) {\n\tx.RLock()\n\tif x.pk != nil {\n\t\tx.RUnlock()\n\t\treturn x.pk, nil\n\t}\n\tx.RUnlock()\n\tx.Lock()\n\tdefer x.Unlock()\n\tif x.PEMData == nil {\n\t\t// No data, load from file\n\t\tif len(x.File) > 0 {\n\t\t\tkey, err := ioutil.ReadFile(x.File)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Cannot read private key %s: %s\", x.File, err)\n\t\t\t}\n\t\t\tx.PEMData = key\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"No private key can be loaded\")\n\t\t}\n\t}\n\tif x.PEMData != nil {\n\t\tpk, err := ssh.ParseRawPrivateKey(x.PEMData)\n\t\tif err != nil {\n\t\t\tif strings.Contains(err.Error(), \"encrypted\") {\n\n\t\t\t\tif len(x.Passphrase) == 0 {\n\t\t\t\t\tvar prompt string\n\t\t\t\t\tif len(x.Name) > 0 {\n\t\t\t\t\t\tprompt = fmt.Sprintf(\"Enter passphrase for %s: \", x.Name)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tprompt = \"Enter passphrase: \"\n\t\t\t\t\t}\n\t\t\t\t\tx.Passphrase = AskPassword(prompt)\n\t\t\t\t}\n\n\t\t\t\tpk, err = ssh.ParseRawPrivateKeyWithPassphrase(x.PEMData, []byte(x.Passphrase))\n\t\t\t}\n\t\t}\n\t\tx.pk = pk\n\t\treturn x.pk, err\n\t}\n\n\treturn nil, fmt.Errorf(\"No private key\")\n}",
"func (c *ConnectorKeyStore) GetPrivateKey(clientID []byte) (*keys.PrivateKey, error) {\n\tlog := c.log.WithField(\"clientID\", clientID)\n\tring, err := c.OpenKeyRing(c.connectorTransportKeyPairPath(clientID))\n\tif err != nil {\n\t\tlog.WithError(err).Debug(\"failed to open connector transport key ring for client\")\n\t\treturn nil, err\n\t}\n\tprivateKey, err := c.currentPairPrivateKey(ring)\n\tif err != nil {\n\t\tlog.WithError(err).Debug(\"failed to get current connector transport private key for client\")\n\t\treturn nil, err\n\t}\n\treturn privateKey, nil\n}",
"func PrivateKey() string {\n\tinstance := getInstanceInfo()\n\treturn instance.PrivateKey\n}",
"func GetPrivateKeyObject(keyPair db.KeyPair) *btcec.PrivateKey {\n\tpkDecoded, _ := hex.DecodeString(keyPair.PrivateKey)\n\tprivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), pkDecoded)\n\n\treturn privKey\n}",
"func (w *Wallet) PrivateKey() *ecdsa.PrivateKey {\n\treturn w.priv\n}",
"func GetPrivateKey(pathOrContent string) (*PrivateKey, error) {\n\t// By default consider that it is a key content\n\tk := &PrivateKey{Content: []byte(pathOrContent)}\n\tpath, err := homedir.Expand(pathOrContent)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to read private key file, error in fs home expansion\")\n\t}\n\tif isFilePath(path) {\n\t\t// Well in fact it is a valid file path so read its content\n\t\tk.Path = path\n\t\tvar err error\n\t\tk.Content, err = ioutil.ReadFile(k.Path)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to read file\")\n\t\t}\n\t}\n\tif !isPrivateKey(k.Content) {\n\t\t// not a valid key\n\t\treturn nil, errors.New(`invalid key content`)\n\t}\n\treturn k, nil\n}",
"func MtlsPrivateKey() string {\n\tgetMtlsCerts.Do(genmtlscerts)\n\treturn mtlsPrivKey\n}",
"func (d *Drand) PrivateKey(ctx context.Context, in *control.PrivateKeyRequest) (*control.PrivateKeyResponse, error) {\n\td.state.Lock()\n\tdefer d.state.Unlock()\n\tkey, err := d.store.LoadKeyPair()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tprotoKey, err := key.Key.MarshalBinary()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &control.PrivateKeyResponse{PriKey: protoKey}, nil\n}",
"func NewPrivateKey() (*rsa.PrivateKey, error) {\n\treturn rsa.GenerateKey(rand.Reader, privateKeySize)\n}",
"func (k Keypair) Private() PrivateKey {\n\treturn k.private\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Echom formats its arguments using fmt.Sprintf, then performs the echom command with the resulting string. Basically a printf to vim's status line that also stores the message in vim's messages.
|
func (n Basejump) Echom(fmts string, args ...interface{}) {
s := fmt.Sprintf(fmts, args...)
s = strings.Replace(s, "'", "''", -1)
n.P.Nvim.Command(fmt.Sprintf(":echom '%s'", s))
}
|
[
"func (s *BaseFGListener) EnterSprintf(ctx *SprintfContext) {}",
"func (s *BaseFGListener) ExitSprintf(ctx *SprintfContext) {}",
"func escarg(arg []byte) []byte {\n\t// format the argument of a troff escape like \\s or \\f\n\tvar buf []byte = make([]byte, 256)\n\tif noarch.Not(arg[1]) {\n\t\tnoarch.Sprintf(buf, []byte(\"%c\\x00\"), int32(arg[0]))\n\t} else if noarch.Not(arg[2]) {\n\t\tnoarch.Sprintf(buf, []byte(\"(%c%c\\x00\"), int32(arg[0]), int32(arg[1]))\n\t} else {\n\t\tnoarch.Sprintf(buf, []byte(\"[%s]\\x00\"), arg)\n\t}\n\treturn buf\n}",
"func FormatMessage(args ...interface{}) string {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn \"\"\n\tcase 1:\n\t\treturn fmt.Sprint(args[0])\n\tdefault:\n\t\tif format, args := fmt.Sprint(args[0]), args[1:]; strings.Contains(format, \"%\") {\n\t\t\tif result := fmt.Sprintf(format, args...); !strings.Contains(result, \"%!\") {\n\t\t\t\treturn result\n\t\t\t}\n\t\t}\n\t\treturn strings.TrimSuffix(fmt.Sprintln(args...), EOL)\n\t}\n}",
"func Magentaf(msg string, args ...interface{}) string {\n\tif is.TTY() {\n\t\treturn magenta + fmt.Sprintf(msg, args...) + reset\n\t}\n\treturn fmt.Sprintf(msg, args...)\n}",
"func Message(format string, args ...interface{}) {\n\tmsg := fmt.Sprintf(format, args...)\n\tcolor.White(msg)\n}",
"func Yellowf(msg string, args ...interface{}) string {\n\tif is.TTY() {\n\t\treturn yellow + fmt.Sprintf(msg, args...) + reset\n\t}\n\treturn fmt.Sprintf(msg, args...)\n}",
"func (c *Cache) CmdEcho(arg string) string {\n\treturn arg\n}",
"func (c ExpectedCommand) String() string {\n\treturn fmt.Sprintf(\"%s %s\", c.Cmd, strings.Join(c.Args, \" \"))\n}",
"func Emerg(format string, args ...interface{}) {\n\tlogMessage(LogLevelEmerg, format, args...)\n}",
"func (m GenericMessage) String() string {\n\treturn fmt.Sprintf(\"command=%s args=%+v\", m.Cmd(), m.Args())\n}",
"func (cls *Console) formatMsg(msg string) string {\n\treturn strings.Replace(msg, \"\\t\", \" \", -1)\n}",
"func CommandString(args ...string) string {\n\tvar buf bytes.Buffer\n\tfor i, arg := range args {\n\t\tneedsQuotes := false\n\t\tvar argBuf bytes.Buffer\n\t\tfor _, r := range arg {\n\t\t\tif unicode.IsSpace(r) {\n\t\t\t\tneedsQuotes = true\n\t\t\t} else if r == '\"' || r == '$' || r == '\\\\' {\n\t\t\t\tneedsQuotes = true\n\t\t\t\targBuf.WriteByte('\\\\')\n\t\t\t}\n\t\t\targBuf.WriteRune(r)\n\t\t}\n\t\tif i > 0 {\n\t\t\tbuf.WriteByte(' ')\n\t\t}\n\t\tif needsQuotes {\n\t\t\tbuf.WriteByte('\"')\n\t\t\targBuf.WriteTo(&buf)\n\t\t\tbuf.WriteByte('\"')\n\t\t} else {\n\t\t\targBuf.WriteTo(&buf)\n\t\t}\n\t}\n\treturn buf.String()\n}",
"func (v *SimpleService_Echo_Args) String() string {\n\tif v == nil {\n\t\treturn \"<nil>\"\n\t}\n\n\tvar fields [1]string\n\ti := 0\n\tfields[i] = fmt.Sprintf(\"Msg: %v\", v.Msg)\n\ti++\n\n\treturn fmt.Sprintf(\"SimpleService_Echo_Args{%v}\", strings.Join(fields[:i], \", \"))\n}",
"func colorizeUpdateMessage(updateString string, newerThan string) string {\n\tmsgLine1Fmt := \" You are running an older version of MinIO released %s \"\n\tmsgLine2Fmt := \" Update: %s \"\n\n\t// Calculate length *without* color coding: with ANSI terminal\n\t// color characters, the result is incorrect.\n\tline1Length := len(fmt.Sprintf(msgLine1Fmt, newerThan))\n\tline2Length := len(fmt.Sprintf(msgLine2Fmt, updateString))\n\n\t// Populate lines with color coding.\n\tline1InColor := fmt.Sprintf(msgLine1Fmt, color.YellowBold(newerThan))\n\tline2InColor := fmt.Sprintf(msgLine2Fmt, color.CyanBold(updateString))\n\n\t// calculate the rectangular box size.\n\tmaxContentWidth := int(math.Max(float64(line1Length), float64(line2Length)))\n\n\t// termWidth is set to a default one to use when we are\n\t// not able to calculate terminal width via OS syscalls\n\ttermWidth := 25\n\tif width, err := pb.GetTerminalWidth(); err == nil {\n\t\ttermWidth = width\n\t}\n\n\t// Box cannot be printed if terminal width is small than maxContentWidth\n\tif maxContentWidth > termWidth {\n\t\treturn \"\\n\" + line1InColor + \"\\n\" + line2InColor + \"\\n\\n\"\n\t}\n\n\ttopLeftChar := \"┏\"\n\ttopRightChar := \"┓\"\n\tbottomLeftChar := \"┗\"\n\tbottomRightChar := \"┛\"\n\thorizBarChar := \"━\"\n\tvertBarChar := \"┃\"\n\t// on windows terminal turn off unicode characters.\n\tif runtime.GOOS == globalWindowsOSName {\n\t\ttopLeftChar = \"+\"\n\t\ttopRightChar = \"+\"\n\t\tbottomLeftChar = \"+\"\n\t\tbottomRightChar = \"+\"\n\t\thorizBarChar = \"-\"\n\t\tvertBarChar = \"|\"\n\t}\n\n\tlines := []string{\n\t\tcolor.YellowBold(topLeftChar + strings.Repeat(horizBarChar, maxContentWidth) + topRightChar),\n\t\tvertBarChar + line1InColor + strings.Repeat(\" \", maxContentWidth-line1Length) + vertBarChar,\n\t\tvertBarChar + line2InColor + strings.Repeat(\" \", maxContentWidth-line2Length) + vertBarChar,\n\t\tcolor.YellowBold(bottomLeftChar + strings.Repeat(horizBarChar, maxContentWidth) + bottomRightChar),\n\t}\n\treturn \"\\n\" + strings.Join(lines, \"\\n\") + \"\\n\"\n}",
"func (c *cli) exec(args []string) {\n\tif len(args) != 2 {\n\t\tfmt.Fprintln(c.e, `example usage: ./parse \"1 + 1\"`)\n\t\tc.exitFn(1)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Fprintln(c.e, r)\n\t\t}\n\t}()\n\tp := parser.NewParser(args[1])\n\n\ts := fmt.Sprintf(\"%s\", p.Parse())\n\ts = strings.TrimPrefix(s, \"(\")\n\ts = strings.TrimSuffix(s, \")\")\n\tfmt.Fprintln(c.o, s)\n}",
"func (info *GuildInfo) FormatUsage(c Command, usage *CommandUsage) *discordgo.MessageEmbed {\n\tr := info.GetRoles(c)\n\tch := info.GetChannels(c)\n\tfields := make([]*discordgo.MessageEmbedField, 0, len(usage.Params))\n\tuse := \"> \" + info.config.Basic.CommandPrefix + strings.ToLower(c.Name())\n\tfor _, v := range usage.Params {\n\t\topt := \"\"\n\t\tif v.Optional {\n\t\t\topt = \" [OPTIONAL]\"\n\t\t\tuse += fmt.Sprintf(\" [%s]\", v.Name)\n\t\t} else {\n\t\t\tuse += fmt.Sprintf(\" {%s}\", v.Name)\n\t\t}\n\t\tif v.Variadic {\n\t\t\topt = \" (...) \" + opt\n\t\t\tuse += \"...\"\n\t\t}\n\t\tfields = append(fields, &discordgo.MessageEmbedField{Name: v.Name + opt, Value: v.Desc, Inline: false})\n\t}\n\n\tif len(ch) > 0 {\n\t\tch = fmt.Sprintf(\"Available on: %s\", ch)\n\t}\n\tembed := &discordgo.MessageEmbed{\n\t\tType: \"rich\",\n\t\tAuthor: &discordgo.MessageEmbedAuthor{\n\t\t\tURL: \"https://github.com/blackhole12/sweetiebot#configuration\",\n\t\t\tName: c.Name() + \" Command\",\n\t\t\tIconURL: fmt.Sprintf(\"https://cdn.discordapp.com/avatars/%v/%s.jpg\", sb.SelfID, sb.SelfAvatar),\n\t\t},\n\t\tColor: 0xaaaaaa,\n\t\tDescription: fmt.Sprintf(\"```%s```\\n%s\\n\\n%s\", use, usage.Desc, ch),\n\t\tFields: fields,\n\t}\n\n\tif len(r) > 0 {\n\t\tembed.Footer = &discordgo.MessageEmbedFooter{Text: \"Only usable by: \" + r}\n\t}\n\treturn embed\n}",
"func (tam *TerminalActivityMonitor) Message(format string, v ...interface{}) {\n\tfmt.Fprint(tam.Writer, fmt.Sprintf(format, v...))\n}",
"func (ui *consoleUi) messagef(format string, args ...interface{}) {\n\tui.printf(format, args...)\n\tui.nextLine()\n\ttermbox.Flush()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Selection returns the coordinates of the current selection, if it is within a single line. Note that if we are not in visual mode, this returns the last selection made in visual mode.
|
func (n Basejump) Selection() (startLine, startCol, endLine, endCol int, err error) {
result := make([]float32, 4)
nv := n.nvim()
err = nv.Call("getpos", result, "'<")
if err != nil {
return
}
startLine = int(result[1])
startCol = int(result[2])
err = nv.Call("getpos", result, "'>")
if err != nil {
return
}
endLine = int(result[1])
endCol = int(result[2])
return
}
|
[
"func (e *textView) Selection() (start, end int) {\n\treturn e.caret.start, e.caret.end\n}",
"func (ui *Edit) Selection() ([]byte, error) {\n\tui.ensureInit()\n\treturn ui.selectionText()\n}",
"func (tf *TextField) Selection() string {\n\tif tf.HasSelection() {\n\t\treturn string(tf.EditTxt[tf.SelectStart:tf.SelectEnd])\n\t}\n\treturn \"\"\n}",
"func (r *Range) IsOnlyLineSelected(n int) bool {\n\treturn len(r.Sel) == 1 && r.Sel[0].Begin == n && r.Sel[0].End == n\n}",
"func (r *Range) IsLineSelected(n int) bool {\n\tfor i := 0; i < len(r.Sel); i++ {\n\t\tif n >= r.Sel[i].Begin && n <= r.Sel[i].End {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (e *textView) SelectionLen() int {\n\treturn abs(e.caret.start - e.caret.end)\n}",
"func (tb *TextBuffer) GetSelectionBounds() (start, end *TextIter) {\n\tsid := tb.Candy().ServerOpaque()\n\teid := tb.Candy().ServerOpaque()\n\ttb.Candy().Guify(\"gtk_text_buffer_get_selection_bounds\", tb, sid, eid)\n\tstart = NewTextIter(tb.Candy(), sid)\n\tend = NewTextIter(tb.Candy(), eid)\n\treturn\n}",
"func TestSelect(t *testing.T) {\n\te := new(Editor)\n\te.SetText(`a123456789a\nb123456789b\nc123456789c\nd123456789d\ne123456789e\nf123456789f\ng123456789g\n`)\n\n\tgtx := layout.Context{Ops: new(op.Ops)}\n\tcache := text.NewCache(gofont.Collection())\n\tfont := text.Font{}\n\tfontSize := unit.Px(10)\n\n\tselected := func(start, end int) string {\n\t\t// Layout once with no events; populate e.lines.\n\t\tgtx.Queue = nil\n\t\te.Layout(gtx, cache, font, fontSize)\n\t\t_ = e.Events() // throw away any events from this layout\n\n\t\t// Build the selection events\n\t\tstartPos, endPos := e.offsetToScreenPos2(sortInts(start, end))\n\t\ttq := &testQueue{\n\t\t\tevents: []event.Event{\n\t\t\t\tpointer.Event{\n\t\t\t\t\tButtons: pointer.ButtonPrimary,\n\t\t\t\t\tType: pointer.Press,\n\t\t\t\t\tSource: pointer.Mouse,\n\t\t\t\t\tPosition: f32.Pt(textWidth(e, startPos.lineCol.Y, 0, startPos.lineCol.X), textHeight(e, startPos.lineCol.Y)),\n\t\t\t\t},\n\t\t\t\tpointer.Event{\n\t\t\t\t\tType: pointer.Release,\n\t\t\t\t\tSource: pointer.Mouse,\n\t\t\t\t\tPosition: f32.Pt(textWidth(e, endPos.lineCol.Y, 0, endPos.lineCol.X), textHeight(e, endPos.lineCol.Y)),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tgtx.Queue = tq\n\n\t\te.Layout(gtx, cache, font, fontSize)\n\t\tfor _, evt := range e.Events() {\n\t\t\tswitch evt.(type) {\n\t\t\tcase SelectEvent:\n\t\t\t\treturn e.SelectedText()\n\t\t\t}\n\t\t}\n\t\treturn \"\"\n\t}\n\n\ttype testCase struct {\n\t\t// input text offsets\n\t\tstart, end int\n\n\t\t// expected selected text\n\t\tselection string\n\t\t// expected line/col positions of selection after resize\n\t\tstartPos, endPos screenPos\n\t}\n\n\tfor n, tst := range []testCase{\n\t\t{0, 1, \"a\", screenPos{}, screenPos{Y: 0, X: 1}},\n\t\t{0, 4, \"a123\", screenPos{}, screenPos{Y: 0, X: 4}},\n\t\t{0, 11, \"a123456789a\", screenPos{}, screenPos{Y: 1, X: 5}},\n\t\t{2, 6, \"2345\", screenPos{Y: 0, X: 2}, screenPos{Y: 1, X: 0}},\n\t\t{41, 66, \"56789d\\ne123456789e\\nf12345\", screenPos{Y: 6, X: 5}, screenPos{Y: 11, X: 0}},\n\t} {\n\t\t// printLines(e)\n\n\t\tgtx.Constraints = layout.Exact(image.Pt(100, 100))\n\t\tif got := selected(tst.start, tst.end); got != tst.selection {\n\t\t\tt.Errorf(\"Test %d pt1: Expected %q, got %q\", n, tst.selection, got)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Constrain the editor to roughly 6 columns wide and redraw\n\t\tgtx.Constraints = layout.Exact(image.Pt(36, 36))\n\t\t// Keep existing selection\n\t\tgtx.Queue = nil\n\t\te.Layout(gtx, cache, font, fontSize)\n\n\t\tif e.caret.end.lineCol != tst.startPos || e.caret.start.lineCol != tst.endPos {\n\t\t\tt.Errorf(\"Test %d pt2: Expected %#v, %#v; got %#v, %#v\",\n\t\t\t\tn,\n\t\t\t\te.caret.end.lineCol, e.caret.start.lineCol,\n\t\t\t\ttst.startPos, tst.endPos)\n\t\t\tcontinue\n\t\t}\n\n\t\t// printLines(e)\n\t}\n}",
"func (recv *Text) GetSelection(selectionNum int32) (string, int32, int32) {\n\tc_selection_num := (C.gint)(selectionNum)\n\n\tvar c_start_offset C.gint\n\n\tvar c_end_offset C.gint\n\n\tretC := C.atk_text_get_selection((*C.AtkText)(recv.native), c_selection_num, &c_start_offset, &c_end_offset)\n\tretGo := C.GoString(retC)\n\tdefer C.free(unsafe.Pointer(retC))\n\n\tstartOffset := (int32)(c_start_offset)\n\n\tendOffset := (int32)(c_end_offset)\n\n\treturn retGo, startOffset, endOffset\n}",
"func (n Basejump) SelectionText() (text string, err error) {\n\tnv := n.nvim()\n\n\tvar startLine, startCol, endLine, endCol int\n\tstartLine, startCol, endLine, endCol, err = n.Selection()\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttrace(n, \"selected text is from line %d col %d to line %d col %d\", startLine, startCol, endLine, endCol)\n\n\tvar buf nvim.Buffer\n\tbuf, err = nv.CurrentBuffer()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar lines [][]byte\n\t// Indexing is zero-based, end-exclusive\n\tlines, err = nv.BufferLines(buf, startLine-1, endLine, true)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar bbuf bytes.Buffer\n\n\tif len(lines) == 1 {\n\t\tbbuf.Write(lines[0][startCol-1 : endCol])\n\t} else if len(lines) > 1 {\n\t\tbbuf.Write(lines[0][startCol-1 : len(lines[0])])\n\t\tfor i := 1; i < len(lines)-1; i++ {\n\t\t\tbbuf.Write(lines[i])\n\t\t}\n\t\tbbuf.Write(lines[len(lines)-1][0:endCol])\n\t}\n\n\ttext = bbuf.String()\n\treturn\n}",
"func (v *SourceView) GetHighlightCurrentLine() bool {\n\treturn gobool(C.gtk_source_view_get_highlight_current_line(v.native()))\n}",
"func (img BettyImage) Selection(ratioString string) image.Rectangle {\n\t// If this selection is specified, just return it.\n\tif selection, ok := img.Selections[ratioString]; ok {\n\t\treturn selection\n\t}\n\n\t// The selection for this ratio hasn't been set. Let's just use\n\t// the middle of the image.\n\tvar ratio = img.Size\n\tif ratioString != \"original\" {\n\t\tvar w, _ = strconv.Atoi(strings.Split(ratioString, \"x\")[0])\n\t\tvar h, _ = strconv.Atoi(strings.Split(ratioString, \"x\")[1])\n\t\tratio = image.Point{w, h}\n\t}\n\n\tvar originalRatio = float64(img.Size.X) / float64(img.Size.Y)\n\tvar selectionRatio = float64(ratio.X) / float64(ratio.Y)\n\n\tvar min = image.Pt(0, 0)\n\tvar max = img.Size\n\n\tif selectionRatio < originalRatio {\n\t\tvar xOffset = (float64(img.Size.X) - (float64(img.Size.Y) * float64(ratio.X) / float64(ratio.Y))) / 2.0\n\t\tmin = image.Pt(int(math.Floor(xOffset)), 0)\n\t\tmax = image.Pt(img.Size.X-int(math.Floor(xOffset)), img.Size.Y)\n\t}\n\tif selectionRatio > originalRatio {\n\t\tvar yOffset = (float64(img.Size.Y) - (float64(img.Size.X) * float64(ratio.Y) / float64(ratio.X))) / 2.0\n\n\t\tmin = image.Pt(0, int(math.Floor(yOffset)))\n\t\tmax = image.Pt(img.Size.X, img.Size.Y-int(math.Floor(yOffset)))\n\t}\n\n\treturn image.Rectangle{min, max}\n}",
"func (node *GraphNode) getSelection() selector.Selection {\n\tparent := node.Parent\n\t//* parent == nil occurs only when node is a root node or a fatal error occurs.\n\t//if it is a root node, because the selection of the root node is constant, it returns directly.\n\tif parent == nil {\n\t\tif node.Name != TypeRootNode {\n\t\t\tnode.addError(fmt.Sprintf(ErrFatalError,\n\t\t\t\t\"get selection\",\n\t\t\t))\n\t\t}\n\t\treturn node.Selection\n\t}\n\t//When the node selection already exists, it returns directly.\n\t//* Note that this will skip the node's calculation, so be sure to clear the node's selection value when the node's context changes\n\tif node.Selection != nil {\n\t\treturn node.Selection\n\t}\n\t//calculate the selection of the current node through pipeline\n\tconseq, err := pipeline.Process(parent.getSelection(), node.getPipelines())\n\tif err != nil {\n\t\tnode.addError(err)\n\t}\n\tnode.Selection = conseq\n\n\treturn conseq\n}",
"func (s *EditState) GetSelStart() *int32 {\n\tvar ret *int32\n\tret = (*int32)(unsafe.Pointer(&s.sel_start))\n\treturn ret\n}",
"func (d *Domdocument) GetSelectionStartOffset() int32 {\n\treturn int32(C.gocef_domdocument_get_selection_start_offset(d.toNative(), d.get_selection_start_offset))\n}",
"func (v *HistoryView) SelectLastLine() {\n\t_, h := v.GetBounds()\n\tv.SetCursor(0, h-1-MaxEmtpyVisibleLines)\n}",
"func (e *Editor) currentLine() *Line {\n\tif e.isInvalidated() {\n\t\te.setLasts()\n\t}\n\treturn &e.frame.lines[e.lastL]\n}",
"func (d *Domdocument) GetSelectionEndOffset() int32 {\n\treturn int32(C.gocef_domdocument_get_selection_end_offset(d.toNative(), d.get_selection_end_offset))\n}",
"func (e *textView) PaintSelection(gtx layout.Context, material op.CallOp) {\n\tlocalViewport := image.Rectangle{Max: e.viewSize}\n\tdocViewport := image.Rectangle{Max: e.viewSize}.Add(e.scrollOff)\n\tdefer clip.Rect(localViewport).Push(gtx.Ops).Pop()\n\te.regions = e.index.locate(docViewport, e.caret.start, e.caret.end, e.regions)\n\tfor _, region := range e.regions {\n\t\tarea := clip.Rect(region.Bounds).Push(gtx.Ops)\n\t\tmaterial.Add(gtx.Ops)\n\t\tpaint.PaintOp{}.Add(gtx.Ops)\n\t\tarea.Pop()\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
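Editor's note: Vim's getpos() returns a four-element list [bufnum, lnum, col, off], which is why Selection sizes `result` to 4 and reads indexes 1 and 2 for the '< and '> marks. A minimal standalone sketch of that decoding, assuming the same layout (illustrative only, not part of basejump):

package main

import "fmt"

// lineCol decodes a getpos()-style result into a 1-based (line, col) pair.
// pos[0] is the buffer number and pos[3] the virtual offset; only the
// line (pos[1]) and column (pos[2]) are needed here.
func lineCol(pos []float32) (line, col int) {
	return int(pos[1]), int(pos[2])
}

func main() {
	start := []float32{0, 12, 5, 0} // e.g. mark '< on line 12, column 5
	line, col := lineCol(start)
	fmt.Printf("'< is at line %d, col %d\n", line, col)
}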
Cursor returns the current line and column of the cursor.
|
func (n Basejump) Cursor() (line, col int, err error) {
result := make([]float32, 4)
nv := n.nvim()
err = nv.Call("getpos", result, ".")
if err != nil {
return
}
line = int(result[1])
col = int(result[2])
return
}
|
[
"func (r *Reader) GetCurrentLineAndColumn() (line int, col int, ok error) {\n\treturn r.track.GetCurrentLineAndColumn()\n}",
"func (c *RuneCursor) Column() int {\n\treturn c.column\n}",
"func (s screen) getCursorIndex() int {\n\treturn s.cy * s.width + s.cx\n}",
"func (c *Cursor) Position() int {\n\treturn c.inx\n}",
"func (w *Window) CursorCol() int {\n\treturn w.cursor.x\n}",
"func (c *Iterator) Cursor() int {\n\treturn c.current\n}",
"func (m *memento) getCursorPos() int {\n\treturn m.cursorPos\n}",
"func (s *Scanner) Pos() (line, col int) {\n\treturn s.line, s.col\n}",
"func (l *lexer) curPos() Pos {\n\treturn Pos{Line: l.startLine, Column: l.startCol}\n}",
"func (c *DisplayContext) CursorPosition() (int, int) {\n\treturn ebiten.CursorPosition()\n}",
"func Col() int {\n\tvar info wConsoleScreenBufferInfo\n\tstdout := GetStdOut()\n\twGetConsoleScreenBufferInfo.Call(stdout, uintptr(unsafe.Pointer(&info)))\n\treturn int(info.CursorPosition.X)\n}",
"func (i *InputField) GetCursorPosition() int {\n\ti.RLock()\n\tdefer i.RUnlock()\n\n\treturn i.cursorPos\n}",
"func (p position) lineColumn() string {\n\treturn fmt.Sprintf(\"%d:%d\", p.line, p.column)\n}",
"func (w *Window) CursorRow() int {\n\treturn w.cursor.y\n}",
"func (c *cursorManager) Pos() (x, y float32) {\n\tc.mutex.RLock()\n\tx, y = c.x, c.y\n\tc.mutex.RUnlock()\n\treturn\n}",
"func (e *textView) CaretPos() (line, col int) {\n\tpos := e.closestToRune(e.caret.start)\n\treturn pos.lineCol.line, pos.lineCol.col\n}",
"func (m *Monitor) GetPosition() (int, int) {\n\tvar xpos, ypos C.int\n\n\tC.glfwGetMonitorPos(m.data, &xpos, &ypos)\n\treturn int(xpos), int(ypos)\n}",
"func (e *Editor) currentLine() *Line {\n\tif e.isInvalidated() {\n\t\te.setLasts()\n\t}\n\treturn &e.frame.lines[e.lastL]\n}",
"func CursorPosY() float32 {\n\treturn float32(C.iggCursorPosY())\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
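Editor's note: the "." argument to getpos() names the cursor position. A hedged end-to-end sketch, assuming github.com/neovim/go-client/nvim (the client the snippet's nv.Call usage suggests) and a nvim binary on PATH; NewChildProcess/ChildProcessArgs are that client's documented way to start an embedded instance:

package main

import (
	"fmt"
	"log"

	"github.com/neovim/go-client/nvim"
)

func main() {
	// Start an embedded Neovim we can talk msgpack-RPC to.
	nv, err := nvim.NewChildProcess(nvim.ChildProcessArgs("-u", "NONE", "--embed", "--headless"))
	if err != nil {
		log.Fatal(err)
	}
	defer nv.Close()

	// getpos(".") describes the cursor: [bufnum, lnum, col, off].
	// Passing a pointer is always safe for the decode.
	result := make([]float32, 4)
	if err := nv.Call("getpos", &result, "."); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("cursor at line %d, col %d\n", int(result[1]), int(result[2]))
}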
SelectionText returns the text contained in the current selection.
|
func (n Basejump) SelectionText() (text string, err error) {
nv := n.nvim()
var startLine, startCol, endLine, endCol int
startLine, startCol, endLine, endCol, err = n.Selection()
if err != nil {
return
}
trace(n, "selected text is from line %d col %d to line %d col %d", startLine, startCol, endLine, endCol)
var buf nvim.Buffer
buf, err = nv.CurrentBuffer()
if err != nil {
return
}
var lines [][]byte
// Indexing is zero-based, end-exclusive
lines, err = nv.BufferLines(buf, startLine-1, endLine, true)
if err != nil {
return
}
var bbuf bytes.Buffer
if len(lines) == 1 {
bbuf.Write(lines[0][startCol-1 : endCol])
} else if len(lines) > 1 {
bbuf.Write(lines[0][startCol-1 : len(lines[0])])
for i := 1; i < len(lines)-1; i++ {
bbuf.Write(lines[i])
}
bbuf.Write(lines[len(lines)-1][0:endCol])
}
text = bbuf.String()
return
}
|
[
"func (tf *TextField) Selection() string {\n\tif tf.HasSelection() {\n\t\treturn string(tf.EditTxt[tf.SelectStart:tf.SelectEnd])\n\t}\n\treturn \"\"\n}",
"func (ui *Edit) Selection() ([]byte, error) {\n\tui.ensureInit()\n\treturn ui.selectionText()\n}",
"func (d *Domdocument) GetSelectionAsText() string {\n\treturn cefuserfreestrToString(C.gocef_domdocument_get_selection_as_text(d.toNative(), d.get_selection_as_text))\n}",
"func (m *menu) SelectedText() string {\n\treturn C.GoString(C.go_fltk_Menu_selected_text((*C.Fl_Menu_)(m.ptr())))\n}",
"func (recv *Text) GetSelection(selectionNum int32) (string, int32, int32) {\n\tc_selection_num := (C.gint)(selectionNum)\n\n\tvar c_start_offset C.gint\n\n\tvar c_end_offset C.gint\n\n\tretC := C.atk_text_get_selection((*C.AtkText)(recv.native), c_selection_num, &c_start_offset, &c_end_offset)\n\tretGo := C.GoString(retC)\n\tdefer C.free(unsafe.Pointer(retC))\n\n\tstartOffset := (int32)(c_start_offset)\n\n\tendOffset := (int32)(c_end_offset)\n\n\treturn retGo, startOffset, endOffset\n}",
"func (e *textView) SelectedText(buf []byte) []byte {\n\tstartOff := e.runeOffset(e.caret.start)\n\tendOff := e.runeOffset(e.caret.end)\n\tstart := min(startOff, endOff)\n\tend := max(startOff, endOff)\n\tif cap(buf) < end-start {\n\t\tbuf = make([]byte, end-start)\n\t}\n\tbuf = buf[:end-start]\n\tn, _ := e.rr.ReadAt(buf, int64(start))\n\t// There is no way to reasonably handle a read error here. We rely upon\n\t// implementations of textSource to provide other ways to signal errors\n\t// if the user cares about that, and here we use whatever data we were\n\t// able to read.\n\treturn buf[:n]\n}",
"func (e *textView) Selection() (start, end int) {\n\treturn e.caret.start, e.caret.end\n}",
"func (cb *CurrentClipboard) GetText() string {\n\tcb.mutex.Lock()\n\n\tText := cb.text\n\n\tcb.mutex.Unlock()\n\n\treturn Text\n}",
"func (s *Segmenter) Text() string {\n\treturn string(s.activeSegment)\n}",
"func (v *SourceCompletionProposal) GetText() string {\n\treturn toGoStringFree(C.gtk_source_completion_proposal_get_text(v.native()))\n}",
"func (tf *TextField) DeleteSelection() string {\n\ttf.SelectUpdate()\n\tif !tf.HasSelection() {\n\t\treturn \"\"\n\t}\n\tupdt := tf.UpdateStart()\n\tdefer tf.UpdateEnd(updt)\n\tcut := tf.Selection()\n\ttf.Edited = true\n\ttf.EditTxt = append(tf.EditTxt[:tf.SelectStart], tf.EditTxt[tf.SelectEnd:]...)\n\tif tf.CursorPos > tf.SelectStart {\n\t\tif tf.CursorPos < tf.SelectEnd {\n\t\t\ttf.CursorPos = tf.SelectStart\n\t\t} else {\n\t\t\ttf.CursorPos -= tf.SelectEnd - tf.SelectStart\n\t\t}\n\t}\n\ttf.SelectReset()\n\treturn cut\n}",
"func (scn *Scanner) Text() string {\n\tif scn.scanErr != nil {\n\t\treturn \"\"\n\t}\n\treturn string(scn.b)\n}",
"func (s *StyleEdit) GetSelectedTextNormal() *Color {\n\tvar ret *Color\n\tret = (*Color)(unsafe.Pointer(&s.selected_text_normal))\n\treturn ret\n}",
"func (e *textView) SelectionLen() int {\n\treturn abs(e.caret.start - e.caret.end)\n}",
"func (d *Domdocument) GetSelectionAsMarkup() string {\n\treturn cefuserfreestrToString(C.gocef_domdocument_get_selection_as_markup(d.toNative(), d.get_selection_as_markup))\n}",
"func (s *StyleEdit) GetSelectedTextHover() *Color {\n\tvar ret *Color\n\tret = (*Color)(unsafe.Pointer(&s.selected_text_hover))\n\treturn ret\n}",
"func (tb *TextBuffer) GetSelectionBounds() (start, end *TextIter) {\n\tsid := tb.Candy().ServerOpaque()\n\teid := tb.Candy().ServerOpaque()\n\ttb.Candy().Guify(\"gtk_text_buffer_get_selection_bounds\", tb, sid, eid)\n\tstart = NewTextIter(tb.Candy(), sid)\n\tend = NewTextIter(tb.Candy(), eid)\n\treturn\n}",
"func (e *textView) PaintSelection(gtx layout.Context, material op.CallOp) {\n\tlocalViewport := image.Rectangle{Max: e.viewSize}\n\tdocViewport := image.Rectangle{Max: e.viewSize}.Add(e.scrollOff)\n\tdefer clip.Rect(localViewport).Push(gtx.Ops).Pop()\n\te.regions = e.index.locate(docViewport, e.caret.start, e.caret.end, e.regions)\n\tfor _, region := range e.regions {\n\t\tarea := clip.Rect(region.Bounds).Push(gtx.Ops)\n\t\tmaterial.Add(gtx.Ops)\n\t\tpaint.PaintOp{}.Add(gtx.Ops)\n\t\tarea.Pop()\n\t}\n}",
"func SelectionString(s *Selection, qf Qualifier) string {}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
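Editor's note: the slicing in SelectionText is easy to get wrong because lines and columns are 1-based and endCol is inclusive, while Go slice bounds are 0-based and end-exclusive. A standalone sketch of the same logic against an in-memory buffer (illustrative only; note that, like the real function, it does not reinsert newlines, since BufferLines returns lines without them):

package main

import (
	"bytes"
	"fmt"
)

// extract mirrors SelectionText's slicing: startCol/endCol are 1-based and
// endCol is inclusive; Go slice bounds are 0-based and end-exclusive.
func extract(lines [][]byte, startCol, endCol int) string {
	var b bytes.Buffer
	switch {
	case len(lines) == 1:
		b.Write(lines[0][startCol-1 : endCol])
	case len(lines) > 1:
		b.Write(lines[0][startCol-1:]) // tail of the first line
		for i := 1; i < len(lines)-1; i++ {
			b.Write(lines[i]) // whole middle lines
		}
		b.Write(lines[len(lines)-1][:endCol]) // head of the last line
	}
	return b.String()
}

func main() {
	lines := [][]byte{[]byte("hello world"), []byte("second line")}
	// Line 1 col 7 through line 2 col 6: note the missing newline.
	fmt.Println(extract(lines, 7, 6)) // worldsecond
}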
CurrentWordText returns the whitespace-delimited WORD under the cursor (Vim's <cWORD>).
|
func (n Basejump) CurrentWordText() (text string, err error) {
nv := n.nvim()
err = nv.Call("expand", &text, "<cWORD>")
return
}
|
[
"func (a *Args) Current() string {\n\treturn a.words[a.index]\n}",
"func (t *Token) GetWord() string {\n\treturn t.word\n}",
"func (result *TranslationResult) GetWord() string {\n\treturn result.Word\n}",
"func (l *lipsum) Word() string {\n\tidx := l.updateIdx()\n\tword := l.dictionary[idx]\n\treturn word\n}",
"func Word() string {\n\targs := Args()\n\tif len(args) > 0 {\n\t\treturn args[len(args)-1]\n\t}\n\treturn \"\"\n}",
"func (kw *Keyword) GetWord() string {\n\treturn kw.word\n}",
"func (tf *TextField) SelectWord() {\n\tupdt := tf.UpdateStart()\n\tdefer tf.UpdateEnd(updt)\n\tsz := len(tf.EditTxt)\n\tif sz <= 3 {\n\t\ttf.SelectAll()\n\t\treturn\n\t}\n\ttf.SelectStart = tf.CursorPos\n\tif tf.SelectStart >= sz {\n\t\ttf.SelectStart = sz - 2\n\t}\n\tif !tf.IsWordBreak(tf.EditTxt[tf.SelectStart]) {\n\t\tfor tf.SelectStart > 0 {\n\t\t\tif tf.IsWordBreak(tf.EditTxt[tf.SelectStart-1]) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttf.SelectStart--\n\t\t}\n\t\ttf.SelectEnd = tf.CursorPos + 1\n\t\tfor tf.SelectEnd < sz {\n\t\t\tif tf.IsWordBreak(tf.EditTxt[tf.SelectEnd]) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttf.SelectEnd++\n\t\t}\n\t} else { // keep the space start -- go to next space..\n\t\ttf.SelectEnd = tf.CursorPos + 1\n\t\tfor tf.SelectEnd < sz {\n\t\t\tif !tf.IsWordBreak(tf.EditTxt[tf.SelectEnd]) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttf.SelectEnd++\n\t\t}\n\t\tfor tf.SelectEnd < sz { // include all trailing spaces\n\t\t\tif tf.IsWordBreak(tf.EditTxt[tf.SelectEnd]) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttf.SelectEnd++\n\t\t}\n\t}\n\ttf.SelectInit = tf.SelectStart\n}",
"func forward_word(e *Event) {\n\tbuf := e.Buffer()\n\tdoc := buf.Document()\n\n\twstart := doc.FindStartOfNextWord()\n\tif wstart == 0 { // nothing found at all -> go to end of text\n\t\twstart = doc.GetEndOfTextOffset()\n\t}\n\tbuf.CursorForward(wstart)\n}",
"func (e *Ed) BackWord() {\n\tif e.Pos == 0 {\n\t\treturn\n\t}\n\n\tw := lastWord(e.Chars[:e.Pos], true)\n\te.MoveCursor(len(w), Back)\n\te.Chars = delete(e.Chars, e.Pos, len(w))\n\te.Del(len(w))\n\te.update()\n}",
"func (d *Dispenser) LastWord() string {\n\tif d.i-2 < 0 || d.i-2 >= len(d.str) {\n\t\treturn \"\"\n\t}\n\n\treturn d.str[d.i-2]\n}",
"func (v *View) Word(x, y int) (string, error) {\n\tx = v.ox + x\n\ty = v.oy + y\n\n\tif y < 0 || y >= len(v.lines) || x >= len(v.lines[y]) {\n\t\treturn \"\", errors.New(\"invalid point\")\n\t}\n\tl := string(v.lines[y])\n\tnl := strings.LastIndexFunc(l[:x], indexFunc)\n\tif nl == -1 {\n\t\tnl = 0\n\t} else {\n\t\tnl = nl + 1\n\t}\n\tnr := strings.IndexFunc(l[x:], indexFunc)\n\tif nr == -1 {\n\t\tnr = len(l)\n\t} else {\n\t\tnr = nr + x\n\t}\n\treturn string(l[nl:nr]), nil\n}",
"func GetWordsFocusedString(s string) string {\n\ts = strings.Map(letterLowerMapper, s)\n\n\t// it's possible to end up with a leading or trailing space (if there's was\n\t// character a non-letter there before).\n\n\tr, size := utf8.DecodeRuneInString(s)\n\tif unicode.IsSpace(r) {\n\t\ts = s[size:]\n\t}\n\n\tr, size = utf8.DecodeLastRuneInString(s)\n\tif unicode.IsSpace(r) {\n\t\ts = s[:len(s)-size]\n\t}\n\n\treturn s\n}",
"func (t *Tokenizer) NextText() string {\n\tif t.TokenP >= len(t.Tokens) {\n\t\treturn EndOfTokens.spelling\n\t}\n\n\ttoken := t.Tokens[t.TokenP]\n\tt.TokenP++\n\n\treturn token.spelling\n}",
"func (m *Word) GetOriginal() string {\n\tm.lock.RLock()\n\tdefer m.lock.RUnlock()\n\treturn m.original\n}",
"func (s *State) Current() rune {\n\treturn s.undo[s.ur].r\n}",
"func (c *ColumnNode) TokenWord() []rune {\n\treturn c.Word\n}",
"func (c *Command) CurrentName() string {\n\treturn c.Name\n}",
"func backward_word(e *Event) {\n\tbuf := e.Buffer()\n\tdoc := buf.Document()\n\n\twstart := doc.FindStartOfPreviousWord()\n\tbuf.CursorBackward(Offset(doc.CursorIndex() - wstart))\n}",
"func (l *Lexer) PeekWord() string {\n\n\tif l.pos == l.peekedWordPos && l.peekedWordPos > 0 {\n\t\treturn l.peekedWord\n\t}\n\t// TODO: optimize this, this is by far the most expensive operation\n\t// in the lexer\n\t// - move to some type of early bail? ie, use Accept() wherever possible?\n\tskipWs := 0\n\tfor ; skipWs < len(l.input)-l.pos; skipWs++ {\n\t\tr, ri := utf8.DecodeRuneInString(l.input[l.pos+skipWs:])\n\t\tif ri != 1 {\n\t\t\t//skipWs += (ri - 1)\n\t\t}\n\t\tif !unicode.IsSpace(r) {\n\t\t\tbreak\n\t\t}\n\t}\n\ti := skipWs\n\tfor ; i < len(l.input)-l.pos; i++ {\n\t\tr, ri := utf8.DecodeRuneInString(l.input[l.pos+i:])\n\t\t//u.Debugf(\"r: %v identifier?%v\", string(r), IsIdentifierRune(r))\n\t\tif ri != 1 {\n\t\t\t//i += (ri - 1)\n\t\t}\n\t\tif unicode.IsSpace(r) || (!IsIdentifierRune(r) && r != '@') || r == '(' {\n\t\t\tif i > 0 {\n\t\t\t\t//u.Infof(\"hm: '%v'\", l.input[l.pos+skipWs:l.pos+i])\n\t\t\t\tl.peekedWordPos = l.pos\n\t\t\t\tl.peekedWord = l.input[l.pos+skipWs : l.pos+i]\n\t\t\t\treturn l.peekedWord\n\t\t\t} else if r == '(' {\n\t\t\t\t// regardless of being short, lets treat like word\n\t\t\t\treturn string(r)\n\t\t\t}\n\t\t}\n\t}\n\t//u.Infof(\"hm: '%v'\", l.input[l.pos+skipWs:l.pos+i])\n\tl.peekedWordPos = l.pos\n\tl.peekedWord = l.input[l.pos+skipWs : l.pos+i]\n\treturn l.peekedWord\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ParsePath parses `text` into a filesystem path, line, and column. The `text` parameter must have one of the formats: `path` (for example file.go, or /bin/bash), `path:line` (for example file.go:100), or `path:line:col` (for example file.go:100:20). If the parsed path is not absolute it is made absolute by prepending the cwd of the current window in vim. If line and/or col is missing, they are set to 0.
|
func (n Basejump) ParsePath(text string) (fpath string, line, col int, err error) {
text = strings.TrimSpace(text)
match := pathRegex.FindStringSubmatch(text)
if match == nil || len(match) < 2 {
err = fmt.Errorf("doesn't seem to be a valid path")
return
}
fpath = match[1]
if len(match) > 2 && match[2] != "" {
line, err = strconv.Atoi(match[2])
if err != nil {
return
}
}
if len(match) > 3 && match[3] != "" {
col, err = strconv.Atoi(match[3])
if err != nil {
return
}
}
fpath, err = n.AbsPath(fpath)
if err != nil {
return
}
return
}
|
[
"func ParsePath(p string) string {\n\n\t// use the right separator (windows)\n\n\tparts := strings.Split(p, \"/\")\n\tjoinedPath := path.Join(parts...)\n\n\t// get home folder\n\n\thomedir, err := os.UserHomeDir()\n\n\tif err != nil {\n\t\tlog.Fatal(\"Could not find the home dir : \", err)\n\t}\n\n\treturn path.Clean(strings.ReplaceAll(joinedPath, \"~\", homedir))\n}",
"func ParsePath(path string) (Path, error) {\n\tif len(path) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tparts := strings.Split(path, \".\")\n\tparsedPath := make(Path, 0, len(parts)+strings.Count(path, \"[\"))\n\tfor i := 0; i < len(parts); i++ {\n\t\tpart := parts[i]\n\n\t\tarrayIdx := -1\n\t\t// first check of the path part ends in an array index, like\n\t\t//\n\t\t// tags[2]\n\t\t//\n\t\t// Extract the \"2\", and truncate the part to \"tags\"\n\t\tif bracketIdx := strings.Index(part, \"[\"); bracketIdx > -1 && strings.HasSuffix(part, \"]\") {\n\t\t\tif idx, err := strconv.Atoi(part[bracketIdx+1 : len(part)-1]); err == nil {\n\t\t\t\tarrayIdx = idx\n\t\t\t\tpart = part[0:bracketIdx]\n\t\t\t}\n\t\t}\n\n\t\tpart = strings.TrimSpace(part)\n\t\tif len(part) > 0 {\n\t\t\tparsedPath = append(parsedPath, part)\n\t\t}\n\t\tif arrayIdx > -1 {\n\t\t\tparsedPath = append(parsedPath, arrayIdx)\n\t\t}\n\t}\n\treturn parsedPath, nil\n}",
"func ParsePath(filename string) (Config, error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn Config{}, err\n\t}\n\treturn parse(f)\n}",
"func parsePath(rawPath string) (path string, tags []string, err error) {\n\tif len(rawPath) == 0 {\n\t\treturn \"\", nil, ErrRefPathNotValid\n\t}\n\n\t// The path is separated from the tags (if present) by a single colon.\n\tparts := strings.Split(rawPath, \":\")\n\tif len(parts) > 2 {\n\t\treturn \"\", nil, ErrRefPathNotValid\n\t}\n\n\t// TODO: not sure we should modify the path here...\n\t// Name can optionally start with a leading \"/\".\n\tpath = parts[0]\n\tif len(strings.TrimPrefix(path, \"/\")) == 0 {\n\t\treturn \"\", nil, ErrRefPathNotValid\n\t}\n\n\tif len(parts) > 1 {\n\t\ttags, err = parseTags(parts[1])\n\t\tif err != nil {\n\t\t\treturn \"\", nil, err\n\t\t}\n\t} else {\n\t\ttags = nil\n\t}\n\treturn path, tags, nil\n}",
"func ParseSnapshotPath(s string) (index int, err error) {\n\ts = filepath.Base(s)\n\n\ta := snapshotPathRegex.FindStringSubmatch(s)\n\tif a == nil {\n\t\treturn 0, fmt.Errorf(\"invalid snapshot path: %s\", s)\n\t}\n\n\ti64, _ := strconv.ParseUint(a[1], 16, 64)\n\treturn int(i64), nil\n}",
"func parsePath(str string) []int {\n\tif str == \"\" {\n\t\treturn nil\n\t}\n\tif str[0] != '/' {\n\t\tpanic(str)\n\t}\n\tvar res []int\n\tfor _, valStr := range strings.Split(str, \"/\")[1:] {\n\t\tval, err := strconv.Atoi(valStr)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tres = append(res, val)\n\t}\n\treturn res\n}",
"func ParseAbsPath(filepath string, homeDir string) string {\n\n\tvar result string\n\tif ok := path.IsAbs(filepath); ok {\n\t\tif strings.HasPrefix(filepath, homeDir) {\n\t\t\tresult = strings.Replace(filepath, homeDir, \"~\", 1)\n\t\t} else {\n\t\t\tresult = filepath\n\t\t}\n\t} else {\n\t\tresult = filepath\n\t}\n\treturn result\n\n}",
"func ParseFieldPath(fieldPath string) (FieldPath, error) {\n\treturn FieldPath(strings.Split(fieldPath, \".\")), nil\n}",
"func ParseRelPath(filepath string, homeDir string) string {\n\n\tvar result string\n\tif ok := path.IsAbs(filepath); !ok {\n\t\tresult = strings.Replace(filepath, \"~\", homeDir, 1)\n\t} else {\n\t\tresult = filepath\n\t}\n\treturn result\n\n}",
"func ParseHostAndBrickPath(brickPath string) (string, string, error) {\n\ti := strings.LastIndex(brickPath, \":\")\n\tif i == -1 {\n\t\tlog.WithField(\"brick\", brickPath).Error(errors.ErrInvalidBrickPath.Error())\n\t\treturn \"\", \"\", errors.ErrInvalidBrickPath\n\t}\n\thostname := brickPath[0:i]\n\tpath := brickPath[i+1 : len(brickPath)]\n\n\treturn hostname, path, nil\n}",
"func (ev *SemEnv) ScanText(fp io.Reader) {\n\tscan := bufio.NewScanner(fp) // line at a time\n\tcur := []string{}\n\tlbl := \"\"\n\tfor scan.Scan() {\n\t\tb := scan.Bytes()\n\t\tbs := string(b)\n\t\tsp := strings.Fields(bs)\n\t\tif len(sp) == 0 {\n\t\t\tev.Paras = append(ev.Paras, cur)\n\t\t\tev.ParaLabels = append(ev.ParaLabels, lbl)\n\t\t\tcur = []string{}\n\t\t\tlbl = \"\"\n\t\t} else {\n\t\t\tcoli := strings.Index(sp[0], \":\")\n\t\t\tif coli > 0 {\n\t\t\t\tlbl = sp[0][:coli]\n\t\t\t\tsp = sp[1:]\n\t\t\t}\n\t\t\tcur = append(cur, sp...)\n\t\t}\n\t}\n\tif len(cur) > 0 {\n\t\tev.Paras = append(ev.Paras, cur)\n\t\tev.ParaLabels = append(ev.ParaLabels, lbl)\n\t}\n}",
"func (p *Parser) CompilePath(svgPath string) error {\n\tp.init()\n\tlastIndex := -1\n\tfor i, v := range svgPath {\n\t\tif unicode.IsLetter(v) && v != 'e' {\n\t\t\tif lastIndex != -1 {\n\t\t\t\tif err := p.addSeg(svgPath[lastIndex:i]); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tlastIndex = i\n\t\t}\n\t}\n\tif lastIndex != -1 {\n\t\tif err := p.addSeg(svgPath[lastIndex:]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tp.dc.ClosePath()\n\n\treturn nil\n}",
"func parse(path string) (kpath, error) {\n\tvar r kpath\n\tif path == \"\" {\n\t\treturn r, errors.New(\"empty path\")\n\t}\n\tif path[0] == '[' {\n\t\tvar i int\n\t\tif len(path) < 2 {\n\t\t\treturn r, errors.New(\"unclosed array index in path\")\n\t\t}\n\t\tif path[1] == '\"' {\n\t\t\t// explicit string map index\n\t\t\ti = strings.Index(path, \"\\\"]\")\n\t\t\tif i < 0 {\n\t\t\t\treturn r, errors.New(\"unclosed map index in path\")\n\t\t\t}\n\t\t\tr.Part = path[2:i]\n\t\t\ti += 2\n\t\t} else {\n\t\t\t// array index\n\t\t\ti = strings.IndexRune(path, ']')\n\t\t\tif i < 0 {\n\t\t\t\treturn r, errors.New(\"unclosed array index in path\")\n\t\t\t}\n\t\t\tr.Part = path[1:i]\n\t\t\ti++\n\t\t}\n\t\tif len(path) > i {\n\t\t\tr.More = true\n\t\t\tif path[i] == '.' {\n\t\t\t\t// exlude delimiter\n\t\t\t\tr.Path = path[i+1:]\n\t\t\t} else {\n\t\t\t\t// include delimiter\n\t\t\t\tr.Path = path[i:]\n\t\t\t}\n\t\t}\n\t\treturn r, nil\n\t}\n\t// implicit string map index\n\tfor i := 0; i < len(path); i++ {\n\t\tif path[i] == '.' {\n\t\t\t// exlude delimiter\n\t\t\tr.Part = path[:i]\n\t\t\tr.Path = path[i+1:]\n\t\t\tr.More = true\n\t\t\treturn r, nil\n\t\t}\n\t\tif path[i] == '[' {\n\t\t\t// include delimiter\n\t\t\tr.Part = path[:i]\n\t\t\tr.Path = path[i:]\n\t\t\tr.More = true\n\t\t\treturn r, nil\n\t\t}\n\t}\n\t// entire path is the last part\n\tr.Part = path\n\treturn r, nil\n}",
"func (p *Parser) ParseLines(path, startMatch, endMatch string) (int, int, error) {\n\tvar startLine, endLine int\n\tf, err := p.fs.Open(path)\n\tif err != nil {\n\t\treturn startLine, endLine, err\n\t}\n\tdefer f.Close()\n\tscanner := bufio.NewScanner(f)\n\tline := 0\n\tfoundStart := false\n\tfor scanner.Scan() {\n\t\tif strings.Contains(scanner.Text(), startMatch) {\n\t\t\tfoundStart = true\n\t\t\tstartLine = line\n\t\t}\n\t\tif strings.Contains(scanner.Text(), endMatch) && foundStart {\n\t\t\tendLine = line\n\t\t\tbreak\n\t\t}\n\t\tline++\n\t}\n\terr = scanner.Err()\n\treturn startLine, endLine, err\n}",
"func Path(command *kingpin.CmdClause, c *cmd.OptionalString) {\n\tcommand.Flag(\"path\", \"The path to upload logs to\").Action(c.Set).StringVar(&c.Value)\n}",
"func parsePath(path string, matchPrefix, matchSlashes bool) (*pathInfo, error) {\n\t// Empty paths are not valid.\n\tif path == \"\" {\n\t\treturn nil, fmt.Errorf(errEmptyPath)\n\t}\n\n\tparams := make([][]string, 0)\n\tfwdPattern := bytes.NewBufferString(\"^\")\n\trevPattern := new(bytes.Buffer)\n\tvar depth, param, pos int\n\tfor i := range path {\n\t\tswitch path[i] {\n\t\tcase '{':\n\t\t\tif depth++; depth == 1 {\n\t\t\t\tparam = i\n\t\t\t}\n\t\tcase '}':\n\t\t\tif depth--; depth == 0 {\n\t\t\t\tnameVal := strings.SplitN(path[param+1:i], \":\", 2)\n\t\t\t\t// Parameters must be named.\n\t\t\t\tif nameVal[0] == \"\" {\n\t\t\t\t\treturn nil, fmt.Errorf(errParamNameNotDefined)\n\t\t\t\t}\n\t\t\t\t// Parameters must be unique per path.\n\t\t\t\t// FIXME: Do parameters really need to be unique?\n\t\t\t\tfor p := 0; p < len(params); p++ {\n\t\t\t\t\tif params[p][0] == nameVal[0] {\n\t\t\t\t\t\treturn nil, fmt.Errorf(errParamNameDefined, nameVal[0])\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif len(nameVal) < 2 {\n\t\t\t\t\tnameVal[1] = \"\"\n\t\t\t\t}\n\t\t\t\tif nameVal[1] == \"\" {\n\t\t\t\t\tnameVal[1] = \"[^/]+\"\n\t\t\t\t}\n\t\t\t\tsubPath := path[pos:param]\n\t\t\t\tfmt.Fprintf(fwdPattern, \"%s(%s)\", regexp.QuoteMeta(subPath), nameVal[1])\n\t\t\t\tfmt.Fprintf(revPattern, \"%s%%s\", subPath)\n\t\t\t\tparams = append(params, nameVal)\n\t\t\t\tpos = i + 1\n\t\t\t} else if depth < 0 {\n\t\t\t\t// With properly formatted input, depth should never go below zero.\n\t\t\t\treturn nil, fmt.Errorf(errUnevenBraces)\n\t\t\t}\n\t\t}\n\t}\n\tif depth != 0 {\n\t\t// At the end of the string, we're still inside a parameter brace.\n\t\treturn nil, fmt.Errorf(errUnevenBraces)\n\t}\n\n\tif pos < len(path) {\n\t\tfmt.Fprint(fwdPattern, regexp.QuoteMeta(path[pos:]))\n\t\tfmt.Fprint(revPattern, path[pos:])\n\t}\n\n\tif path != \"/\" && matchSlashes {\n\t\tif !strings.HasSuffix(path, \"/\") {\n\t\t\tfwdPattern.WriteByte('/')\n\t\t}\n\t\tfwdPattern.WriteByte('?')\n\t}\n\tif !matchPrefix {\n\t\tfwdPattern.WriteByte('$')\n\t}\n\n\tfwdRegexp, err := regexp.Compile(fwdPattern.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &pathInfo{\n\t\trawPath: path,\n\t\tfwdPattern: fwdRegexp,\n\t\trevPattern: revPattern.String(),\n\t\tparams: params,\n\t}, nil\n}",
"func (g *GpgCLI) Path(mctx MetaContext) string {\n\tcanExec, err := g.CanExec(mctx)\n\tif err == nil && canExec {\n\t\treturn g.path\n\t}\n\treturn \"\"\n}",
"func ParseSource(text string) (kind string, path string) {\n\tparts := strings.SplitN(text, \":\", 2)\n\tif len(parts) == 1 {\n\t\treturn \"file\", text\n\t}\n\tswitch parts[0] {\n\tcase \"etcd\":\n\t\treturn \"etcd\", parts[1]\n\tcase \"file\":\n\t\treturn \"file\", parts[1]\n\t}\n\n\treturn \"file\", text\n}",
"func Path(path string) Matcher {\n\tif path == \"\" || path[0] == '/' {\n\t\tpanic(\"Path must be relative\")\n\t}\n\treturn func(p string) bool { return p == path }\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
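Editor's note: the real pathRegex is defined elsewhere in the package and is not shown above, so the pattern below is an assumption with the same capture layout (path, optional line, optional col); the lazy `(.+?)` lets the optional `:digits` suffixes claim the trailing groups. A runnable sketch:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Assumed pattern -- capture 1 is the path, 2 the line, 3 the column.
var pathRegex = regexp.MustCompile(`^(.+?)(?::(\d+))?(?::(\d+))?$`)

func parse(text string) (fpath string, line, col int, err error) {
	m := pathRegex.FindStringSubmatch(text)
	if m == nil {
		return "", 0, 0, fmt.Errorf("doesn't seem to be a valid path")
	}
	fpath = m[1]
	if m[2] != "" {
		line, _ = strconv.Atoi(m[2]) // \d+ guarantees Atoi succeeds
	}
	if m[3] != "" {
		col, _ = strconv.Atoi(m[3])
	}
	return fpath, line, col, nil
}

func main() {
	for _, s := range []string{"/bin/bash", "file.go:100", "file.go:100:20"} {
		p, l, c, _ := parse(s)
		fmt.Printf("%-16s -> path=%s line=%d col=%d\n", s, p, l, c)
	}
}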
AbsPath makes the path `fpath` absolute, if it is not already, by prepending the working directory of the current window.
|
func (n Basejump) AbsPath(fpath string) (result string, err error) {
return n.AbsPathRelWindow(fpath, -1)
}
|
[
"func absPath(dir string, path Path) string {\n\tif filepath.IsAbs(string(path)) {\n\t\t// filepath.Join cleans the path so we should clean the absolute paths as well for consistency.\n\t\treturn filepath.Clean(string(path))\n\t}\n\treturn filepath.Join(dir, string(path))\n}",
"func (n Basejump) AbsPathRelWindow(fpath string, window int) (result string, err error) {\n\tnv := n.nvim()\n\n\tresult = fpath\n\tif !path.IsAbs(fpath) {\n\n\t\tvar cwd string\n\n\t\tif window == -1 {\n\t\t\t// If the current window is a terminal window, then we need\n\t\t\t// to get the cwd in a special way. We can't use this method for\n\t\t\t// the non-current window.\n\t\t\tvar b bool\n\t\t\terr = nv.Call(\"exists\", &b, \"b:term_title\")\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif b {\n\t\t\t\t// Is a terminal.\n\t\t\t\tvar pid float32\n\t\t\t\tvar buf nvim.Buffer\n\t\t\t\tbuf, err = nv.CurrentBuffer()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = nv.BufferVar(buf, \"terminal_job_pid\", &pid)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tcwd, err = n.pidCwd(int(pid))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t\tif cwd == \"\" {\n\n\t\t\targs := make([]interface{}, 0, 1)\n\t\t\tif window != -1 {\n\t\t\t\targs = append(args, window)\n\t\t\t}\n\n\t\t\terr = nv.Call(\"getcwd\", &cwd, args...)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tresult = cwd + \"/\" + fpath\n\t}\n\treturn\n}",
"func GetAbsPath(p string) string {\n\tfp := path.Join(GetCurPath(), p)\n\treturn path.Clean(fp)\n}",
"func (w *Wiki) AbsFilePath(relPath string) string {\n\tpath, _ := filepath.Abs(w.Dir(w.UnresolvedAbsFilePath(relPath)))\n\treturn path\n}",
"func (l *FsLoader) AbsPath(path string) string {\n\treturn filepath.Join(l.pkg.Root(), path)\n}",
"func Abs(os, path, cwd string) (_ string, err error) {\n\tif IsAbs(os, path) {\n\t\treturn Clean(os, path), nil\n\t}\n\treturn Clean(os, Join(os, cwd, path)), nil\n}",
"func AbsPath(path string) string {\n\tif !filepath.IsAbs(path) { // If the path provided isn't already absolute\n\t\tuser, userGetErr := user.Current()\n\n\t\tif userGetErr == nil { // If we didn't fail getting the current user\n\t\t\tpath = strings.Replace(path, \"~\", user.HomeDir, -1) // Replace any home directory reference\n\t\t}\n\n\t\tpath, _ = filepath.Abs(path) // Get the absolute path of path\n\n\t\tvar stripLastElement bool\n\n\t\tif file, openErr := os.Open(path); openErr == nil { // Attempt to open the path, to validate if it is a file or directory\n\t\t\tstat, statErr := file.Stat()\n\t\t\tstripLastElement = (statErr == nil) && !stat.IsDir() // Sets stripLastElement to true if stat.IsDir is not true\n\t\t} else { // If we failed to open the directory or file\n\t\t\tlastElement := filepath.Base(path)\n\t\t\tstripLastElement = filepath.Ext(lastElement) != \"\" // If lastElement is either a dotfile or has an extension, assume it is a file\n\t\t}\n\n\t\tif stripLastElement {\n\t\t\tpath = filepath.Dir(path) // Strip out the last element\n\t\t}\n\t}\n\n\treturn path\n}",
"func FilepathAbs(inputPath string) (path string, err error) {\n\tvar OSTYPE string\n\tconst cygwinRootOfAllDrives = \"/cygdrive/\"\n\tif strings.HasPrefix(inputPath, cygwinRootOfAllDrives) {\n\t\tOSTYPE = \"cygwin\"\n\t\t// OSTYPE := os.Getenv(\"OSTYPE\")\t// Is not helpful (returns nothing on Windows 10)\n\t}\n\tif OSTYPE == \"cygwin\" { // Atypical case: cygwin drive.\n\t\t// Use cygwin utility cygpath to convert cygwin path to windows path.\n\t\tconst executable = \"cygpath\"\n\t\tconst flag = \"-w\"\n\t\tvar cmd *exec.Cmd = exec.Command(executable, flag, inputPath)\n\t\tvar out bytes.Buffer\n\t\tcmd.Stdout = &out\n\t\terr = cmd.Run()\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"%s exit code %v error: %s\", executable, err, out.String())\n\t\t\treturn\n\t\t}\n\t\tpath = out.String()\n\t\t// cygpath or cygwin bash appends path with an unwelcome new line.\n\t\tpath = strings.Replace(path, \"\\n\", \"\", -1)\n\t} else { // Typical case.\n\t\tpath, err = filepath.Abs(inputPath)\n\t}\n\n\treturn\n}",
"func getAbsPath(fileName, path string) string {\n\tif filepath.IsAbs(fileName) {\n\t\treturn fileName\n\t}\n\n\treturn filepath.Join(path, fileName)\n}",
"func (d Destination) AbsPath() string {\n\treturn filepath.Join(d.Base, d.Path)\n}",
"func absPath(relPath string) string {\n\t// mpqDir specifies a directory containing an extracted copy of the files\n\t// contained within DIABDAT.MPQ. Note that the extracted files should have\n\t// lowercase names.\n\tconst mpqDir = \"diabdat\"\n\treturn filepath.Join(mpqDir, relPath)\n}",
"func absFilePath(file file.Filer) string {\r\n\tpath, _ := filepath.Abs(file.Name())\r\n\treturn filepath.Join(path, file.Name())\r\n}",
"func ParseAbsPath(filepath string, homeDir string) string {\n\n\tvar result string\n\tif ok := path.IsAbs(filepath); ok {\n\t\tif strings.HasPrefix(filepath, homeDir) {\n\t\t\tresult = strings.Replace(filepath, homeDir, \"~\", 1)\n\t\t} else {\n\t\t\tresult = filepath\n\t\t}\n\t} else {\n\t\tresult = filepath\n\t}\n\treturn result\n\n}",
"func (w *Wiki) UnresolvedAbsFilePath(relPath string) string {\n\n\t// sanitize\n\trelPath = filepath.FromSlash(relPath)\n\n\t// join with wiki dir\n\tpath := w.Dir(relPath)\n\n\t// resolve symlink\n\tabs, _ := filepath.Abs(path)\n\treturn abs\n}",
"func GetAbsPath(relativePath string) string {\n\t_, b, _, _ := runtime.Caller(1)\n\tpath, err := filepath.Abs(filepath.Dir(b) + \"/\" + relativePath)\n\tif err != nil {\n\t\tlog.Panic(err.Error())\n\t}\n\treturn path\n}",
"func RealPath(path string) string {\n\tif abs, err := filepath.Abs(path); err == nil {\n\t\tpath = abs\n\t}\n\tif expanded, err := filepath.EvalSymlinks(path); err == nil {\n\t\tpath = expanded\n\t}\n\treturn path\n}",
"func RealPath(path string) (string, error) {\n\treturn filepath.Abs(path)\n}",
"func defaultAbsPath(dir string) (string, error) {\n\treturn filepath.Abs(dir)\n}",
"func (p *PathImpl) MakeAbsolute() *PathImpl {\r\n\tif !p.unc && !p.absolute {\r\n\t\tif newPath, err := syscall.FullPath(p.ToString()); err == nil {\r\n\t\t\treturn Path(newPath)\r\n\t\t}\r\n\t}\r\n\treturn p\r\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
AbsPathRelWindow makes the path `fpath` absolute, if it is not already, by prepending the working directory of the window `window`.
|
func (n Basejump) AbsPathRelWindow(fpath string, window int) (result string, err error) {
nv := n.nvim()
result = fpath
if !path.IsAbs(fpath) {
var cwd string
if window == -1 {
// If the current window is a terminal window, then we need
// to get the cwd in a special way. We can't use this method for
// the non-current window.
var b bool
err = nv.Call("exists", &b, "b:term_title")
if err != nil {
return
}
if b {
// Is a terminal.
var pid float32
var buf nvim.Buffer
buf, err = nv.CurrentBuffer()
if err != nil {
return
}
err = nv.BufferVar(buf, "terminal_job_pid", &pid)
if err != nil {
return
}
cwd, err = n.pidCwd(int(pid))
if err != nil {
return
}
}
}
if cwd == "" {
args := make([]interface{}, 0, 1)
if window != -1 {
args = append(args, window)
}
err = nv.Call("getcwd", &cwd, args...)
if err != nil {
return
}
}
result = cwd + "/" + fpath
}
return
}
|
[
"func (n Basejump) AbsPath(fpath string) (result string, err error) {\n\treturn n.AbsPathRelWindow(fpath, -1)\n}",
"func (w *Wiki) AbsFilePath(relPath string) string {\n\tpath, _ := filepath.Abs(w.Dir(w.UnresolvedAbsFilePath(relPath)))\n\treturn path\n}",
"func (w *Wiki) RelPath(absPath string) string {\n\trel := makeRelPath(absPath, w.Dir())\n\tif !relPathLocal(rel) {\n\t\treturn \"\"\n\t}\n\treturn rel\n}",
"func RealPath(path string) string {\n\tif abs, err := filepath.Abs(path); err == nil {\n\t\tpath = abs\n\t}\n\tif expanded, err := filepath.EvalSymlinks(path); err == nil {\n\t\tpath = expanded\n\t}\n\treturn path\n}",
"func (w *Wiki) UnresolvedAbsFilePath(relPath string) string {\n\n\t// sanitize\n\trelPath = filepath.FromSlash(relPath)\n\n\t// join with wiki dir\n\tpath := w.Dir(relPath)\n\n\t// resolve symlink\n\tabs, _ := filepath.Abs(path)\n\treturn abs\n}",
"func (f *File) RelativePath() string {\n\treturn strings.Replace(f.Location, f.Issue.Title.Location, \"\", 1)\n}",
"func GoRelBase(f string) string {\n\trd := GoRelDir(f)\n\treturn filepath.Join(rd, filepath.Base(f))\n}",
"func wrapWindowsPath(p string) string {\n\tif goruntime.GOOS != \"windows\" {\n\t\treturn p\n\t}\n\tpp := filepath.FromSlash(p)\n\tif !filepath.IsAbs(p) && []rune(pp)[0] == '\\\\' {\n\t\tpp, _ = filepath.Abs(p)\n\t\tu, _ := url.Parse(pp)\n\t\treturn u.String()\n\t}\n\treturn pp\n}",
"func RelPath(path ...string) string {\n\treturn filepath.Join(append([]string{root}, path...)...)\n}",
"func displayPath(target string) string {\n\tif !filepath.IsAbs(target) {\n\t\treturn filepath.Clean(target)\n\t}\n\n\tbase, err := os.Getwd()\n\tif err != nil {\n\t\treturn filepath.Clean(target)\n\t}\n\n\trel, err := filepath.Rel(base, target)\n\tif err != nil {\n\t\t// No path from base to target available, return target as is.\n\t\treturn filepath.Clean(target)\n\t}\n\treturn rel\n}",
"func absPath(relPath string) string {\n\t// mpqDir specifies a directory containing an extracted copy of the files\n\t// contained within DIABDAT.MPQ. Note that the extracted files should have\n\t// lowercase names.\n\tconst mpqDir = \"diabdat\"\n\treturn filepath.Join(mpqDir, relPath)\n}",
"func AbsoluteOrRelativePath(p string, contextDir string) string {\n\tif path.IsAbs(p) {\n\t\treturn p\n\t}\n\tif contextDir == \".\" {\n\t\treturn p\n\t}\n\treturn contextDir + \"/\" + p\n}",
"func absFilePath(file file.Filer) string {\r\n\tpath, _ := filepath.Abs(file.Name())\r\n\treturn filepath.Join(path, file.Name())\r\n}",
"func absPath(dir string, path Path) string {\n\tif filepath.IsAbs(string(path)) {\n\t\t// filepath.Join cleans the path so we should clean the absolute paths as well for consistency.\n\t\treturn filepath.Clean(string(path))\n\t}\n\treturn filepath.Join(dir, string(path))\n}",
"func getAbsPath(fileName, path string) string {\n\tif filepath.IsAbs(fileName) {\n\t\treturn fileName\n\t}\n\n\treturn filepath.Join(path, fileName)\n}",
"func Abs(os, path, cwd string) (_ string, err error) {\n\tif IsAbs(os, path) {\n\t\treturn Clean(os, path), nil\n\t}\n\treturn Clean(os, Join(os, cwd, path)), nil\n}",
"func addRelFile(abs map[string]*File, absPath string) error {\n\tif absPath == \".\" {\n\t\t// The root of the file system (see the documentation\n\t\t// for path.Clean). There is no parent directory, so\n\t\t// just return.\n\t\treturn nil\n\t}\n\tfile := abs[absPath]\n\tif file == nil {\n\t\t// empty directory\n\t\tfile = &File{\n\t\t\tkids: Tree{},\n\t\t}\n\t\tabs[absPath] = file\n\t}\n\tdirPath, relPath := slashpath.Split(absPath)\n\n\tdirPath = slashpath.Clean(dirPath)\n\trelPath = slashpath.Clean(relPath)\n\n\tif dirPath != \".\" {\n\t\t// make sure all our ancestors are present.\n\t\tif err := addRelFile(abs, dirPath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tdir := abs[dirPath]\n\tif dir.kids == nil {\n\t\treturn errors.New(\"Conflict: non-directory has child nodes\")\n\t}\n\tdir.kids[relPath] = file\n\treturn nil\n}",
"func (g *Golang) relGopath() (string, error) {\n\tgopath := g.Environment[\"GOPATH\"]\n\tif g.WorkingDirectory != \"\" && strings.HasPrefix(gopath, g.WorkingDirectory) {\n\t\treturn filepath.Rel(g.WorkingDirectory, gopath)\n\t}\n\tif filepath.IsAbs(gopath) {\n\t\treturn \"\", errors.New(\"GOPATH is absolute path, but needs to be relative path\")\n\t}\n\treturn gopath, nil\n}",
"func GetAbsPath(p string) string {\n\tfp := path.Join(GetCurPath(), p)\n\treturn path.Clean(fp)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
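Editor's note: two pieces of AbsPathRelWindow are worth pulling out. The final join is plain string concatenation guarded by path.IsAbs, and the pidCwd helper is not shown above -- on Linux it is plausibly a readlink of /proc/<pid>/cwd, though the real implementation may differ. A sketch under those assumptions:

package main

import (
	"fmt"
	"os"
	"path"
)

// makeAbs is the final join AbsPathRelWindow performs once it has a cwd.
func makeAbs(cwd, fpath string) string {
	if path.IsAbs(fpath) {
		return fpath
	}
	return cwd + "/" + fpath // the snippet's manual join; filepath.Join also works
}

// pidCwdLinux is an assumed stand-in for the unshown pidCwd helper:
// the working directory of a process is exposed as a /proc symlink.
func pidCwdLinux(pid int) (string, error) {
	return os.Readlink(fmt.Sprintf("/proc/%d/cwd", pid))
}

func main() {
	fmt.Println(makeAbs("/home/user/project", "main.go")) // /home/user/project/main.go
	fmt.Println(makeAbs("/home/user/project", "/etc/hosts")) // unchanged

	cwd, err := pidCwdLinux(os.Getpid())
	fmt.Println(cwd, err)
}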
OpenOrChangeTo ensures the specified file is open in vim. If the path is found in a window, that window is made current. If no window contains that path, a new window is split off and the path is opened in it.
|
func (n Basejump) OpenOrChangeTo(fpath, method string) (wasOpen bool, err error) {
nv := n.nvim()
//win, winNr, err := n.findWindow(fpath, SearchOnlyInCurrentTab)
win, tabNr, winNr, err := n.findWindow(fpath, SearchInAllTabs)
if err != nil {
return
}
if win != nil {
// Change to this window
		trace(n, "trace: OpenOrChangeTo: changing to tab")
nv.Command(fmt.Sprintf("%dtabnext", tabNr))
		trace(n, "trace: OpenOrChangeTo: changing to existing window")
err = nv.Command(fmt.Sprintf("%dwincmd w", winNr))
wasOpen = true
return
}
// Not found. Split new window
// Seems like :split is not working from a script for directories for me
// (see https://superuser.com/questions/1243344/vim-wont-split-open-a-directory-from-python-but-it-works-interactively)
// so if it's a directory, use Hexplore instead.
isDir := false
var fi os.FileInfo
if fi, err = os.Stat(fpath); err == nil && fi.IsDir() {
isDir = true
}
action := "splitting"
dirCmd := "Hexplore"
splitCmd := "split"
if method == openByTab {
action = "tabbing"
dirCmd = "Texplore"
splitCmd = "tabedit"
}
	trace(n, "trace: OpenOrChangeTo: no window matches. %s %s.", action, fpath)
if isDir {
		// If it's a directory, use the netrw explorer command instead.
err = nv.Command(fmt.Sprintf("%s %s", dirCmd, fpath))
} else {
err = nv.Command(fmt.Sprintf("%s %s", splitCmd, fpath))
}
return
}
|
[
"func OpenFileInEditor(filepath string) {\n\teditor := os.Getenv(\"EDITOR\")\n\n\tvar cmd *exec.Cmd\n\tcmd = exec.Command(editor, filepath)\n\tcmd.Start()\n}",
"func OpenInEditor(executablePath, filePath string) error {\n\tcmd := exec.Command(executablePath, filePath)\n\treturn cmd.Start()\n}",
"func OpenProjectInEditor(projectPath, filename string) {\n\tvar cmd *exec.Cmd\n\n\teditor := os.Getenv(\"EDITOR\")\n\tif editor == \"code\" || editor == \"code-insiders\" {\n\t\tcmd = exec.Command(editor, projectPath, \"--goto\", filename)\n\t} else {\n\t\tcmd = exec.Command(editor, projectPath)\n\t}\n\n\tcmd.Start()\n}",
"func OpenFileInEditor(filename string) error {\n\teditor := os.Getenv(EnvironmentVariableName)\n\tif editor == \"\" {\n\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\teditor = WindowsDefaultEditor\n\t\t} else {\n\t\t\teditor = UnixDefaultEditor\n\t\t}\n\t}\n\n\teditorExecutable, err := exec.LookPath(editor)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd := exec.Command(editorExecutable, filename)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\treturn cmd.Run()\n}",
"func openFile(target string) error {\n\tvar cmd string\n\tvar args []string\n\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\tcmd = \"cmd\"\n\t\targs = []string{\"/c\", \"start\"}\n\tcase \"darwin\":\n\t\tcmd = \"open\"\n\tdefault: // \"linux\", \"freebsd\", \"openbsd\", \"netbsd\"\n\t\tcmd = \"xdg-open\"\n\t}\n\targs = append(args, target)\n\treturn exec.Command(cmd, args...).Start()\n}",
"func (c *OSCommand) EditFile(filename string) (*exec.Cmd, error) {\n\teditor, _ := gitconfig.Global(\"core.editor\")\n\tif editor == \"\" {\n\t\teditor = os.Getenv(\"VISUAL\")\n\t}\n\tif editor == \"\" {\n\t\teditor = os.Getenv(\"EDITOR\")\n\t}\n\tif editor == \"\" {\n\t\tif err := c.RunCommand(\"which vi\"); err == nil {\n\t\t\teditor = \"vi\"\n\t\t}\n\t}\n\tif editor == \"\" {\n\t\treturn nil, errors.New(\"No editor defined in $VISUAL, $EDITOR, or git config\")\n\t}\n\treturn c.PrepareSubProcess(editor, filename)\n}",
"func OpenCommand(\n\tlog Logger,\n\ttm tmux.Tmux,\n\tm manifest.Manifest,\n) func(*cli.Cmd) {\n\treturn func(cmd *cli.Cmd) {\n\t\tprojects := cmd.StringsArg(\"PROJECTS\", nil, \"names or aliases of projects to open\")\n\t\tnoEdit := cmd.BoolOpt(\"no-edit n\", false, \"do not launch editor\")\n\n\t\tcmd.Spec = \"[OPTIONS] PROJECTS...\"\n\n\t\tcmd.Action = func() {\n\t\t\tif !tm.Valid() {\n\t\t\t\tlog.Fatalf(\"jkl open must be ran in tmux\")\n\t\t\t}\n\n\t\t\tfor _, name := range *projects {\n\t\t\t\tp, err := m.FindProject(name)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"%s\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif fileNotExists(p.Path) {\n\t\t\t\t\tlog.Fatalf(\"project directory for %s does not exist\", p.Name)\n\t\t\t\t}\n\n\t\t\t\tvar opts []tmux.CreateWindowOption\n\t\t\t\tif p.WorkingPath != \"\" {\n\t\t\t\t\topts = append(opts, tmux.WithVerticalSplitPath(\n\t\t\t\t\t\tfilepath.Join(p.Path, p.WorkingPath)),\n\t\t\t\t\t)\n\t\t\t\t}\n\n\t\t\t\tif p.Layout != \"\" {\n\t\t\t\t\topts = append(opts, tmux.WithLayout(p.Layout))\n\t\t\t\t}\n\n\t\t\t\terr = tm.CreateWindow(p.Name, p.Path, opts...)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"failed to open project '%s': %s\", p.Name, err)\n\t\t\t\t}\n\n\t\t\t\tif !*noEdit {\n\t\t\t\t\terr = tm.Execute((m.Editor + \" .\"))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Printf(\"failed to open editor: %s\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func (w *Workspace) OpenFile(absFilepath, text string) error {\n\thash := calculateHashFromString(text)\n\n\tif err := w.LoaderEngine.openedFiles.EnsureOpened(absFilepath, text); err != nil {\n\t\treturn errors.Wrap(err, \"From OpenFile\")\n\t}\n\n\tabsPath := filepath.Dir(absFilepath)\n\tdp, err := w.Loader.FindDistinctPackage(absPath)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to find package for %s\", absPath)\n\t}\n\texistingHash := dp.Package.fileHashes[filepath.Base(absFilepath)]\n\tif existingHash == hash {\n\t\tw.log.Debugf(\"Shadowed file '%s'; unchanged\\n\", absFilepath)\n\t\treturn nil\n\t}\n\n\tw.LoaderEngine.InvalidatePackage(absPath)\n\n\tw.log.Debugf(\"Shadowed file '%s'\\n\", absFilepath)\n\n\treturn nil\n}",
"func (b *Browser) OpenFile(filename string, flag int, _ os.FileMode) (\n\tf billy.File, err error) {\n\tif b.tree == nil {\n\t\treturn nil, errors.New(\"Empty repo\")\n\t}\n\n\tif flag&os.O_CREATE != 0 {\n\t\treturn nil, errors.New(\"browser can't create files\")\n\t}\n\n\treturn b.Open(filename)\n}",
"func (e *Editor) Edit(path string) error {\n\teditor := os.Getenv(\"EDITOR\")\n\tif editor == \"\" {\n\t\teditor = DefaultEditor\n\t}\n\n\t// Get the full executable path for the editor.\n\texecutable, err := exec.LookPath(editor)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd := exec.Command(executable, path)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\treturn cmd.Run()\n}",
"func Edit(cfgFilename, editor string) error {\n\tcfgPath, err := fullCfgPath(cfgFilename)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tcmd, err := util.OpenFileCmd(editor, cfgPath, 0)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tif err = cmd.Start(); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tif err = cmd.Wait(); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\treturn nil\n}",
"func (v *Viz) OpenScriptFile(ctx context.Context, resolver qfs.PathResolver) (err error) {\n\tif v.Text != \"\" {\n\t\tv.scriptFile = qfs.NewMemfileBytes(\"template.html\", []byte(v.Text))\n\t\treturn nil\n\t}\n\n\tif v.ScriptPath == \"\" {\n\t\t// nothing to resolve\n\t\treturn nil\n\t}\n\n\tif resolver == nil {\n\t\treturn ErrNoResolver\n\t}\n\tv.scriptFile, err = resolver.Get(ctx, v.ScriptPath)\n\treturn err\n}",
"func ShellOpen(file string) error {\n\treturn open(file)\n}",
"func (t *Tailor) openFile(offset int64, whence int) (err error) {\n\tt.file, err = os.Open(t.fileName)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error opening file\")\n\t}\n\n\terr = t.seekToLineStart(offset, whence)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error seeking to line start\")\n\t}\n\n\terr = t.updateFileStatus()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error updating file status\")\n\t}\n\n\treturn nil\n}",
"func (mgr *CrlWorkspaceManager) openFile(fileInfo os.FileInfo, trans *core.Transaction) (*workspaceFile, error) {\n\twritable := (fileInfo.Mode().Perm() & 0200) > 0\n\tmode := os.O_RDONLY\n\tif writable {\n\t\tmode = os.O_RDWR\n\t}\n\tfilename := mgr.editor.userPreferences.WorkspacePath + \"/\" + fileInfo.Name()\n\tfile, err := os.OpenFile(filename, mode, fileInfo.Mode())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfileContent := make([]byte, fileInfo.Size())\n\t_, err = file.Read(fileContent)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\telement, err2 := mgr.GetUofD().RecoverDomain(fileContent, trans)\n\tif err2 != nil {\n\t\treturn nil, err2\n\t}\n\tif !writable {\n\t\telement.SetReadOnlyRecursively(true, trans)\n\t}\n\tvar wf workspaceFile\n\twf.filename = filename\n\twf.Domain = element\n\twf.Info = fileInfo\n\twf.LoadedVersion = element.GetVersion(trans)\n\twf.File = file\n\treturn &wf, nil\n}",
"func (c *OSCommand) SublimeOpenFile(filename string) (*exec.Cmd, error) {\n\treturn nil, c.RunCommand(\"subl \" + filename)\n}",
"func (ui *ufsInterface) Open(path string, _ filesystem.IOFlags) (filesystem.File, error) {\n\tcallCtx, cancel := interfaceutils.CallContext(ui.ctx)\n\tdefer cancel()\n\tipldNode, err := ui.core.ResolveNode(callCtx, corepath.New(path))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdmod, err := mod.NewDagModifier(ui.ctx, ipldNode, ui.core.Dag(), func(r io.Reader) chunk.Splitter {\n\t\treturn chunk.NewBuzhash(r) // TODO: maybe switch this back to the default later; buzhash should be faster so we're keeping it temporarily while testing\n\t})\n\tif err != nil {\n\t\treturn nil, iferrors.Other(path, err)\n\t}\n\n\treturn &dagRef{DagModifier: dmod, modifiedCallback: ui.modifiedCallback}, nil\n}",
"func (w *Workspace) ChangeFile(absFilepath string, startLine, startCharacter, endLine, endCharacter int, text string) error {\n\tbuf, err := w.LoaderEngine.openedFiles.Get(absFilepath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Have position (line, character), need to transform into offset into file\n\t// Then replace starting from there.\n\tr1 := buf.NewReader()\n\tstartOffset, err := CalculateOffsetForPosition(r1, startLine, startCharacter)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Error from start (%d, %d)\", startLine, startCharacter)\n\t}\n\n\tr2 := buf.NewReader()\n\tendOffset, err := CalculateOffsetForPosition(r2, endLine, endCharacter)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Error from end (%d, %d)\", endLine, endCharacter)\n\t}\n\n\tfmt.Printf(\"offsets: [%d:%d]\\n\", startOffset, endOffset)\n\n\tif err = buf.Alter(startOffset, endOffset, text); err != nil {\n\t\treturn errors.Wrap(err, \"ChangeFile: failed to alter the file buffer\")\n\t}\n\n\tw.LoaderEngine.InvalidatePackage(filepath.Dir(absFilepath))\n\n\treturn nil\n}",
"func (c *OSCommand) VsCodeOpenFile(filename string) (*exec.Cmd, error) {\n\treturn nil, c.RunCommand(\"code -r \" + filename)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
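Editor's note: OpenOrChangeTo's command selection reduces to a pure function over (method, isDir). A sketch of just that decision table; `openByTab` is a constant defined elsewhere in the package, so its value below is an assumption made for the sake of a runnable example:

package main

import "fmt"

// Assumed value of the package's openByTab constant.
const openByTab = "tab"

// commandFor isolates OpenOrChangeTo's choice of Ex command: directories go
// through netrw (:Hexplore/:Texplore) because :split fails on them over RPC.
func commandFor(method string, isDir bool) string {
	dirCmd, splitCmd := "Hexplore", "split"
	if method == openByTab {
		dirCmd, splitCmd = "Texplore", "tabedit"
	}
	if isDir {
		return dirCmd
	}
	return splitCmd
}

func main() {
	fmt.Println(commandFor("split", false))  // split
	fmt.Println(commandFor(openByTab, true)) // Texplore
}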
JumpToLineAndCol moves the cursor to the specified line and column in the current buffer.
|
func (n Basejump) JumpToLineAndCol(line, col int) (err error) {
nv := n.P.Nvim
// In order to store these jumps in the jump history
// we use the 'G' command first. This only stores the line
// number, though, with column 1 (instead of the correct column).
// We store a second jump by doing a forward search for any character,
// with the count of the column number i.e. 10/.
nv.Command(fmt.Sprintf("normal %dG", line))
if col > 1 {
nv.Command(fmt.Sprintf("normal %d/.", col-1))
}
// Just to make sure we didn't mess up
err = nv.Call("cursor", nil, line, col)
return
}
|
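Editor's note: the col-1 arithmetic in JumpToLineAndCol is subtle. After `{line}G` the cursor sits on column 1, so a forward search for any character (`/.`) repeated col-1 times lands on column col while recording a second jumplist entry. A sketch that only prints the two normal-mode commands generated for line 42, col 10 (illustrative only):

package main

import "fmt"

func main() {
	line, col := 42, 10
	// "42G" moves to line 42, column 1, and records a jumplist entry.
	fmt.Printf("normal %dG\n", line)
	if col > 1 {
		// "9/." finds the 9th next occurrence of any character, landing
		// on column 10 and recording a second jumplist entry.
		fmt.Printf("normal %d/.\n", col-1)
	}
}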
[
"func (e *Editor) JumpToLine() {\n\n\t// Prompt the user to enter a line number.\n\tanswer, err := e.Ask(\"Line: \", \"\")\n\tif err != nil {\n\t\te.SetStatusMessage(\"Jump cancelled.\")\n\t\treturn\n\t}\n\n\t// Attempt to convert the provided input into an integer.\n\ti, err := strconv.Atoi(answer)\n\tif err != nil {\n\t\te.SetStatusMessage(\"Error: Invalid input.\")\n\t\treturn\n\t}\n\n\tlineCount := e.FB().Length()\n\n\t// Check if the target line is out of bounds, then jump to the correct line.\n\tif i <= 1 {\n\t\te.FB().CursorY = 1\n\t} else if i > lineCount {\n\t\te.FB().CursorY = lineCount\n\t} else {\n\t\te.FB().CursorY = i\n\t}\n\n\t// Automatically move the cursor to the start of the new line.\n\te.MoveCursor(CursorMoveLineStart)\n}",
"func (c Cursor) SetLineCol(line, col int) Cursor {\n\tc.line, c.col = (*c.buffer).ClampLineCol(line, col)\n\treturn c\n}",
"func (n Basejump) Cursor() (line, col int, err error) {\n\tresult := make([]float32, 4)\n\tnv := n.nvim()\n\n\terr = nv.Call(\"getpos\", result, \".\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tline = int(result[1])\n\tcol = int(result[2])\n\treturn\n}",
"func (w *PosixWriter256) CursorGoTo(row, col int) {\n\tif row == 0 && col == 0 {\n\t\t// If no row/column parameters are provided (ie. <ESC>[H), the cursor will move to the home position.\n\t\tw.WriteRaw([]byte{0x1b, '[', 'H'})\n\t\treturn\n\t}\n\tr := strconv.Itoa(row)\n\tc := strconv.Itoa(col)\n\tw.WriteRaw([]byte{0x1b, '['})\n\tw.WriteRaw([]byte(r))\n\tw.WriteRaw([]byte{';'})\n\tw.WriteRaw([]byte(c))\n\tw.WriteRaw([]byte{'H'})\n\treturn\n}",
"func (v *View) MoveCursor(dx, dy int, writeMode bool) {\n\tmaxX, maxY := v.Size()\n\tcx, cy := v.cx+dx, v.cy+dy\n\tx, y := v.ox+cx, v.oy+cy\n\n\tvar curLineWidth, prevLineWidth int\n\t// get the width of the current line\n\tif writeMode {\n\t\tif v.Wrap {\n\t\t\tcurLineWidth = maxX - 1\n\t\t} else {\n\t\t\tcurLineWidth = maxInt\n\t\t}\n\t} else {\n\t\tif y >= 0 && y < len(v.viewLines) {\n\t\t\tcurLineWidth = len(v.viewLines[y].line)\n\t\t\tif v.Wrap && curLineWidth >= maxX {\n\t\t\t\tcurLineWidth = maxX - 1\n\t\t\t}\n\t\t} else {\n\t\t\tcurLineWidth = 0\n\t\t}\n\t}\n\t// get the width of the previous line\n\tif y-1 >= 0 && y-1 < len(v.viewLines) {\n\t\tprevLineWidth = len(v.viewLines[y-1].line)\n\t} else {\n\t\tprevLineWidth = 0\n\t}\n\n\t// adjust cursor's x position and view's x origin\n\tif x > curLineWidth { // move to next line\n\t\tif dx > 0 { // horizontal movement\n\t\t\tif !v.Wrap {\n\t\t\t\tv.ox = 0\n\t\t\t}\n\t\t\tv.cx = 0\n\t\t\tcy++\n\t\t} else { // vertical movement\n\t\t\tif curLineWidth > 0 { // move cursor to the EOL\n\t\t\t\tif v.Wrap {\n\t\t\t\t\tv.cx = curLineWidth\n\t\t\t\t} else {\n\t\t\t\t\tncx := curLineWidth - v.ox\n\t\t\t\t\tif ncx < 0 {\n\t\t\t\t\t\tv.ox += ncx\n\t\t\t\t\t\tif v.ox < 0 {\n\t\t\t\t\t\t\tv.ox = 0\n\t\t\t\t\t\t}\n\t\t\t\t\t\tv.cx = 0\n\t\t\t\t\t} else {\n\t\t\t\t\t\tv.cx = ncx\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif !v.Wrap {\n\t\t\t\t\tv.ox = 0\n\t\t\t\t}\n\t\t\t\tv.cx = 0\n\t\t\t}\n\t\t}\n\t} else if cx < 0 {\n\t\tif !v.Wrap && v.ox > 0 { // move origin to the left\n\t\t\tv.ox--\n\t\t} else { // move to previous line\n\t\t\tif prevLineWidth > 0 {\n\t\t\t\tif !v.Wrap { // set origin so the EOL is visible\n\t\t\t\t\tnox := prevLineWidth - maxX + 1\n\t\t\t\t\tif nox < 0 {\n\t\t\t\t\t\tv.ox = 0\n\t\t\t\t\t} else {\n\t\t\t\t\t\tv.ox = nox\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tv.cx = prevLineWidth\n\t\t\t} else {\n\t\t\t\tif !v.Wrap {\n\t\t\t\t\tv.ox = 0\n\t\t\t\t}\n\t\t\t\tv.cx = 0\n\t\t\t}\n\t\t\tcy--\n\t\t}\n\t} else { // stay on the same line\n\t\tif v.Wrap {\n\t\t\tv.cx = cx\n\t\t} else {\n\t\t\tif cx >= maxX {\n\t\t\t\tv.ox++\n\t\t\t} else {\n\t\t\t\tv.cx = cx\n\t\t\t}\n\t\t}\n\t}\n\n\t// adjust cursor's y position and view's y origin\n\tif cy >= maxY {\n\t\tv.oy++\n\t} else if cy < 0 {\n\t\tif v.oy > 0 {\n\t\t\tv.oy--\n\t\t}\n\t} else {\n\t\tv.cy = cy\n\t}\n}",
"func (v *view) move_cursor_next_line() {\n\tc := v.cursor\n\tif !c.last_line() {\n\t\tc = cursor_location{c.line.next, c.line_num + 1, -1}\n\t\tv.move_cursor_to(c)\n\t} else {\n\t\tv.ctx.set_status(\"End of buffer\")\n\t}\n}",
"func (v *madman) move_cursor_next_line() {\n\tc := v.cursor\n\tif !c.last_line() {\n\t\tc = cursor_location{c.line.next, c.line_num + 1, -1}\n\t\tv.move_cursor_to(c)\n\t} else {\n\t\t//v.ctx.set_status(\"End of buffer\")\n\t}\n}",
"func (w *TextIter) SetLineOffset(charOnLine int) {\n\tw.Candy().Guify(\"gtk_text_iter_set_line_offset\", w, charOnLine)\n}",
"func (v *view) move_cursor_line_n_times(n int) {\n\tif n == 0 {\n\t\treturn\n\t}\n\n\tcursor := v.cursor.line\n\tfor cursor.prev != nil && n < 0 {\n\t\tcursor = cursor.prev\n\t\tv.cursor.line_num--\n\t\tn++\n\t}\n\tfor cursor.next != nil && n > 0 {\n\t\tcursor = cursor.next\n\t\tv.cursor.line_num++\n\t\tn--\n\t}\n\tv.cursor.line = cursor\n}",
"func (r *Reader) GetCurrentLineAndColumn() (line int, col int, ok error) {\n\treturn r.track.GetCurrentLineAndColumn()\n}",
"func (c *Context) Jump(pos int) {\n\tc.nextip = pos\n}",
"func (l *Lexer) stepCursor() {\n\tif l.rune == '\\n' {\n\t\tl.line++\n\t\tl.col = 0\n\t} else {\n\t\tl.col += l.width\n\t}\n}",
"func (r *Reader) GetLineAndColumn(byteOffset int) (line int, col int, ok error) {\n\treturn r.track.GetLineAndColumn(byteOffset)\n}",
"func (v *view) move_cursor_forward() {\n\tc := v.cursor\n\tif c.last_line() && c.eol() {\n\t\tv.ctx.set_status(\"End of buffer\")\n\t\treturn\n\t}\n\n\tc.move_one_rune_forward()\n\tv.move_cursor_to(c)\n}",
"func FindOffset(fileText string, line, column int) int {\n\t// we count our current line and column position\n\tcurrentCol := 1\n\tcurrentLine := 1\n\n\tfor offset, ch := range fileText {\n\t\t// see if we found where we wanted to go to\n\t\tif currentLine == line && currentCol == column {\n\t\t\treturn offset\n\t\t}\n\n\t\t// line break - increment the line counter and reset the column\n\t\tif ch == '\\n' {\n\t\t\tcurrentLine++\n\t\t\tcurrentCol = 1\n\t\t} else {\n\t\t\tcurrentCol++\n\t\t}\n\t}\n\treturn -1 //not found\n}",
"func (n Basejump) Selection() (startLine, startCol, endLine, endCol int, err error) {\n\tresult := make([]float32, 4)\n\tnv := n.nvim()\n\n\terr = nv.Call(\"getpos\", result, \"'<\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tstartLine = int(result[1])\n\tstartCol = int(result[2])\n\n\terr = nv.Call(\"getpos\", result, \"'>\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tendLine = int(result[1])\n\tendCol = int(result[2])\n\n\treturn\n}",
"func setConsoleCursorPosition(handle uintptr, isRelative bool, column int16, line int16) error {\n\tscreenBufferInfo, err := GetConsoleScreenBufferInfo(handle)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar position COORD\n\tif isRelative {\n\t\tposition.X = screenBufferInfo.CursorPosition.X + SHORT(column)\n\t\tposition.Y = screenBufferInfo.CursorPosition.Y + SHORT(line)\n\t} else {\n\t\tposition.X = SHORT(column)\n\t\tposition.Y = SHORT(line)\n\t}\n\treturn getError(setConsoleCursorPositionProc.Call(handle, marshal(position), 0))\n}",
"func (v *view) move_cursor_prev_line() {\n\tc := v.cursor\n\tif !c.first_line() {\n\t\tc = cursor_location{c.line.prev, c.line_num - 1, -1}\n\t\tv.move_cursor_to(c)\n\t} else {\n\t\tv.ctx.set_status(\"Beginning of buffer\")\n\t}\n}",
"func SwitchStmtPos(s *ast.SwitchStmt,) token.Pos"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Starting at `index` in string `s`, move forwards and backwards to find the longest substring around `index` that contains only characters in `chars` (character ranges such as `a-z` are expanded first).
|
func matching(s string, index int, chars string) string {
srunes := []rune(s)
// Bounds-check against the rune count rather than len(s) (bytes), so
// multi-byte UTF-8 input cannot index past the end of srunes below.
if index < 0 || index >= len(srunes) {
return ""
}
crunes := []rune(expandCharRanges(chars))
good := func(i int) bool {
for _, r := range crunes {
if srunes[i] == r {
return true
}
}
return false
}
if !good(index) {
return ""
}
left := index
for ; left >= 0; left-- {
if !good(left) {
break
}
}
right := index
for ; right < len(srunes); right++ {
if !good(right) {
break
}
}
return string(srunes[left+1 : right])
}
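A small demonstration of the expansion behavior, assuming expandCharRanges turns "a-z_" into the lowercase letters plus underscore (that helper is not shown in this snippet):

func matchingExample() {
// 'b' at index 4 sits inside "foo_bar"; the walk stops at the space.
word := matching("foo_bar baz", 4, "a-z_")
fmt.Println(word) // prints "foo_bar"
}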
|
[
"func LastIndexAny(s, chars string) int {\n\tfmt.Println(strings.LastIndexAny(\"gogopher\", \"go\")) // 3\n\tfmt.Println(strings.LastIndexAny(\"gogopher\", \"ogh\")) // 5\n\tfmt.Println(strings.LastIndexAny(\"gogopher\", \"gr\")) // 7\n\tfmt.Println(strings.LastIndexAny(\"gogopher\", \"rodent\")) // 7\n\treturn strings.LastIndexAny(s, chars)\n}",
"func CharAt(s string, index int) string {\n\tl := len(s)\n\tshortcut := index < 0 || index > l-1 || l == 0\n\tif shortcut {\n\t\treturn \"\"\n\t}\n\treturn s[index : index+1]\n}",
"func LastIndexFunc(s string, f func(rune) bool) int {\n\tfunction := func(c rune) bool {\n\t\treturn unicode.Is(unicode.Han, c)\n\t}\n\n\tfmt.Println(strings.LastIndexFunc(\"hello 世界\", function)) // 10 一个汉字貌似占3个位置\n\tfmt.Println(strings.LastIndexFunc(\"hello world\", function)) // -1\n\treturn strings.LastIndexFunc(s, f)\n}",
"func ShortestToChar(s string, c byte) []int {\n\tfromLeft := make([]int, len(s))\n\tfromRight := make([]int, len(s))\n\tlastLeftMatch := -1\n\tlastRightMatch := len(s)\n\n\tfor i := range s {\n\t\tif s[i] == c {\n\t\t\tlastLeftMatch = i\n\t\t}\n\t\tfromLeft[i] = lastLeftMatch\n\t\trx := len(s) - 1 - i\n\t\tif s[rx] == c {\n\t\t\tlastRightMatch = rx\n\t\t}\n\t\tfromRight[rx] = lastRightMatch\n\t}\n\n\tr := make([]int, len(s))\n\tfor j := range s {\n\t\tif fromLeft[j] == -1 {\n\t\t\tr[j] = fromRight[j] - j\n\t\t\tcontinue\n\t\t}\n\t\tif fromRight[j] == len(s) {\n\t\t\tr[j] = j - fromLeft[j]\n\t\t\tcontinue\n\t\t}\n\t\tif j-fromLeft[j] <= fromRight[j]-j {\n\t\t\tr[j] = j - fromLeft[j]\n\t\t\tcontinue\n\t\t}\n\t\tr[j] = fromRight[j] - j\n\t}\n\treturn r\n}",
"func longestMatch(source string, ic cursor, options []string) string {\n\tvar value []byte\n\tvar skipList []int\n\tvar match string\n\n\tcur := ic\n\tfor cur.pointer < uint(len(source)) {\n\t\tvalue = append(value, strings.ToLower(string(source[cur.pointer]))...)\n\t\tcur.pointer++\n\n\tmatch:\n\t\tfor i, option := range options {\n\t\t\tfor _, skip := range skipList {\n\t\t\t\tif i == skip {\n\t\t\t\t\tcontinue match\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Deal with cases like INT vs INTO\n\t\t\tif option == string(value) {\n\t\t\t\tskipList = append(skipList, i)\n\t\t\t\tif len(option) > len(match) {\n\t\t\t\t\tmatch = option\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tsharesPrefix := string(value) == option[:cur.pointer-ic.pointer]\n\t\t\ttooLong := len(value) > len(option)\n\t\t\tif tooLong || !sharesPrefix {\n\t\t\t\tskipList = append(skipList, i)\n\t\t\t}\n\t\t}\n\n\t\tif len(skipList) == len(options) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn match\n}",
"func (s *String) At(i int) rune {\n\t// ASCII is easy. Let the compiler catch the indexing error if there is one.\n\tif i < s.nonASCII {\n\t\treturn rune(s.str[i])\n\t}\n\n\t// Now we do need to know the index is valid.\n\tif i < 0 || i >= s.numRunes {\n\t\tpanic(outOfRange)\n\t}\n\n\tvar r rune\n\n\t// Five easy common cases: within 1 spot of bytePos/runePos, or the beginning, or the end.\n\t// With these cases, all scans from beginning or end work in O(1) time per rune.\n\tswitch {\n\n\tcase i == s.runePos-1: // backing up one rune\n\t\tr, s.width = utf8.DecodeLastRuneInString(s.str[0:s.bytePos])\n\t\ts.runePos = i\n\t\ts.bytePos -= s.width\n\t\treturn r\n\tcase i == s.runePos+1: // moving ahead one rune\n\t\ts.runePos = i\n\t\ts.bytePos += s.width\n\t\tfallthrough\n\tcase i == s.runePos:\n\t\tr, s.width = utf8.DecodeRuneInString(s.str[s.bytePos:])\n\t\treturn r\n\tcase i == 0: // start of string\n\t\tr, s.width = utf8.DecodeRuneInString(s.str)\n\t\ts.runePos = 0\n\t\ts.bytePos = 0\n\t\treturn r\n\n\tcase i == s.numRunes-1: // last rune in string\n\t\tr, s.width = utf8.DecodeLastRuneInString(s.str)\n\t\ts.runePos = i\n\t\ts.bytePos = len(s.str) - s.width\n\t\treturn r\n\t}\n\n\t// We need to do a linear scan. There are three places to start from:\n\t// 1) The beginning\n\t// 2) bytePos/runePos.\n\t// 3) The end\n\t// Choose the closest in rune count, scanning backwards if necessary.\n\tforward := true\n\tif i < s.runePos {\n\t\t// Between beginning and pos. Which is closer?\n\t\t// Since both i and runePos are guaranteed >= nonASCII, that's the\n\t\t// lowest location we need to start from.\n\t\tif i < (s.runePos-s.nonASCII)/2 {\n\t\t\t// Scan forward from beginning\n\t\t\ts.bytePos, s.runePos = s.nonASCII, s.nonASCII\n\t\t} else {\n\t\t\t// Scan backwards from where we are\n\t\t\tforward = false\n\t\t}\n\t} else {\n\t\t// Between pos and end. Which is closer?\n\t\tif i-s.runePos < (s.numRunes-s.runePos)/2 {\n\t\t\t// Scan forward from pos\n\t\t} else {\n\t\t\t// Scan backwards from end\n\t\t\ts.bytePos, s.runePos = len(s.str), s.numRunes\n\t\t\tforward = false\n\t\t}\n\t}\n\tif forward {\n\t\t// TODO: Is it much faster to use a range loop for this scan?\n\t\tfor {\n\t\t\tr, s.width = utf8.DecodeRuneInString(s.str[s.bytePos:])\n\t\t\tif s.runePos == i {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ts.runePos++\n\t\t\ts.bytePos += s.width\n\t\t}\n\t} else {\n\t\tfor {\n\t\t\tr, s.width = utf8.DecodeLastRuneInString(s.str[0:s.bytePos])\n\t\t\ts.runePos--\n\t\t\ts.bytePos -= s.width\n\t\t\tif s.runePos == i {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn r\n}",
"func LengthOfLongestSubstring(s string) int {\n\tcache := [128]bool{}\n\tmax, lengh, cursor := 0, 0, 0\n\tfor i := range s {\n\t\tfor cache[s[i]] {\n\t\t\tcache[s[cursor]] = false\n\t\t\tlengh--\n\t\t\tcursor++\n\t\t}\n\t\tlengh++\n\t\tcache[s[i]] = true\n\t\tif lengh > max {\n\t\t\tmax = lengh\n\t\t}\n\t}\n\treturn max\n}",
"func maxPower(s string) int {\n var last_char byte = 'A'\n last_len :=0\n max_len := 0\n for i:=0; i<len(s); i++ {\n cur_char := s[i]\n if cur_char==last_char{\n last_len++\n }else{\n if last_len > max_len {\n max_len = last_len\n }\n last_len = 1\n last_char = cur_char\n }\n }\n if last_len > max_len {\n max_len = last_len\n }\n return max_len\n}",
"func strTillNextMarker(indices map[int]string, startInd int, pattern string) string {\n\t// initialize with max value which is length of pattern\n\tnextMarkerStartInd := len(pattern)\n\tfor ind := range indices {\n\t\tif ind > startInd {\n\t\t\tnextMarkerStartInd = min(ind-startInd, nextMarkerStartInd)\n\t\t}\n\t}\n\treturn substrOfLen(pattern, startInd, nextMarkerStartInd)\n}",
"func LengthOfLongestSubstring(s string) int {\n\tif len(s) <= 1 {\n\t\treturn len(s)\n\t}\n\n\t// try sliding windows and keep tmp map\n\n\tmaxLen := 1 // must be at least one at this point\n\tfor i := range s {\n\t\tlu := unique(s[i:])\n\t\tmaxLen = max(maxLen, lu)\n\t\ti += lu\n\t}\n\treturn maxLen\n}",
"func dfs(s []byte, i int, ans *[]string) {\n\tif i == len(s) {\n // All characters have been transformed, add it to answers.\n\t\t*ans = append(*ans, string(s))\n\t\treturn\n\t}\n\n // Original string.\n\tdfs(s, i+1, ans)\n\n // Transform string, if character at index: i is an alphabet.\n\tif isAlphabet(s[i]) {\n\t\ts[i] ^= (1 << 5)\n\t\tdfs(s, i+1, ans)\n\t}\n}",
"func PalindromeIndex(s string) int {\n\tlast := len(s) - 1\n\n\tfor i := 0; i < len(s)/2; i++ { // O(n)\n\t\ttail := last - i\n\t\tif rune(s[tail]) != rune(s[i]) {\n\t\t\tif IsPalindrome(s[i:tail]) { // O(n)\n\t\t\t\treturn tail\n\t\t\t} else if IsPalindrome(s[i+1 : tail+1]) { // O(n)\n\t\t\t\treturn i\n\t\t\t} else {\n\t\t\t\treturn -1\n\t\t\t}\n\t\t}\n\t}\n\n\treturn -1\n}",
"func main() {\n\tinput := getInput()\n\tset := deduplicatedChars(input.s)\n\tmaxMatchLength := 0\n\tfor x := 0; x < len(set)-1; x++ {\n\t\tfor y := x + 1; y < len(set); y++ {\n\t\t\ts := matchTwoChars(input.s, set[x], set[y])\n\t\t\tl := len(s)\n\t\t\tif doesAlternate(s) && maxMatchLength < l {\n\t\t\t\tmaxMatchLength = l\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Println(maxMatchLength)\n}",
"func longest(a, b string, isSubsequence bool) string {\n\tif len(a) == 0 || len(b) == 0 {\n\t\treturn \"\"\n\t}\n\truneA := []rune(a)\n\truneB := []rune(b)\n\tcell := make([][]int, len(runeA)*len(runeB))\n\tfor i := range cell {\n\t\tcell[i] = make([]int, len(runeB))\n\t}\n\n\tsubEndIndex := 0\n\tlongest := 0\n\tfor i, aa := range runeA {\n\t\tfor j, bb := range runeB {\n\t\t\tif aa == bb {\n\t\t\t\tif i > 0 && j > 0 {\n\t\t\t\t\tcell[i][j] = cell[i-1][j-1] + 1\n\t\t\t\t} else {\n\t\t\t\t\tcell[i][j] = 1\n\t\t\t\t}\n\t\t\t\tif cell[i][j] > longest {\n\t\t\t\t\tlongest = cell[i][j]\n\t\t\t\t\tsubEndIndex = i\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif i > 0 && j > 0 && isSubsequence {\n\t\t\t\t\tcell[i][j] = max(cell[i-1][j], cell[i][j-1])\n\t\t\t\t} else {\n\t\t\t\t\tcell[i][j] = 0\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tsub := string(runeA[subEndIndex-longest+1 : subEndIndex+1])\n\treturn sub\n}",
"func minLengthSubstring(s string, t string) int {\n\ttarget := make(map[byte]int)\n\tfor _, c := range t {\n\t\ttarget[byte(c)]++\n\t}\n\n\tseenChars := make(map[byte]int)\n\tminSubstrLen := len(s)\n\tsi := -1\n\tei := 0\n\n\tfor ei < len(s) {\n\t\tsi = findNextStartIdx(s, target, seenChars, si+1)\n\t\tif si >= len(s) {\n\t\t\tbreak\n\t\t}\n\t\tif si >= ei {\n\t\t\tseenChars[s[si]]++\n\t\t\tei = si + 1\n\t\t}\n\t\tei = findNextEndIdx(s, target, seenChars, ei)\n\t\tif ei == -1 {\n\t\t\tbreak\n\t\t}\n\t\tif ei-si < minSubstrLen {\n\t\t\tminSubstrLen = ei - si\n\t\t}\n\n\t\tdropSeenChar(seenChars, s[si])\n\t}\n\n\treturn minSubstrLen\n}",
"func (w WordFinder) longestFrom(sa *suffixarray.Index, col, row int, prefix string) string {\n\tlongest := \"\"\n\tr := w.grid[row][col]\n\tword := prefix + string(r)\n\tif len(word) > 6 {\n\t\tlog.Print(word)\n\t}\n\t// Are there any words starting with this\n\thits := sa.Lookup([]byte(\"\\x00\"+word), 1)\n\tif len(hits) > 0 {\n\t\t// If so, then we should keep looking for longer words (recursive)\n\t\tfor _, pos := range w.validMoves(col, row) {\n\t\t\tsub := w.longestFrom(sa, pos.col, pos.row, word)\n\t\t\tif len(sub) > len(longest) {\n\t\t\t\t// longest word so far\n\t\t\t\tlongest = sub\n\t\t\t}\n\t\t}\n\t}\n\t// We did not find a longer word while recursing,\n\t// then check if \"word\" itself is aword from the word list.\n\tif len(longest) == 0 {\n\t\thits = sa.Lookup([]byte(\"\\x00\"+word+\"\\x00\"), 1)\n\t\tif len(hits) > 0 {\n\t\t\t//log.Printf(\"Found word: %s\", string(word))\n\t\t\tlongest = word\n\t\t}\n\t}\n\n\treturn longest\n}",
"func longestPrefix(src *suffixarray.Index, dst []byte) (offset, length int) {\n\t// First the simple edge simple cases. Is it smaller than minCopy? Does\n\t// it have a prefix of at least minCopy?\n\tif len(dst) < minCopy {\n\t\treturn 0, -1\n\t}\n\n\t// If there's no prefix at all of at least length minCopy,\n\t// don't bother searching for one.\n\tif result := src.Lookup(dst[:minCopy], 1); len(result) == 0 {\n\t\treturn 0, -1\n\t}\n\n\t// If the entire dst exists somewhere in src, return the first\n\t// one found.\n\tif result := src.Lookup(dst, 1); len(result) > 0 {\n\t\treturn result[0], len(dst)\n\t}\n\n\t// We know there's a substring somewhere but the whole thing\n\t// isn't a substring, brute force the location of the longest\n\t// substring with a binary search of our suffix array.\n\tlength = -1\n\tminIdx := minCopy\n\tmaxIdx := len(dst)\n\tfor i := minIdx; maxIdx-minIdx > 1; i = ((maxIdx - minIdx) / 2) + minIdx {\n\t\tif result := src.Lookup(dst[:i], 1); result != nil {\n\t\t\toffset = result[0]\n\t\t\tlength = i\n\t\t\tminIdx = i\n\t\t} else {\n\t\t\tmaxIdx = i - 1\n\t\t}\n\t}\n\treturn\n}",
"func longestPalindrome(s string) string {\n\tll := len(s)\n\tif ll == 0 {\n\t\treturn \"\"\n\t}\n\n\tvar l, r, pl, pr int\n\tfor r < ll {\n\t\t// gobble up dup chars\n\t\tfor r+1 < ll && s[l] == s[r+1] {\n\t\t\t// log.Println(\"gobble\", string(s[l]), string(s[r+1]))\n\t\t\tr++\n\t\t}\n\t\t// find size of this palindrome\n\t\tfor l-1 >= 0 && r+1 < ll && s[l-1] == s[r+1] {\n\t\t\t// log.Println(\"find size of palindrome\", string(s[l-1]), string(s[r+1]))\n\t\t\tl--\n\t\t\tr++\n\t\t}\n\t\tif r-l > pr-pl {\n\t\t\tpl, pr = l, r\n\t\t}\n\t\t// reset to next mid point\n\t\tl = (l+r)/2 + 1\n\t\tr = l\n\t}\n\treturn s[pl : pr+1]\n}",
"func longestSubstringWithKDistinct(str []byte, K int) int {\n\tstart, max := 0, 0\n\tcount := make(map[byte]int)\n\n\tfor end := 0; end < len(str); end++ {\n\t\tcount[str[end]]++\n\n\t\tfor len(count) > K {\n\t\t\tcount[str[start]]--\n\t\t\tif count[str[start]] == 0 {\n\t\t\t\tdelete(count, str[start])\n\t\t\t}\n\t\t\tstart++\n\t\t}\n\t\tmax = getMax(max, end-start+1)\n\t}\n\treturn max\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewPodInformer creates a new instance of PodInformer. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
|
func NewPodInformer(t mockConstructorTestingTNewPodInformer) *PodInformer {
mock := &PodInformer{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
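A sketch of typical use in a test, assuming the mock was generated by mockery so expectations are set with the usual On(...) calls (illustrative here, not confirmed by this snippet); the constructor's t.Cleanup hook asserts them when the test finishes:

func TestConsumesPodInformer(t *testing.T) {
informer := NewPodInformer(t)
// Set expectations on informer, then hand it to the code under test;
// AssertExpectations runs automatically via the registered cleanup.
_ = informer
}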
|
[
"func newInformerWatchPod(ctx context.Context, c clientset.Interface, podNamespace, podName string, checkPodStatusFunc func(p *v1.Pod)) cache.Controller {\n\t_, controller := cache.NewInformer(\n\t\t&cache.ListWatch{\n\t\t\tListFunc: func(options metav1.ListOptions) (runtime.Object, error) {\n\t\t\t\toptions.FieldSelector = fields.SelectorFromSet(fields.Set{\"metadata.name\": podName}).String()\n\t\t\t\tobj, err := c.CoreV1().Pods(podNamespace).List(ctx, options)\n\t\t\t\treturn runtime.Object(obj), err\n\t\t\t},\n\t\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\t\toptions.FieldSelector = fields.SelectorFromSet(fields.Set{\"metadata.name\": podName}).String()\n\t\t\t\treturn c.CoreV1().Pods(podNamespace).Watch(ctx, options)\n\t\t\t},\n\t\t},\n\t\t&v1.Pod{},\n\t\t0,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\tp, ok := obj.(*v1.Pod)\n\t\t\t\tif ok {\n\t\t\t\t\tcheckPodStatusFunc(p)\n\t\t\t\t}\n\t\t\t},\n\t\t\tUpdateFunc: func(oldObj, newObj interface{}) {\n\t\t\t\tp, ok := newObj.(*v1.Pod)\n\t\t\t\tif ok {\n\t\t\t\t\tcheckPodStatusFunc(p)\n\t\t\t\t}\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\tp, ok := obj.(*v1.Pod)\n\t\t\t\tif ok {\n\t\t\t\t\tcheckPodStatusFunc(p)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t)\n\treturn controller\n}",
"func newInformerWatchPod(ctx context.Context, f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]metav1.Time, podType string) cache.Controller {\n\tns := f.Namespace.Name\n\tcheckPodRunning := func(p *v1.Pod) {\n\t\tmutex.Lock()\n\t\tdefer mutex.Unlock()\n\t\tdefer ginkgo.GinkgoRecover()\n\n\t\tif p.Status.Phase == v1.PodRunning {\n\t\t\tif _, found := watchTimes[p.Name]; !found {\n\t\t\t\twatchTimes[p.Name] = metav1.Now()\n\t\t\t}\n\t\t}\n\t}\n\n\t_, controller := cache.NewInformer(\n\t\t&cache.ListWatch{\n\t\t\tListFunc: func(options metav1.ListOptions) (runtime.Object, error) {\n\t\t\t\toptions.LabelSelector = labels.SelectorFromSet(labels.Set{\"type\": podType}).String()\n\t\t\t\tobj, err := f.ClientSet.CoreV1().Pods(ns).List(ctx, options)\n\t\t\t\treturn runtime.Object(obj), err\n\t\t\t},\n\t\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\t\toptions.LabelSelector = labels.SelectorFromSet(labels.Set{\"type\": podType}).String()\n\t\t\t\treturn f.ClientSet.CoreV1().Pods(ns).Watch(ctx, options)\n\t\t\t},\n\t\t},\n\t\t&v1.Pod{},\n\t\t0,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\tp, ok := obj.(*v1.Pod)\n\t\t\t\tframework.ExpectEqual(ok, true)\n\t\t\t\tgo checkPodRunning(p)\n\t\t\t},\n\t\t\tUpdateFunc: func(oldObj, newObj interface{}) {\n\t\t\t\tp, ok := newObj.(*v1.Pod)\n\t\t\t\tframework.ExpectEqual(ok, true)\n\t\t\t\tgo checkPodRunning(p)\n\t\t\t},\n\t\t},\n\t)\n\treturn controller\n}",
"func (h *Handler) PodInformer() informerscore.PodInformer {\n\treturn h.informerFactory.Core().V1().Pods()\n}",
"func NewWatcher(client Client, syncPeriod, cleanupTimeout time.Duration, host string) Watcher {\n\tctx, cancel := context.WithCancel(context.Background())\n\treturn &podWatcher{\n\t\tclient: client,\n\t\tcleanupTimeout: cleanupTimeout,\n\t\tsyncPeriod: syncPeriod,\n\t\tnodeFilter: k8s.QueryParam(\"fieldSelector\", \"spec.nodeName=\"+host),\n\t\tlastResourceVersion: \"0\",\n\t\tctx: ctx,\n\t\tstop: cancel,\n\t\tpods: make(map[string]*Pod),\n\t\tdeleted: make(map[string]time.Time),\n\t\tbus: bus.New(\"kubernetes\"),\n\t}\n}",
"func (amc *AppMonitorController) newAppMonitorControllerInformer() *AppMonitorControllerInformer {\n\tpodStore, podController := amc.newPodInformer()\n\tappMonitorStore, appMonitorController := amc.newAppMonitorInformer()\n\n\treturn &AppMonitorControllerInformer{\n\t\tpodStore: podStore,\n\t\tpodController: podController,\n\t\tappMonitorStore: appMonitorStore,\n\t\tappMonitorController: appMonitorController,\n\t}\n}",
"func NewCleanerMock(t minimock.Tester) *CleanerMock {\n\tm := &CleanerMock{t: t}\n\tif controller, ok := t.(minimock.MockController); ok {\n\t\tcontroller.RegisterMocker(m)\n\t}\n\n\tm.NotifyAboutPulseMock = mCleanerMockNotifyAboutPulse{mock: m}\n\tm.NotifyAboutPulseMock.callArgs = []*CleanerMockNotifyAboutPulseParams{}\n\n\tm.StopMock = mCleanerMockStop{mock: m}\n\n\treturn m\n}",
"func NewDetachedNotifierMock(t minimock.Tester) *DetachedNotifierMock {\n\tm := &DetachedNotifierMock{t: t}\n\tif controller, ok := t.(minimock.MockController); ok {\n\t\tcontroller.RegisterMocker(m)\n\t}\n\n\tm.NotifyMock = mDetachedNotifierMockNotify{mock: m}\n\tm.NotifyMock.callArgs = []*DetachedNotifierMockNotifyParams{}\n\n\treturn m\n}",
"func NewInformer(\n\tc *Controller,\n\tresourceFuncs k8scache.ResourceEventHandlerFuncs,\n\tinterval time.Duration,\n) (k8scache.Indexer, k8scache.Controller) {\n\tlistWatcher := k8scache.NewListWatchFromClient(\n\t\tc.ncr.StreamingV1alpha1().RESTClient(),\n\n\t\t// Plural name of the CRD\n\t\t\"natsstreamingclusters\",\n\n\t\t// Namespace where the clusters will be created.\n\t\tc.opts.Namespace,\n\t\tk8sfields.Everything(),\n\t)\n\treturn k8scache.NewIndexerInformer(\n\t\tlistWatcher,\n\t\t&stanv1alpha1.NatsStreamingCluster{},\n\n\t\t// How often it will poll for the state\n\t\t// of the resources.\n\t\tinterval,\n\n\t\t// Handlers\n\t\tresourceFuncs,\n\t\tk8scache.Indexers{},\n\t)\n}",
"func newPod(runtime *Runtime) (*Pod, error) {\n\tpod := new(Pod)\n\tpod.config = new(PodConfig)\n\tpod.config.ID = stringid.GenerateNonCryptoID()\n\tpod.config.Labels = make(map[string]string)\n\tpod.config.CreatedTime = time.Now()\n\tpod.config.InfraContainer = new(InfraContainerConfig)\n\tpod.state = new(podState)\n\tpod.runtime = runtime\n\n\treturn pod, nil\n}",
"func NewPodController(cfg PodControllerConfig) (*PodController, error) {\n\tif cfg.PodClient == nil {\n\t\treturn nil, errdefs.InvalidInput(\"missing core client\")\n\t}\n\tif cfg.EventRecorder == nil {\n\t\treturn nil, errdefs.InvalidInput(\"missing event recorder\")\n\t}\n\tif cfg.PodInformer == nil {\n\t\treturn nil, errdefs.InvalidInput(\"missing pod informer\")\n\t}\n\tif cfg.ConfigMapInformer == nil {\n\t\treturn nil, errdefs.InvalidInput(\"missing config map informer\")\n\t}\n\tif cfg.SecretInformer == nil {\n\t\treturn nil, errdefs.InvalidInput(\"missing secret informer\")\n\t}\n\tif cfg.ServiceInformer == nil {\n\t\treturn nil, errdefs.InvalidInput(\"missing service informer\")\n\t}\n\tif cfg.Provider == nil {\n\t\treturn nil, errdefs.InvalidInput(\"missing provider\")\n\t}\n\tif cfg.SyncPodsFromKubernetesRateLimiter == nil {\n\t\tcfg.SyncPodsFromKubernetesRateLimiter = workqueue.DefaultControllerRateLimiter()\n\t}\n\tif cfg.DeletePodsFromKubernetesRateLimiter == nil {\n\t\tcfg.DeletePodsFromKubernetesRateLimiter = workqueue.DefaultControllerRateLimiter()\n\t}\n\tif cfg.SyncPodStatusFromProviderRateLimiter == nil {\n\t\tcfg.SyncPodStatusFromProviderRateLimiter = workqueue.DefaultControllerRateLimiter()\n\t}\n\trm, err := manager.NewResourceManager(cfg.PodInformer.Lister(), cfg.SecretInformer.Lister(), cfg.ConfigMapInformer.Lister(), cfg.ServiceInformer.Lister())\n\tif err != nil {\n\t\treturn nil, pkgerrors.Wrap(err, \"could not create resource manager\")\n\t}\n\n\tpc := &PodController{\n\t\tclient: cfg.PodClient,\n\t\tpodsInformer: cfg.PodInformer,\n\t\tpodsLister: cfg.PodInformer.Lister(),\n\t\tprovider: cfg.Provider,\n\t\tresourceManager: rm,\n\t\tready: make(chan struct{}),\n\t\tdone: make(chan struct{}),\n\t\trecorder: cfg.EventRecorder,\n\t\tpodEventFilterFunc: cfg.PodEventFilterFunc,\n\t}\n\n\tpc.syncPodsFromKubernetes = queue.New(cfg.SyncPodsFromKubernetesRateLimiter, \"syncPodsFromKubernetes\", pc.syncPodFromKubernetesHandler, cfg.SyncPodsFromKubernetesShouldRetryFunc)\n\tpc.deletePodsFromKubernetes = queue.New(cfg.DeletePodsFromKubernetesRateLimiter, \"deletePodsFromKubernetes\", pc.deletePodsFromKubernetesHandler, cfg.DeletePodsFromKubernetesShouldRetryFunc)\n\tpc.syncPodStatusFromProvider = queue.New(cfg.SyncPodStatusFromProviderRateLimiter, \"syncPodStatusFromProvider\", pc.syncPodStatusFromProviderHandler, cfg.SyncPodStatusFromProviderShouldRetryFunc)\n\n\treturn pc, nil\n}",
"func (m *MockNonNamespacedControllerInterface[T, TList]) Informer() cache.SharedIndexInformer {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Informer\")\n\tret0, _ := ret[0].(cache.SharedIndexInformer)\n\treturn ret0\n}",
"func newFakePod(httpServer bool) (*fakePod, error) {\n\tln, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to bind: %v\", err)\n\t}\n\tf := &fakePod{ln: ln, http: httpServer}\n\n\t// spawn an http server or a TCP server that counts the number of connections received\n\tif httpServer {\n\t\tvar mu sync.Mutex\n\t\tvisitors := map[string]struct{}{}\n\t\tgo http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tmu.Lock()\n\t\t\tdefer mu.Unlock()\n\t\t\tif _, ok := visitors[r.RemoteAddr]; !ok {\n\t\t\t\tatomic.AddInt64(&f.numConnection, 1)\n\t\t\t\tvisitors[r.RemoteAddr] = struct{}{}\n\t\t\t}\n\t\t}))\n\t} else {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tconn, err := ln.Accept()\n\t\t\t\tif err != nil {\n\t\t\t\t\t// exit when the listener is closed\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tatomic.AddInt64(&f.numConnection, 1)\n\t\t\t\t// handle request but not block\n\t\t\t\tgo func(c net.Conn) {\n\t\t\t\t\tdefer c.Close()\n\t\t\t\t\t// read but swallow the errors since the probe doesn't send data\n\t\t\t\t\tbuffer := make([]byte, 1024)\n\t\t\t\t\tc.Read(buffer)\n\t\t\t\t\t// respond\n\t\t\t\t\tconn.Write([]byte(\"Hi back!\\n\"))\n\t\t\t\t}(conn)\n\n\t\t\t}\n\t\t}()\n\t}\n\treturn f, nil\n\n}",
"func (m *MockControllerMeta) Informer() cache.SharedIndexInformer {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Informer\")\n\tret0, _ := ret[0].(cache.SharedIndexInformer)\n\treturn ret0\n}",
"func New(\n\tidpCache UpstreamLDAPIdentityProviderICache,\n\tclient pinnipedclientset.Interface,\n\tldapIdentityProviderInformer idpinformers.LDAPIdentityProviderInformer,\n\tsecretInformer corev1informers.SecretInformer,\n\twithInformer pinnipedcontroller.WithInformerOptionFunc,\n) controllerlib.Controller {\n\treturn newInternal(\n\t\tidpCache,\n\t\t// start with an empty cache\n\t\tupstreamwatchers.NewValidatedSettingsCache(),\n\t\t// nil means to use a real production dialer when creating objects to add to the cache\n\t\tnil,\n\t\tclient,\n\t\tldapIdentityProviderInformer,\n\t\tsecretInformer,\n\t\twithInformer,\n\t)\n}",
"func NewPodAnnotator(kube Interface, pod *kapi.Pod) Annotator {\n\treturn &podAnnotator{\n\t\tkube: kube,\n\t\tpod: pod,\n\t\tchanges: make(map[string]*action),\n\t}\n}",
"func newPodChaperons(c *MulticlusterV1alpha1Client, namespace string) *podChaperons {\n\treturn &podChaperons{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}",
"func New() *MockMailer {\n\treturn new(MockMailer)\n}",
"func NewPodMutator(log *zap.SugaredLogger, config *config.Config) admission.Handler {\n\tmutatorLog := log.Named(\"quarks-link-pod-mutator\")\n\tmutatorLog.Info(\"Creating a Pod mutator for QuarksLink\")\n\n\treturn &PodMutator{\n\t\tlog: mutatorLog,\n\t\tconfig: config,\n\t}\n}",
"func NewPodLoggingController(informerFactory informers.SharedInformerFactory) *PodLoggingController {\n\tpodInformer := informerFactory.Core().V1().Pods()\n\tlister := podInformer.Lister()\n\tgo func() {\n\t\tfor {\n\t\t\ttime.Sleep(time.Second * 10)\n\t\t\tglog.Info(podInformer.Informer().HasSynced())\n\t\t\tpods, err := lister.List(labels.Everything())\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tfor _, p := range pods {\n\t\t\t\tglog.Infof(\"Pod: [%s]\", p.Name)\n\t\t\t}\n\t\t}\n\t}()\n\n\tc := &PodLoggingController{\n\t\tinformerFactory: informerFactory,\n\t\tpodInformer: podInformer,\n\t}\n\tpodInformer.Informer().AddEventHandler(\n\t\t// Your custom resource event handlers.\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\t// Called on creation\n\t\t\tAddFunc: c.podAdd,\n\t\t\t// Called on resource update and every resyncPeriod on existing resources.\n\t\t\tUpdateFunc: c.podUpdate,\n\t\t\t// Called on resource deletion.\n\t\t\tDeleteFunc: c.podDelete,\n\t\t},\n\t)\n\treturn c\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetConfig parses the Postgresql connection settings from the environment. DEPRECATED: callers should use the Postgresql env vars directly instead of this wrapper.
|
func GetConfig() (*pgx.ConnConfig, error) {
config, err := pgx.ParseConfig("")
if err != nil {
return nil, err
}
// default the application_name on the embedded pgconn.Config
addAppname(&config.Config)
return config, nil
}
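A minimal sketch of the deprecated path, assuming the standard PG* environment variables are set; connectViaGetConfig is an illustrative helper name:

func connectViaGetConfig(ctx context.Context) (*pgx.Conn, error) {
cfg, err := GetConfig()
if err != nil {
return nil, err
}
// The defaulted application_name makes the session identifiable
// in pg_stat_activity.
return pgx.ConnectConfig(ctx, cfg)
}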
|
[
"func GetDBConfig(params ...string) (Configuration, error) {\n\tconfiguration := Configuration{}\n\tenv := \"dev\"\n\tif len(params) > 0 {\n\t\tenv = params[0]\n\t}\n\tfileName := fmt.Sprintf(\"./%s_config.json\", env)\n\terr := gonfig.GetConf(fileName, &configuration)\n\treturn configuration, err\n}",
"func (c *Configuration) GetPostgresConfig() string {\n\treturn fmt.Sprintf(\"host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=%d\",\n\t\tc.GetPostgresHost(),\n\t\tc.GetPostgresPort(),\n\t\tc.GetPostgresUser(),\n\t\tc.GetPostgresPassword(),\n\t\tc.GetPostgresDatabase(),\n\t\tc.GetPostgresSSLMode(),\n\t\tc.GetPostgresConnectionTimeout())\n}",
"func ConfigFromEnv() *ConnectionOptions {\n\thost := flag.String(\"database.host\", env.String(\"DATABASE_HOST\", \"localhost\"), \"PostgreSQL server host\")\n\tport := flag.Int(\"database.port\", env.Int(\"DATABASE_PORT\", 5432), \"PostgreSQL server port\")\n\tname := flag.String(\"database.name\", env.String(\"DATABASE_NAME\", \"symptomatic\"), \"PostgreSQL database name\")\n\tuser := flag.String(\"database.user\", env.String(\"DATABASE_USER\", \"symptomatic\"), \"PostgreSQL server user\")\n\tpassword := flag.String(\"database.password\", env.String(\"DATABASE_PASSWORD\", \"symptomatic\"), \"PostgreSQL server password\")\n\tsslMode := flag.Bool(\"database.ssl\", env.Bool(\"DATABASE_SSL\", false), \"PostgreSQL server ssl mode\")\n\n\treturn &ConnectionOptions{\n\t\tHost: *host,\n\t\tPort: *port,\n\t\tUser: *user,\n\t\tPassword: *password,\n\t\tDBName: *name,\n\t\tSSL: *sslMode,\n\t}\n}",
"func getDBConfig(prefix string) DatabaseConfigurations {\n\tvar c DatabaseConfigurations\n\terr := envconfig.Process(prefix, &c)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif c.Host == \"\" || c.Port == 0 || c.User == \"\" || c.Password == \"\" || c.DBName == \"\" {\n\t\tlog.Fatalf(\"failed to get database configurations. Please set them in environments variables. It is now %v\", c)\n\t}\n\n\treturn c\n}",
"func GetNotifyPostgres(postgresKVS map[string]config.KVS) (map[string]target.PostgreSQLArgs, error) {\n\tpsqlTargets := make(map[string]target.PostgreSQLArgs)\n\tfor k, kv := range config.Merge(postgresKVS, target.EnvPostgresEnable, DefaultPostgresKVS) {\n\t\tenableEnv := target.EnvPostgresEnable\n\t\tif k != config.Default {\n\t\t\tenableEnv = enableEnv + config.Default + k\n\t\t}\n\n\t\tenabled, err := config.ParseBool(env.Get(enableEnv, kv.Get(config.Enable)))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !enabled {\n\t\t\tcontinue\n\t\t}\n\n\t\tqueueLimitEnv := target.EnvPostgresQueueLimit\n\t\tif k != config.Default {\n\t\t\tqueueLimitEnv = queueLimitEnv + config.Default + k\n\t\t}\n\n\t\tqueueLimit, err := strconv.Atoi(env.Get(queueLimitEnv, kv.Get(target.PostgresQueueLimit)))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tformatEnv := target.EnvPostgresFormat\n\t\tif k != config.Default {\n\t\t\tformatEnv = formatEnv + config.Default + k\n\t\t}\n\n\t\tconnectionStringEnv := target.EnvPostgresConnectionString\n\t\tif k != config.Default {\n\t\t\tconnectionStringEnv = connectionStringEnv + config.Default + k\n\t\t}\n\n\t\ttableEnv := target.EnvPostgresTable\n\t\tif k != config.Default {\n\t\t\ttableEnv = tableEnv + config.Default + k\n\t\t}\n\n\t\tqueueDirEnv := target.EnvPostgresQueueDir\n\t\tif k != config.Default {\n\t\t\tqueueDirEnv = queueDirEnv + config.Default + k\n\t\t}\n\n\t\tmaxOpenConnectionsEnv := target.EnvPostgresMaxOpenConnections\n\t\tif k != config.Default {\n\t\t\tmaxOpenConnectionsEnv = maxOpenConnectionsEnv + config.Default + k\n\t\t}\n\n\t\tmaxOpenConnections, cErr := strconv.Atoi(env.Get(maxOpenConnectionsEnv, kv.Get(target.PostgresMaxOpenConnections)))\n\t\tif cErr != nil {\n\t\t\treturn nil, cErr\n\t\t}\n\n\t\tpsqlArgs := target.PostgreSQLArgs{\n\t\t\tEnable: enabled,\n\t\t\tFormat: env.Get(formatEnv, kv.Get(target.PostgresFormat)),\n\t\t\tConnectionString: env.Get(connectionStringEnv, kv.Get(target.PostgresConnectionString)),\n\t\t\tTable: env.Get(tableEnv, kv.Get(target.PostgresTable)),\n\t\t\tQueueDir: env.Get(queueDirEnv, kv.Get(target.PostgresQueueDir)),\n\t\t\tQueueLimit: uint64(queueLimit),\n\t\t\tMaxOpenConnections: maxOpenConnections,\n\t\t}\n\t\tif err = psqlArgs.Validate(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpsqlTargets[k] = psqlArgs\n\t}\n\n\treturn psqlTargets, nil\n}",
"func getPostgresConnString() string {\n\tconfig := config.New()\n\treturn fmt.Sprintf(\"postgres://%s:%s@%s:%d/%s\",\n\t\tconfig.Postgres.User, config.Postgres.Passwd, config.Postgres.Host, config.Postgres.Port, config.Postgres.DatabaseName)\n}",
"func dbConfig() *databaseConfig {\n\treturn &databaseConfig{\n\t\tclient: getEnv(\"DB_CLIENT\", \"postgres\"),\n\t\tuser: getEnv(\"DB_USER\", \"postgres\"),\n\t\tpassword: getEnv(\"DB_PASSWORD\", \"\"),\n\t\tdbName: getEnv(\"DB_NAME\", \"postgres\"),\n\t\thost: getEnv(\"DB_HOST\", \"localhost\"),\n\t\tport: getEnv(\"DB_PORT\", \"5432\"),\n\t}\n}",
"func (c *Config) GetPostgresSSLMode() string {\n\treturn c.v.GetString(varPostgresSSLMode)\n}",
"func (c Config) GetConnectionString() string {\n\treturn fmt.Sprintf(\"user=%s password=%s host=%s port=%d dbname=%s sslmode=%s\",\n\t\tc.PostgreSQLUserName,\n\t\tc.PostgreSQLPassword,\n\t\tc.PostgreSQLHost,\n\t\tc.PostgreSQLPort,\n\t\tc.PostgreSQLDBName,\n\t\tc.SSLMode)\n}",
"func getDatabaseConfig() *databaseConfig {\n\treturn &databaseConfig{\n\t\tDBPort: GetEnvConfig(\"db.port\"),\n\t\tDBDatabase: GetEnvConfig(\"db.database\"),\n\t\tDBHost: GetEnvConfig(\"db.host\"),\n\t\tDBUsername: GetEnvConfig(\"db.username\"),\n\t\tDBPassword: GetEnvConfig(\"db.password\"),\n\t}\n}",
"func PostgresConnectionString(sslmode string) string {\n\t// settings := Settings(path)\n\n\t// connection := []string{\n\t// \t\"host=\", settings[\"host\"], \" \",\n\t// \t\"password=\", settings[\"password\"], \" \",\n\t// \t\"user=\", settings[\"username\"], \" \",\n\t// \t\"dbname=\", settings[\"database\"], \" \",\n\t// \t\"sslmode=\", sslmode}\n\n\thost := os.Getenv(\"APP_HOST\")\n\tpassword := os.Getenv(\"PGDB_PASSWORD\")\n\tuser := os.Getenv(\"PGDB_USER\")\n\tdbname := os.Getenv(\"PG_DB\")\n\n\tconnection := []string{\n\t\t\"host=\", host, \" \",\n\t\t\"password=\", password, \" \",\n\t\t\"user=\", user, \" \",\n\t\t\"dbname=\", dbname, \" \",\n\t\t\"sslmode=\", sslmode}\n\n\treturn strings.Join(connection, \"\")\n}",
"func GetConfig() {\n\tgodotenv.Load()\n}",
"func DbConfig() {\n\tdb, e = gorm.Open(\"postgres\", \"host=192.168.77.40 port=5439 user=postgres password=testpassword dbname=postgres sslmode=disable\")\n\tif e != nil {\n\t\tfmt.Println(e)\n\t} else {\n\t\tfmt.Println(\"Connection Established\")\n\t}\n\tdb.SingularTable(true)\n}",
"func GetConfig() *xld.Config {\n\n\treturn &xld.Config{\n\t\tUser: viper.GetString(\"user\"),\n\t\tPassword: viper.GetString(\"password\"),\n\t\tHost: viper.GetString(\"host\"),\n\t\tPort: viper.GetString(\"port\"),\n\t\tContext: viper.GetString(\"context\"),\n\t\tScheme: viper.GetString(\"scheme\"),\n\t}\n\n}",
"func DBReplSetGetConfig() (bson.M, error) {\n\tresult := bson.M{}\n\terr := _instance.Session.Run(bson.D{{Name: \"replSetGetConfig\", Value: 1}}, &result)\n\treturn result, err\n}",
"func (p *postgresEmbedded) GetConnectionOptions() string {\n\t//return fmt.Sprintf(\"postgresql://127.0.0.1:%d/postgres?sslmode=disable\", p.Port)\n\treturn fmt.Sprintf(\"port=%d dbname=postgres sslmode=disable\", p.Port)\n}",
"func LoadPostgreSQLOptions() SQLOptions {\n\toptions := DefaultPostgreSQLOptions\n\n\t// postgresql container exposes port at 3306, if we're inside a container, we\n\t// need to use 3306 to connect to the postgresql server.\n\tif IsInsideContainer() {\n\t\toptions.Port = \"5432\"\n\t} else {\n\t\toptions.Host = \"127.0.0.1\"\n\t}\n\n\tif host, ok := os.LookupEnv(\"TEST_POSTGRESQL_HOST\"); ok {\n\t\toptions.Host = host\n\t}\n\tif val, ok := os.LookupEnv(\"TEST_POSTGRESQL_PORT\"); ok {\n\t\toptions.Port = val\n\t}\n\n\tif val, ok := os.LookupEnv(\"TEST_POSTGRESQL_DATABASE\"); ok {\n\t\toptions.Database = val\n\t}\n\n\tif val, ok := os.LookupEnv(\"TEST_POSTGRESQL_USERNAME\"); ok {\n\t\toptions.Username = val\n\t}\n\n\tif val, ok := os.LookupEnv(\"TEST_POSTGRESQL_PASSWORD\"); ok {\n\t\toptions.Password = val\n\t}\n\treturn options\n}",
"func Get(config *DatabaseConfig, configurators ...ConfigFunc) (*sql.DB, error) {\n\tif config == nil {\n\t\treturn nil, ErrNoConfig.New()\n\t}\n\n\tfor _, c := range configurators {\n\t\tconfig = c(config)\n\t}\n\n\tds, err := config.DataSourceName()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn sql.Open(\"postgres\", ds)\n}",
"func LoadDbConfig() (dbConnectionInfo *DbConnInfo) {\n\tif len(os.Getenv(\"db_type\")) < 1 {\n\t\tdbConnectionInfo = &DbConnInfo{\n\t\t\tDBType: Cfg.Section(\"\").Key(\"db_type\").Value(),\n\t\t\tUser: Cfg.Section(\"\").Key(\"db_user\").Value(),\n\t\t\tPw: Cfg.Section(\"\").Key(\"db_pw\").Value(),\n\t\t\tDBName: Cfg.Section(\"\").Key(\"db_name\").Value(),\n\t\t\tHost: Cfg.Section(\"\").Key(\"db_host\").Value(),\n\t\t\tPort: Cfg.Section(\"\").Key(\"db_port\").Value(),\n\t\t}\n\t} else {\n\t\tdbConnectionInfo = &DbConnInfo{\n\t\t\tDBType: os.Getenv(\"db_type\"),\n\t\t\tUser: os.Getenv(\"db_user\"),\n\t\t\tPw: os.Getenv(\"db_pw\"),\n\t\t\tDBName: os.Getenv(\"db_name\"),\n\t\t\tHost: os.Getenv(\"db_host\"),\n\t\t\tPort: os.Getenv(\"db_port\"),\n\t\t}\n\t}\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
addAppname adds the application name to the Postgresql connection parameters.
|
func addAppname(c *pgconn.Config) {
if name, ok := c.RuntimeParams["application_name"]; !ok || name == "" {
c.RuntimeParams["application_name"] = "app"
}
}
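A behavior sketch showing the default is applied only when the name is absent or empty; "migrator" is an illustrative value, and this assumes pgx's DSN parser places application_name into RuntimeParams:

func appnameDefaultExample() {
cfg, err := pgx.ParseConfig("application_name=migrator")
if err != nil {
panic(err)
}
addAppname(&cfg.Config)
// An explicit name is preserved; only the empty case gets "app".
fmt.Println(cfg.Config.RuntimeParams["application_name"]) // migrator
}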
|
[
"func appendClientAppName(dst []byte, name string) ([]byte, error) {\n\tif name == \"\" {\n\t\treturn dst, nil\n\t}\n\n\tvar idx int32\n\tidx, dst = bsoncore.AppendDocumentElementStart(dst, \"application\")\n\n\tdst = bsoncore.AppendStringElement(dst, \"name\", name)\n\n\treturn bsoncore.AppendDocumentEnd(dst, idx)\n}",
"func ForApp(app app, key string) (*sql.DB, error) {\n\tdb, err := sql.Open(\"postgres\", app.GetEnv(key))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = db.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tapp.DeferClose(db)\n\n\treturn db, nil\n}",
"func ConnectionName(connectionName string) func(app *App) {\n\treturn func(a *App) {\n\t\ta.config.connectionName = connectionName\n\t}\n}",
"func (s Scope) AddApp(name string, app *sysl.Application) {\n\tm := MakeValueMap()\n\ts[name] = m\n\tAddItemToValueMap(m, \"name\", MakeValueString(syslutil.GetAppName(app.Name)))\n\tAddItemToValueMap(m, \"attrs\", attrsToValueMap(app.Attrs))\n\tAddItemToValueMap(m, \"types\", typesToValueMap(app.Types))\n\tAddItemToValueMap(m, \"union\", unionToValueMap(app.Types))\n\tAddItemToValueMap(m, \"alias\", aliasToValueMap(app.Types))\n\tAddItemToValueMap(m, \"endpoints\", endpointsToValueMap(app.Endpoints))\n}",
"func NewConnectionFromApp(ctx context.Context, app string) (*pgx.Conn, error) {\n\tconfig, err := pgx.ParseConfig(\"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconfig.RuntimeParams[\"application_name\"] = app\n\treturn pgx.ConnectConfig(context.Background(), config)\n}",
"func (f *FigFile) AddApplication(name string, a *FigApplication) {\n\tf.Applications[name] = a\n}",
"func (conf *Conf) addPostgresEnv(env []v1.EnvVar) []v1.EnvVar {\n\tenv = append(env, v1.EnvVar{\n\t\tName: \"PG_HOST\",\n\t\tValue: conf.Postgres.Host,\n\t})\n\n\tenv = append(env, v1.EnvVar{\n\t\tName: \"PG_PORT\",\n\t\tValue: fmt.Sprintf(\"%d\", conf.Postgres.Port),\n\t})\n\n\tenv = append(env, v1.EnvVar{\n\t\tName: \"PG_USER\",\n\t\tValue: conf.Postgres.Username,\n\t})\n\n\tenv = append(env, v1.EnvVar{\n\t\tName: \"PG_PASS\",\n\t\tValue: conf.Postgres.Password,\n\t})\n\n\treturn env\n}",
"func (p *Parser) addDatabase(dt cfg.DatabasesType) error {\n\n\tif len(strings.TrimSpace(dt.Name)) == 0 {\n\t\treturn fmt.Errorf(\"Data name is empty\")\n\t}\n\n\tif _, exist := p.dbs[dt.Name]; exist {\n\t\treturn fmt.Errorf(\"Data name `%+s` is not unique\", dt.Name)\n\t}\n\n\t//getting info about which queries are to be executed\n\texecQrs := []string{}\n\tfor _, q := range dt.QueryToExecute {\n\t\texecQrs = append(execQrs, q.QueryName)\n\t}\n\n\t// adding database to databases map\n\tp.dbs[dt.Name] = &dtype.Database{\n\t\tDriver: dt.Driver,\n\t\tHost: dt.DriverOption.Host,\n\t\tPort: dt.DriverOption.Port,\n\t\tUsername: dt.DriverOption.Username,\n\t\tPassword: dt.DriverOption.Password,\n\t\tDBName: dt.DriverOption.DbName,\n\t\tSelectDB: dt.SelectDb,\n\t\tActive: false,\n\t\tQrsToExec: execQrs,\n\t\tExecutor: executor.NewExecutor(),\n\t}\n\n\treturn nil\n}",
"func (o *InstallOptions) AddApp(app string, version string, repository string, username string, password string,\n\treleaseName string, valuesFiles []string, setValues []string, alias string, helmUpdate bool) error {\n\tinspectChartFunc := o.createInspectChartFn(version, app, repository, username, password, releaseName, setValues,\n\t\talias, helmUpdate)\n\to.valuesFiles = valuesFiles\n\terr := helm.InspectChart(app, version, repository, username, password, o.Helmer, inspectChartFunc)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func newApp(config Config, dbConfig postgres.Config) *app {\n\treturn &app{\n\t\tconfig: config,\n\t\tdbConfig: dbConfig,\n\t}\n}",
"func urlParserPostgreSQL(cd *ConnectionDetails) error {\n\tconf, err := pgconn.ParseConfig(cd.URL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcd.Database = conf.Database\n\tcd.Host = conf.Host\n\tcd.User = conf.User\n\tcd.Password = conf.Password\n\tcd.Port = fmt.Sprintf(\"%d\", conf.Port)\n\n\toptions := []string{\"fallback_application_name\"}\n\tfor i := range options {\n\t\tif opt, ok := conf.RuntimeParams[options[i]]; ok {\n\t\t\tcd.setOption(options[i], opt)\n\t\t}\n\t}\n\n\tif conf.TLSConfig == nil {\n\t\tcd.setOption(\"sslmode\", \"disable\")\n\t}\n\n\treturn nil\n}",
"func PostgresConnectionString(sslmode string) string {\n\t// settings := Settings(path)\n\n\t// connection := []string{\n\t// \t\"host=\", settings[\"host\"], \" \",\n\t// \t\"password=\", settings[\"password\"], \" \",\n\t// \t\"user=\", settings[\"username\"], \" \",\n\t// \t\"dbname=\", settings[\"database\"], \" \",\n\t// \t\"sslmode=\", sslmode}\n\n\thost := os.Getenv(\"APP_HOST\")\n\tpassword := os.Getenv(\"PGDB_PASSWORD\")\n\tuser := os.Getenv(\"PGDB_USER\")\n\tdbname := os.Getenv(\"PG_DB\")\n\n\tconnection := []string{\n\t\t\"host=\", host, \" \",\n\t\t\"password=\", password, \" \",\n\t\t\"user=\", user, \" \",\n\t\t\"dbname=\", dbname, \" \",\n\t\t\"sslmode=\", sslmode}\n\n\treturn strings.Join(connection, \"\")\n}",
"func RegisterApplication(name string) {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\tif appname != \"\" {\n\t\tpanic(\"resource: application name already registered\")\n\t}\n\tappname = name\n}",
"func genDBName(pe plugins.ProjectExtension) string {\n\treturn fmt.Sprintf(\"%s_%s\", pe.Project.Slug, pe.Environment)\n}",
"func WithDBName(databaseName string) func(*MGO) error {\n\treturn func(m *MGO) error {\n\t\tm.databaseName = databaseName\n\t\treturn nil\n\t}\n}",
"func (s *Session) resetApplicationName(appName string) {\n\ts.ApplicationName = appName\n\tif s.sqlStats != nil {\n\t\ts.appStats = s.sqlStats.getStatsForApplication(appName)\n\t}\n}",
"func setupPostgresql(ctx context.Context, ns string) harbormetav1.PostgresConnectionWithParameters {\n\tpgName := newName(\"pg\")\n\tpgPasswordName := newName(\"pg-password\")\n\n\tExpect(k8sClient.Create(ctx, &corev1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: pgName,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tSpec: corev1.ServiceSpec{\n\t\t\tPorts: []corev1.ServicePort{{\n\t\t\t\tName: \"http\",\n\t\t\t\tPort: 5432,\n\t\t\t}},\n\t\t},\n\t})).To(Succeed())\n\n\tExpect(k8sClient.Create(ctx, &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: pgPasswordName,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tStringData: map[string]string{\n\t\t\tharbormetav1.PostgresqlPasswordKey: \"th3Adm!nPa$$w0rd\",\n\t\t},\n\t\tType: harbormetav1.SecretTypePostgresql,\n\t})).To(Succeed())\n\n\tExpect(k8sClient.Create(ctx, &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: pgName,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\"pod-selector\": pgName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"pod-selector\": pgName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tVolumes: []corev1.Volume{{\n\t\t\t\t\t\tName: \"data\",\n\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\tEmptyDir: &corev1.EmptyDirVolumeSource{},\n\t\t\t\t\t\t},\n\t\t\t\t\t}},\n\t\t\t\t\tContainers: []corev1.Container{{\n\t\t\t\t\t\tName: \"database\",\n\t\t\t\t\t\tImage: \"postgres\",\n\t\t\t\t\t\tEnv: []corev1.EnvVar{{\n\t\t\t\t\t\t\tName: \"POSTGRES_PASSWORD\",\n\t\t\t\t\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\t\t\t\t\tSecretKeyRef: &corev1.SecretKeySelector{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: pgPasswordName,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tKey: harbormetav1.PostgresqlPasswordKey,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t\t\tPorts: []corev1.ContainerPort{{\n\t\t\t\t\t\t\tContainerPort: 5432,\n\t\t\t\t\t\t}},\n\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{{\n\t\t\t\t\t\t\tMountPath: \"/var/lib/postgresql/data\",\n\t\t\t\t\t\t\tName: \"data\",\n\t\t\t\t\t\t}},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})).To(Succeed())\n\n\treturn harbormetav1.PostgresConnectionWithParameters{\n\t\tPostgresConnection: harbormetav1.PostgresConnection{\n\t\t\tPostgresCredentials: harbormetav1.PostgresCredentials{\n\t\t\t\tPasswordRef: pgPasswordName,\n\t\t\t\tUsername: \"postgres\",\n\t\t\t},\n\t\t\tDatabase: \"postgresql\",\n\t\t\tHosts: []harbormetav1.PostgresHostSpec{{\n\t\t\t\tHost: pgName,\n\t\t\t\tPort: 5432,\n\t\t\t}},\n\t\t},\n\t\tParameters: map[string]string{\n\t\t\tharbormetav1.PostgresSSLModeKey: string(harbormetav1.PostgresSSLModeRequire),\n\t\t},\n\t}\n}",
"func (s *store) AddApps(\n\tctx context.Context,\n\tapps []string,\n) error {\n\tconst queryHead = `INSERT INTO apps(name) VALUES `\n\n\ttx, err := s.db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer tx.Rollback()\n\n\tqueryParts := make([]string, len(apps))\n\targs := make([]interface{}, len(apps))\n\n\tfor i, a := range apps {\n\t\tqueryParts[i] = fmt.Sprintf(\"($%d)\", i+1)\n\t\targs[i] = strings.ToLower(a)\n\t}\n\n\tquery := queryHead + strings.Join(queryParts, \",\")\n\n\tif _, err = tx.ExecContext(ctx, query, args...); err != nil {\n\t\treturn err\n\t}\n\n\treturn tx.Commit()\n}",
"func LoadByName(ctx context.Context, db gorp.SqlExecutor, projectKey, appName string, opts ...LoadOptionFunc) (*sdk.Application, error) {\n\tquery := gorpmapping.NewQuery(`\n\t\tSELECT application.*\n\t\tFROM application\n\t\tJOIN project ON project.id = application.project_id\n\t\tWHERE project.projectkey = $1\n\t\tAND application.name = $2`).Args(projectKey, appName)\n\treturn get(ctx, db, projectKey, query, opts...)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewConnectionFromApp makes a new Postgresql connection using the default PG environment variables. It sets the provided application name on the connection settings (overriding any value from the environment) to facilitate debugging. Prefer this function for helper tools that make direct database connections.
|
func NewConnectionFromApp(ctx context.Context, app string) (*pgx.Conn, error) {
config, err := pgx.ParseConfig("")
if err != nil {
return nil, err
}
config.RuntimeParams["application_name"] = app
// Use the caller's context (not context.Background) so cancellation
// and deadlines propagate to the connection attempt.
return pgx.ConnectConfig(ctx, config)
}
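Typical use from a helper tool, assuming PGHOST, PGUSER, PGDATABASE and friends are set in the environment; "backfill-tool" and runTool are illustrative names:

func runTool(ctx context.Context) error {
conn, err := NewConnectionFromApp(ctx, "backfill-tool")
if err != nil {
return err
}
defer conn.Close(ctx)
// Issue queries with conn; the session shows up as "backfill-tool"
// in pg_stat_activity.
return nil
}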
|
[
"func newApp(config Config, dbConfig postgres.Config) *app {\n\treturn &app{\n\t\tconfig: config,\n\t\tdbConfig: dbConfig,\n\t}\n}",
"func ForApp(app app, key string) (*sql.DB, error) {\n\tdb, err := sql.Open(\"postgres\", app.GetEnv(key))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = db.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tapp.DeferClose(db)\n\n\treturn db, nil\n}",
"func NewAppDBPool(config *AppConfig) *gorm.DB {\n\tdbstr := fmt.Sprintf(\n\t\t`host=%s port=%s user=%s password=%s dbname=%s sslmode=disable`,\n\t\tconfig.DatabaseHost,\n\t\tconfig.DatabasePort,\n\t\tconfig.DatabaseUser,\n\t\tconfig.DatabasePass,\n\t\tconfig.DatabaseName,\n\t)\n\n\tlogLevel := logger.Info\n\tif os.Getenv(\"PROD\") == \"1\" {\n\t\tlogLevel = logger.Silent\n\t}\n\tdb, err := gorm.Open(postgres.New(postgres.Config{\n\t\tDSN: dbstr,\n\t\tPreferSimpleProtocol: true,\n\t}), &gorm.Config{\n\t\tLogger: logger.Default.LogMode(logLevel),\n\t})\n\n\tsqlDB, err := db.DB()\n\tsqlDB.SetMaxIdleConns(1)\n\tsqlDB.SetMaxOpenConns(4)\n\tsqlDB.SetConnMaxLifetime(time.Hour)\n\n\tif err != nil {\n\t\tpanic(\"Something is wrong with database\")\n\t}\n\n\treturn db\n}",
"func NewPostgresqlConnection() *gorm.DB {\n\tconnectionAdapter := os.Getenv(\"db_connection_adapter\")\n\tusername := os.Getenv(\"db_user\")\n\tpassword := os.Getenv(\"db_pass\")\n\tdbName := os.Getenv(\"db_name\")\n\tdbHost := os.Getenv(\"db_host\")\n\tdbPort := os.Getenv(\"db_port\")\n\n\tif connectionAdapter == \"gcp\" {\n\t\t// Connect using Google Cloud Platform Postgres SQL service\n\t\treturn adapters.CreatePostgresDBConnection(dbHost, dbPort, username, password, dbName)\n\t} else {\n\t\treturn CreatePostgresDBConnection(dbHost, dbPort, username, password, dbName) //Build connection string\n\t}\n}",
"func NewPGConnection(dataSource string) (*sql.DB, func()) {\n\tlog.Println(\"env value: \" + dataSource)\n\tdb, err := sql.Open(\"postgres\", dataSource)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif err := db.Ping(); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn db, func() {\n\t\terr := db.Close()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to close DB by error\", err)\n\t\t}\n\t}\n}",
"func NewApplication(testing bool) (*Application, error) {\n\tu, err := libunix.CurrentUser()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar dbname string\n\tif testing {\n\t\tdbname = \"forty_thieves_test\"\n\t} else {\n\t\tdbname = \"forty_thieves\"\n\t}\n\tdsn := libenv.EnvWithDefault(\n\t\t\"DSN\", fmt.Sprintf(\"postgres://%v@localhost:5432/%s?sslmode=disable\", u, dbname))\n\n\tdb, err := sqlx.Connect(\"postgres\", dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcookieStoreSecret := libenv.EnvWithDefault(\"COOKIE_SECRET\", \"ittwiP92o0oi6P4i\")\n\n\tapp := &Application{}\n\tapp.dsn = dsn\n\tapp.db = db\n\tapp.sessionStore = sessions.NewCookieStore([]byte(cookieStoreSecret))\n\n\treturn app, err\n}",
"func (c *PGDBContext) newConnection(ctx context.Context) (*sqlx.DB, error) {\n\treturn sqlx.ConnectContext(ctx, \"postgres\", c.connectionString)\n}",
"func addAppname(c *pgconn.Config) {\n\tif name, ok := c.RuntimeParams[\"application_name\"]; !ok || name == \"\" {\n\t\tc.RuntimeParams[\"application_name\"] = \"app\"\n\t}\n}",
"func NewAppConfig() *AppConfig {\n\tif os.Getenv(\"PROD\") != \"1\" {\n\t\terr := godotenv.Load()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error loading .env file\")\n\t\t}\n\t}\n\n\tconfig := new(AppConfig)\n\tconfig.DatabaseHost = os.Getenv(\"DB_HOST\")\n\tconfig.DatabasePort = os.Getenv(\"DB_PORT\")\n\tconfig.DatabaseUser = os.Getenv(\"DB_USER\")\n\tconfig.DatabasePass = os.Getenv(\"DB_PASS\")\n\tconfig.DatabaseName = os.Getenv(\"DB_NAME\")\n\tconfig.AppPort = os.Getenv(\"APP_PORT\")\n\n\tif config.DatabaseHost == \"\" {\n\t\tconfig.DatabaseHost = \"db\"\n\t}\n\n\tif config.DatabasePort == \"\" {\n\t\tconfig.DatabasePort = \"5432\"\n\t}\n\n\tif config.DatabaseUser == \"\" {\n\t\tconfig.DatabaseUser = \"postgres\"\n\t}\n\n\tif config.DatabaseName == \"\" {\n\t\tconfig.DatabaseName = \"locexercise\"\n\t}\n\n\tif config.AppPort == \"\" {\n\t\tconfig.AppPort = \"5678\"\n\t}\n\n\treturn config\n}",
"func CreateAppEnvironment() AppEnvironment {\n\tosEnvironmentVariableCheck()\n\tisProductionEnv := IsProductionEnv()\n\tlogger := log.New(os.Stdout, \"[app] \", log.LstdFlags)\n\tlogger.SetFlags(log.Lshortfile)\n\n\tif !isProductionEnv{\n\t\tlogger.Println(\"STARTING APP :: LOCAL DEV ENVIRONMENT\")\n\t} else {\n\t\tlogger.Println(\"STARTING APP :: PRODUCTION ENVIRONMENT\")\n\t}\n\n\n\tdbString := os.Getenv(DBUrlEnvironmentVariable)\n\tvar DB *gorm.DB\n\tvar err error\n\n\tlogger.Println(\"API initializing connection to database\")\n\t// repeatedly ping DB until connection is established\n\tfor i := 0; i < 10; i++ {\n\t\tDB, err = gorm.Open(postgres.Open(dbString), &gorm.Config{})\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tlogger.Printf(\"Failed to connect to db on %s , retrying...\", dbString)\n\t\ttime.Sleep(6 * time.Second)\n\t}\n\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\n\tlogger.Println(\"API connected to database\")\n\n\tport := os.Getenv(PostEnvironmentVariable)\n\n\treturn AppEnvironment{\n\t\tDb: models.AppDB { DB: DB},\n\t\tLogger: logger,\n\t\tPort: port,\n\t\tIsProductionEnv: isProductionEnv,\n\t}\n\n}",
"func dbConnect() (*pgx.ConnPool, error) {\n\tport, err := strconv.ParseUint(os.Getenv(\"PG_PORT\"), 10, 16)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Can't cast %q to uint16: %v\", os.Getenv(\"PG_PORT\"), err)\n\t}\n\n\treturn pgx.NewConnPool(pgx.ConnPoolConfig{\n\t\tConnConfig: pgx.ConnConfig{\n\t\t\tHost: os.Getenv(\"PG_HOST\"),\n\t\t\tPort: uint16(port),\n\t\t\tUser: os.Getenv(\"PG_USER\"),\n\t\t\tPassword: os.Getenv(\"PG_PASSWORD\"),\n\t\t\tDatabase: os.Getenv(\"PG_DATABASE\"),\n\t\t},\n\t\tAcquireTimeout: time.Second,\n\t\tMaxConnections: 4,\n\t})\n}",
"func PostgresConnectionString(sslmode string) string {\n\t// settings := Settings(path)\n\n\t// connection := []string{\n\t// \t\"host=\", settings[\"host\"], \" \",\n\t// \t\"password=\", settings[\"password\"], \" \",\n\t// \t\"user=\", settings[\"username\"], \" \",\n\t// \t\"dbname=\", settings[\"database\"], \" \",\n\t// \t\"sslmode=\", sslmode}\n\n\thost := os.Getenv(\"APP_HOST\")\n\tpassword := os.Getenv(\"PGDB_PASSWORD\")\n\tuser := os.Getenv(\"PGDB_USER\")\n\tdbname := os.Getenv(\"PG_DB\")\n\n\tconnection := []string{\n\t\t\"host=\", host, \" \",\n\t\t\"password=\", password, \" \",\n\t\t\"user=\", user, \" \",\n\t\t\"dbname=\", dbname, \" \",\n\t\t\"sslmode=\", sslmode}\n\n\treturn strings.Join(connection, \"\")\n}",
"func PostgresConnect(config *Config) *sql.DB {\n\n\tdataSourceName := fmt.Sprintf(\"host=%s port=%d user=%s password=%s dbname=%s sslmode=disable\",\n\t\tconfig.DB.Host, config.DB.Port, config.DB.User, config.DB.Password, config.DB.Name)\n\tdb, err := sql.Open(\"postgres\", dataSourceName)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif err = db.Ping(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tcloser.Bind(func() {\n\t\tlogrus.Info(\"Closing database connection\")\n\t\tdb.Close()\n\t})\n\n\treturn db\n}",
"func NewDBFromEnv() *sqlx.DB {\n\tcfg := getDataFromEnv()\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"db_user\": cfg.DBUser,\n\t\t\"db_host\": cfg.DBHost,\n\t\t\"db_port\": cfg.DBPort,\n\t\t\"db_name\": cfg.DBName,\n\t}).Info(\"Establishing a new database connection\")\n\n\tdb, err := sql.Open(\"instrumented-postgres\", getConnString(cfg))\n\tif err != nil {\n\t\tlogrus.WithError(err).Panic(\"Cannot open driver with connection string\")\n\t}\n\n\tdbx := sqlx.NewDb(db, \"postgres\")\n\n\tif err := dbx.Ping(); err != nil {\n\t\tlogrus.WithError(err).Fatal(\"Cannot ping database\")\n\t}\n\n\treturn dbx\n}",
"func getPostgresConnString() string {\n\tconfig := config.New()\n\treturn fmt.Sprintf(\"postgres://%s:%s@%s:%d/%s\",\n\t\tconfig.Postgres.User, config.Postgres.Passwd, config.Postgres.Host, config.Postgres.Port, config.Postgres.DatabaseName)\n}",
"func Connect(ctx context.Context, connString string, applicationName string) (*pgxpool.Pool, error) {\n\t// we are going to use pgx for more control over connection pool and\n\t// and a cleaner api around bulk inserts\n\tcfg, err := pgxpool.ParseConfig(connString)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse ConnString: %v\", err)\n\t}\n\tconst appnameKey = `application_name`\n\tparams := cfg.ConnConfig.RuntimeParams\n\tif _, ok := params[appnameKey]; !ok {\n\t\tparams[appnameKey] = applicationName\n\t}\n\n\tpool, err := pgxpool.ConnectConfig(ctx, cfg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create ConnPool: %v\", err)\n\t}\n\n\tif err := prometheus.Register(poolstats.NewCollector(pool, applicationName)); err != nil {\n\t\tzlog.Info(ctx).Msg(\"pool metrics already registered\")\n\t}\n\n\treturn pool, nil\n}",
"func PostgresConnect() (err error) {\n\tpostgres, err = sql.Open(\"postgres\", fmt.Sprintf(\n\t\t\"host=%s user=%s dbname=%s password=%s port=%d sslmode=%s\",\n\t\tviper.GetString(`postgres.host`),\n\t\tviper.GetString(`postgres.user`),\n\t\tviper.GetString(`postgres.dbname`),\n\t\tviper.GetString(`postgres.password`),\n\t\tviper.GetInt(`postgres.port`),\n\t\t\"disable\",\n\t))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"sql open connect\")\n\t}\n\tpostgres.SetMaxOpenConns(viper.GetInt(`postgres.maxConnect`))\n\tpostgres.SetMaxIdleConns(viper.GetInt(`postgres.maxConnect`))\n\tpostgres.SetConnMaxLifetime(time.Duration(50) * time.Second)\n\n\tif err = postgres.Ping(); err != nil {\n\t\treturn errors.Wrap(err, \"db ping\")\n\t}\n\n\treturn nil\n}",
"func OpenPgxAsConn(\n\tctx context.Context, connectString string, options ...Option,\n) (*pgx.Conn, func(), error) {\n\treturn openPgx(ctx, connectString, options,\n\t\tfunc(ctx *stopper.Context, cfg *pgxpool.Config) (*pgx.Conn, func() error, error) {\n\t\t\timpl, err := pgx.ConnectConfig(ctx, cfg.ConnConfig)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, errors.WithStack(err)\n\t\t\t}\n\t\t\tcloseDB := func() error { return impl.Close(context.Background()) }\n\t\t\treturn impl, closeDB, nil\n\t\t})\n}",
"func ConfigFromEnv() *ConnectionOptions {\n\thost := flag.String(\"database.host\", env.String(\"DATABASE_HOST\", \"localhost\"), \"PostgreSQL server host\")\n\tport := flag.Int(\"database.port\", env.Int(\"DATABASE_PORT\", 5432), \"PostgreSQL server port\")\n\tname := flag.String(\"database.name\", env.String(\"DATABASE_NAME\", \"symptomatic\"), \"PostgreSQL database name\")\n\tuser := flag.String(\"database.user\", env.String(\"DATABASE_USER\", \"symptomatic\"), \"PostgreSQL server user\")\n\tpassword := flag.String(\"database.password\", env.String(\"DATABASE_PASSWORD\", \"symptomatic\"), \"PostgreSQL server password\")\n\tsslMode := flag.Bool(\"database.ssl\", env.Bool(\"DATABASE_SSL\", false), \"PostgreSQL server ssl mode\")\n\n\treturn &ConnectionOptions{\n\t\tHost: *host,\n\t\tPort: *port,\n\t\tUser: *user,\n\t\tPassword: *password,\n\t\tDBName: *name,\n\t\tSSL: *sslMode,\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewPool starts a new PostgreSQL pool using connection parameters defined in the environment. Prefer this pool function for the main database backend.
|
func NewPool(ctx context.Context) (*pgxpool.Pool, error) {
	// An empty connection string makes pgx fall back to the standard libpq
	// environment variables (PGHOST, PGPORT, PGUSER, PGPASSWORD, PGDATABASE, ...).
	config, err := pgxpool.ParseConfig("")
	if err != nil {
		return nil, err
	}
	// Optionally tag connections by passing the embedded pgconn struct:
	// addAppname(&config.ConnConfig.Config)
	return pgxpool.ConnectConfig(ctx, config)
}
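
A minimal usage sketch, assuming the standard libpq variables (PGHOST, PGPORT, PGUSER, PGPASSWORD, PGDATABASE) are set in the environment; imports are elided, as elsewhere in this file:

func run() error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	pool, err := NewPool(ctx)
	if err != nil {
		return fmt.Errorf("connect: %w", err)
	}
	defer pool.Close()

	// Verify the pool can actually reach the server before serving traffic.
	return pool.Ping(ctx)
}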
|
[
"func NewPool(db string, size int) *Pool {\n\td, err := gorm.Open(dialect, db)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tpool := &Pool{\n\t\tdb: d,\n\t}\n\n\tif size <= 0 {\n\t\tsize = poolSize\n\t} else if size > poolMaxSize {\n\t\tsize = poolMaxSize\n\t}\n\n\tpool.db.DB().SetMaxIdleConns(size)\n\tpool.db.DB().SetMaxOpenConns(size << 1)\n\n\treturn pool\n}",
"func NewPool(uri string, schema Schema, opts Options) *Pool {\n\tready := make(chan struct{})\n\tretry := make(chan struct{}, 1)\n\tctx, cancel := context.WithCancel(context.Background())\n\tp := &Pool{\n\t\tretry: retry,\n\t\topts: opts,\n\t\tcancel: cancel,\n\t\tready: ready,\n\t}\n\tif opts.PrepareConn != nil {\n\t\tp.inited = make(map[*sqlite.Conn]struct{})\n\t}\n\tgo func() {\n\t\tdefer close(ready)\n\t\tdefer cancel()\n\t\tp.pool, p.err = p.open(ctx, uri, schema)\n\t\tif p.err != nil {\n\t\t\topts.OnError.call(p.err)\n\t\t}\n\t}()\n\treturn p\n}",
"func NewConnPool(db *pg.DB) adapter.ConnPool {\n\treturn &connPool{db}\n}",
"func NewPool(o *Options) (*Pool, error) {\n\t//create pool\n\tpool := redis.NewPool(func() (redis.Conn, error) {\n\t\tconn, err := redis.Dial(\"tcp\", fmt.Sprintf(\"%s:%d\", o.Host, o.Port), redis.DialDatabase(o.DBName))\n\t\tif err != nil {\n\t\t\tlog.Printf(\"ERROR: fail init redis: %s\", err.Error())\n\t\t}\n\t\treturn conn, err\n\t}, 30000)\n\n\t// adding parameters.\n\tpool.MaxActive = 29500\n\tpool.Wait = true\n\n\t//ping\n\tif err := ping(pool.Get()); err != nil {\n\t\treturn nil, errors.Wrap(err, \"ping redis db error\")\n\t}\n\n\treturn &Pool{pool}, nil\n}",
"func NewPool(connString string) (*Pool, error) {\n\tconfig, err := ParsePoolConfig(connString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewPoolConfig(config)\n}",
"func NewPool() *Pool {\n\treturn &Pool{\n\t\tperHost: make(map[string]*ring.Ring),\n\t\tsem: semaphore.NewWeighted(1),\n\t}\n}",
"func NewConnPool(mysqlDBConf Config) (*sqlx.DB, error) {\n\tdsn := mysqlDBConf.User + \":\" +\n\t\tmysqlDBConf.Password + \"@\" +\n\t\tmysqlDBConf.Protocol + \"(\" +\n\t\tmysqlDBConf.Host + \":\" +\n\t\tmysqlDBConf.Port + \")/\" +\n\t\tmysqlDBConf.DbName + \"?parseTime=true&multiStatements=true\"\n\n\tpool, err := sqlx.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"NewConnPool: sqlx.Open %v\", err)\n\t}\n\n\terrP := pool.Ping()\n\tif errP != nil {\n\t\treturn nil, fmt.Errorf(\"NewConnPool: pool.Ping %v\", errP)\n\t}\n\n\treturn pool, nil\n}",
"func NewPool(concurrency int) *Pool {\n\treturn &Pool{\n\t\tconcurrency: concurrency,\n\t\ttasksChan: make(chan *task, concurrency),\n\t}\n}",
"func NewPool() *Pool {\n\treturn &Pool{\n\t\tcache: map[string]string{},\n\t}\n}",
"func newPool(d Info) (error, *redis.Pool) {\n\tc, err := redis.Dial(\"tcp\", DSN_Redis(d.Redis))\n\treturn err, &redis.Pool{\n\t\tMaxIdle: 80,\n\t\tMaxActive: 12000, // max number of connections\n\t\tDial: func() (redis.Conn, error) {\n\t\t\treturn c, err\n\t\t},\n\t}\n}",
"func (hdb *HostDB) NewPool(filesize uint64, duration types.BlockHeight) (HostPool, error) {\n\thdb.mu.RLock()\n\tdefer hdb.mu.RUnlock()\n\tif hdb.isEmpty() {\n\t\treturn nil, errors.New(\"HostDB is empty\")\n\t}\n\treturn &pool{\n\t\tfilesize: filesize,\n\t\tduration: duration,\n\t\thdb: hdb,\n\t}, nil\n}",
"func NewPool() *Pool {\n\treturn &Pool{\n\t\tworkers: []*Worker{},\n\t\twg: &sync.WaitGroup{},\n\t}\n}",
"func NewPool(config etc.RedisPool) (pool *redis.Pool, err error) {\n\tconfigURL, err := url.Parse(config.URL)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"invalid redis URL: %s\", err)\n\t\treturn\n\t}\n\n\tswitch configURL.Scheme {\n\tcase \"redis\":\n\t\tpool = newInstancePool(config)\n\tcase \"redis+sentinel\":\n\t\treturn newSentinelPool(configURL, config)\n\tdefault:\n\t\terr = fmt.Errorf(\"invalid redis URL scheme: %s\", configURL.Scheme)\n\t}\n\treturn\n}",
"func NewPGConnection(dataSource string) (*sql.DB, func()) {\n\tlog.Println(\"env value: \" + dataSource)\n\tdb, err := sql.Open(\"postgres\", dataSource)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif err := db.Ping(); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn db, func() {\n\t\terr := db.Close()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to close DB by error\", err)\n\t\t}\n\t}\n}",
"func New(ctx context.Context, connString string) (*Pool, error) {\n\tconfig, err := ParseConfig(connString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewWithConfig(ctx, config)\n}",
"func NewPool(get func(arg interface{}) (io.Closer, error)) *Pool {\n\treturn &Pool{\n\t\tget: get,\n\t\titems: make(map[interface{}]*poolItem),\n\t}\n}",
"func NewPool(_ testing.TB, addr string) *redis.Pool {\n\treturn &redis.Pool{\n\t\tMaxIdle: 2,\n\t\tMaxActive: 10,\n\t\tIdleTimeout: time.Minute,\n\t\tDial: func() (redis.Conn, error) {\n\t\t\treturn redis.Dial(\"tcp\", addr)\n\t\t},\n\t\tTestOnBorrow: func(c redis.Conn, t time.Time) error {\n\t\t\t_, err := c.Do(\"PING\")\n\t\t\treturn err\n\t\t},\n\t}\n}",
"func newPool(address string) *redis.Pool {\n\treturn &redis.Pool{\n\t\tMaxIdle: 3,\n\t\tIdleTimeout: 360 * time.Second,\n\t\tDial: func() (redis.Conn, error) { return redis.Dial(\"tcp\", address) }}\n}",
"func NewAppDBPool(config *AppConfig) *gorm.DB {\n\tdbstr := fmt.Sprintf(\n\t\t`host=%s port=%s user=%s password=%s dbname=%s sslmode=disable`,\n\t\tconfig.DatabaseHost,\n\t\tconfig.DatabasePort,\n\t\tconfig.DatabaseUser,\n\t\tconfig.DatabasePass,\n\t\tconfig.DatabaseName,\n\t)\n\n\tlogLevel := logger.Info\n\tif os.Getenv(\"PROD\") == \"1\" {\n\t\tlogLevel = logger.Silent\n\t}\n\tdb, err := gorm.Open(postgres.New(postgres.Config{\n\t\tDSN: dbstr,\n\t\tPreferSimpleProtocol: true,\n\t}), &gorm.Config{\n\t\tLogger: logger.Default.LogMode(logLevel),\n\t})\n\n\tsqlDB, err := db.DB()\n\tsqlDB.SetMaxIdleConns(1)\n\tsqlDB.SetMaxOpenConns(4)\n\tsqlDB.SetConnMaxLifetime(time.Hour)\n\n\tif err != nil {\n\t\tpanic(\"Something is wrong with database\")\n\t}\n\n\treturn db\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parseRecord parses one line of input using a regular expression and returns a device.Record, or an error if no match was found.
|
func (p *parser) parseRecord(input []byte) (device.Record, error) {
	matches := p.re.FindSubmatch(input)
	if matches == nil {
		return device.Record{}, fmt.Errorf("parseRecord: invalid record: input=%s", input)
	}
	return device.Record{
		Port:        string(matches[1]),
		Description: p.sanitize(matches[2]),
		VLAN:        p.sanitize(matches[3]),
	}, nil
}
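
A sketch of how this parser might be constructed and used; the regex, the parser struct, and the sanitize helper are assumptions, since only the parse method is shown above:

type parser struct {
	re *regexp.Regexp
}

// sanitize trims surrounding whitespace from a captured group.
func (p *parser) sanitize(b []byte) string { return strings.TrimSpace(string(b)) }

func example() {
	// Hypothetical layout: port, description, and VLAN separated by whitespace.
	p := &parser{re: regexp.MustCompile(`^(\S+)\s+(.+?)\s+(\d+)$`)}
	rec, err := p.parseRecord([]byte("Gi1/0/1 uplink-to-core 100"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", rec) // {Port:Gi1/0/1 Description:uplink-to-core VLAN:100}
}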
|
[
"func ParseRecord(rawRec []byte) (rec *Record, err error) {\n\n\trec = new(Record)\n\n\trec.Leader.Text = string(rawRec[:24])\n\n\tdir, err := parseDirectory(rawRec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseDataAddress, err := toInt(rawRec[12:17])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trec.Controlfields, err = extractControlfields(rawRec, baseDataAddress, dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trec.Datafields, err = extractDatafields(rawRec, baseDataAddress, dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn rec, nil\n}",
"func parse(buf []byte) (out record, err error) {\n\tif len(buf) < recordHeaderSize {\n\t\treturn out, Error.New(\"record buf not big enough for header\")\n\t}\n\n\tout.version = int8(buf[0])\n\tif out.version != recordVersion {\n\t\treturn out, Error.New(\"unknown record header version: %d\", out.version)\n\t}\n\n\tout.kind = recordKind(buf[1])\n\tout.start = int64(binary.BigEndian.Uint64(buf[2:10]))\n\tout.end = int64(binary.BigEndian.Uint64(buf[10:18]))\n\tout.size = binary.BigEndian.Uint16(buf[18:20])\n\n\tdata_end := recordHeaderSize + int(out.size)\n\tif len(buf) < data_end {\n\t\treturn out, Error.New(\"record buf not big enough for data\")\n\t}\n\tout.data = buf[recordHeaderSize:data_end]\n\n\t// the crc is everything but the last 4 bytes of the record header\n\t// followed by the data.\n\tvar crc uint32\n\tcrc = crc32.Update(crc, castTable, buf[:recordHeaderSize-4])\n\tcrc = crc32.Update(crc, castTable, out.data)\n\tif disk_crc := binary.BigEndian.Uint32(buf[20:24]); crc != disk_crc {\n\t\treturn out, Error.New(\"crc mismatch: %x != disk %x\", crc, disk_crc)\n\t}\n\n\treturn out, nil\n}",
"func ParseNextRecord(r io.Reader) (rec *Record, err error) {\n\n\trawRec, err := NextRecord(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trec, err = ParseRecord(rawRec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn rec, nil\n}",
"func RecordFromString(input string) *Record {\n\toutput := new(Record)\n\tscanner := bufio.NewScanner(strings.NewReader(input))\n\tscanner.Split(bufio.ScanWords)\n\tfor scanner.Scan() {\n\t\ttoken := scanner.Text()\n\t\tif strings.Contains(token, \":\") {\n\t\t\tpieces := strings.Split(token, \":\")\n\t\t\tfield := strings.Title(pieces[0])\n\t\t\tvalue := pieces[1]\n\t\t\treflect.ValueOf(output).Elem().FieldByName(field).SetString(value)\n\t\t}\n\t}\n\treturn output\n}",
"func readRecord(l *ledger) stateFn {\n\t// check what kind of record this depending on its type we have more or less data to read\n\tb, err := l.rd.Peek(1)\n\tif err != nil {\n\t\tlog.Fatal(\"Error parsing lexRecord: \", err)\n\t}\n\n\trecordType := RecordType(b[0])\n\tvar buf []byte\n\tif recordType == Debit || recordType == Credit {\n\t\t// debits and credits have extra 8 byte more data\n\t\tbuf = make([]byte, 21)\n\t} else {\n\t\tbuf = make([]byte, 13)\n\t}\n\t_, err = l.rd.Read(buf)\n\tif err != nil {\n\t\tlog.Fatal(\"Error parsing Record: \", err)\n\t}\n\tl.emit(item{recordType, buf})\n\n\t// check to see if we have the minimum amount of bytes to read for the next record\n\tif _, err := l.rd.Peek(13); err != nil {\n\t\treturn nil\n\t}\n\treturn readRecord\n}",
"func (r *Scanner) Record() *Record {\n\tdata := r.Bytes()\n\trecord, _ := FromBytes(data)\n\treturn record\n}",
"func parseRecord(c *csv.Reader) (*VPN, error) {\n\td, err := c.Read()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(d) != 15 {\n\t\treturn nil, fmt.Errorf(\"got %d columns, want 15\", len(d))\n\t}\n\n\tv := &VPN{}\n\tv.Hostname = d[0]\n\tv.IP = d[1]\n\tv.Score, _ = strconv.Atoi(d[2])\n\tping, _ := strconv.Atoi(d[3])\n\tv.Ping = time.Duration(ping) * time.Millisecond\n\tv.Speed, _ = strconv.Atoi(d[4])\n\tv.Country = d[5]\n\tv.CountryShort = d[6]\n\tv.Sessions, _ = strconv.Atoi(d[7])\n\tuptime, _ := strconv.Atoi(d[8])\n\tv.Uptime = time.Duration(uptime) * time.Millisecond\n\tv.Users, _ = strconv.Atoi(d[9])\n\tv.Traffic, _ = strconv.Atoi(d[10])\n\tv.LogType = d[11]\n\tv.Operator = d[12]\n\tv.Message = d[13]\n\n\tb, err := base64.StdEncoding.DecodeString(d[14])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := bytes.NewBuffer(b)\n\ttextMode := false\n\ttext := \"\"\n\tfor {\n\t\tline, err := r.ReadString('\\n')\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttrimmed := strings.TrimSpace(line)\n\n\t\tswitch trimmed {\n\t\tcase \"\":\n\t\t\tcontinue\n\t\tcase \"<ca>\", \"<cert>\", \"<key>\":\n\t\t\ttextMode = true\n\t\t\ttext = \"\"\n\t\t\tcontinue\n\t\tcase \"</ca>\", \"</cert>\", \"</key>\":\n\t\t\ttextMode = false\n\t\t\ttext = strings.TrimSpace(text)\n\t\t\tswitch trimmed {\n\t\t\tcase \"</ca>\":\n\t\t\t\tv.CA = text\n\t\t\tcase \"</cert>\":\n\t\t\t\tv.Cert = text\n\t\t\tcase \"</key>\":\n\t\t\t\tv.Key = text\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif textMode {\n\t\t\ttext += line\n\t\t\tcontinue\n\t\t}\n\n\t\twords := strings.Split(trimmed, \" \")\n\t\tswitch {\n\t\tcase len(words) < 2:\n\t\t\tcontinue\n\t\tcase words[0] == \"proto\":\n\t\t\tv.Proto = words[1]\n\t\tcase words[0] == \"cipher\":\n\t\t\tv.Cipher = words[1]\n\t\tcase words[0] == \"auth\":\n\t\t\tv.Auth = words[1]\n\t\tcase len(words) < 3:\n\t\t\tcontinue\n\t\tcase words[0] == \"remote\":\n\t\t\tif v.IP != words[1] {\n\t\t\t\treturn nil, fmt.Errorf(\"inconsistent IP: got %s, want %s\", words[1], v.IP)\n\t\t\t}\n\t\t\tv.Port, _ = strconv.Atoi(words[2])\n\t\t}\n\t}\n\n\tif v.Proto == \"\" || v.IP == \"\" || v.Port == 0 || v.Cipher == \"\" ||\n\t\tv.Auth == \"\" || v.CA == \"\" || v.Cert == \"\" || v.Key == \"\" {\n\t\treturn nil, fmt.Errorf(\"invalid config or parsing\")\n\t}\n\n\treturn v, nil\n}",
"func (s Scanner) Record() Record {\n\tif s.done {\n\t\tpanic(\"Record called after Done()=true\")\n\t}\n\tend := bytes.Index(rawUCD[s.pos+1:], []byte(\"\\n\"))\n\treturn rawUCD[s.pos+1 : s.pos+1+end]\n}",
"func TestReadRecordRE(t *testing.T) {\n\tallRecordsStr := \"hello<foo>howdy</foo>hello<bar>yellow</bar>hello<baz>goodbye</baz>\"\n\tscr := NewScript()\n\tscr.input = bufio.NewReader(strings.NewReader(allRecordsStr))\n\tscr.SetRS(`<[^>]+>[^<]*<[^>]+>`)\n\tscr.rsScanner = bufio.NewScanner(scr.input)\n\tscr.rsScanner.Split(scr.makeRecordSplitter())\n\tfor i := 0; i < 3; i++ {\n\t\trec, err := scr.readRecord()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif rec != \"hello\" {\n\t\t\tt.Fatalf(\"Expected %q but received %q\", \"hello\", rec)\n\t\t}\n\t}\n}",
"func (rdAddendumD *ReturnDetailAddendumD) Parse(record string) {\n\tif utf8.RuneCountInString(record) < 60 {\n\t\treturn // line too short\n\t}\n\n\t// Character position 1-2, Always \"35\"\n\trdAddendumD.setRecordType()\n\t// 03-04\n\trdAddendumD.RecordNumber = rdAddendumD.parseNumField(record[2:4])\n\t// 05-13\n\trdAddendumD.EndorsingBankRoutingNumber = rdAddendumD.parseStringField(record[4:13])\n\t// 14-21\n\trdAddendumD.BOFDEndorsementBusinessDate = rdAddendumD.parseYYYYMMDDDate(record[13:21])\n\t// 22-36\n\trdAddendumD.EndorsingBankItemSequenceNumber = rdAddendumD.parseStringField(record[21:36])\n\t// 37-37\n\trdAddendumD.TruncationIndicator = rdAddendumD.parseStringField(record[36:37])\n\t// 38-38\n\trdAddendumD.EndorsingBankConversionIndicator = rdAddendumD.parseStringField(record[37:38])\n\t// 39-39\n\trdAddendumD.EndorsingBankCorrectionIndicator = rdAddendumD.parseNumField(record[38:39])\n\t// 40-40\n\trdAddendumD.ReturnReason = rdAddendumD.parseStringField(record[39:40])\n\t// 41-59\n\trdAddendumD.UserField = rdAddendumD.parseStringField(record[40:59])\n\t// 60-60\n\trdAddendumD.EndorsingBankIdentifier = rdAddendumD.parseNumField(record[59:60])\n\t// 61-80\n\trdAddendumD.reserved = \" \"\n}",
"func parseMetric(record []byte) (*Metric, error) {\n\trecord = bytes.TrimSpace(record)\n\n\tif len(record) == 0 {\n\t\treturn nil, fmt.Errorf(\"Parse error: empty record\")\n\t}\n\n\tbucket, rest := tokenize(':', record)\n\tif len(bucket) == 0 {\n\t\treturn nil, fmt.Errorf(\"Malformed record: No bucket name.\")\n\t}\n\n\tif len(rest) == 0 {\n\t\t// Statsd spec is fuzzy about it and there are Statsd implementations\n\t\t// that treat \"a\" as \"a:1|c\" but we'll drop these requests.\n\t\treturn nil, fmt.Errorf(\"Malformed record: No value/type found.\")\n\t}\n\n\tvalue, rest := tokenize('|', rest)\n\tif len(value) == 0 {\n\t\treturn nil, fmt.Errorf(\"Malformed record: No value found after '|'\")\n\t}\n\n\tmtype, rest := tokenize('|', rest)\n\tif len(mtype) == 0 {\n\t\treturn nil, fmt.Errorf(\"Malformed record: No 'type' found in %q\", record)\n\t}\n\n\t_, samplingRate := tokenize('@', rest)\n\n\treturn newMetric(bucket, value, mtype, samplingRate)\n}",
"func parseLine(line string) (dataLog *Rf12demoDataLog, err error) {\n\t// split line\n\tdataStrArray := strings.Split(line, \" \")\n\n\t// parse status\n\tif (len(dataStrArray) > 3) && (dataStrArray[0] == \"OK\") {\n\t\t// parse node infos\n\t\tnodeInfosByte := byteFromString(dataStrArray[2])\n\n\t\t// check reserved field\n\t\tif (nodeInfosByte & 0x80) != 0 {\n\t\t\terr = errors.New(\"Received payload with reserved field set to 1\")\n\t\t\tlog.Error(err)\n\t\t} else {\n\t\t\tdataLog = &Rf12demoDataLog{at: time.Now().UTC()}\n\n\t\t\t// parse node id\n\t\t\tdataLog.nodeId = int(byteFromString(dataStrArray[1]) & 0x1f)\n\n\t\t\t// parse node kind\n\t\t\tdataLog.nodeKind = int(nodeInfosByte & 0x7f)\n\n\t\t\t// parse data\n\t\t\tdataLog.data = make([]byte, len(dataStrArray)-3)\n\n\t\t\tfor index, dataStr := range dataStrArray {\n\t\t\t\tif index > 2 {\n\t\t\t\t\tdataLog.data[index-3] = byteFromString(dataStr)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\terr = errors.New(\"Garbage received\")\n\t}\n\n\treturn\n}",
"func (l *LogParser) doParse(out chan *LogRecord, filter RecordFilter) {\n\tlogDate := time.Time{}\n\ts, err := l.r.ReadString('\\n')\n\n\tfor err == nil || (err == io.EOF && len(s) > 0) {\n\t\tfor i := len(s) - 1; i > 0 && (s[i] == '\\r' || s[i] == '\\n'); i-- {\n\t\t\ts = s[:i]\n\t\t}\n\t\tswitch {\n\n\t\t// #Date: get log date\n\t\tcase strings.HasPrefix(s, datePrefix):\n\t\t\ts = s[len(datePrefix):]\n\t\t\tlogDate, err = time.ParseInLocation(tsFormat, s, time.UTC)\n\n\t\t// #Fields : get fields definition, may have changed\n\t\tcase strings.HasPrefix(s, fieldsPrefix):\n\t\t\ts = s[len(fieldsPrefix):]\n\t\t\tl.fields = []string{}\n\t\t\tmark := 0\n\t\t\tfor i := 0; i < len(s); i++ {\n\t\t\t\tif i == len(s)-1 || s[i] == ' ' {\n\t\t\t\t\tif i == len(s)-1 {\n\t\t\t\t\t\ti++ // I don't understand wy it's necessary!\n\t\t\t\t\t}\n\t\t\t\t\tl.fields = append(l.fields, s[mark:i])\n\t\t\t\t\tmark = i + 1\n\t\t\t\t}\n\t\t\t}\n\n\t\t// skip empty lines or other commented out lines\n\t\tcase len(s) < 2, s[0] == '#':\n\t\t\t{\n\t\t\t}\n\t\tdefault:\n\t\t\t{\n\t\t\t\t// Do parsing work only if the log date is in time frame and if\n\t\t\t\t// a full text pattern is recognized\n\t\t\t\tif filter.CheckDate(logDate) && filter.CheckFullLine(&s) {\n\t\t\t\t\tr := NewLogRecord(s, filter)\n\t\t\t\t\tfieldIndex := 0\n\t\t\t\t\tmark := 0\n\t\t\t\t\tselected := true\n\t\t\t\t\tfor i := 0; i < len(s); i++ {\n\t\t\t\t\t\tif i == len(s)-1 || s[i] == ' ' {\n\t\t\t\t\t\t\tif i == len(s)-1 {\n\t\t\t\t\t\t\t\ti++ // don't get this!\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tfield := s[mark:i]\n\t\t\t\t\t\t\tmark = i + 1\n\t\t\t\t\t\t\tif !r.Set(l.fields[fieldIndex], field) {\n\t\t\t\t\t\t\t\t// the record parsing is abandonned\n\t\t\t\t\t\t\t\t// as soon as a field is rejected by the filter\n\t\t\t\t\t\t\t\tselected = false\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tfieldIndex++\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif selected {\n\t\t\t\t\t\tout <- r\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ts, err = l.r.ReadString('\\n')\n\t}\n\tclose(out)\n}",
"func ParseSRVRecord(rec string) (srv, proto, name string, err error) {\n\tsplits := make([]string, 2)\n\n\tremaining := rec\n\n\tfor i := 0; i < 2; i++ {\n\t\tidx := strings.Index(remaining, \".\")\n\t\tif idx == -1 {\n\t\t\treturn \"\", \"\", \"\", errBadSRVFormat\n\t\t}\n\t\tsplit := remaining[:idx]\n\t\tif !strings.HasPrefix(split, \"_\") {\n\t\t\treturn \"\", \"\", \"\", errBadSRVFormat\n\t\t}\n\t\tsplits[i] = split[1:]\n\t\tremaining = remaining[idx+1:]\n\t}\n\tif len(remaining) == 0 {\n\t\treturn \"\", \"\", \"\", errBadSRVFormat\n\t}\n\treturn splits[0], splits[1], remaining, nil\n}",
"func UnmarshalRecord(buff []byte) *common.Record {\n\tr := newRecord()\n\n\tcopy(r.ID, buff[:16])\n\tr.Start = binary.LittleEndian.Uint64(buff[16:24])\n\tr.Length = binary.LittleEndian.Uint32(buff[24:])\n\n\treturn r\n}",
"func processLine(re *regexp.Regexp, line string) *accessLogEntry {\n\tentry := accessLogEntry{}\n\n\tresult := re.FindStringSubmatch(line)\n\n\t// skip lines with incorrect length\n\tif len(result) < 5 {\n\t\tfmt.Printf(\"Skipping line: %s\\nwhich had the following result object: %#v\\n\", line, result)\n\t\treturn nil\n\t}\n\n\tlayout := \"2006-01-02T15:04:05Z\"\n\tmDate, err := time.Parse(layout, result[1])\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tentry.year = mDate.Year()\n\tentry.month = int(mDate.Month())\n\tentry.day = mDate.Day()\n\tentry.hour = mDate.Hour()\n\n\tentry.hlsVersion = result[2]\n\tentry.bitrate = result[3]\n\tentry.responseCode = result[4]\n\tif i, err := strconv.Atoi(result[5]); err != nil {\n\t\tentry.bytes = 0\n\t} else {\n\t\tentry.bytes = i\n\t}\n\tentry.userAgent = result[6]\n\n\treturn &entry\n}",
"func parsePacctRecord(record []string) Pacct {\n\tvar sample Pacct\n\n\tsample.Command = strings.TrimSpace(record[0])\n\tversion := record[1]\n\tif version != \"v3\" {\n\t\tlog.Fatalf(\"The version of record is not v3, halting. version = %s\\n\", version)\n\t}\n\tsample.Utime = toFloat32(record[2])\n\tsample.Stime = toFloat32(record[3])\n\tsample.CpuTime = sample.Stime + sample.Stime\n\tsample.Elapsed = toFloat32(record[4])\n\tsample.Uid = toInt(record[5])\n\tsample.Gid = toInt(record[6])\n\tsample.Avmem = toFloat32(record[7])\n\tsample.Pid = toInt(record[9])\n\tsample.Ppid = toInt(record[10])\n\tsample.StartTime = toTime(record[14])\n\n\tDprintf(\"sample = %#v\\n\", sample)\n\tDprintf(\"sample2 = %v\\n\", sample)\n\treturn sample\n}",
"func (p *parser) parseLine(s string) error {\n\tif len(s) < 1+(dataOff+1)*2 || s[0] != ':' {\n\t\treturn ErrSyntax\n\t}\n\tbuf, err := hexDecodeString(s[1:])\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar (\n\t\taddr = binary.BigEndian.Uint16(buf[addrOff:])\n\t\tdata = buf[dataOff : len(buf)-1]\n\t\tsum byte\n\t)\n\tfor _, v := range buf {\n\t\tsum += v\n\t}\n\tif sum != 0 {\n\t\treturn ErrChecksum\n\t}\n\tif buf[typeOff] != dataRec && addr != 0 ||\n\t\tlen(data) != int(buf[lenOff]) {\n\t\treturn ErrSyntax\n\t}\n\tswitch buf[typeOff] {\n\tcase dataRec:\n\t\tif addr+uint16(len(data))-1 < addr {\n\t\t\t// For Data records whose data's addresses overflow\n\t\t\t// 16-bit register, in 8-bit and 32-bit format the data\n\t\t\t// are wrapped to zero at the end of the address space\n\t\t\t// (16- and 32-bit, respectively), and in 16-bit\n\t\t\t// format, at the end of the current segment to the\n\t\t\t// beginning thereof.\n\t\t\tvar c Chunk\n\t\t\tswitch {\n\t\t\tcase p.data.Format == FormatAuto:\n\t\t\t\treturn ErrSyntax\n\t\t\tcase p.data.Format != Format32Bit:\n\t\t\t\tc.Addr = p.fullAddr(0)\n\t\t\t\tfallthrough\n\t\t\tcase p.segment == 0xffff:\n\t\t\t\tc.Data = append(c.Data, data[-addr:]...)\n\t\t\t\tp.data.Chunks.add(c)\n\t\t\t\tdata = data[:-addr]\n\t\t\t}\n\t\t}\n\t\tp.data.Chunks.add(Chunk{p.fullAddr(addr), data})\n\tcase eofRec:\n\t\tif len(data) != 0 {\n\t\t\treturn ErrSyntax\n\t\t}\n\t\treturn io.EOF\n\tcase extSegmentAddrRec:\n\t\treturn p.setSegment(Format16Bit, data)\n\tcase startSegmentAddrRec:\n\t\treturn p.setStart(Format16Bit, data)\n\tcase extLinearAddrRec:\n\t\treturn p.setSegment(Format32Bit, data)\n\tcase startLinearAddrRec:\n\t\treturn p.setStart(Format32Bit, data)\n\tdefault:\n\t\treturn ErrSyntax\n\t}\n\treturn nil\n}",
"func NextRecord(r io.Reader) (rawRec []byte, err error) {\n\n\t// Read the first 5 bytes, determine the record length and\n\t// read the remainder of the record\n\trawLen := make([]byte, 5)\n\t_, err = r.Read(rawLen)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trecLen, err := toInt(rawLen[0:5])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Ensure that we have a \"sane\" record length?\n\tif recLen <= leaderLen {\n\t\terr = errors.New(\"MARC record is too short\")\n\t\treturn nil, err\n\t} else if recLen > maxRecordSize {\n\t\terr = errors.New(\"MARC record is too long\")\n\t\treturn nil, err\n\t}\n\n\trawRec = make([]byte, recLen)\n\t// ensure that the raw len is available for the leader\n\tcopy(rawRec, rawLen)\n\n\t// Read the remainder of the record\n\t_, err = r.Read(rawRec[5:recLen])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// The last byte should be a record terminator\n\tif rawRec[len(rawRec)-1] != recordTerminator {\n\t\treturn nil, errors.New(\"Record terminator not found at end of record\")\n\t}\n\n\treturn rawRec, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ArchFromString returns the enum value with the given name, or the zero value if there's no such value.
|
func ArchFromString(c string) Arch {
switch c {
case "noarch":
return Arch_noarch
case "x8664":
return Arch_x8664
case "x8664Musl":
return Arch_x8664Musl
case "i686":
return Arch_i686
case "i686Musl":
return Arch_i686Musl
case "armV6":
return Arch_armV6
case "armV6Musl":
return Arch_armV6Musl
default:
return 0
}
}
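
Because the zero value doubles as the "unknown" sentinel, callers typically validate input as below; the same pattern applies to ResultsFromString and BuildTypeFromString later in this section. One caveat: if the schema's first member (here presumably noarch) has ordinal 0, a zero result is ambiguous with valid input. A brief sketch, where archName is a hypothetical input string:

arch := ArchFromString(archName)
if arch == 0 {
	return fmt.Errorf("unknown architecture %q", archName)
}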
|
[
"func (a Arch) Value() Arch {\n\tswitch a {\n\tcase X8664, AARCH64:\n\t\treturn a\n\t// accept amd, amd64, x86, x64, arm, arm64 and m1 values\n\tcase \"amd\", \"amd64\", \"x86\", \"x64\":\n\t\treturn X8664\n\tcase \"arm\", \"arm64\", \"m1\":\n\t\treturn AARCH64\n\t}\n\n\treturn Arch(runtime.GOARCH).Value()\n}",
"func (o *ImageForVersionArch) GetArch() string {\n\tif o == nil || IsNil(o.Arch) {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Arch\n}",
"func ParseEnum(name string) (Enum, error) {\n\tswitch name {\n\tcase \"creating\":\n\t\treturn Creating, nil\n\tcase \"available\":\n\t\treturn Available, nil\n\tcase \"deprecated\":\n\t\treturn Deprecated, nil\n\tcase \"unavailable\":\n\t\treturn Unavailable, nil\n\tcase \"deleting\":\n\t\treturn Deleting, nil\n\tcase \"deleted\":\n\t\treturn Deleted, nil\n\tcase \"failed\":\n\t\treturn Failed, nil\n\t}\n\tvar zero Enum\n\treturn zero, fmt.Errorf(\"%s is not a valid imagestatus.Enum\", name)\n}",
"func NewArch(os, arch string) Arch {\n\treturn Arch{OS: os, Arch: arch}\n}",
"func (o *ImageForVersionArch) GetArchOk() (*string, bool) {\n\tif o == nil || IsNil(o.Arch) {\n\t\treturn nil, false\n\t}\n\treturn o.Arch, true\n}",
"func StatusEnumFromValue(value string) StatusEnum {\n switch value {\n case \"kInitializing\":\n return Status_KINITIALIZING\n case \"kAvailable\":\n return Status_KAVAILABLE\n case \"kBound\":\n return Status_KBOUND\n case \"kFailed\":\n return Status_KFAILED\n default:\n return Status_KINITIALIZING\n }\n}",
"func GetArch() (string, error) {\r\n\tcmd := exec.Command(\"uname\", []string{\"-m\"}...)\r\n\tresult, err := cmd.Output()\r\n\tif err != nil {\r\n\t\treturn \"\", err\r\n\t}\r\n\r\n\tstr := strings.Trim(string(result), \" \\r\\n\\t\")\r\n\tif str == \"x86_64\" {\r\n\t\treturn \"64\", nil\r\n\t} else if str == \"i386\" || str == \"i686\" {\r\n\t\treturn \"32\", nil\r\n\t}\r\n\treturn str, errors.New(\"Could not determine architecture bit-ness\")\r\n}",
"func Type28EnumToValue(type28Enum Type28Enum) string {\r\n switch type28Enum {\r\n case Type28_LOCAL:\r\n \t\treturn \"local\"\t\t\r\n case Type28_ARCHIVE:\r\n \t\treturn \"archive\"\t\t\r\n default:\r\n \treturn \"local\"\r\n }\r\n}",
"func EnumName(m map[byte]string, v byte) string {\n\ts, ok := m[v]\n\tif ok {\n\t\treturn s\n\t}\n\treturn strconv.Itoa(int(v))\n}",
"func ParseArchVariant(platform string) (string, string) {\n\tosArchArr := strings.Split(platform, \"/\")\n\n\tvariant := \"\"\n\tarch := osArchArr[0]\n\tif len(osArchArr) > 1 {\n\t\tvariant = osArchArr[1]\n\t}\n\treturn arch, variant\n}",
"func (by *Bybit) GetActionFromString(s string) (orderbook.Action, error) {\n\tswitch s {\n\tcase wsOrderbookActionUpdate:\n\t\treturn orderbook.Amend, nil\n\tcase wsOrderbookActionDelete:\n\t\treturn orderbook.Delete, nil\n\tcase wsOrderbookActionInsert:\n\t\treturn orderbook.Insert, nil\n\t}\n\treturn 0, fmt.Errorf(\"%s %w\", s, orderbook.ErrInvalidAction)\n}",
"func SetArch(a string) {\n\tforceArch = a\n}",
"func ParseEnumName(elems []string, name string, collation string) (Enum, error) {\n\tctor := collate.GetCollator(collation)\n\tfor i, n := range elems {\n\t\tif ctor.Compare(n, name) == 0 {\n\t\t\treturn Enum{Name: n, Value: uint64(i) + 1}, nil\n\t\t}\n\t}\n\terrMsg := fmt.Sprintf(\"convert to MySQL enum failed: item %s is not in enum %v\", name, elems)\n\treturn Enum{}, errors.Wrap(ErrTruncated, errMsg)\n}",
"func (arch *Arch) String() string {\n\treturn arch.OS + \"_\" + arch.Arch\n}",
"func (a Arch) GoArch() string {\n\tswitch a {\n\tcase Archx86:\n\t\treturn \"386\"\n\tcase Archx86_64:\n\t\treturn \"amd64\"\n\tdefault:\n\t\treturn a.String()\n\t}\n}",
"func DwarfOpFromString(s string) enum.DwarfOp {\n\tfor key, val := range _DwarfOp_map {\n\t\tif s == val {\n\t\t\treturn key\n\t\t}\n\t}\n\tpanic(fmt.Errorf(\"unable to locate DwarfOp enum corresponding to %q\", s))\n}",
"func (i SQSSystemAttribute) ParseByName(s string) (SQSSystemAttribute, error) {\n\tif val, ok := _SQSSystemAttributeNameToValueMap[s]; ok {\n\t\t// parse ok\n\t\treturn val, nil\n\t}\n\n\t// error\n\treturn -1, fmt.Errorf(\"Enum Name of %s Not Expected In SQSSystemAttribute Values List\", s)\n}",
"func (arch Architecture) ToGoArch() string {\n\tswitch arch {\n\tcase ArchitectureAmd64:\n\t\treturn \"amd64\"\n\tcase ArchitectureArm:\n\t\treturn \"arm\"\n\tcase ArchitectureX86:\n\t\treturn \"386\"\n\tdefault:\n\t\treturn \"unknown\"\n\t}\n}",
"func toStateEnum(s string) string {\n\tswitch strings.ToLower(s) {\n\tcase \"pending\", \"waiting\":\n\t\treturn \"pending\"\n\tcase \"running\", \"in_progress\":\n\t\treturn \"in_progress\"\n\tcase \"cancelled\", \"killed\", \"stopped\", \"terminated\":\n\t\treturn \"cancelled\"\n\tcase \"failed\", \"failure\", \"error\", \"errored\":\n\t\treturn \"failed\"\n\tcase \"rollback\", \"rolled_back\":\n\t\treturn \"rolled_back\"\n\tcase \"success\", \"successful\":\n\t\treturn \"successful\"\n\tdefault:\n\t\treturn \"unknown\"\n\t}\n}",
"func ParseActionType(str string) (ActionType, error) {\n\tkey := strings.Trim(string(str), `\"`)\n\tv, ok := enumActionTypeIDMap[key]\n\tif !ok {\n\t\treturn 0, fmt.Errorf(\"unknown Status: %s\", str)\n\t}\n\n\treturn v, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewGithubEvent_List creates a new list of GithubEvent.
|
func NewGithubEvent_List(s *capnp.Segment, sz int32) (GithubEvent_List, error) {
l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 4}, sz)
return GithubEvent_List{l}, err
}
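
A minimal sketch of allocating and filling such a list; NewMessage, Len, and At are part of the go-capnproto2 runtime, while the field setters on GithubEvent are assumptions based on typical generated code. The same construction applies to the Logger_append_Params_List and Logger_append_Results_List constructors later in this section:

func buildEvents() error {
	_, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
	if err != nil {
		return err
	}
	events, err := NewGithubEvent_List(seg, 2)
	if err != nil {
		return err
	}
	for i := 0; i < events.Len(); i++ {
		e := events.At(i) // each element is pre-allocated in the segment
		_ = e             // populate via generated setters, e.g. e.SetRepo(...)
	}
	return nil
}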
|
[
"func newEvents(parent ulid.I, pb pb.BroadcastEvent) ([]events.Event, error) {\n\tid, err := ulid.New()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\te := &Event{id: id, parent: parent, pb: pb}\n\te.pb.Id = e.id[:]\n\te.pb.Parent = e.parent[:]\n\treturn []events.Event{e}, nil\n}",
"func convertToGithubEvents(events []scm.EventType) []string {\n\tvar ge []string\n\tfor _, e := range events {\n\t\tswitch e {\n\t\tcase scm.PullRequestEventType:\n\t\t\tge = append(ge, \"pull_request\")\n\t\tcase scm.PullRequestCommentEventType:\n\t\t\tge = append(ge, \"issue_comment\")\n\t\tcase scm.PushEventType:\n\t\t\tge = append(ge, \"push\")\n\t\tcase scm.TagReleaseEventType:\n\t\t\tge = append(ge, \"release\")\n\t\tdefault:\n\t\t\tlog.Errorf(\"The event type %s is not supported, will be ignored\", e)\n\t\t}\n\t}\n\n\treturn ge\n}",
"func NewEvent(data []byte) (*Event, error) {\n\tvar e Event\n\tif err := json.Unmarshal(data, &e); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &e, nil\n}",
"func NewEvent(s string) (Event, error) {\n\tvar e Event\n\n\te.fields = strings.Split(strings.TrimSpace(s), \"|\")\n\tid := e.Field(1)\n\tif len(id) < 4 {\n\t\treturn Event{}, errors.Errorf(\"unknown event %q\", id)\n\t}\n\tid = id[:4]\n\n\tc, ok := tokens[id]\n\tif !ok {\n\t\treturn Event{}, errors.Errorf(\"unknown event %q\", id)\n\t}\n\n\te.Category = c\n\treturn e, nil\n}",
"func GitHubCreateNewTags(\n\td *Data,\n\trepo string,\n\ttagsDest *[]*github.Reference,\n\tbranches, newTags []*github.Reference,\n\tmasterSHA string) error {\n\n\tfor _, tag := range newTags {\n\t\tsha := FindBranchHEADForTag(tag, d.PrefixBranch, masterSHA, branches)\n\n\t\t// In dry-run mode just append the new ref to the given list of destination refs.\n\t\tif d.DryRun {\n\t\t\tref, _ := GitHubCreateRef(d, repo, tag.GetRef(), sha, true)\n\t\t\t*tagsDest = append(*tagsDest, ref)\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, err := GitHubCreateRef(d, repo, tag.GetRef(), sha, false); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func newAddEvent() *addEvent {\n\tc := &addEvent{}\n\tc.RegisterSteps(c.firstStep, c.secondStep, c.thirdStep, c.fourthStep, c.fifthStep, c.sixthStep)\n\treturn c\n}",
"func PackagistHooksNewPost(ctx *context.Context) {\n\tcreateWebhook(ctx, packagistHookParams(ctx))\n}",
"func NewEvent(e string, t Topic, a Action, p interface{}) *Event {\n\treturn &Event{\n\t\tEmitter: e,\n\t\tTopic: t,\n\t\tAction: a,\n\t\tPayload: p,\n\t}\n}",
"func NewEventPayload(eventData interface{}) *EventPayload {\n\tpayload := &EventPayload{\n\t\tdata: structs.New(eventData),\n\t}\n\treturn payload\n}",
"func (g Graph) CreateEvent(c *gin.Context) {\n\tgraph, err := g.unauthOne(c)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif !g.canPostEvent(c, graph) {\n\t\tc.AbortWithStatus(403)\n\t\treturn\n\t}\n\n\tevent := models.PostBatchEvent{}\n\tc.BindJSON(&event)\n\n\tif !sugar.ValidateRequest(c, event) {\n\t\treturn\n\t}\n\n\tcurrentStatus := graph.Status()\n\n\tif !models.CanTransition(currentStatus, event.Status) {\n\t\tsugar.ErrResponse(c, 400, fmt.Sprintf(\"%s not valid when current status is %s\", event.Status, currentStatus))\n\t\treturn\n\t}\n\n\t_, isUser := middleware.CheckUser(c)\n\tif event.Status == models.StatusTerminated && isUser {\n\t\tsugar.ErrResponse(c, 400, fmt.Sprintf(\"Users cannot post TERMINATED events, please upgrade to reco v0.3.1 or above\"))\n\t}\n\n\tnewEvent, err := BatchService{AWS: g.AWS}.AddEvent(&graph.BatchJob, event)\n\n\tif err != nil {\n\t\tsugar.InternalError(c, err)\n\t\treturn\n\t}\n\n\teventMessage := \"Graph entered state:\" + event.Status\n\tsugar.EnqueueEvent(g.Events, c, eventMessage, graph.Project.UserID, map[string]interface{}{\"graph_id\": graph.ID, \"project_name\": graph.Project.Name, \"message\": event.Message})\n\n\tsugar.SuccessResponse(c, 200, newEvent)\n}",
"func NewEvent(t string) Event {\n\treturn &dom{js.Global().Get(\"Event\").New(t)}\n}",
"func NewEvent(req *http.Request) *Event {\n\treturn &Event{\n\t\tEventMeta: logger.NewEventMeta(Flag),\n\t\tremote: req.URL.Host,\n\t\tmethod: req.Method,\n\t\tkey: strings.TrimPrefix(req.URL.Path, \"/v1/\"),\n\t}\n}",
"func CreateNewBookEvents(book Book, eventType BookEventType) {\n\tvar eventRecords []interface{}\n\tfor _, bookCopy := range book.Copies {\n\t\tevent := Event{\n\t\t\tEventType: eventType,\n\t\t\tBaseBook: book.BaseBook,\n\t\t\tBookID: bookCopy.ID,\n\t\t\tISBN: book.ISBN,\n\t\t}\n\t\teventRecords = append(eventRecords, event)\n\t}\n\n\terrBulkBooksEvents := gormbulk.BulkInsert(MySQL, eventRecords, 3000)\n\tif errBulkBooksEvents != nil {\n\t\tlog.Println(errBulkBooksEvents.Error())\n\t\treturn\n\t}\n}",
"func AddNewEvent(data *ErrgularReq, db tables.ConnPool) (err error) {\n\tlog.Println(\"Adding a New Error into the appropriate table\")\n\tname, errMsg := data.Name, data.ErrMsg\n\tcode := data.Code\n\texistErr := CreateTable(name, db)\n\tif existErr != nil {\n\t\terr = existErr\n\t\treturn err\n\t}\n\terrorExists := checkErrorTypeExist(name, code, db)\n\tif errorExists != nil {\n\t\terr = errorExists\n\t\treturn err\n\t}\n\tinsertNewEventQuery := fmt.Sprintf(qInsertNewEvent, name)\n\t_, insertErr := db.Db.Exec(insertNewEventQuery, code, errMsg)\n\tif insertErr != nil {\n\t\terr = insertErr\n\t\treturn err\n\t}\n\treturn nil\n}",
"func WebhooksNew(ctx *context.Context) {\n\tctx.Data[\"Title\"] = ctx.Tr(\"repo.settings.add_webhook\")\n\tctx.Data[\"Webhook\"] = webhook.Webhook{HookEvent: &webhook_module.HookEvent{}}\n\n\torCtx, err := getOwnerRepoCtx(ctx)\n\tif err != nil {\n\t\tctx.ServerError(\"getOwnerRepoCtx\", err)\n\t\treturn\n\t}\n\n\tif orCtx.IsAdmin && orCtx.IsSystemWebhook {\n\t\tctx.Data[\"PageIsAdminSystemHooks\"] = true\n\t\tctx.Data[\"PageIsAdminSystemHooksNew\"] = true\n\t} else if orCtx.IsAdmin {\n\t\tctx.Data[\"PageIsAdminDefaultHooks\"] = true\n\t\tctx.Data[\"PageIsAdminDefaultHooksNew\"] = true\n\t} else {\n\t\tctx.Data[\"PageIsSettingsHooks\"] = true\n\t\tctx.Data[\"PageIsSettingsHooksNew\"] = true\n\t}\n\n\thookType := checkHookType(ctx)\n\tctx.Data[\"HookType\"] = hookType\n\tif ctx.Written() {\n\t\treturn\n\t}\n\tif hookType == \"discord\" {\n\t\tctx.Data[\"DiscordHook\"] = map[string]any{\n\t\t\t\"Username\": \"Gitea\",\n\t\t}\n\t}\n\tctx.Data[\"BaseLink\"] = orCtx.LinkNew\n\n\tctx.HTML(http.StatusOK, orCtx.NewTemplate)\n}",
"func GogsHooksNewPost(ctx *context.Context) {\n\tcreateWebhook(ctx, gogsHookParams(ctx))\n}",
"func newDeleteEvent() *deleteEvent {\n\tc := &deleteEvent{}\n\tc.RegisterSteps(c.firstStep, c.secondStep)\n\treturn c\n}",
"func newEvent(topic string, payload interface{}) (*event, error) {\n\tif topic == \"\" {\n\t\treturn nil, errors.New(ErrNoTopic, errorMessages)\n\t}\n\tencoded, err := encode(payload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &event{topic, encoded, nil}, nil\n}",
"func newEvent(handler *Handler, callback Callback) *Container {\n\treturn &Container{\n\t\tcallback: callback,\n\t\thandler: handler,\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ResultsFromString returns the enum value with the given name, or the zero value if there's no such value.
|
func ResultsFromString(c string) Results {
switch c {
case "ok":
return Results_ok
case "err":
return Results_err
case "reject":
return Results_reject
default:
return 0
}
}
|
[
"func StatusEnumFromValue(value string) StatusEnum {\n switch value {\n case \"kInitializing\":\n return Status_KINITIALIZING\n case \"kAvailable\":\n return Status_KAVAILABLE\n case \"kBound\":\n return Status_KBOUND\n case \"kFailed\":\n return Status_KFAILED\n default:\n return Status_KINITIALIZING\n }\n}",
"func (e Enum) Value(name string) (int, bool) {\n\tfor v, n := range e.NameValues() {\n\t\tif n == name {\n\t\t\treturn v, true\n\t\t}\n\t}\n\treturn -1, false\n}",
"func Value(name string) gql.EnumValue {\n\tvalue := &value{\n\t\tname: name,\n\t}\n\treturn value\n}",
"func StatusFromString(in string) Status {\n\tswitch in {\n\tcase StatusWaiting.String():\n\t\treturn StatusWaiting\n\tcase StatusBuilding.String():\n\t\treturn StatusBuilding\n\tcase StatusSuccess.String():\n\t\treturn StatusSuccess\n\tcase StatusNeverBuilt.String():\n\t\treturn StatusNeverBuilt\n\tcase StatusFail.String():\n\t\treturn StatusFail\n\tcase StatusDisabled.String():\n\t\treturn StatusDisabled\n\tcase StatusSkipped.String():\n\t\treturn StatusSkipped\n\tdefault:\n\t\treturn StatusUnknown\n\t}\n}",
"func (m *Matrix) GetValueName(col, val int) string {\n\tif len(m.str_to_enum[col]) == 0 {\n\t\treturn \"\"\n\t}\n\treturn m.enum_to_str[col][val]\n}",
"func ParseResult(str string) (Result, error) {\n\tkey := strings.Trim(string(str), `\"`)\n\tv, ok := enumResultIDMap[key]\n\tif !ok {\n\t\treturn 0, fmt.Errorf(\"unknown Status: %s\", str)\n\t}\n\n\treturn v, nil\n}",
"func (u *Unit) ValueFromString(str string) (*Value, error) {\n\ts := &Value{unit: u}\n\n\tif err := s.Set(str); err != nil {\n\t\treturn nil, err\n\t}\n\treturn s, nil\n}",
"func FPredFromString(s string) enum.FPred {\n\tif len(s) == 0 {\n\t\treturn 0\n\t}\n\tfor i := range _FPred_index[:len(_FPred_index)-1] {\n\t\tif s == _FPred_name[_FPred_index[i]:_FPred_index[i+1]] {\n\t\t\treturn enum.FPred(i)\n\t\t}\n\t}\n\tpanic(fmt.Errorf(\"unable to locate FPred enum corresponding to %q\", s))\n}",
"func EnumName(m map[byte]string, v byte) string {\n\ts, ok := m[v]\n\tif ok {\n\t\treturn s\n\t}\n\treturn strconv.Itoa(int(v))\n}",
"func ParseEnumValue(elems []string, number uint64) (Enum, error) {\n\tif number == 0 || number > uint64(len(elems)) {\n\t\terrMsg := fmt.Sprintf(\"convert to MySQL enum failed: number %d overflow enum boundary [1, %d]\", number, len(elems))\n\t\treturn Enum{}, errors.Wrap(ErrTruncated, errMsg)\n\t}\n\n\treturn Enum{Name: elems[number-1], Value: number}, nil\n}",
"func StatusEnumToValue(statusEnum StatusEnum) string {\n switch statusEnum {\n case Status_KINITIALIZING:\n \t\treturn \"kInitializing\"\n case Status_KAVAILABLE:\n \t\treturn \"kAvailable\"\n case Status_KBOUND:\n \t\treturn \"kBound\"\n case Status_KFAILED:\n \t\treturn \"kFailed\"\n default:\n \treturn \"kInitializing\"\n }\n}",
"func ParseEnum(name string) (Enum, error) {\n\tswitch name {\n\tcase \"creating\":\n\t\treturn Creating, nil\n\tcase \"available\":\n\t\treturn Available, nil\n\tcase \"deprecated\":\n\t\treturn Deprecated, nil\n\tcase \"unavailable\":\n\t\treturn Unavailable, nil\n\tcase \"deleting\":\n\t\treturn Deleting, nil\n\tcase \"deleted\":\n\t\treturn Deleted, nil\n\tcase \"failed\":\n\t\treturn Failed, nil\n\t}\n\tvar zero Enum\n\treturn zero, fmt.Errorf(\"%s is not a valid imagestatus.Enum\", name)\n}",
"func (e Enum) Name(value int) (string, bool) {\n\tnameValues := e.NameValues()\n\tif v, ok := nameValues[value]; ok {\n\t\treturn v, true\n\t} else {\n\t\treturn \"\", false\n\t}\n}",
"func FromName(s string) QueryType {\n\tfor idx, name := range queryTypeNames {\n\t\tcandidate := QueryType(idx)\n\t\tif IsValid(candidate) && name == s {\n\t\t\treturn candidate\n\t\t}\n\t}\n\n\treturn Unknown\n}",
"func (v *SecondService_EchoEnum_Result) String() string {\n\tif v == nil {\n\t\treturn \"<nil>\"\n\t}\n\n\tvar fields [1]string\n\ti := 0\n\tif v.Success != nil {\n\t\tfields[i] = fmt.Sprintf(\"Success: %v\", *(v.Success))\n\t\ti++\n\t}\n\n\treturn fmt.Sprintf(\"SecondService_EchoEnum_Result{%v}\", strings.Join(fields[:i], \", \"))\n}",
"func (m MSRVal) String() string {\n\treturn m.Name\n}",
"func (i SQSSystemAttribute) ParseByName(s string) (SQSSystemAttribute, error) {\n\tif val, ok := _SQSSystemAttributeNameToValueMap[s]; ok {\n\t\t// parse ok\n\t\treturn val, nil\n\t}\n\n\t// error\n\treturn -1, fmt.Errorf(\"Enum Name of %s Not Expected In SQSSystemAttribute Values List\", s)\n}",
"func GetVName(v *gogoproto.EnumValueDescriptorProto) string {\n\to, err := reg.GetExtension(\"venice.enumValueStr\", v)\n\tif err != nil {\n\t\t// All enums are converted to lower case\n\t\treturn strings.ToLower(*v.Name)\n\t}\n\treturn o.(string)\n}",
"func ParseEnumName(elems []string, name string, collation string) (Enum, error) {\n\tctor := collate.GetCollator(collation)\n\tfor i, n := range elems {\n\t\tif ctor.Compare(n, name) == 0 {\n\t\t\treturn Enum{Name: n, Value: uint64(i) + 1}, nil\n\t\t}\n\t}\n\terrMsg := fmt.Sprintf(\"convert to MySQL enum failed: item %s is not in enum %v\", name, elems)\n\treturn Enum{}, errors.Wrap(ErrTruncated, errMsg)\n}",
"func (c convTarget) FromEnumLabel(src string, tt *Type) error {\n\tif target := c.makeDirectTarget(); target != nil {\n\t\treturn target.FromEnumLabel(src, tt)\n\t}\n\treturn c.FromString(src, tt)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
BuildTypeFromString returns the enum value with the given name, or the zero value if there's no such value.
|
func BuildTypeFromString(c string) BuildType {
switch c {
case "bulk":
return BuildType_bulk
case "individual":
return BuildType_individual
default:
return 0
}
}
|
[
"func TypeFromString(s string) Type {\n\tswitch s {\n\tcase \"console\":\n\t\treturn Console\n\tcase \"amqp\":\n\t\treturn AMQP\n\tcase \"elasticsearch\":\n\t\treturn Elasticsearch\n\tcase \"http\":\n\t\treturn HTTP\n\tcase \"eventlog\":\n\t\treturn Eventlog\n\tcase \"null\":\n\t\treturn Null\n\tdefault:\n\t\treturn Unknown\n\t}\n}",
"func ParseType(str string) (Type, error) {\n\tkey := strings.Trim(string(str), `\"`)\n\tv, ok := enumTypeIDMap[key]\n\tif !ok {\n\t\treturn 0, fmt.Errorf(\"unknown Status: %s\", str)\n\t}\n\n\treturn v, nil\n}",
"func SignTypeFromString(c string) SignType {\n\tswitch c {\n\tcase \"none\":\n\t\treturn SignType_none\n\tcase \"ed25519\":\n\t\treturn SignType_ed25519\n\n\tdefault:\n\t\treturn 0\n\t}\n}",
"func RecordTypeFromString(name string) RecordType {\n\trr, ok := rrNameToInt[name]\n\tif !ok {\n\t\treturn -1\n\t}\n\n\treturn rr\n}",
"func ToTypeEnum(typeStr string) (Type, bool) {\n\n\tswitch strings.ToLower(typeStr) {\n\tcase \"any\":\n\t\treturn TypeAny, true\n\tcase \"string\":\n\t\treturn TypeString, true\n\tcase \"integer\", \"int\":\n\t\treturn TypeInteger, true\n\tcase \"long\":\n\t\treturn TypeLong, true\n\tcase \"double\", \"number\":\n\t\treturn TypeDouble, true\n\tcase \"boolean\", \"bool\":\n\t\treturn TypeBoolean, true\n\tcase \"object\":\n\t\treturn TypeObject, true\n\tcase \"complexobject\", \"complex_object\":\n\t\treturn TypeComplexObject, true\n\tcase \"array\":\n\t\treturn TypeArray, true\n\tcase \"params\":\n\t\treturn TypeParams, true\n\tdefault:\n\t\treturn TypeAny, false\n\t}\n}",
"func NameToType(typename string) TypeEnum {\n\tfor name, e := range AbstractTypeNames {\n\t\tif strings.Index(typename, name) >= 0 {\n\t\t\treturn e\n\t\t}\n\t}\n\tfor k, v := range enumNames {\n\t\tif v == typename {\n\t\t\treturn k\n\t\t}\n\t}\n\treturn TypeEnum_Unknown\n}",
"func TypeFromString(s string) (CellType, error) {\n\tswitch strings.ToLower(s) {\n\tcase \"cubes\":\n\t\treturn Cubes, nil\n\tcase \"sines2\":\n\t\treturn Sines2, nil\n\tcase \"sines3\":\n\t\treturn Sines3, nil\n\tdefault:\n\t\treturn Sines3, errors.New(s + \" is not a valid cell type. Returned Sines3.\")\n\t}\n}",
"func Special_TypeFromString(c string) Special_Type {\n\tswitch c {\n\tcase \"socket\":\n\t\treturn Special_Type_socket\n\tcase \"block\":\n\t\treturn Special_Type_block\n\tcase \"chardev\":\n\t\treturn Special_Type_chardev\n\tcase \"fifopipe\":\n\t\treturn Special_Type_fifopipe\n\tcase \"unknown\":\n\t\treturn Special_Type_unknown\n\n\tdefault:\n\t\treturn 0\n\t}\n}",
"func GoTypeForEnum(typ cc.Type, name string, types ...map[string]bg.Template) string {\n\tif typ == nil {\n\t\treturn \"<nil>\"\n\t}\n\tif typ.Kind() != cc.Enum {\n\t\tpanic(fmt.Sprintf(\"invalid type: %v\", typ))\n\t}\n\ttag := typ.Tag()\n\tif tag != 0 {\n\t\tn := string(xc.Dict.S(tag))\n\t\tfor _, t := range types {\n\t\t\tif s, ok := t[n]; ok {\n\t\t\t\tvar buf bytes.Buffer\n\t\t\t\terr := s.Execute(&buf, name)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\treturn buf.String()\n\t\t\t}\n\t\t}\n\t}\n\tlog.Printf(\"%s\", typ.Declarator())\n\tpanic(fmt.Sprintf(\"unknown type: %+v\", typ))\n}",
"func NewKeyTypeEnumFromValue(v string) (*KeyTypeEnum, error) {\n\tev := KeyTypeEnum(v)\n\tif ev.IsValid() {\n\t\treturn &ev, nil\n\t} else {\n\t\treturn nil, fmt.Errorf(\"invalid value '%v' for KeyTypeEnum: valid values are %v\", v, AllowedKeyTypeEnumEnumValues)\n\t}\n}",
"func StatusEnumFromValue(value string) StatusEnum {\n switch value {\n case \"kInitializing\":\n return Status_KINITIALIZING\n case \"kAvailable\":\n return Status_KAVAILABLE\n case \"kBound\":\n return Status_KBOUND\n case \"kFailed\":\n return Status_KFAILED\n default:\n return Status_KINITIALIZING\n }\n}",
"func OrderTypeFromString(s string) OrderType {\n\treturn orderTypeMap[s]\n}",
"func (record *HistoryRecord) TypeFromString(value string) error {\n\trecord.Type = strings.TrimSpace(value)\n\treturn nil\n}",
"func StateTypeForName(name string) StateType {\n\tc_name := C.CString(name)\n\tdefer C.free(unsafe.Pointer(c_name))\n\n\tretC := C.atk_state_type_for_name(c_name)\n\tretGo := (StateType)(retC)\n\n\treturn retGo\n}",
"func PinTypeFromString(str string) PinType {\n\tswitch str {\n\tcase \"pin\":\n\t\treturn DataType\n\tcase \"meta-pin\":\n\t\treturn MetaType\n\tcase \"clusterdag-pin\":\n\t\treturn ClusterDAGType\n\tcase \"shard-pin\":\n\t\treturn ShardType\n\tcase \"all\":\n\t\treturn AllType\n\tdefault:\n\t\treturn BadType\n\t}\n}",
"func LogLevelFromString(c string) LogLevel {\n\tswitch c {\n\tcase \"debug\":\n\t\treturn LogLevel_debug\n\tcase \"info\":\n\t\treturn LogLevel_info\n\tcase \"warn\":\n\t\treturn LogLevel_warn\n\tcase \"error\":\n\t\treturn LogLevel_error\n\n\tdefault:\n\t\treturn 0\n\t}\n}",
"func typeOf(value interface{}) (string, error) {\n\tif value == nil {\n\t\treturn \"\", nil\n\t}\n\to := strings.ToLower(reflect.TypeOf(value).Kind().String())\n\n\tswitch {\n\tcase strings.Contains(o, \"int\"):\n\t\treturn \"integer\", nil\n\tcase strings.Contains(o, \"float\"):\n\t\treturn \"number\", nil\n\tcase strings.Contains(o, \"string\"):\n\t\treturn \"string\", nil\n\tcase strings.Contains(o, \"bool\"):\n\t\treturn \"boolean\", nil\n\tcase strings.Contains(o, \"array\"), strings.Contains(o, \"slice\"):\n\t\treturn \"array\", nil\n\tcase strings.Contains(o, \"map\"):\n\t\treturn \"object\", nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"invalid type for value: %v, type: %v\", value, o)\n\t}\n}",
"func NewDepositTypeEnumFromValue(v string) (*DepositTypeEnum, error) {\n\tev := DepositTypeEnum(v)\n\tif ev.IsValid() {\n\t\treturn &ev, nil\n\t} else {\n\t\treturn nil, fmt.Errorf(\"invalid value '%v' for DepositTypeEnum: valid values are %v\", v, allowedDepositTypeEnumEnumValues)\n\t}\n}",
"func TypeToName(typ TypeEnum) string {\n\tif name, found := enumNames[typ]; found {\n\t\treturn name\n\t}\n\treturn \"\"\n}",
"func Type(raw string) ast.Type {\n\tmatch := matchRawType.FindAllStringSubmatch(raw, -1)\n\tif match == nil {\n\t\treturn nil\n\t}\n\n\tvar (\n\t\ttokenOpenList = match[0][1]\n\t\ttokenName = match[0][2]\n\t\ttokenRequired = match[0][3]\n\t\ttokenCloseList = match[0][4]\n\t\ttokenListRequired = match[0][5]\n\t)\n\n\t// an unmatched open or close\n\tif len(tokenOpenList) != len(tokenCloseList) {\n\t\treturn nil\n\t}\n\n\tvar node ast.Type = NamedType(tokenName)\n\n\tif tokenRequired == \"!\" {\n\t\tnode = AsNonNull(node)\n\t}\n\tif tokenOpenList == \"[\" && tokenCloseList == \"]\" {\n\t\tnode = AsList(node)\n\t\tif tokenListRequired == \"!\" {\n\t\t\tnode = AsNonNull(node)\n\t\t}\n\t}\n\n\treturn node\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewLogger_append_Params_List creates a new list of Logger_append_Params.
|
func NewLogger_append_Params_List(s *capnp.Segment, sz int32) (Logger_append_Params_List, error) {
l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)
return Logger_append_Params_List{l}, err
}
|
[
"func NewApp_logLevel_Params_List(s *capnp.Segment, sz int32) (App_logLevel_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn App_logLevel_Params_List{l}, err\n}",
"func NewLogParams() *LogParams {\n\treturn &LogParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewService_logLevel_Params_List(s *capnp.Segment, sz int32) (Service_logLevel_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn Service_logLevel_Params_List{l}, err\n}",
"func NewAddParams() *AddParams {\n\tvar ()\n\treturn &AddParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func AppendToStepParams(stepParams map[string]string, incomingOutputVars map[string]map[string]string) map[string]string {\n\tfor stepName, stepOutputMap := range incomingOutputVars {\n\t\tfor stepOutputVarKey, stepOutputVarValue := range stepOutputMap {\n\t\t\tkey := fmt.Sprintf(\"%s-%s\", stepName, stepOutputVarKey)\n\t\t\tstepParams[key] = stepOutputVarValue\n\t\t}\n\t}\n\treturn stepParams\n}",
"func (p Params) Append(name, val string) {\n\tjs.Value(p).Call(\"append\", name, val)\n}",
"func (s *RPC) AddLogger(l logger.LogReceiver) {\n\ts.logReceiver.Add(l)\n}",
"func (list *List) AppendNew(values ...string) {\n\tfor _, value := range values {\n\t\tif !list.Exist(value) {\n\t\t\tlist.Values = append(list.Values, value)\n\t\t}\n\t}\n}",
"func (c *XHttp) AddParams(paramMap map[string]string) *XHttp {\n\tif c.params == nil {\n\t\tc.params = make(map[string]string)\n\t}\n\tfor k, v := range paramMap {\n\t\tc.params[k] = v\n\t}\n\treturn c\n}",
"func (kpars Kparams) Append(name string, typ kparams.Type, value kparams.Value, opts ...ParamOption) Kparams {\n\tkpars[name] = NewKparam(name, typ, value, opts...)\n\treturn kpars\n}",
"func (pp Parameters) Append(ps ...Parameter) Parameters {\n\tfor _, p := range ps {\n\t\tp.Key = strings.ToUpper(p.Key)\n\t\tpp = append(pp.RemoveByKey(p.Key), p)\n\t}\n\treturn pp\n}",
"func newLoggers(prefix string) (debugL, infoL, warnL, errorL *log.Logger) {\n\tif *veryverbose {\n\t\t*verbose = true\n\t}\n\tdebugH := ioutil.Discard\n\tinfoH := ioutil.Discard\n\tif *verbose {\n\t\tinfoH = os.Stdout\n\t}\n\tif *veryverbose {\n\t\tdebugH = os.Stdout\n\t}\n\tdebugL = log.New(debugH, \"DEBU: \"+prefix, log.Ldate|log.Lmicroseconds)\n\tinfoL = log.New(infoH, \"INFO: \"+prefix, log.Ldate|log.Lmicroseconds)\n\twarnL = log.New(os.Stdout, \"WARN: \"+prefix, log.Ldate|log.Lmicroseconds)\n\terrorL = log.New(os.Stdout, \"ERRO: \"+prefix, log.Ldate|log.Lmicroseconds)\n\treturn\n}",
"func NewDownloadLogParams() *DownloadLogParams {\n\tvar ()\n\treturn &DownloadLogParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (ctx *CTX) AddParams(params map[string]string) {\n\tif ctx.Params == nil {\n\t\tctx.Params = make(map[string]string)\n\t}\n\n\tfor k, v := range params {\n\t\tctx.Params[k] = v\n\t}\n\n\treturn\n}",
"func addUrlParms(p *url.Values, field string, values []string) {\n\tfor _, v := range values {\n\t\tp.Add(field, v)\n\t}\n}",
"func (l *Factory) appendTags(tags map[string]string) map[string]string {\n\tnewTags := make(map[string]string)\n\tfor k, v := range l.tags {\n\t\tnewTags[k] = v\n\t}\n\tfor k, v := range tags {\n\t\tnewTags[k] = v\n\t}\n\treturn newTags\n}",
"func addParamsList(params map[string]string, label string, ids []string) {\n\tfor i, id := range ids {\n\t\tparams[label+\".\"+strconv.Itoa(i+1)] = id\n\t}\n}",
"func NewAdminChatHistoryParams() *AdminChatHistoryParams {\n\tvar ()\n\treturn &AdminChatHistoryParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (l *Logger) Append(data interface{}) error {\n\treturn l.store.C(data)\n}",
"func NewAddLogMessageParams() *AddLogMessageParams {\n\tvar ()\n\treturn &AddLogMessageParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewLogger_append_Results creates a new list of Logger_append_Results.
|
func NewLogger_append_Results_List(s *capnp.Segment, sz int32) (Logger_append_Results_List, error) {
l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)
return Logger_append_Results_List{l}, err
}
|
[
"func appendResult(results []apierrors.ManifestResult, r apierrors.ManifestResult) []apierrors.ManifestResult {\n\tresultIdx := -1\n\tfor i, result := range results {\n\t\tif result.Name == r.Name {\n\t\t\tresultIdx = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif resultIdx < 0 {\n\t\tresults = append(results, r)\n\t} else {\n\t\tresults[resultIdx].Add(r.Errors...)\n\t\tresults[resultIdx].Add(r.Warnings...)\n\t}\n\n\treturn results\n}",
"func NewApp_logLevel_Results_List(s *capnp.Segment, sz int32) (App_logLevel_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz)\n\treturn App_logLevel_Results_List{l}, err\n}",
"func (m *ScanModel) Append(results ...File) {\n\tif len(results) == 0 {\n\t\treturn\n\t}\n\n\tm.mutex.Lock()\n\tstart := len(m.files)\n\tm.files = append(m.files, results...)\n\tend := len(m.files) - 1\n\tm.mutex.Unlock()\n\n\tif start <= end {\n\t\tm.PublishRowsInserted(start, end)\n\t}\n}",
"func addResults(queryType string, cumulativeResults, newResults Results) (Results, int, error) {\n\n\tvar size int\n\tswitch queryType {\n\tcase types.QtAdminVappTemplate:\n\t\tcumulativeResults.Results.AdminVappTemplateRecord = append(cumulativeResults.Results.AdminVappTemplateRecord, newResults.Results.AdminVappTemplateRecord...)\n\t\tsize = len(newResults.Results.AdminVappTemplateRecord)\n\tcase types.QtVappTemplate:\n\t\tsize = len(newResults.Results.VappTemplateRecord)\n\t\tcumulativeResults.Results.VappTemplateRecord = append(cumulativeResults.Results.VappTemplateRecord, newResults.Results.VappTemplateRecord...)\n\tcase types.QtCatalogItem:\n\t\tcumulativeResults.Results.CatalogItemRecord = append(cumulativeResults.Results.CatalogItemRecord, newResults.Results.CatalogItemRecord...)\n\t\tsize = len(newResults.Results.CatalogItemRecord)\n\tcase types.QtAdminCatalogItem:\n\t\tcumulativeResults.Results.AdminCatalogItemRecord = append(cumulativeResults.Results.AdminCatalogItemRecord, newResults.Results.AdminCatalogItemRecord...)\n\t\tsize = len(newResults.Results.AdminCatalogItemRecord)\n\tcase types.QtMedia:\n\t\tcumulativeResults.Results.MediaRecord = append(cumulativeResults.Results.MediaRecord, newResults.Results.MediaRecord...)\n\t\tsize = len(newResults.Results.MediaRecord)\n\tcase types.QtAdminMedia:\n\t\tcumulativeResults.Results.AdminMediaRecord = append(cumulativeResults.Results.AdminMediaRecord, newResults.Results.AdminMediaRecord...)\n\t\tsize = len(newResults.Results.AdminMediaRecord)\n\tcase types.QtCatalog:\n\t\tcumulativeResults.Results.CatalogRecord = append(cumulativeResults.Results.CatalogRecord, newResults.Results.CatalogRecord...)\n\t\tsize = len(newResults.Results.CatalogRecord)\n\tcase types.QtAdminCatalog:\n\t\tcumulativeResults.Results.AdminCatalogRecord = append(cumulativeResults.Results.AdminCatalogRecord, newResults.Results.AdminCatalogRecord...)\n\t\tsize = len(newResults.Results.AdminCatalogRecord)\n\tcase types.QtOrgVdcNetwork:\n\t\tcumulativeResults.Results.OrgVdcNetworkRecord = append(cumulativeResults.Results.OrgVdcNetworkRecord, newResults.Results.OrgVdcNetworkRecord...)\n\t\tsize = len(newResults.Results.OrgVdcNetworkRecord)\n\tcase types.QtEdgeGateway:\n\t\tcumulativeResults.Results.EdgeGatewayRecord = append(cumulativeResults.Results.EdgeGatewayRecord, newResults.Results.EdgeGatewayRecord...)\n\t\tsize = len(newResults.Results.EdgeGatewayRecord)\n\tcase types.QtVm:\n\t\tcumulativeResults.Results.VMRecord = append(cumulativeResults.Results.VMRecord, newResults.Results.VMRecord...)\n\t\tsize = len(newResults.Results.VMRecord)\n\tcase types.QtAdminVm:\n\t\tcumulativeResults.Results.AdminVMRecord = append(cumulativeResults.Results.AdminVMRecord, newResults.Results.AdminVMRecord...)\n\t\tsize = len(newResults.Results.AdminVMRecord)\n\tcase types.QtVapp:\n\t\tcumulativeResults.Results.VAppRecord = append(cumulativeResults.Results.VAppRecord, newResults.Results.VAppRecord...)\n\t\tsize = len(newResults.Results.VAppRecord)\n\tcase types.QtAdminVapp:\n\t\tcumulativeResults.Results.AdminVAppRecord = append(cumulativeResults.Results.AdminVAppRecord, newResults.Results.AdminVAppRecord...)\n\t\tsize = len(newResults.Results.AdminVAppRecord)\n\tcase types.QtOrgVdc:\n\t\tcumulativeResults.Results.OrgVdcRecord = append(cumulativeResults.Results.OrgVdcRecord, newResults.Results.OrgVdcRecord...)\n\t\tsize = len(newResults.Results.OrgVdcRecord)\n\tcase types.QtAdminOrgVdc:\n\t\tcumulativeResults.Results.OrgVdcAdminRecord = 
append(cumulativeResults.Results.OrgVdcAdminRecord, newResults.Results.OrgVdcAdminRecord...)\n\t\tsize = len(newResults.Results.OrgVdcAdminRecord)\n\tcase types.QtTask:\n\t\tcumulativeResults.Results.TaskRecord = append(cumulativeResults.Results.TaskRecord, newResults.Results.TaskRecord...)\n\t\tsize = len(newResults.Results.TaskRecord)\n\tcase types.QtAdminTask:\n\t\tcumulativeResults.Results.AdminTaskRecord = append(cumulativeResults.Results.AdminTaskRecord, newResults.Results.AdminTaskRecord...)\n\t\tsize = len(newResults.Results.AdminTaskRecord)\n\tcase types.QtNetworkPool:\n\t\tcumulativeResults.Results.NetworkPoolRecord = append(cumulativeResults.Results.NetworkPoolRecord, newResults.Results.NetworkPoolRecord...)\n\t\tsize = len(newResults.Results.NetworkPoolRecord)\n\tcase types.QtProviderVdcStorageProfile:\n\t\tcumulativeResults.Results.ProviderVdcStorageProfileRecord = append(cumulativeResults.Results.ProviderVdcStorageProfileRecord, newResults.Results.ProviderVdcStorageProfileRecord...)\n\t\tsize = len(newResults.Results.ProviderVdcStorageProfileRecord)\n\tcase types.QtResourcePool:\n\t\tcumulativeResults.Results.ResourcePoolRecord = append(cumulativeResults.Results.ResourcePoolRecord, newResults.Results.ResourcePoolRecord...)\n\t\tsize = len(newResults.Results.ResourcePoolRecord)\n\n\tdefault:\n\t\treturn Results{}, 0, fmt.Errorf(\"query type %s not supported\", queryType)\n\t}\n\n\treturn cumulativeResults, size, nil\n}",
"func (b *defaultBuilder) WithResults(results []Result) Builder {\n\tfor _, res := range results {\n\t\tif res == nil {\n\t\t\tcontinue\n\t\t}\n\t\tb.results[res.ID()] = res\n\t}\n\n\treturn b\n}",
"func WriteResults(a ...interface{}) {\n\twriteMessage(levelResults, outWriter, fmt.Sprintln(a...))\n}",
"func NewService_logLevel_Results_List(s *capnp.Segment, sz int32) (Service_logLevel_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz)\n\treturn Service_logLevel_Results_List{l}, err\n}",
"func (m *LoggerManager) newLogger(filename string) *logrus.Logger {\n\tl := logrus.New()\n\tl.Out = m.newLogOutput(filename)\n\tl.Level = m.Level\n\tl.Formatter = m.Formatter\n\tif !m.UseStdout {\n\t\t// lumberjack handles write locks\n\t\tl.SetNoLock()\n\t}\n\n\tm.loggers = append(m.loggers, l)\n\treturn l\n}",
"func NewResults(del []*asura.ResponseDeliverTx) AsuraResults {\n\tres := make(AsuraResults, len(del))\n\tfor i, d := range del {\n\t\tres[i] = NewResultFromResponse(d)\n\t}\n\treturn res\n}",
"func (logDB *LoggerDB) ResultLogs(uuid, environment string, seconds int64) ([]OsqueryResultData, error) {\n\tvar logs []OsqueryResultData\n\tminusSeconds := time.Now().Add(time.Duration(-seconds) * time.Second)\n\tif err := logDB.Database.Conn.Where(\"uuid = ? AND environment = ?\", strings.ToUpper(uuid), environment).Where(\"created_at > ?\", minusSeconds).Find(&logs).Error; err != nil {\n\t\treturn logs, err\n\t}\n\treturn logs, nil\n}",
"func newAppendEntriesResponse(term uint64, success bool, index uint64, commitIndex uint64) *AppendEntriesResponse {\n\treturn &AppendEntriesResponse{\n\t\tTerm: term,\n\t\tSuccess: success,\n\t\tIndex: index,\n\t\tCommitIndex: commitIndex,\n\t}\n}",
"func addToLog(n int, term int, s *[]LogEntry) {\n\tfor i := 0; i < n; i++ {\n\t\t*s = append(*s, LogEntry{Term: term, Command: term})\n\t}\n}",
"func MergeLogEntries(organizationID string, from int64, to int64, entries LogEntries, errorIds []string) *grpc_unified_logging_go.LogResponseList {\n\n\t// responses is an array of responses (all messages group by serviceInstanceID)\n\tresponses := make([]*grpc_unified_logging_go.LogResponse, 0)\n\tnextIndex := 0\n\t// aux stores the index where the messages of a serviceInstanceID are stored\n\t// is indexed by Instance+InstanceID\n\t// The reason for implementing it in this way is because we will have the logReponses stored in an array (as we have to return them)\n\tmapIndex := make(map[string]int, 0)\n\tfor _, entry := range entries {\n\t\tpk := getLogEntryPK(*entry)\n\n\t\t// index is the index where the responses of this entry is stored\n\t\tindex, exists := mapIndex[pk]\n\t\tif !exists {\n\t\t\tmapIndex[pk] = nextIndex\n\t\t\tindex = nextIndex\n\n\t\t\tresponses = append(responses, &grpc_unified_logging_go.LogResponse{\n\t\t\t\tAppDescriptorId: entry.Kubernetes.Labels.AppDescriptorId,\n\t\t\t\tAppDescriptorName: entry.Kubernetes.Labels.AppDescriptorName,\n\t\t\t\tAppInstanceId: entry.Kubernetes.Labels.AppInstanceId,\n\t\t\t\tAppInstanceName: entry.Kubernetes.Labels.AppInstanceName,\n\t\t\t\tServiceGroupId: entry.Kubernetes.Labels.AppServiceGroupId,\n\t\t\t\tServiceGroupName: entry.Kubernetes.Labels.AppServiceGroupName,\n\t\t\t\tServiceGroupInstanceId: entry.Kubernetes.Labels.AppServiceGroupInstanceId,\n\t\t\t\tServiceId: entry.Kubernetes.Labels.AppServiceId,\n\t\t\t\tServiceName: entry.Kubernetes.Labels.AppServiceName,\n\t\t\t\tServiceInstanceId: entry.Kubernetes.Labels.AppServiceInstanceId,\n\t\t\t\tEntries: []*grpc_unified_logging_go.LogEntry{},\n\t\t\t})\n\t\t\t// we point to the next position of the array\n\t\t\tnextIndex++\n\t\t}\n\t\t// add the message\n\t\tresponses[index].Entries = append(responses[index].Entries, &grpc_unified_logging_go.LogEntry{\n\t\t\tTimestamp: entry.Timestamp.UnixNano(),\n\t\t\tMsg: entry.Msg,\n\t\t})\n\n\t}\n\n\treturn &grpc_unified_logging_go.LogResponseList{\n\t\tOrganizationId: organizationID,\n\t\tFrom: from,\n\t\tTo: to,\n\t\tResponses: responses,\n\t\tFailedClusterIds: errorIds,\n\t}\n}",
"func (e *Extension) WriteResults(ctx context.Context, results []distributed.Result) error {\n\tctx, span := traces.StartSpan(ctx)\n\tdefer span.End()\n\n\treturn e.writeResultsWithReenroll(ctx, results, true)\n}",
"func newLoggers(prefix string) (debugL, infoL, warnL, errorL *log.Logger) {\n\tif *veryverbose {\n\t\t*verbose = true\n\t}\n\tdebugH := ioutil.Discard\n\tinfoH := ioutil.Discard\n\tif *verbose {\n\t\tinfoH = os.Stdout\n\t}\n\tif *veryverbose {\n\t\tdebugH = os.Stdout\n\t}\n\tdebugL = log.New(debugH, \"DEBU: \"+prefix, log.Ldate|log.Lmicroseconds)\n\tinfoL = log.New(infoH, \"INFO: \"+prefix, log.Ldate|log.Lmicroseconds)\n\twarnL = log.New(os.Stdout, \"WARN: \"+prefix, log.Ldate|log.Lmicroseconds)\n\terrorL = log.New(os.Stdout, \"ERRO: \"+prefix, log.Ldate|log.Lmicroseconds)\n\treturn\n}",
"func outputResults(upgradeMap map[string][]InspectrResult, webhookID string, withinAlertWindow bool) {\n\tif len(upgradeMap) > 0 || withinAlertWindow {\n\t\tglog.Info(\"latest results: \" + fmt.Sprintf(\"%#v\", upgradeMap))\n\t\tpostResultToSlack(upgradeMap, webhookID)\n\t}\n}",
"func (rs *Results) Add(r Result) {\n\trs.results = append(rs.results, r)\n}",
"func AppendLog(\n\tcommand interface{},\n\tcurrentTerm RaftTerm,\n\tlogs *[]LogEntry,\n\tpersistFn func(logs []LogEntry),\n) {\n\tvar newEntry LogEntry\n\tnewEntry.Command = command\n\tnewEntry.Term = currentTerm\n\n\t*logs = append(*logs, newEntry)\n\n\tif persistFn != nil {\n\t\tpersistFn(*logs)\n\t}\n}",
"func (e *Extension) WriteResults(ctx context.Context, results []distributed.Result) error {\n\treturn e.writeResultsWithReenroll(ctx, results, true)\n}",
"func (self *LdapSearchResult) Append(e LdapEntry) {\n\tself.entries = append(self.entries, e)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewGh sets the gh field to a newly allocated GithubEvent struct, preferring placement in s's segment.
|
func (s IrcBot_noteGhEvent_Params) NewGh() (GithubEvent, error) {
ss, err := NewGithubEvent(s.Struct.Segment())
if err != nil {
return GithubEvent{}, err
}
err = s.Struct.SetPtr(0, ss.Struct.ToPtr())
return ss, err
}
|
[
"func NewGhost(new *ghostpb.Ghost) (ghost *Ghost) {\n\tghost = &Ghost{\n\t\tProto: new,\n\t}\n\n\treturn\n}",
"func (g HgGetterCreator) NewHg(addr, src string) (RepositoryGetter, error) {\n\treturn NewClientModeDirGetter(ClientModeDirGetterCfg{\n\t\tProtocol: \"hg\",\n\t\tUnderlying: &getter.HgGetter{},\n\t\tTmpDir: g.TmpDir,\n\t\tCli: g.Cli,\n\t\tAddr: addr,\n\t\tSrc: src,\n\t})\n}",
"func NewG() *G {\n\tt := newTempG()\n\treturn &G{t}\n}",
"func NewGithubEvent_List(s *capnp.Segment, sz int32) (GithubEvent_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 4}, sz)\n\treturn GithubEvent_List{l}, err\n}",
"func NewGHClient(httpClient *http.Client) *GHClient {\n\tclient := &GHClient{\n\t\tclient: github.NewClient(httpClient),\n\t}\n\tclient.Changes = client.client.PullRequests\n\tclient.Tickets = client.client.Issues\n\tclient.Repositories = client.client.Repositories\n\treturn client\n}",
"func NewGister() (*Gister, error) {\n\tc, err := githubClient()\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create github client - %v\", err)\n\t}\n\n\treturn &Gister{\n\t\tclient: c,\n\t\tdescription: \"\",\n\t\tfiles: make([]*GistFile, 0),\n\t\tpublic: true,\n\t}, nil\n}",
"func New(config *Config) *Gogm {\n\treturn &Gogm{\n\t\tconfig,\n\t\tnil}\n}",
"func NewGlogHandler(h Handler) *GlogHandler {\n\treturn &GlogHandler{\n\t\torigin: h,\n\t}\n}",
"func NewGhost(blockID int32, characterID string, replayData []byte) *Ghost {\n\treturn &Ghost{\n\t\tBlockID: blockID,\n\t\tCharacterID: characterID,\n\t\tReplayData: replayData,\n\t\ttimestamp: time.Now(),\n\t}\n}",
"func NewGossip(magic Magic) *Gossip {\n\treturn &Gossip{\n\t\tMagic: magic,\n\t}\n}",
"func newPrnG(seed uint64) *PrnG {\n\treturn &PrnG{s: seed}\n}",
"func NewGHub(client *github.Client, debug bool) *GHub {\n\treturn &GHub{client: client, debug: debug}\n}",
"func NewGopher(userID string, pos Coordinates) Gopher {\n\tangle := RandomAngle()\n\n\treturn Gopher{\n\t\tUserID: userID,\n\t\tAlive: true,\n\t\tEntity: NewEntity(thrustStep, pos.X, pos.Y, 0, 0, angle),\n\t}\n}",
"func New(accessToken string, repo string) (afero.Fs, error) {\n\trepoParts := strings.Split(repo, \"/\")\n\tif len(repoParts) != 2 {\n\t\treturn nil, errors.New(\"invalid repo path, expected owner/repo style\")\n\t}\n\n\tctx := context.Background()\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: accessToken},\n\t)\n\ttc := oauth2.NewClient(ctx, ts)\n\n\tclient := github.NewClient(tc)\n\treturn &githubFS{\n\t\tclient: client,\n\t\trepoOwner: repoParts[0],\n\t\trepoName: repoParts[1],\n\t}, nil\n}",
"func newGache() *gache {\n\tg := &gache{\n\t\texpire: int64(time.Second * 30),\n\t}\n\tfor i := range g.shards {\n\t\tg.shards[i] = new(Map)\n\t}\n\tg.expChan = make(chan string, len(g.shards)*10)\n\treturn g\n}",
"func NewHunk(c *Change, ctxLen int) *Hunk {\n\t// First change; insert leading context\n\t// Populate B since it's necessary to produce header.\n\tsame := intMin(c.A, ctxLen)\n\tif same != intMin(c.B, ctxLen) {\n\t\tpanic(\"before/after diff offsets don't match\")\n\t}\n\tctx := &Change{\n\t\tA: intMax(0, c.A-ctxLen),\n\t\tB: intMax(0, c.B-ctxLen),\n\t\tSame: same,\n\t}\n\treturn &Hunk{\n\t\tchanges: []*Change{ctx, c},\n\t\tctxLen: ctxLen,\n\t}\n}",
"func newGPVTG(s BaseSentence) (GPVTG, error) {\n\tp := newParser(s, PrefixGPVTG)\n\treturn GPVTG{\n\t\tBaseSentence: s,\n\t\tTrueTrack: p.Float64(0, \"true track\"),\n\t\tMagneticTrack: p.Float64(2, \"magnetic track\"),\n\t\tGroundSpeedKnots: p.Float64(4, \"ground speed (knots)\"),\n\t\tGroundSpeedKPH: p.Float64(6, \"ground speed (km/h)\"),\n\t}, p.Err()\n}",
"func NewHgClient() *HgClient {\n\tvar err error\n\tme := new(HgClient)\n\tme.hgServer = exec.Command(\"hg\", \"--config\", \"ui.interactive=False\",\n\t\t\"serve\", \"--cmdserver\", \"pipe\")\n\tme.hgServer.Env = append(os.Environ(), \"HGENCODING=UTF-8\")\n\tme.pipeOut, err = me.hgServer.StdoutPipe()\n\tif err != nil {\n\t\tpanic(throw(\"extractor\", \"NewHgClient: could not connect StdoutPipe: %s\", err))\n\t}\n\tme.pipeIn, err = me.hgServer.StdinPipe()\n\tif err != nil {\n\t\tpanic(throw(\"extractor\", \"NewHgClient: could not connect StdinPipe: %s\", err))\n\t}\n\n\tif err = me.hgServer.Start(); err != nil {\n\t\tpanic(throw(\"extractor\", \"NewHgClient: could not start the Hg Command Server: %s\", err))\n\t}\n\tme.readHelloMessage()\n\treturn me\n}",
"func NewHVSockRegistryEntry(machineName string, purpose HVSockPurpose) (*HVSockRegistryEntry, error) {\n\t// a so-called wildcard entry ... everything from FACB -> 6D3 is MS special sauce\n\t// for a \" linux vm\". this first segment is hexi for the hvsock port number\n\t//00000400-FACB-11E6-BD58-64006A7986D3\n\tport, err := findOpenHVSockPort()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := HVSockRegistryEntry{\n\t\tKeyName: portToKeyName(port),\n\t\tPurpose: purpose,\n\t\tPort: port,\n\t\tMachineName: machineName,\n\t}\n\tif err := r.Add(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &r, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewIrcBot_noteGhEvent_Params creates a new list of IrcBot_noteGhEvent_Params.
|
func NewIrcBot_noteGhEvent_Params_List(s *capnp.Segment, sz int32) (IrcBot_noteGhEvent_Params_List, error) {
l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)
return IrcBot_noteGhEvent_Params_List{l}, err
}
|
[
"func (s IrcBot_noteGhEvent_Params) NewGh() (GithubEvent, error) {\n\tss, err := NewGithubEvent(s.Struct.Segment())\n\tif err != nil {\n\t\treturn GithubEvent{}, err\n\t}\n\terr = s.Struct.SetPtr(0, ss.Struct.ToPtr())\n\treturn ss, err\n}",
"func NewParamsEvent(n string) *ParamsEvent {\n\tp := make(map[string]interface{})\n\te := ParamsEvent{n, false, p} // Propagation never stopped by default\n\treturn &e\n}",
"func NewNrPraiseClickParams() NrPraiseClickParams {\n\tvar ()\n\treturn NrPraiseClickParams{}\n}",
"func UA_EventNotificationList_new() []UA_EventNotificationList {\n\treturn UA_new((*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[47]))[:]).([]UA_EventNotificationList)\n}",
"func NewListRUMEventsOptionalParameters() *ListRUMEventsOptionalParameters {\n\tthis := ListRUMEventsOptionalParameters{}\n\treturn &this\n}",
"func NewTestParams(t *testing.T, vars map[string]string, inv *env.TestInventory) *Params {\n\tind := inv.GetTestIndex(t)\n\tports := env.NewPorts(ind)\n\treturn &Params{\n\t\tVars: vars,\n\t\tPorts: ports,\n\t}\n}",
"func NewPatchEventsEventIDParams() *PatchEventsEventIDParams {\n\tvar ()\n\treturn &PatchEventsEventIDParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetEventEventlistsParams() *GetEventEventlistsParams {\n\tvar ()\n\treturn &GetEventEventlistsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (s Value_Call) NewParams(n int32) (Value_List, error) {\n\tl, err := NewValue_List(capnp.Struct(s).Segment(), n)\n\tif err != nil {\n\t\treturn Value_List{}, err\n\t}\n\terr = capnp.Struct(s).SetPtr(1, l.ToPtr())\n\treturn l, err\n}",
"func NewAddParams() *AddParams {\n\tvar ()\n\treturn &AddParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewIrcBot_noteGhEvent_Results_List(s *capnp.Segment, sz int32) (IrcBot_noteGhEvent_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn IrcBot_noteGhEvent_Results_List{l}, err\n}",
"func newUpdateEvent() *updateEvent {\n\tc := &updateEvent{}\n\tc.RegisterSteps(c.firstStep, c.secondStep, c.thirdStep, c.fourthStep)\n\treturn c\n}",
"func UA_EventFieldList_new() []UA_EventFieldList {\n\treturn UA_new((*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[38]))[:]).([]UA_EventFieldList)\n}",
"func NewDescribeRepoEventsParams() *DescribeRepoEventsParams {\n\tvar ()\n\treturn &DescribeRepoEventsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func newAddEvent() *addEvent {\n\tc := &addEvent{}\n\tc.RegisterSteps(c.firstStep, c.secondStep, c.thirdStep, c.fourthStep, c.fifthStep, c.sixthStep)\n\treturn c\n}",
"func newEvents(parent ulid.I, pb pb.BroadcastEvent) ([]events.Event, error) {\n\tid, err := ulid.New()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\te := &Event{id: id, parent: parent, pb: pb}\n\te.pb.Id = e.id[:]\n\te.pb.Parent = e.parent[:]\n\treturn []events.Event{e}, nil\n}",
"func UA_MonitoringParameters_new() []UA_MonitoringParameters {\n\treturn UA_new((*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[63]))[:]).([]UA_MonitoringParameters)\n}",
"func (s Node) NewParameters(n int32) (Node_Parameter_List, error) {\n\tl, err := NewNode_Parameter_List(s.Struct.Segment(), n)\n\tif err != nil {\n\t\treturn Node_Parameter_List{}, err\n\t}\n\terr = s.Struct.SetPtr(5, l.List.ToPtr())\n\treturn l, err\n}",
"func VideoNotifyEventNew(buf []byte) xgb.Event {\n\tv := VideoNotifyEvent{}\n\tb := 1 // don't read event number\n\n\tv.Reason = buf[b]\n\tb += 1\n\n\tv.Sequence = xgb.Get16(buf[b:])\n\tb += 2\n\n\tv.Time = xproto.Timestamp(xgb.Get32(buf[b:]))\n\tb += 4\n\n\tv.Drawable = xproto.Drawable(xgb.Get32(buf[b:]))\n\tb += 4\n\n\tv.Port = Port(xgb.Get32(buf[b:]))\n\tb += 4\n\n\treturn v\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewIrcBot_noteGhEvent_Results creates a new list of IrcBot_noteGhEvent_Results.
|
func NewIrcBot_noteGhEvent_Results_List(s *capnp.Segment, sz int32) (IrcBot_noteGhEvent_Results_List, error) {
l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)
return IrcBot_noteGhEvent_Results_List{l}, err
}
|
[
"func NewResults(del []*asura.ResponseDeliverTx) AsuraResults {\n\tres := make(AsuraResults, len(del))\n\tfor i, d := range del {\n\t\tres[i] = NewResultFromResponse(d)\n\t}\n\treturn res\n}",
"func UA_EventFilterResult_new() []UA_EventFilterResult {\n\treturn UA_new((*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[171]))[:]).([]UA_EventFilterResult)\n}",
"func UA_EventNotificationList_new() []UA_EventNotificationList {\n\treturn UA_new((*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[47]))[:]).([]UA_EventNotificationList)\n}",
"func UA_MonitoredItemCreateResult_new() []UA_MonitoredItemCreateResult {\n\treturn UA_new((*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[55]))[:]).([]UA_MonitoredItemCreateResult)\n}",
"func NewApp_instance_Results_List(s *capnp.Segment, sz int32) (App_instance_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn App_instance_Results_List{l}, err\n}",
"func UA_MonitoredItemModifyResult_new() []UA_MonitoredItemModifyResult {\n\treturn UA_new((*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[31]))[:]).([]UA_MonitoredItemModifyResult)\n}",
"func UA_AddNodesResult_new() []UA_AddNodesResult {\n\treturn UA_new((*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[34]))[:]).([]UA_AddNodesResult)\n}",
"func NewHLLQueryResults() *HLLQueryResults {\n\tr := &HLLQueryResults{}\n\theader := HLLDataHeader\n\tr.buffer.Write((*(*[4]byte)(unsafe.Pointer(&header)))[:])\n\t// Padding.\n\tvar bs [4]byte\n\tr.buffer.Write(bs[:])\n\treturn r\n}",
"func newBallotResults() ballotResults {\n\treturn ballotResults{\n\t\taddrs: make(map[string]string, 40960),\n\t\treplies: make(map[string]ticketvote.CastVoteReply, 40960),\n\t}\n}",
"func NewApp_startedOn_Results_List(s *capnp.Segment, sz int32) (App_startedOn_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz)\n\treturn App_startedOn_Results_List{l}, err\n}",
"func NewFraugsterResults() *FraugsterResults {\n\treturn &FraugsterResults{}\n}",
"func NewOutputs(taskID string, attempt uint32, f []*tes.OutputFileLog) *Event {\n\treturn &Event{\n\t\tId: taskID,\n\t\tTimestamp: time.Now().Format(time.RFC3339Nano),\n\t\tType: Type_TASK_OUTPUTS,\n\t\tAttempt: attempt,\n\t\tData: &Event_Outputs{\n\t\t\tOutputs: &Outputs{\n\t\t\t\tValue: f,\n\t\t\t},\n\t\t},\n\t}\n}",
"func NewAppHooks_restore_Results_List(s *capnp.Segment, sz int32) (AppHooks_restore_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn capnp.StructList[AppHooks_restore_Results](l), err\n}",
"func (r *Results) NewRow() {\n\tr.Rows = append(r.Rows, []string{})\n}",
"func NewApp_releaseId_Results_List(s *capnp.Segment, sz int32) (App_releaseId_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz)\n\treturn App_releaseId_Results_List{l}, err\n}",
"func UA_RepublishResponse_new() []UA_RepublishResponse {\n\treturn UA_new((*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[158]))[:]).([]UA_RepublishResponse)\n}",
"func NewApp_kill_Results_List(s *capnp.Segment, sz int32) (App_kill_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn App_kill_Results_List{l}, err\n}",
"func NewApp_logLevel_Results_List(s *capnp.Segment, sz int32) (App_logLevel_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz)\n\treturn App_logLevel_Results_List{l}, err\n}",
"func NewBackend_getGrain_Results_List(s *capnp.Segment, sz int32) (Backend_getGrain_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn capnp.StructList[Backend_getGrain_Results](l), err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewBuilder_What creates a new list of Builder_What.
|
func NewBuilder_What_List(s *capnp.Segment, sz int32) (Builder_What_List, error) {
l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}, sz)
return Builder_What_List{l}, err
}
|
[
"func NewBuilder() ListBuilder {\n\treturn &listBuilder{\n\t\telements: make(map[string]bool),\n\t}\n}",
"func BuilderNew() (*Builder, error) {\n\tc := C.gtk_builder_new()\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\tobj := glib.Take(unsafe.Pointer(c))\n\treturn &Builder{obj}, nil\n}",
"func CreateBuilder(\n\tnumberOfInputs int,\n\tlayerSizes []int,\n\tchar *neural.CellCharacter,\n\tgoalPosition *model.Vec,\n\tstartPosition model.Vec,\n\tboundryChecker CheckBoundry) (builder Builder) {\n\n\t//builder := new(Builder)\n\tbuilder.numberOfInputs = numberOfInputs\n\tbuilder.layerSizes = layerSizes\n\tbuilder.characteristics = char\n\tbuilder.goalPosition = goalPosition\n\tbuilder.startPosition = startPosition\n\tbuilder.boundryChecker = boundryChecker\n\treturn\n}",
"func NewBuilder() *Builder {\n\treturn &Builder{\n\t\tembed: &discordgo.MessageEmbed{\n\t\t\tTimestamp: time.Now().Format(time.RFC3339),\n\t\t\tColor: 0x439ef1,\n\t\t},\n\t}\n}",
"func NewBuilder() Builder {\n\treturn &builder{\n\t\tproduct: make(map[string]string),\n\t\tallFieldsPresent: true,\n\t}\n}",
"func newCheckListItemDefinitionMutation(c config, op Op, opts ...checklistitemdefinitionOption) *CheckListItemDefinitionMutation {\n\tm := &CheckListItemDefinitionMutation{\n\t\tconfig: c,\n\t\top: op,\n\t\ttyp: TypeCheckListItemDefinition,\n\t\tclearedFields: make(map[string]struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\treturn m\n}",
"func NewBuilder() *Builder {\n\treturn &Builder{\n\t\tstate: map[string]string{},\n\t}\n}",
"func NewBuilder(table string) *Builder {\n\tb := Builder{\n\t\ttable: table,\n\t\tprocessor: &BuildProcessorDefault{},\n\t\tallowemptywhere: false,\n\t}\n\tb.fields = list.New()\n\treturn &b\n}",
"func NewBuilder(desc *descpb.DatabaseDescriptor) DatabaseDescriptorBuilder {\n\treturn &databaseDescriptorBuilder{\n\t\toriginal: protoutil.Clone(desc).(*descpb.DatabaseDescriptor),\n\t}\n}",
"func New() *BuildHelp {\n\treturn &BuildHelp{Builder: *gtk.NewBuilder()}\n}",
"func NewBuilder() *Builder {\n\treturn &Builder{email: email{}}\n}",
"func New(builder ListItemBuilder, paddingY, wantX, wantY int) (*List, error) {\n\tl := List{\n\t\tdata: EmptyList{},\n\t\tbuilder: builder,\n\t}\n\tvar err error\n\n\tif l.box, err = gtk.BoxNew(gtk.ORIENTATION_VERTICAL, paddingY); err != nil {\n\t\treturn nil, err\n\t}\n\tl.box.SetHExpand(true)\n\tl.box.SetVExpand(true)\n\tl.box.SetSizeRequest(wantX, wantY)\n\n\tif l.scroll, err = gtk.ScrolledWindowNew(nil, nil); err != nil {\n\t\treturn nil, err\n\t}\n\tl.scroll.Add(l.box)\n\n\treturn &l, nil\n}",
"func newCheckListItemMutation(c config, op Op, opts ...checklistitemOption) *CheckListItemMutation {\n\tm := &CheckListItemMutation{\n\t\tconfig: c,\n\t\top: op,\n\t\ttyp: TypeCheckListItem,\n\t\tclearedFields: make(map[string]struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\treturn m\n}",
"func NewPeopleRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*PeopleRequestBuilder) {\n urlParams := make(map[string]string)\n urlParams[\"request-raw-url\"] = rawUrl\n return NewPeopleRequestBuilderInternal(urlParams, requestAdapter)\n}",
"func New() *CList { return newWithMax(MaxLength) }",
"func (*Builders) ListBuilders(ctx context.Context, req *pb.ListBuildersRequest) (*pb.ListBuildersResponse, error) {\n\tif err := validateListBuildersReq(ctx, req); err != nil {\n\t\treturn nil, appstatus.BadRequest(err)\n\t}\n\n\t// Parse the cursor from the page token.\n\tcur, err := listBuildersCursorVault.Cursor(ctx, req.PageToken)\n\tswitch err {\n\tcase pagination.ErrInvalidPageToken:\n\t\treturn nil, appstatus.BadRequest(err)\n\tcase nil:\n\t\t// continue\n\tdefault:\n\t\treturn nil, err\n\t}\n\n\t// ACL checks.\n\tvar key *datastore.Key\n\tvar allowedBuckets []string\n\tif req.Bucket == \"\" {\n\t\tif req.Project != \"\" {\n\t\t\tkey = model.ProjectKey(ctx, req.Project)\n\t\t}\n\n\t\tvar err error\n\t\tif allowedBuckets, err = perm.BucketsByPerm(ctx, bbperms.BuildersList, req.Project); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tkey = model.BucketKey(ctx, req.Project, req.Bucket)\n\n\t\tif err := perm.HasInBucket(ctx, bbperms.BuildersList, req.Project, req.Bucket); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tallowedBuckets = []string{protoutil.FormatBucketID(req.Project, req.Bucket)}\n\t}\n\n\t// Fetch the builders.\n\tq := datastore.NewQuery(model.BuilderKind).Ancestor(key).Start(cur)\n\tbuilders, nextCursor, err := fetchBuilders(ctx, q, allowedBuckets, req.PageSize)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"failed to fetch builders\").Err()\n\t}\n\n\t// Generate the next page token.\n\tnextPageToken, err := listBuildersCursorVault.PageToken(ctx, nextCursor)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Compose the response.\n\tres := &pb.ListBuildersResponse{\n\t\tBuilders: make([]*pb.BuilderItem, len(builders)),\n\t\tNextPageToken: nextPageToken,\n\t}\n\tfor i, b := range builders {\n\t\tres.Builders[i] = &pb.BuilderItem{\n\t\t\tId: &pb.BuilderID{\n\t\t\t\tProject: b.Parent.Parent().StringID(),\n\t\t\t\tBucket: b.Parent.StringID(),\n\t\t\t\tBuilder: b.ID,\n\t\t\t},\n\t\t\tConfig: b.Config,\n\t\t}\n\t}\n\treturn res, nil\n}",
"func NewBuilder(sender *host.Host) Builder {\n\tcb := Builder{}\n\tcb.actions = append(cb.actions, func(packet *Packet) {\n\t\tpacket.Sender = sender\n\t\tpacket.RemoteAddress = sender.Address.String()\n\t})\n\treturn cb\n}",
"func NewBuilder(scopes ...string) (*builder, error) {\n\tif len(scopes) == 0 {\n\t\tscopes = []string{di.App, di.Request, di.SubRequest}\n\t}\n\tb, err := di.NewBuilder(scopes...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not create di.Builder: %v\", err)\n\t}\n\tprovider := &providerPkg.ProviderObject{}\n\tif err := provider.Load(); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not load definitions with the Provider (ProviderObject from gitlab.com/igor.tumanov1/theboatscom/di/sarulabsdingo/container): %v\", err)\n\t}\n\tfor _, d := range getDiDefs(provider) {\n\t\tif err := b.Add(d); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not add di.Def in di.Builder: %v\", err)\n\t\t}\n\t}\n\treturn &builder{builder: b}, nil\n}",
"func NewItemListRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*ItemListRequestBuilder) {\n urlParams := make(map[string]string)\n urlParams[\"request-raw-url\"] = rawUrl\n return NewItemListRequestBuilderInternal(urlParams, requestAdapter)\n}",
"func Builder(\n\tallocid *field.AllocIDField,\n\talloctranstype *field.AllocTransTypeField,\n\talloctype *field.AllocTypeField,\n\tallocnoorderstype *field.AllocNoOrdersTypeField,\n\tside *field.SideField,\n\tquantity *field.QuantityField,\n\tavgpx *field.AvgPxField,\n\ttradedate *field.TradeDateField) MessageBuilder {\n\tvar builder MessageBuilder\n\tbuilder.MessageBuilder = quickfix.NewMessageBuilder()\n\tbuilder.Header().Set(field.NewBeginString(fix.BeginString_FIX44))\n\tbuilder.Header().Set(field.NewMsgType(\"J\"))\n\tbuilder.Body().Set(allocid)\n\tbuilder.Body().Set(alloctranstype)\n\tbuilder.Body().Set(alloctype)\n\tbuilder.Body().Set(allocnoorderstype)\n\tbuilder.Body().Set(side)\n\tbuilder.Body().Set(quantity)\n\tbuilder.Body().Set(avgpx)\n\tbuilder.Body().Set(tradedate)\n\treturn builder\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewBuilder_Capability creates a new list of Builder_Capability.
|
func NewBuilder_Capability_List(s *capnp.Segment, sz int32) (Builder_Capability_List, error) {
l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz)
return Builder_Capability_List{l}, err
}
|
[
"func NewCreateCapabilityTooManyRequests() *CreateCapabilityTooManyRequests {\n\treturn &CreateCapabilityTooManyRequests{}\n}",
"func newCapabilityWrite(length int) (string, *CapabilityWrite) {\n\tw := &CapabilityWrite{}\n\tw.handlerName = generateHandlerName() + `_w`\n\tw.Input = make(chan msg.Request, length)\n\tw.Shutdown = make(chan struct{})\n\treturn w.handlerName, w\n}",
"func NewBuilder() ListBuilder {\n\treturn &listBuilder{\n\t\telements: make(map[string]bool),\n\t}\n}",
"func NewPrinterCapabilities()(*PrinterCapabilities) {\n m := &PrinterCapabilities{\n }\n m.SetAdditionalData(make(map[string]interface{}));\n return m\n}",
"func NewBuilder() Builder {\n\treturn &builder{\n\t\tproduct: make(map[string]string),\n\t\tallFieldsPresent: true,\n\t}\n}",
"func NewCreateCapabilityForbidden() *CreateCapabilityForbidden {\n\treturn &CreateCapabilityForbidden{}\n}",
"func UA_BrowseDescription_new() []UA_BrowseDescription {\n\treturn UA_new((*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[133]))[:]).([]UA_BrowseDescription)\n}",
"func NewBattery(no_of_columns int) *Battery {\n\tbattery := new(Battery)\n\tbattery.no_of_columns = 4\n\tfor index := 0; index < battery.no_of_columns; index++ {\n\t\tcolumn := NewColumn(index)\n\t\tbattery.column_list = append(battery.column_list, *column)\n\t}\n\treturn battery\n}",
"func NewBackend_List(s *capnp.Segment, sz int32) (Backend_List, error) {\n\tl, err := capnp.NewPointerList(s, sz)\n\treturn capnp.CapList[Backend](l), err\n}",
"func NewCreateCapabilityDefault(code int) *CreateCapabilityDefault {\n\treturn &CreateCapabilityDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func (api *API) AddCapability(conf CapConfig) ([]Capability, error) {\n\tvar (\n\t\tvalues = url.Values{}\n\t\tret = []Capability{}\n\t\terrs []error\n\t)\n\n\tif conf.UID == \"\" {\n\t\treturn nil, errors.New(\"UID field is required\")\n\t}\n\tif conf.UserCaps == \"\" {\n\t\treturn nil, errors.New(\"UserCaps field is required\")\n\t}\n\n\tvalues, errs = encurl.Translate(conf)\n\tif len(errs) > 0 {\n\t\treturn nil, errs[0]\n\t}\n\tvalues.Add(\"format\", \"json\")\n\tbody, _, err := api.call(\"PUT\", \"/user\", values, true, \"caps\")\n\tif err = json.Unmarshal(body, &ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, err\n}",
"func NewBrand_Binding_List(s *capnp.Segment, sz int32) (Brand_Binding_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}, sz)\n\treturn Brand_Binding_List{l}, err\n}",
"func BuilderNew() (*Builder, error) {\n\tc := C.gtk_builder_new()\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\tobj := glib.Take(unsafe.Pointer(c))\n\treturn &Builder{obj}, nil\n}",
"func NewCapabilityComponentDef(componentDefinition *v1beta1.ComponentDefinition) CapabilityComponentDefinition {\n\tvar def CapabilityComponentDefinition\n\tdef.Name = componentDefinition.Name\n\tif componentDefinition.Spec.Workload.Definition == (commontypes.WorkloadGVK{}) && componentDefinition.Spec.Workload.Type != \"\" {\n\t\tdef.WorkloadType = util.ReferWorkload\n\t\tdef.WorkloadDefName = componentDefinition.Spec.Workload.Type\n\t}\n\tif componentDefinition.Spec.Schematic != nil {\n\t\tif componentDefinition.Spec.Schematic.HELM != nil {\n\t\t\tdef.WorkloadType = util.HELMDef\n\t\t\tdef.Helm = componentDefinition.Spec.Schematic.HELM\n\t\t}\n\t\tif componentDefinition.Spec.Schematic.KUBE != nil {\n\t\t\tdef.WorkloadType = util.KubeDef\n\t\t\tdef.Kube = componentDefinition.Spec.Schematic.KUBE\n\t\t}\n\t\tif componentDefinition.Spec.Schematic.Terraform != nil {\n\t\t\tdef.WorkloadType = util.TerraformDef\n\t\t\tdef.Terraform = componentDefinition.Spec.Schematic.Terraform\n\t\t}\n\t}\n\tdef.ComponentDefinition = *componentDefinition.DeepCopy()\n\treturn def\n}",
"func (c *Client) Capability() error {\n\tmsg, err := c.cmd(\"CAPABILITY\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tcapability := make(map[string]string)\n\tcapabilityList := strings.Split(msg, \" \")\n\tif len(capabilityList) > 0 {\n\t\tfor _, c := range capabilityList[2:] {\n\t\t\tcapability[c] = \"\"\n\t\t}\n\t}\n\tc.capability = capability\n\treturn err\n}",
"func CreateBuilder(\n\tnumberOfInputs int,\n\tlayerSizes []int,\n\tchar *neural.CellCharacter,\n\tgoalPosition *model.Vec,\n\tstartPosition model.Vec,\n\tboundryChecker CheckBoundry) (builder Builder) {\n\n\t//builder := new(Builder)\n\tbuilder.numberOfInputs = numberOfInputs\n\tbuilder.layerSizes = layerSizes\n\tbuilder.characteristics = char\n\tbuilder.goalPosition = goalPosition\n\tbuilder.startPosition = startPosition\n\tbuilder.boundryChecker = boundryChecker\n\treturn\n}",
"func createCapabilityContainer(rc internalapi.RuntimeService, ic internalapi.ImageManagerService, podID string, podConfig *runtimeapi.PodSandboxConfig, prefix string) string {\n\tBy(\"create Capability container\")\n\tcontainerName := prefix + framework.NewUUID()\n\tcontainerConfig := &runtimeapi.ContainerConfig{\n\t\tMetadata: framework.BuildContainerMetadata(containerName, framework.DefaultAttempt),\n\t\tImage: &runtimeapi.ImageSpec{Image: framework.DefaultContainerImage},\n\t\tCommand: []string{\"top\"},\n\t\tLinux: &runtimeapi.LinuxContainerConfig{\n\t\t\tSecurityContext: &runtimeapi.LinuxContainerSecurityContext{\n\t\t\t\tCapabilities: &runtimeapi.Capability{\n\t\t\t\t\tAddCapabilities: []string{\"NET_ADMIN\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn framework.CreateContainer(rc, ic, containerConfig, podID, podConfig)\n}",
"func (s Brand_Scope) NewBind(n int32) (Brand_Binding_List, error) {\n\ts.Struct.SetUint16(8, 0)\n\tl, err := NewBrand_Binding_List(s.Struct.Segment(), n)\n\tif err != nil {\n\t\treturn Brand_Binding_List{}, err\n\t}\n\terr = s.Struct.SetPtr(0, l.List.ToPtr())\n\treturn l, err\n}",
"func New(capConfig *configs.Capabilities) (*Caps, error) {\n\tvar (\n\t\terr error\n\t\tcaps Caps\n\t)\n\n\tif caps.bounding, err = capSlice(capConfig.Bounding); err != nil {\n\t\treturn nil, err\n\t}\n\tif caps.effective, err = capSlice(capConfig.Effective); err != nil {\n\t\treturn nil, err\n\t}\n\tif caps.inheritable, err = capSlice(capConfig.Inheritable); err != nil {\n\t\treturn nil, err\n\t}\n\tif caps.permitted, err = capSlice(capConfig.Permitted); err != nil {\n\t\treturn nil, err\n\t}\n\tif caps.ambient, err = capSlice(capConfig.Ambient); err != nil {\n\t\treturn nil, err\n\t}\n\tif caps.pid, err = capability.NewPid2(0); err != nil {\n\t\treturn nil, err\n\t}\n\tif err = caps.pid.Load(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &caps, nil\n}",
"func NewBuilder(scopes ...string) (*builder, error) {\n\tif len(scopes) == 0 {\n\t\tscopes = []string{di.App, di.Request, di.SubRequest}\n\t}\n\tb, err := di.NewBuilder(scopes...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not create di.Builder: %v\", err)\n\t}\n\tprovider := &providerPkg.ProviderObject{}\n\tif err := provider.Load(); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not load definitions with the Provider (ProviderObject from gitlab.com/igor.tumanov1/theboatscom/di/sarulabsdingo/container): %v\", err)\n\t}\n\tfor _, d := range getDiDefs(provider) {\n\t\tif err := b.Add(d); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not add di.Def in di.Builder: %v\", err)\n\t\t}\n\t}\n\treturn &builder{builder: b}, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewBuilder_Opts creates a new list of Builder_Opts.
|
func NewBuilder_Opts_List(s *capnp.Segment, sz int32) (Builder_Opts_List, error) {
l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}, sz)
return Builder_Opts_List{l}, err
}
|
[
"func NewBuilder() ListBuilder {\n\treturn &listBuilder{\n\t\telements: make(map[string]bool),\n\t}\n}",
"func newOptionList(opts []Option) *optionList {\n\toptions := new(optionList)\n\tfor _, opt := range opts {\n\t\topt(options)\n\t}\n\treturn options\n}",
"func BuilderNew() (*Builder, error) {\n\tc := C.gtk_builder_new()\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\tobj := glib.Take(unsafe.Pointer(c))\n\treturn &Builder{obj}, nil\n}",
"func NewBuilder(root Node, opts ...BuilderOption) *Builder {\n\tdb := &Builder{Node: root}\n\tfor _, opt := range opts {\n\t\topt(db)\n\t}\n\treturn db\n}",
"func NewOpts() *Opts {\n\treturn &Opts{\n\t\tHomeKubeconfig: os.Getenv(\"KUBECONFIG\"),\n\t\tNodeName: DefaultNodeName,\n\t\tPodName: os.Getenv(\"POD_NAME\"),\n\t\tTenantNamespace: corev1.NamespaceDefault,\n\t\tInformerResyncPeriod: DefaultInformerResyncPeriod,\n\n\t\tDisableIPReflection: false,\n\n\t\tCertificateType: argsutils.NewEnum([]string{CertificateTypeKubelet, CertificateTypeAWS, CertificateTypeSelfSigned}, CertificateTypeKubelet),\n\t\tListenPort: DefaultListenPort,\n\t\tEnableProfiling: false,\n\n\t\tPodWorkers: DefaultPodWorkers,\n\t\tServiceWorkers: DefaultServiceWorkers,\n\t\tEndpointSliceWorkers: DefaultEndpointSliceWorkers,\n\t\tIngressWorkers: DefaultIngressWorkers,\n\t\tConfigMapWorkers: DefaultConfigMapWorkers,\n\t\tSecretWorkers: DefaultSecretWorkers,\n\t\tServiceAccountWorkers: DefaultServiceAccountWorkers,\n\t\tPersistentVolumeClaimWorkers: DefaultPersistenVolumeClaimWorkers,\n\t\tEventWorkers: DefaultEventWorkers,\n\n\t\tLabelsNotReflected: argsutils.StringList{},\n\t\tAnnotationsNotReflected: argsutils.StringList{},\n\n\t\tNodeLeaseDuration: node.DefaultLeaseDuration * time.Second,\n\t\tNodePingInterval: node.DefaultPingInterval,\n\t\tNodePingTimeout: DefaultNodePingTimeout,\n\t\tNodeCheckNetwork: DefaultNodeCheckNetwork,\n\n\t\tVirtualKubeletLeaseEnabled: true,\n\t\tVirtualKubeletLeaseLeaseDuration: 15 * time.Second,\n\t\tVirtualKubeletLeaseRenewDeadline: 10 * time.Second,\n\t\tVirtualKubeletLeaseRetryPeriod: 5 * time.Second,\n\t}\n}",
"func NewBuilder(prefix string, defaults map[string]interface{}) *ConfigBuilder {\n\tprefix = strings.ToUpper(prefix)\n\tobj := make(map[string]interface{})\n\tdef := &ConfigDefault{prefix: prefix, values: defaults, maxRecursion: 5}\n\tconf := &ConfigImpl{values: obj, parent: nil, def: def}\n\tresult := &ConfigBuilder{conf: conf, ignoreMissingFiles: false}\n\n\treturn result\n}",
"func CreateBuilder(\n\tnumberOfInputs int,\n\tlayerSizes []int,\n\tchar *neural.CellCharacter,\n\tgoalPosition *model.Vec,\n\tstartPosition model.Vec,\n\tboundryChecker CheckBoundry) (builder Builder) {\n\n\t//builder := new(Builder)\n\tbuilder.numberOfInputs = numberOfInputs\n\tbuilder.layerSizes = layerSizes\n\tbuilder.characteristics = char\n\tbuilder.goalPosition = goalPosition\n\tbuilder.startPosition = startPosition\n\tbuilder.boundryChecker = boundryChecker\n\treturn\n}",
"func NewOpts(opts shared.Opts) Opts {\n\tc := Opts{\n\t\tOpts: opts,\n\t}\n\n\tif err := env.Parse(&c); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t}).Error(\"Unable to unmarshal configuration\")\n\t}\n\n\t// Defaults\n\tif c.MQTTOpts.DiscoveryName == \"\" {\n\t\tc.MQTTOpts.DiscoveryName = \"unifi\"\n\t}\n\n\tif c.MQTTOpts.TopicPrefix == \"\" {\n\t\tc.MQTTOpts.TopicPrefix = \"home/unifi\"\n\t}\n\n\treturn c\n}",
"func NewItems(options ...Option) *ItemsBuilder {\n\tvar lock sync.Locker = &sync.Mutex{}\n\tfor _, option := range options {\n\t\tswitch option.Name() {\n\t\tcase optkeyLocker:\n\t\t\tlock = option.Value().(sync.Locker)\n\t\t}\n\t}\n\tvar b ItemsBuilder\n\tif lock == nil {\n\t\tlock = nilLock{}\n\t}\n\tb.lock = lock\n\tb.target = &items{}\n\treturn &b\n}",
"func NewBuilder() *Builder {\n\treturn &Builder{\n\t\tstate: map[string]string{},\n\t}\n}",
"func NewProposalBuilder(\n\tctx context.Context,\n\tlayerTimer timesync.LayerTimer,\n\tsigner *signing.EdSigner,\n\tvrfSigner *signing.VRFSigner,\n\tatxDB activationDB,\n\tpublisher pubsub.Publisher,\n\tpdb proposalDB,\n\tbbp baseBallotProvider,\n\tbeaconProvider system.BeaconGetter,\n\tsyncer system.SyncStateProvider,\n\tprojector projector,\n\ttxPool txPool,\n\topts ...Opt,\n) *ProposalBuilder {\n\tsctx, cancel := context.WithCancel(ctx)\n\tpb := &ProposalBuilder{\n\t\tlogger: log.NewNop(),\n\t\tcfg: defaultConfig(),\n\t\tctx: sctx,\n\t\tcancel: cancel,\n\t\tsigner: signer,\n\t\tlayerTimer: layerTimer,\n\t\tpublisher: publisher,\n\t\tproposalDB: pdb,\n\t\tbaseBallotProvider: bbp,\n\t\tbeaconProvider: beaconProvider,\n\t\tsyncer: syncer,\n\t\tprojector: projector,\n\t\ttxPool: txPool,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(pb)\n\t}\n\n\tif pb.projector == nil {\n\t\tpb.logger.Panic(\"nil projector\")\n\t}\n\n\tif pb.proposalOracle == nil {\n\t\tpb.proposalOracle = newMinerOracle(pb.cfg.layerSize, pb.cfg.layersPerEpoch, atxDB, vrfSigner, pb.cfg.minerID, pb.logger)\n\t}\n\n\tif pb.refBallotDB == nil {\n\t\tif len(pb.cfg.dbPath) == 0 {\n\t\t\tpb.refBallotDB = database.NewMemDatabase()\n\t\t} else {\n\t\t\tvar err error\n\t\t\tpb.refBallotDB, err = database.NewLDBDatabase(filepath.Join(pb.cfg.dbPath, \"miner\"), 16, 16, pb.logger)\n\t\t\tif err != nil {\n\t\t\t\tpb.logger.With().Panic(\"cannot create miner database\", log.Err(err))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn pb\n}",
"func NewBuilder() Builder {\n\treturn &builder{\n\t\tproduct: make(map[string]string),\n\t\tallFieldsPresent: true,\n\t}\n}",
"func NewBuilder(c Config) (pool.EnvBuilder, map[string]any, error) {\n\tb, err := winc.NewBuilder(\"\")\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tc.Info(\"created winc builder\")\n\treturn b, map[string]any{}, nil\n}",
"func NewBuilder() credentials.Builder { return &gitConfigBuilder{} }",
"func NewPingOpts(opts ...PingOptModifier) *PingOpt {\n\toptions := DefaultPingOpts()\n\tfor _, o := range opts {\n\t\to(options)\n\t}\n\treturn options\n}",
"func NewBuilder(scopes ...string) (*builder, error) {\n\tif len(scopes) == 0 {\n\t\tscopes = []string{di.App, di.Request, di.SubRequest}\n\t}\n\tb, err := di.NewBuilder(scopes...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not create di.Builder: %v\", err)\n\t}\n\tprovider := &providerPkg.ProviderObject{}\n\tif err := provider.Load(); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not load definitions with the Provider (ProviderObject from gitlab.com/igor.tumanov1/theboatscom/di/sarulabsdingo/container): %v\", err)\n\t}\n\tfor _, d := range getDiDefs(provider) {\n\t\tif err := b.Add(d); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not add di.Def in di.Builder: %v\", err)\n\t\t}\n\t}\n\treturn &builder{builder: b}, nil\n}",
"func NewToolchainOpts() ToolchainOpts {\n\treturn ToolchainOpts{\n\t\tName: toolchains.Default,\n\t}\n}",
"func CreateOptions(o []DeriveOptionsBuilder) *DeriveOpts {\n\topts := &DeriveOpts{}\n\tapply(opts, o)\n\treturn opts\n}",
"func NewBuilder(table string) *Builder {\n\tb := Builder{\n\t\ttable: table,\n\t\tprocessor: &BuildProcessorDefault{},\n\t\tallowemptywhere: false,\n\t}\n\tb.fields = list.New()\n\treturn &b\n}",
"func NewPlatformOpts(alias string, category map[string]interface{}, radar map[string]interface{}, publicProviderID int, description string) PlatformOpts {\n\tresult := PlatformOpts{\n\t\tPlatformAlias: alias,\n\t\tPlatformDispName: alias,\n\t\tCategory: category,\n\t\tRadarOpts: radar,\n\t\tPublicProviderID: publicProviderID,\n\t\tDescription: description,\n\t}\n\treturn result\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewBuilder_capabilities_Params creates a new list of Builder_capabilities_Params.
|
func NewBuilder_capabilities_Params_List(s *capnp.Segment, sz int32) (Builder_capabilities_Params_List, error) {
l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)
return Builder_capabilities_Params_List{l}, err
}
|
[
"func (s *ConfigurationService) NewListCapabilitiesParams() *ListCapabilitiesParams {\n\tp := &ListCapabilitiesParams{}\n\tp.p = make(map[string]interface{})\n\treturn p\n}",
"func NewPrinterCapabilities()(*PrinterCapabilities) {\n m := &PrinterCapabilities{\n }\n m.SetAdditionalData(make(map[string]interface{}));\n return m\n}",
"func NewBackend_backupGrain_Params_List(s *capnp.Segment, sz int32) (Backend_backupGrain_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 4}, sz)\n\treturn capnp.StructList[Backend_backupGrain_Params](l), err\n}",
"func UA_MonitoringParameters_new() []UA_MonitoringParameters {\n\treturn UA_new((*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[63]))[:]).([]UA_MonitoringParameters)\n}",
"func NewApp_releaseId_Params_List(s *capnp.Segment, sz int32) (App_releaseId_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn App_releaseId_Params_List{l}, err\n}",
"func NewBackend_downloadBackup_Params_List(s *capnp.Segment, sz int32) (Backend_downloadBackup_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}, sz)\n\treturn capnp.StructList[Backend_downloadBackup_Params](l), err\n}",
"func NewApp_kill_Params_List(s *capnp.Segment, sz int32) (App_kill_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn App_kill_Params_List{l}, err\n}",
"func NewCreateCapabilityTooManyRequests() *CreateCapabilityTooManyRequests {\n\treturn &CreateCapabilityTooManyRequests{}\n}",
"func NewAssignable_asSetter_Params_List(s *capnp.Segment, sz int32) (Assignable_asSetter_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn Assignable_asSetter_Params_List{l}, err\n}",
"func NewPrintersItemGetCapabilitiesRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*PrintersItemGetCapabilitiesRequestBuilder) {\n m := &PrintersItemGetCapabilitiesRequestBuilder{\n BaseRequestBuilder: *i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewBaseRequestBuilder(requestAdapter, \"{+baseurl}/print/printers/{printer%2Did}/getCapabilities()\", pathParameters),\n }\n return m\n}",
"func ConstructGetCapabilities(cap uint32, count uint32, property uint32) ([]byte, error) {\n\tcmdHdr, err := MakeCommandHeader(tagNO_SESSIONS, 0, cmdGetCapability)\n\tif err != nil {\n\t\treturn nil, errors.New(\"GetCapability failed\")\n\t}\n\tcap_bytes := []interface{}{&cap, &property, &count}\n\tcmd, _ := packWithHeader(cmdHdr, cap_bytes)\n\treturn cmd, nil\n}",
"func NewBackend_uploadBackup_Params_List(s *capnp.Segment, sz int32) (Backend_uploadBackup_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn capnp.StructList[Backend_uploadBackup_Params](l), err\n}",
"func NewConfigurationManager_updateConfiguration_Params_List(s *capnp.Segment, sz int32) (ConfigurationManager_updateConfiguration_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}, sz)\n\treturn ConfigurationManager_updateConfiguration_Params_List{l}, err\n}",
"func MakeControlParameters() *ControlParameters {\n\tc := new(ControlParameters)\n\treturn c\n}",
"func NewBackend_installPackage_Params_List(s *capnp.Segment, sz int32) (Backend_installPackage_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn capnp.StructList[Backend_installPackage_Params](l), err\n}",
"func New(capConfig *configs.Capabilities) (*Caps, error) {\n\tvar (\n\t\terr error\n\t\tcaps Caps\n\t)\n\n\tif caps.bounding, err = capSlice(capConfig.Bounding); err != nil {\n\t\treturn nil, err\n\t}\n\tif caps.effective, err = capSlice(capConfig.Effective); err != nil {\n\t\treturn nil, err\n\t}\n\tif caps.inheritable, err = capSlice(capConfig.Inheritable); err != nil {\n\t\treturn nil, err\n\t}\n\tif caps.permitted, err = capSlice(capConfig.Permitted); err != nil {\n\t\treturn nil, err\n\t}\n\tif caps.ambient, err = capSlice(capConfig.Ambient); err != nil {\n\t\treturn nil, err\n\t}\n\tif caps.pid, err = capability.NewPid2(0); err != nil {\n\t\treturn nil, err\n\t}\n\tif err = caps.pid.Load(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &caps, nil\n}",
"func NewApp_service_Params_List(s *capnp.Segment, sz int32) (App_service_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz)\n\treturn App_service_Params_List{l}, err\n}",
"func NewPrintersItemGetCapabilitiesRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*PrintersItemGetCapabilitiesRequestBuilder) {\n urlParams := make(map[string]string)\n urlParams[\"request-raw-url\"] = rawUrl\n return NewPrintersItemGetCapabilitiesRequestBuilderInternal(urlParams, requestAdapter)\n}",
"func NewApp_instance_Params_List(s *capnp.Segment, sz int32) (App_instance_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn App_instance_Params_List{l}, err\n}",
"func NewBuilder() ListBuilder {\n\treturn &listBuilder{\n\t\telements: make(map[string]bool),\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewResult sets the result field to a newly allocated Builder_Capability_List, preferring placement in s's segment.
|
func (s Builder_capabilities_Results) NewResult(n int32) (Builder_Capability_List, error) {
l, err := NewBuilder_Capability_List(s.Struct.Segment(), n)
if err != nil {
return Builder_Capability_List{}, err
}
err = s.Struct.SetPtr(0, l.List.ToPtr())
return l, err
}
|
[
"func (n *ProcessSizedElementsAndRestrictions) newSplitResult(ctx context.Context, rest any, w []typex.Window, weState any) (*FullValue, error) {\n\tvar size float64\n\tvar err error\n\telm := n.elm.Elm.(*FullValue).Elm\n\tif fv, ok := elm.(*FullValue); ok {\n\t\tsize, err = n.sizeInv.Invoke(ctx, fv, rest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif size < 0 {\n\t\t\terr := errors.Errorf(\"size returned expected to be non-negative but received %v.\", size)\n\t\t\treturn nil, errors.WithContextf(err, \"%v\", n)\n\t\t}\n\t} else {\n\t\tfv := &FullValue{Elm: elm}\n\t\tsize, err = n.sizeInv.Invoke(ctx, fv, rest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif size < 0 {\n\t\t\terr := errors.Errorf(\"size returned expected to be non-negative but received %v.\", size)\n\t\t\treturn nil, errors.WithContextf(err, \"%v\", n)\n\t\t}\n\t}\n\treturn &FullValue{\n\t\tElm: &FullValue{\n\t\t\tElm: elm,\n\t\t\tElm2: &FullValue{\n\t\t\t\tElm: rest,\n\t\t\t\tElm2: weState,\n\t\t\t},\n\t\t},\n\t\tElm2: size,\n\t\tTimestamp: n.elm.Timestamp,\n\t\tWindows: w,\n\t}, nil\n}",
"func newSuccessResult(vres *inventoryviews.SuccessResultView) *SuccessResult {\n\tres := &SuccessResult{}\n\tif vres.OK != nil {\n\t\tres.OK = *vres.OK\n\t}\n\treturn res\n}",
"func (s RegistrationServer_registerConnection_Results) NewResult() (ConnectionResponse, error) {\n\tss, err := NewConnectionResponse(s.Struct.Segment())\n\tif err != nil {\n\t\treturn ConnectionResponse{}, err\n\t}\n\terr = s.Struct.SetPtr(0, ss.Struct.ToPtr())\n\treturn ss, err\n}",
"func newSuccessResult(vres *productviews.SuccessResultView) *SuccessResult {\n\tres := &SuccessResult{}\n\tif vres.OK != nil {\n\t\tres.OK = *vres.OK\n\t}\n\treturn res\n}",
"func NewAssignable_Setter_set_Results_List(s *capnp.Segment, sz int32) (Assignable_Setter_set_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn Assignable_Setter_set_Results_List{l}, err\n}",
"func NewAssignable_get_Results_List(s *capnp.Segment, sz int32) (Assignable_get_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}, sz)\n\treturn Assignable_get_Results_List{l}, err\n}",
"func NewBackend_installPackage_Results_List(s *capnp.Segment, sz int32) (Backend_installPackage_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn capnp.StructList[Backend_installPackage_Results](l), err\n}",
"func (this *SelectResult) BuildNewResult() (*sqltypes.Result, error) {\n\t//fmt.Println(this.tempRows)\n\terr := this.handleRowsGroupBy()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t//sort\n\tthis.sort()\n\t//\n\tthis.optTempFieldsRows()\n\t//\n\tthis.optTempFields()\n\t//\n\tvar rows [][]sqltypes.Value\n\tvar offset, rowcount int64\n\tif this.stmt.Limit != nil && len(this.tempRows) > 0{\n\t\tif this.stmt.Limit.Offset != nil {\n\t\t\ttbufOffset := sqlparser.NewTrackedBuffer(nil)\n\t\t\tthis.stmt.Limit.Offset.Format(tbufOffset)\n\t\t\toffset, err = strconv.ParseInt(tbufOffset.String(), 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tif this.stmt.Limit.Rowcount != nil {\n\t\t\ttbufRowcount := sqlparser.NewTrackedBuffer(nil)\n\t\t\tthis.stmt.Limit.Rowcount.Format(tbufRowcount)\n\t\t\trowcount, err = strconv.ParseInt(tbufRowcount.String(), 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\t//\n\t\trows = make([][]sqltypes.Value,rowcount)\n\t\t//glog.Infof(\"### limit %d,%d\", offset, rowcount)\n\t\tif offset < int64(len(this.tempRows)) && rowcount > 0 {\n\t\t\tif offset+rowcount < int64(len(this.tempRows)) {\n\t\t\t\tcopy(rows, this.tempRows[offset:offset+rowcount])\n\t\t\t} else {\n\t\t\t\trows = make([][]sqltypes.Value,len(this.tempRows) - int(offset))\n\t\t\t\tcopy(rows, this.tempRows[offset:])\n\t\t\t}\n\t\t}\n\t} else {\n\t\trows = this.tempRows\n\t}\n\t//\n\tnewResult := &sqltypes.Result{\n\t\tFields: this.tempFields,\n\t\tRows: rows,\n\t\tRowsAffected: uint64(len(this.tempRows)),\n\t}\n\treturn newResult, nil\n}",
"func NewRegistrationServer_updateLocalConfiguration_Results_List(s *capnp.Segment, sz int32) (RegistrationServer_updateLocalConfiguration_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn RegistrationServer_updateLocalConfiguration_Results_List{l}, err\n}",
"func (s ConfigurationManager_updateConfiguration_Results) NewResult() (UpdateConfigurationResponse, error) {\n\tss, err := NewUpdateConfigurationResponse(s.Struct.Segment())\n\tif err != nil {\n\t\treturn UpdateConfigurationResponse{}, err\n\t}\n\terr = s.Struct.SetPtr(0, ss.Struct.ToPtr())\n\treturn ss, err\n}",
"func (s TunnelServer_reconnectTunnel_Results) NewResult() (TunnelRegistration, error) {\n\tss, err := NewTunnelRegistration(s.Struct.Segment())\n\tif err != nil {\n\t\treturn TunnelRegistration{}, err\n\t}\n\terr = s.Struct.SetPtr(0, ss.Struct.ToPtr())\n\treturn ss, err\n}",
"func NewConfigurationManager_updateConfiguration_Results_List(s *capnp.Segment, sz int32) (ConfigurationManager_updateConfiguration_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn ConfigurationManager_updateConfiguration_Results_List{l}, err\n}",
"func NewAssignable_asSetter_Results_List(s *capnp.Segment, sz int32) (Assignable_asSetter_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn Assignable_asSetter_Results_List{l}, err\n}",
"func (s TunnelServer_registerTunnel_Results) NewResult() (TunnelRegistration, error) {\n\tss, err := NewTunnelRegistration(s.Struct.Segment())\n\tif err != nil {\n\t\treturn TunnelRegistration{}, err\n\t}\n\terr = s.Struct.SetPtr(0, ss.Struct.ToPtr())\n\treturn ss, err\n}",
"func UA_BrowseResult_new() []UA_BrowseResult {\n\treturn UA_new((*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[189]))[:]).([]UA_BrowseResult)\n}",
"func newResult(t reflect.Type, opts resultOptions) (result, error) {\n\tswitch {\n\tcase isIn(t) || (t.Kind() == reflect.Ptr && isIn(t.Elem())) || isEmbed(t, _inPtrType):\n\t\treturn nil, errf(\"cannot provide parameter objects\", \"%v embeds a dig.In\", t)\n\tcase isError(t):\n\t\treturn nil, errf(\"cannot return an error here, return it from the constructor instead\")\n\tcase isOut(t):\n\t\treturn newResultObject(t, opts)\n\tcase isEmbed(t, _outPtrType):\n\t\treturn nil, errf(\n\t\t\t\"cannot build a result object by embedding *dig.Out, embed dig.Out instead\",\n\t\t\t\"%v embeds *dig.Out\", t)\n\tcase t.Kind() == reflect.Ptr && isOut(t.Elem()):\n\t\treturn nil, errf(\n\t\t\t\"cannot return a pointer to a result object, use a value instead\",\n\t\t\t\"%v is a pointer to a struct that embeds dig.Out\", t)\n\tcase len(opts.Group) > 0:\n\t\tg, err := parseGroupString(opts.Group)\n\t\tif err != nil {\n\t\t\treturn nil, errf(\n\t\t\t\t\"cannot parse group %q\", opts.Group, err)\n\t\t}\n\t\trg := resultGrouped{Type: t, Group: g.Name, Flatten: g.Flatten}\n\t\tif g.Flatten {\n\t\t\tif t.Kind() != reflect.Slice {\n\t\t\t\treturn nil, errf(\n\t\t\t\t\t\"flatten can be applied to slices only\",\n\t\t\t\t\t\"%v is not a slice\", t)\n\t\t\t}\n\t\t\trg.Type = rg.Type.Elem()\n\t\t}\n\t\treturn rg, nil\n\tdefault:\n\t\treturn resultSingle{Type: t, Name: opts.Name}, nil\n\t}\n}",
"func (s TunnelServer_getServerInfo_Results) NewResult() (ServerInfo, error) {\n\tss, err := NewServerInfo(s.Struct.Segment())\n\tif err != nil {\n\t\treturn ServerInfo{}, err\n\t}\n\terr = s.Struct.SetPtr(0, ss.Struct.ToPtr())\n\treturn ss, err\n}",
"func (s TunnelServer_authenticate_Results) NewResult() (AuthenticateResponse, error) {\n\tss, err := NewAuthenticateResponse(s.Struct.Segment())\n\tif err != nil {\n\t\treturn AuthenticateResponse{}, err\n\t}\n\terr = s.Struct.SetPtr(0, ss.Struct.ToPtr())\n\treturn ss, err\n}",
"func newSuccessResultView(res *SuccessResult) *inventoryviews.SuccessResultView {\n\tvres := &inventoryviews.SuccessResultView{\n\t\tOK: &res.OK,\n\t}\n\treturn vres\n}",
"func newSuccessResultView(res *SuccessResult) *productviews.SuccessResultView {\n\tvres := &productviews.SuccessResultView{\n\t\tOK: &res.OK,\n\t}\n\treturn vres\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewBuilder_capabilities_Results creates a new list of Builder_capabilities_Results.
|
func NewBuilder_capabilities_Results_List(s *capnp.Segment, sz int32) (Builder_capabilities_Results_List, error) {
l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)
return Builder_capabilities_Results_List{l}, err
}
|
[
"func newBallotResults() ballotResults {\n\treturn ballotResults{\n\t\taddrs: make(map[string]string, 40960),\n\t\treplies: make(map[string]ticketvote.CastVoteReply, 40960),\n\t}\n}",
"func NewPrinterCapabilities()(*PrinterCapabilities) {\n m := &PrinterCapabilities{\n }\n m.SetAdditionalData(make(map[string]interface{}));\n return m\n}",
"func NewApp_releaseId_Results_List(s *capnp.Segment, sz int32) (App_releaseId_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz)\n\treturn App_releaseId_Results_List{l}, err\n}",
"func NewApp_instance_Results_List(s *capnp.Segment, sz int32) (App_instance_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn App_instance_Results_List{l}, err\n}",
"func UA_BrowseResult_new() []UA_BrowseResult {\n\treturn UA_new((*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[189]))[:]).([]UA_BrowseResult)\n}",
"func (b *defaultBuilder) WithResults(results []Result) Builder {\n\tfor _, res := range results {\n\t\tif res == nil {\n\t\t\tcontinue\n\t\t}\n\t\tb.results[res.ID()] = res\n\t}\n\n\treturn b\n}",
"func NewBackend_installPackage_Results_List(s *capnp.Segment, sz int32) (Backend_installPackage_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn capnp.StructList[Backend_installPackage_Results](l), err\n}",
"func NewApp_kill_Results_List(s *capnp.Segment, sz int32) (App_kill_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn App_kill_Results_List{l}, err\n}",
"func (a *Client) ListCapabilities(params *ListCapabilitiesParams, authInfo runtime.ClientAuthInfoWriter) (*ListCapabilitiesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListCapabilitiesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"listCapabilities\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/capabilities\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ListCapabilitiesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ListCapabilitiesOK), nil\n\n}",
"func NewBackend_backupGrain_Results_List(s *capnp.Segment, sz int32) (Backend_backupGrain_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn capnp.StructList[Backend_backupGrain_Results](l), err\n}",
"func NewApp_configs_Results_List(s *capnp.Segment, sz int32) (App_configs_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn App_configs_Results_List{l}, err\n}",
"func NewApp_service_Results_List(s *capnp.Segment, sz int32) (App_service_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn App_service_Results_List{l}, err\n}",
"func NewApp_id_Results_List(s *capnp.Segment, sz int32) (App_id_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz)\n\treturn App_id_Results_List{l}, err\n}",
"func NewResults(del []*asura.ResponseDeliverTx) AsuraResults {\n\tres := make(AsuraResults, len(del))\n\tfor i, d := range del {\n\t\tres[i] = NewResultFromResponse(d)\n\t}\n\treturn res\n}",
"func NewAssignable_get_Results_List(s *capnp.Segment, sz int32) (Assignable_get_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}, sz)\n\treturn Assignable_get_Results_List{l}, err\n}",
"func newVersionsFromInspectrResults(inspectrResults []InspectrResult) (versions []string) {\n\tfor _, inspectrResult := range inspectrResults {\n\t\tfor _, upgradeVersion := range inspectrResult.Upgrades {\n\t\t\tversions = append(versions, upgradeVersion)\n\t\t}\n\t}\n\treturn\n}",
"func NewConfigurationManager_updateConfiguration_Results_List(s *capnp.Segment, sz int32) (ConfigurationManager_updateConfiguration_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn ConfigurationManager_updateConfiguration_Results_List{l}, err\n}",
"func NewBackend_ping_Results_List(s *capnp.Segment, sz int32) (Backend_ping_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn capnp.StructList[Backend_ping_Results](l), err\n}",
"func newOptionList(opts []Option) *optionList {\n\toptions := new(optionList)\n\tfor _, opt := range opts {\n\t\topt(options)\n\t}\n\treturn options\n}",
"func NewAssignable_asSetter_Results_List(s *capnp.Segment, sz int32) (Assignable_asSetter_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn Assignable_asSetter_Results_List{l}, err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewWhat sets the what field to a newly allocated Builder_What struct, preferring placement in s's segment.
|
func (s Builder_build_Params) NewWhat() (Builder_What, error) {
ss, err := NewBuilder_What(s.Struct.Segment())
if err != nil {
return Builder_What{}, err
}
err = s.Struct.SetPtr(0, ss.Struct.ToPtr())
return ss, err
}
|
[
"func newQuestion(t Type) *Question {\n\tq := new(Question)\n\tq.typ = t\n\tq.Responses = makeResponses()\n\tq.Default = nil\n\tq.FirstAnswer = nil\n\tswitch q.typ {\n\tcase String:\n\t\tq.Whitespace = Trim\n\tcase Int:\n\t\tfallthrough\n\tcase Uint:\n\t\tfallthrough\n\tcase Float:\n\t\tfallthrough\n\tcase StringSlice:\n\t\tfallthrough\n\tcase IntSlice:\n\t\tfallthrough\n\tcase UintSlice:\n\t\tfallthrough\n\tcase FloatSlice:\n\t\tq.Whitespace = Trim | Collapse\n\t}\n\tq.Sep = \" \"\n\tq.set = nil\n\treturn q\n}",
"func newSurveyQuestionMutation(c config, op Op, opts ...surveyquestionOption) *SurveyQuestionMutation {\n\tm := &SurveyQuestionMutation{\n\t\tconfig: c,\n\t\top: op,\n\t\ttyp: TypeSurveyQuestion,\n\t\tclearedFields: make(map[string]struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\treturn m\n}",
"func newSurveyMutation(c config, op Op, opts ...surveyOption) *SurveyMutation {\n\tm := &SurveyMutation{\n\t\tconfig: c,\n\t\top: op,\n\t\ttyp: TypeSurvey,\n\t\tclearedFields: make(map[string]struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\treturn m\n}",
"func NewFromString(str string) *BuildHelp {\n\treturn &BuildHelp{Builder: *gtk.NewBuilderFromString(str, -1)}\n}",
"func CreateBuilder(\n\tnumberOfInputs int,\n\tlayerSizes []int,\n\tchar *neural.CellCharacter,\n\tgoalPosition *model.Vec,\n\tstartPosition model.Vec,\n\tboundryChecker CheckBoundry) (builder Builder) {\n\n\t//builder := new(Builder)\n\tbuilder.numberOfInputs = numberOfInputs\n\tbuilder.layerSizes = layerSizes\n\tbuilder.characteristics = char\n\tbuilder.goalPosition = goalPosition\n\tbuilder.startPosition = startPosition\n\tbuilder.boundryChecker = boundryChecker\n\treturn\n}",
"func (kcp *KCP) newSegment(size int) *Segment {\n\treturn &Segment{\n\t\tdata: xmitBuf.Get().([]byte)[:size],\n\t}\n}",
"func newSpec() *Spec {\n\treturn &Spec{\n\t\t[]Type{(*Void)(nil)},\n\t\tmap[Type]TypeID{(*Void)(nil): 0},\n\t\t0,\n\t\tmake(map[essentialName][]Type),\n\t\tnil,\n\t\tnil,\n\t}\n}",
"func newSurveyTemplateQuestionMutation(c config, op Op, opts ...surveytemplatequestionOption) *SurveyTemplateQuestionMutation {\n\tm := &SurveyTemplateQuestionMutation{\n\t\tconfig: c,\n\t\top: op,\n\t\ttyp: TypeSurveyTemplateQuestion,\n\t\tclearedFields: make(map[string]struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\treturn m\n}",
"func newConstraint(constraintType ConstraintType, details string) *constraintImpl {\n\treturn &constraintImpl{\n\t\tconstraintType: constraintType,\n\t\tdetails: details,\n\t}\n}",
"func New(s Sender, ccontrollerAddr string) SpoofMap {\n\tccontrollerAddrI, err := util.IPStringToInt32(ccontrollerAddr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tsm := &spoofMap{\n\t\trecvSpoofsChan: make(chan *dm.Probe, env.MaximumFlyingSpoofed),\n\t\tspoofs: make(map[uint32]*spoof),\n\t\ttransport: s,\n\t\tquit: make(chan struct{}),\n\t\tccontrollerAddr : ccontrollerAddrI,\n\t}\n\tgo sm.sendSpoofs()\n\tgo sm.cleanOld()\n\treturn sm\n}",
"func (b *Schema) make() (proto.Message, error) {\n\tpbt := proto.MessageType(b.MessageName)\n\tif pbt == nil {\n\t\treturn nil, fmt.Errorf(\"unknown type %q\", b.MessageName)\n\t}\n\treturn reflect.New(pbt.Elem()).Interface().(proto.Message), nil\n}",
"func StructMakeOp(nf int) Op {\n\tswitch nf {\n\tcase 0:\n\t\treturn OpStructMake0\n\tcase 1:\n\t\treturn OpStructMake1\n\tcase 2:\n\t\treturn OpStructMake2\n\tcase 3:\n\t\treturn OpStructMake3\n\tcase 4:\n\t\treturn OpStructMake4\n\t}\n\tpanic(\"too many fields in an SSAable struct\")\n}",
"func (in *What) DeepCopy() *What {\n\treturn &What{Kind: in.Kind}\n}",
"func NewQuestion(ope Node) *Question {\n\treturn &Question{\n\t\tTy: TypeQuestion,\n\t\tOpe: ope,\n\t}\n}",
"func newWhere(typ string, expr Expr) *Where {\n\tif expr == nil {\n\t\treturn nil\n\t}\n\treturn &Where{Type: typ, Expr: expr}\n}",
"func NewPeopleRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*PeopleRequestBuilder) {\n urlParams := make(map[string]string)\n urlParams[\"request-raw-url\"] = rawUrl\n return NewPeopleRequestBuilderInternal(urlParams, requestAdapter)\n}",
"func New() *BuildHelp {\n\treturn &BuildHelp{Builder: *gtk.NewBuilder()}\n}",
"func NewBuilder(sender *host.Host) Builder {\n\tcb := Builder{}\n\tcb.actions = append(cb.actions, func(packet *Packet) {\n\t\tpacket.Sender = sender\n\t\tpacket.RemoteAddress = sender.Address.String()\n\t})\n\treturn cb\n}",
"func newSegment(ctx *context.T, name string, sampling *http.Request) (seg *xray.Segment, sub bool) {\n\tsanitized := sanitizeName(name)\n\tseg = GetSegment(ctx)\n\thdr := GetTraceHeader(ctx)\n\tif seg == nil {\n\t\t_, seg = xray.BeginSegmentWithSampling(ctx, sanitized, sampling, hdr)\n\t\tctx.VI(1).Infof(\"new Top segment: %v\", segStr(seg))\n\t} else {\n\t\t_, seg = xray.BeginSubsegment(ctx, sanitized)\n\t\tctx.VI(1).Infof(\"new Sub segment: %v\", segStr(seg))\n\t\tsub = true\n\t}\n\treturn\n}",
"func New() *Builder {\n\tb := &Builder{}\n\n\tb.info.Write([]byte{\n\t\t0x0, 0x0, 0x0, 0x0, // length\n\t\t0x4, 0x0, // version\n\t\t0x0, 0x0, 0x0, 0x0, // debug_abbrev_offset\n\t\t0x8, // address_size\n\t})\n\n\tb.TagOpen(dwarf.TagCompileUnit, \"go\")\n\tb.Attr(dwarf.AttrLanguage, uint8(22))\n\n\treturn b\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewOptions sets the options field to a newly allocated Builder_Opts struct, preferring placement in s's segment.
|
func (s Builder_build_Params) NewOptions() (Builder_Opts, error) {
ss, err := NewBuilder_Opts(s.Struct.Segment())
if err != nil {
return Builder_Opts{}, err
}
err = s.Struct.SetPtr(1, ss.Struct.ToPtr())
return ss, err
}
|
[
"func NewOptions(maxSegmentSize uint16, windowScale uint8, timestamp,\n\techoTimestamp uint32) []uint8 {\n\toptions := []uint8{}\n\n\tif maxSegmentSize != NoSegmentSize {\n\t\toptions = append(options, 2, 4, uint8(maxSegmentSize>>8), uint8(maxSegmentSize))\n\t}\n\n\tif windowScale != NoWindowScale {\n\t\toptions = append(options, 3, 3, windowScale)\n\t}\n\n\tif timestamp != NoTimestamp {\n\t\toptions = append(options, 8, 10, uint8(timestamp>>24), uint8(timestamp>>16),\n\t\t\tuint8(timestamp>>8), uint8(timestamp))\n\t\toptions = append(options, uint8(echoTimestamp>>24), uint8(echoTimestamp>>16),\n\t\t\tuint8(echoTimestamp>>8), uint8(echoTimestamp))\n\t}\n\n\t// End-of-options 0 value.\n\toptions = append(options, 0)\n\n\t// Padding to a multiple of 32.\n\tfor len(options)%4 != 0 {\n\t\toptions = append(options, 0)\n\t}\n\n\treturn options\n}",
"func newOptions() *options {\n\treturn &options{\n\t\tfillColor: cell.ColorNumber(DefaultFillColorNumber),\n\t\tplaceHolderColor: cell.ColorNumber(DefaultPlaceHolderColorNumber),\n\t\thighlightedColor: cell.ColorNumber(DefaultHighlightedColorNumber),\n\t\tcursorColor: cell.ColorNumber(DefaultCursorColorNumber),\n\t\tlabelAlign: DefaultLabelAlign,\n\t}\n}",
"func (c *WriterConfiguration) NewOptions(\n\tcs client.Client,\n\tiOpts instrument.Options,\n\trwOptions xio.Options,\n) (writer.Options, error) {\n\topts := writer.NewOptions().\n\t\tSetTopicName(c.TopicName).\n\t\tSetPlacementOptions(c.PlacementOptions.NewOptions()).\n\t\tSetInstrumentOptions(iOpts).\n\t\tSetWithoutConsumerScope(c.WithoutConsumerScope)\n\n\tkvOpts, err := c.TopicServiceOverride.NewOverrideOptions()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttopicServiceOpts := topic.NewServiceOptions().\n\t\tSetConfigService(cs).\n\t\tSetKVOverrideOptions(kvOpts)\n\tts, err := topic.NewService(topicServiceOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\topts = opts.SetTopicService(ts)\n\n\tif c.TopicWatchInitTimeout != nil {\n\t\topts = opts.SetTopicWatchInitTimeout(*c.TopicWatchInitTimeout)\n\t}\n\tsd, err := cs.Services(c.PlacementServiceOverride.NewOptions())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\topts = opts.SetServiceDiscovery(sd)\n\n\tif c.PlacementWatchInitTimeout != nil {\n\t\topts = opts.SetPlacementWatchInitTimeout(*c.PlacementWatchInitTimeout)\n\t}\n\topts, err = c.setRetryOptions(opts, iOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif c.MessageQueueNewWritesScanInterval != nil {\n\t\topts = opts.SetMessageQueueNewWritesScanInterval(*c.MessageQueueNewWritesScanInterval)\n\t}\n\tif c.MessageQueueFullScanInterval != nil {\n\t\topts = opts.SetMessageQueueFullScanInterval(*c.MessageQueueFullScanInterval)\n\t}\n\tif c.MessageQueueScanBatchSize != nil {\n\t\topts = opts.SetMessageQueueScanBatchSize(*c.MessageQueueScanBatchSize)\n\t}\n\tif c.InitialAckMapSize != nil {\n\t\topts = opts.SetInitialAckMapSize(*c.InitialAckMapSize)\n\t}\n\tif c.CloseCheckInterval != nil {\n\t\topts = opts.SetCloseCheckInterval(*c.CloseCheckInterval)\n\t}\n\tif c.AckErrorRetry != nil {\n\t\topts = opts.SetAckErrorRetryOptions(c.AckErrorRetry.NewOptions(tally.NoopScope))\n\t}\n\tif c.Encoder != nil {\n\t\topts = opts.SetEncoderOptions(c.Encoder.NewOptions(iOpts))\n\t}\n\tif c.Decoder != nil {\n\t\topts = opts.SetDecoderOptions(c.Decoder.NewOptions(iOpts))\n\t}\n\tif c.Connection != nil {\n\t\topts = opts.SetConnectionOptions(c.Connection.NewOptions(iOpts))\n\t}\n\n\topts = opts.SetIgnoreCutoffCutover(c.IgnoreCutoffCutover)\n\n\topts = opts.SetDecoderOptions(opts.DecoderOptions().SetRWOptions(rwOptions))\n\treturn opts, nil\n}",
"func (s TunnelServer_authenticate_Params) NewOptions() (RegistrationOptions, error) {\n\tss, err := NewRegistrationOptions(s.Struct.Segment())\n\tif err != nil {\n\t\treturn RegistrationOptions{}, err\n\t}\n\terr = s.Struct.SetPtr(2, ss.Struct.ToPtr())\n\treturn ss, err\n}",
"func NewOptions() *Common {\n\treturn &Common{}\n}",
"func NewOptions(spec string) *OptionSpec {\n\t// TODO(gaal): move to constant\n\tflagSpec := regexp.MustCompile(`^([-\\w,]+)(=?)\\s+(.*)$`)\n\t// Not folded into previous pattern because that would necessitate FindStringSubmatchIndex.\n\tdefaultValue := regexp.MustCompile(`\\[(.*)\\]$`)\n\n\ts := &OptionSpec{UnknownOptionsFatal: true, UnknownValuesFatal: false, Exit: os.Exit}\n\ts.aliases = make(map[string]string)\n\ts.defaults = make(map[string]string)\n\ts.requiresArg = make(map[string]bool)\n\tstanza := 0 // synopsis\n\tspecLines := strings.Split(spec, \"\\n\")\n\tfor n, l := range specLines {\n\t\tswitch stanza {\n\t\tcase 0:\n\t\t\t{\n\t\t\t\tif l == \"--\" {\n\t\t\t\t\ts.Usage += \"\\n\"\n\t\t\t\t\tstanza++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\ts.Usage += l + \"\\n\"\n\t\t\t}\n\t\tcase 1:\n\t\t\t{\n\t\t\t\tif l == \"\" {\n\t\t\t\t\ts.Usage += \"\\n\"\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tparts := flagSpec.FindStringSubmatch(l)\n\t\t\t\tif parts == nil {\n\t\t\t\t\tpanic(fmt.Sprint(n, \": no parse: \", l))\n\t\t\t\t}\n\t\t\t\tnames := strings.Split(parts[1], \",\")\n\t\t\t\tcanonical := names[len(names)-1]\n\t\t\t\tfor _, name := range names {\n\t\t\t\t\tif _, dup := s.aliases[name]; dup {\n\t\t\t\t\t\tpanic(fmt.Sprint(n, \": duplicate name: \", name))\n\t\t\t\t\t}\n\t\t\t\t\tif name == \"\" || name == \"-\" || name == \"--\" {\n\t\t\t\t\t\tpanic(fmt.Sprint(n, \": bad name: \", name))\n\t\t\t\t\t}\n\n\t\t\t\t\ts.aliases[name] = canonical\n\t\t\t\t}\n\t\t\t\tif parts[2] == \"=\" {\n\t\t\t\t\ts.requiresArg[canonical] = true\n\t\t\t\t}\n\t\t\t\tif def := defaultValue.FindStringSubmatch(parts[3]); def != nil {\n\t\t\t\t\ts.defaults[canonical] = def[1]\n\t\t\t\t}\n\t\t\t\t// TODO(gaal): linewrap.\n\t\t\t\ts.Usage += \" \" + strings.Join(smap(prettyFlag, names), \", \") +\n\t\t\t\t\tparts[2] + \" \" + parts[3] + \"\\n\"\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(fmt.Sprint(n, \": no parse: \", spec))\n\t\t}\n\t}\n\treturn s\n}",
"func (s RegistrationServer_registerConnection_Params) NewOptions() (ConnectionOptions, error) {\n\tss, err := NewConnectionOptions(s.Struct.Segment())\n\tif err != nil {\n\t\treturn ConnectionOptions{}, err\n\t}\n\terr = s.Struct.SetPtr(2, ss.Struct.ToPtr())\n\treturn ss, err\n}",
"func NewOptions() *Options {\n\treturn &Options{\n\t\tOptions: scheduleroptions.NewOptions(),\n\t}\n}",
"func New(rs *RuleSet) *Opts {\n\topts := Opts{ruleSet: rs,\n\t\tallOff: true,\n\t\toptions: make(map[string]bool)}\n\n\topts.refreshOptions()\n\treturn &opts\n}",
"func newNativeOptions(c *C.rocksdb_options_t) *Options {\n\treturn &Options{c: c}\n}",
"func NewOptions() *Options {\n\tvar url *url.URL\n\n\tif os.Getenv(\"PRERENDER_SERVICE_URL\") != \"\" {\n\t\turl, _ = url.Parse(os.Getenv(\"PRERENDER_SERVICE_URL\"))\n\t} else {\n\t\turl, _ = url.Parse(\"https://service.headless-render-api.com/\")\n\t}\n\n\treturn &Options{\n\t\tPrerenderURL: url,\n\t\tToken: os.Getenv(\"PRERENDER_TOKEN\"),\n\t\tUsingAppEngine: false,\n\t\tBotsOnly: false,\n\t}\n}",
"func newConnOptions() *connOptions {\n\treturn &connOptions{\n\t\tBufferSize: defaultBufferSize,\n\t}\n}",
"func ConfigureOptions(fs *flag.FlagSet, args []string, printVersion, printHelp, printTLSHelp func()) (*Options, error) {\n\topts := &Options{}\n\tvar (\n\t\tshowVersion bool\n\t\tshowHelp bool\n\t\tshowTLSHelp bool\n\t\tsignal string\n\t\tconfigFile string\n\t\tdbgAndTrace bool\n\t\ttrcAndVerboseTrc bool\n\t\tdbgAndTrcAndVerboseTrc bool\n\t\terr error\n\t)\n\n\tfs.BoolVar(&showHelp, \"h\", false, \"Show this message.\")\n\tfs.BoolVar(&showHelp, \"help\", false, \"Show this message.\")\n\tfs.IntVar(&opts.Port, \"port\", 0, \"Port to listen on.\")\n\tfs.IntVar(&opts.Port, \"p\", 0, \"Port to listen on.\")\n\tfs.StringVar(&opts.ServerName, \"n\", \"\", \"Server name.\")\n\tfs.StringVar(&opts.ServerName, \"name\", \"\", \"Server name.\")\n\tfs.StringVar(&opts.ServerName, \"server_name\", \"\", \"Server name.\")\n\tfs.StringVar(&opts.Host, \"addr\", \"\", \"Network host to listen on.\")\n\tfs.StringVar(&opts.Host, \"a\", \"\", \"Network host to listen on.\")\n\tfs.StringVar(&opts.Host, \"net\", \"\", \"Network host to listen on.\")\n\tfs.StringVar(&opts.ClientAdvertise, \"client_advertise\", \"\", \"Client URL to advertise to other servers.\")\n\tfs.BoolVar(&opts.Debug, \"D\", false, \"Enable Debug logging.\")\n\tfs.BoolVar(&opts.Debug, \"debug\", false, \"Enable Debug logging.\")\n\tfs.BoolVar(&opts.Trace, \"V\", false, \"Enable Trace logging.\")\n\tfs.BoolVar(&trcAndVerboseTrc, \"VV\", false, \"Enable Verbose Trace logging. (Traces system account as well)\")\n\tfs.BoolVar(&opts.Trace, \"trace\", false, \"Enable Trace logging.\")\n\tfs.BoolVar(&dbgAndTrace, \"DV\", false, \"Enable Debug and Trace logging.\")\n\tfs.BoolVar(&dbgAndTrcAndVerboseTrc, \"DVV\", false, \"Enable Debug and Verbose Trace logging. (Traces system account as well)\")\n\tfs.BoolVar(&opts.Logtime, \"T\", true, \"Timestamp log entries.\")\n\tfs.BoolVar(&opts.Logtime, \"logtime\", true, \"Timestamp log entries.\")\n\tfs.BoolVar(&opts.LogtimeUTC, \"logtime_utc\", false, \"Timestamps in UTC instead of local timezone.\")\n\tfs.StringVar(&opts.Username, \"user\", _EMPTY_, \"Username required for connection.\")\n\tfs.StringVar(&opts.Password, \"pass\", _EMPTY_, \"Password required for connection.\")\n\tfs.StringVar(&opts.Authorization, \"auth\", _EMPTY_, \"Authorization token required for connection.\")\n\tfs.IntVar(&opts.HTTPPort, \"m\", 0, \"HTTP Port for /varz, /connz endpoints.\")\n\tfs.IntVar(&opts.HTTPPort, \"http_port\", 0, \"HTTP Port for /varz, /connz endpoints.\")\n\tfs.IntVar(&opts.HTTPSPort, \"ms\", 0, \"HTTPS Port for /varz, /connz endpoints.\")\n\tfs.IntVar(&opts.HTTPSPort, \"https_port\", 0, \"HTTPS Port for /varz, /connz endpoints.\")\n\tfs.StringVar(&configFile, \"c\", \"\", \"Configuration file.\")\n\tfs.StringVar(&configFile, \"config\", \"\", \"Configuration file.\")\n\tfs.BoolVar(&opts.CheckConfig, \"t\", false, \"Check configuration and exit.\")\n\tfs.StringVar(&signal, \"sl\", \"\", \"Send signal to nats-server process (ldm, stop, quit, term, reopen, reload).\")\n\tfs.StringVar(&signal, \"signal\", \"\", \"Send signal to nats-server process (ldm, stop, quit, term, reopen, reload).\")\n\tfs.StringVar(&opts.PidFile, \"P\", \"\", \"File to store process pid.\")\n\tfs.StringVar(&opts.PidFile, \"pid\", \"\", \"File to store process pid.\")\n\tfs.StringVar(&opts.PortsFileDir, \"ports_file_dir\", \"\", \"Creates a ports file in the specified directory (<executable_name>_<pid>.ports).\")\n\tfs.StringVar(&opts.LogFile, \"l\", \"\", \"File to store logging output.\")\n\tfs.StringVar(&opts.LogFile, \"log\", \"\", 
\"File to store logging output.\")\n\tfs.Int64Var(&opts.LogSizeLimit, \"log_size_limit\", 0, \"Logfile size limit being auto-rotated\")\n\tfs.BoolVar(&opts.Syslog, \"s\", false, \"Enable syslog as log method.\")\n\tfs.BoolVar(&opts.Syslog, \"syslog\", false, \"Enable syslog as log method.\")\n\tfs.StringVar(&opts.RemoteSyslog, \"r\", \"\", \"Syslog server addr (udp://127.0.0.1:514).\")\n\tfs.StringVar(&opts.RemoteSyslog, \"remote_syslog\", \"\", \"Syslog server addr (udp://127.0.0.1:514).\")\n\tfs.BoolVar(&showVersion, \"version\", false, \"Print version information.\")\n\tfs.BoolVar(&showVersion, \"v\", false, \"Print version information.\")\n\tfs.IntVar(&opts.ProfPort, \"profile\", 0, \"Profiling HTTP port.\")\n\tfs.StringVar(&opts.RoutesStr, \"routes\", \"\", \"Routes to actively solicit a connection.\")\n\tfs.StringVar(&opts.Cluster.ListenStr, \"cluster\", \"\", \"Cluster url from which members can solicit routes.\")\n\tfs.StringVar(&opts.Cluster.ListenStr, \"cluster_listen\", \"\", \"Cluster url from which members can solicit routes.\")\n\tfs.StringVar(&opts.Cluster.Advertise, \"cluster_advertise\", \"\", \"Cluster URL to advertise to other servers.\")\n\tfs.BoolVar(&opts.Cluster.NoAdvertise, \"no_advertise\", false, \"Advertise known cluster IPs to clients.\")\n\tfs.IntVar(&opts.Cluster.ConnectRetries, \"connect_retries\", 0, \"For implicit routes, number of connect retries.\")\n\tfs.StringVar(&opts.Cluster.Name, \"cluster_name\", \"\", \"Cluster Name, if not set one will be dynamically generated.\")\n\tfs.BoolVar(&showTLSHelp, \"help_tls\", false, \"TLS help.\")\n\tfs.BoolVar(&opts.TLS, \"tls\", false, \"Enable TLS.\")\n\tfs.BoolVar(&opts.TLSVerify, \"tlsverify\", false, \"Enable TLS with client verification.\")\n\tfs.StringVar(&opts.TLSCert, \"tlscert\", \"\", \"Server certificate file.\")\n\tfs.StringVar(&opts.TLSKey, \"tlskey\", \"\", \"Private key for server certificate.\")\n\tfs.StringVar(&opts.TLSCaCert, \"tlscacert\", \"\", \"Client certificate CA for verification.\")\n\tfs.IntVar(&opts.MaxTracedMsgLen, \"max_traced_msg_len\", 0, \"Maximum printable length for traced messages. 0 for unlimited.\")\n\tfs.BoolVar(&opts.JetStream, \"js\", false, \"Enable JetStream.\")\n\tfs.BoolVar(&opts.JetStream, \"jetstream\", false, \"Enable JetStream.\")\n\tfs.StringVar(&opts.StoreDir, \"sd\", \"\", \"Storage directory.\")\n\tfs.StringVar(&opts.StoreDir, \"store_dir\", \"\", \"Storage directory.\")\n\n\t// The flags definition above set \"default\" values to some of the options.\n\t// Calling Parse() here will override the default options with any value\n\t// specified from the command line. This is ok. 
We will then update the\n\t// options with the content of the configuration file (if present), and then,\n\t// call Parse() again to override the default+config with command line values.\n\t// Calling Parse() before processing config file is necessary since configFile\n\t// itself is a command line argument, and also Parse() is required in order\n\t// to know if user wants simply to show \"help\" or \"version\", etc...\n\tif err := fs.Parse(args); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif showVersion {\n\t\tprintVersion()\n\t\treturn nil, nil\n\t}\n\n\tif showHelp {\n\t\tprintHelp()\n\t\treturn nil, nil\n\t}\n\n\tif showTLSHelp {\n\t\tprintTLSHelp()\n\t\treturn nil, nil\n\t}\n\n\t// Process args looking for non-flag options,\n\t// 'version' and 'help' only for now\n\tshowVersion, showHelp, err = ProcessCommandLineArgs(fs)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if showVersion {\n\t\tprintVersion()\n\t\treturn nil, nil\n\t} else if showHelp {\n\t\tprintHelp()\n\t\treturn nil, nil\n\t}\n\n\t// Snapshot flag options.\n\tFlagSnapshot = opts.Clone()\n\n\t// Keep track of the boolean flags that were explicitly set with their value.\n\tfs.Visit(func(f *flag.Flag) {\n\t\tswitch f.Name {\n\t\tcase \"DVV\":\n\t\t\ttrackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, \"Debug\", dbgAndTrcAndVerboseTrc)\n\t\t\ttrackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, \"Trace\", dbgAndTrcAndVerboseTrc)\n\t\t\ttrackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, \"TraceVerbose\", dbgAndTrcAndVerboseTrc)\n\t\tcase \"DV\":\n\t\t\ttrackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, \"Debug\", dbgAndTrace)\n\t\t\ttrackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, \"Trace\", dbgAndTrace)\n\t\tcase \"D\":\n\t\t\tfallthrough\n\t\tcase \"debug\":\n\t\t\ttrackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, \"Debug\", FlagSnapshot.Debug)\n\t\tcase \"VV\":\n\t\t\ttrackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, \"Trace\", trcAndVerboseTrc)\n\t\t\ttrackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, \"TraceVerbose\", trcAndVerboseTrc)\n\t\tcase \"V\":\n\t\t\tfallthrough\n\t\tcase \"trace\":\n\t\t\ttrackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, \"Trace\", FlagSnapshot.Trace)\n\t\tcase \"T\":\n\t\t\tfallthrough\n\t\tcase \"logtime\":\n\t\t\ttrackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, \"Logtime\", FlagSnapshot.Logtime)\n\t\tcase \"s\":\n\t\t\tfallthrough\n\t\tcase \"syslog\":\n\t\t\ttrackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, \"Syslog\", FlagSnapshot.Syslog)\n\t\tcase \"no_advertise\":\n\t\t\ttrackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, \"Cluster.NoAdvertise\", FlagSnapshot.Cluster.NoAdvertise)\n\t\t}\n\t})\n\n\t// Process signal control.\n\tif signal != _EMPTY_ {\n\t\tif err := processSignal(signal); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Parse config if given\n\tif configFile != _EMPTY_ {\n\t\t// This will update the options with values from the config file.\n\t\terr := opts.ProcessConfigFile(configFile)\n\t\tif err != nil {\n\t\t\tif opts.CheckConfig {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif cerr, ok := err.(*processConfigErr); !ok || len(cerr.Errors()) != 0 {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// If we get here we only have warnings and can still continue\n\t\t\tfmt.Fprint(os.Stderr, err)\n\t\t} else if opts.CheckConfig {\n\t\t\t// Report configuration file syntax test was successful and exit.\n\t\t\treturn opts, nil\n\t\t}\n\n\t\t// Call this again to override config file options with options from 
command line.\n\t\t// Note: We don't need to check error here since if there was an error, it would\n\t\t// have been caught the first time this function was called (after setting up the\n\t\t// flags).\n\t\tfs.Parse(args)\n\t} else if opts.CheckConfig {\n\t\treturn nil, fmt.Errorf(\"must specify [-c, --config] option to check configuration file syntax\")\n\t}\n\n\t// Special handling of some flags\n\tvar (\n\t\tflagErr error\n\t\ttlsDisabled bool\n\t\ttlsOverride bool\n\t)\n\tfs.Visit(func(f *flag.Flag) {\n\t\t// short-circuit if an error was encountered\n\t\tif flagErr != nil {\n\t\t\treturn\n\t\t}\n\t\tif strings.HasPrefix(f.Name, \"tls\") {\n\t\t\tif f.Name == \"tls\" {\n\t\t\t\tif !opts.TLS {\n\t\t\t\t\t// User has specified \"-tls=false\", we need to disable TLS\n\t\t\t\t\topts.TLSConfig = nil\n\t\t\t\t\ttlsDisabled = true\n\t\t\t\t\ttlsOverride = false\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ttlsOverride = true\n\t\t\t} else if !tlsDisabled {\n\t\t\t\ttlsOverride = true\n\t\t\t}\n\t\t} else {\n\t\t\tswitch f.Name {\n\t\t\tcase \"VV\":\n\t\t\t\topts.Trace, opts.TraceVerbose = trcAndVerboseTrc, trcAndVerboseTrc\n\t\t\tcase \"DVV\":\n\t\t\t\topts.Trace, opts.Debug, opts.TraceVerbose = dbgAndTrcAndVerboseTrc, dbgAndTrcAndVerboseTrc, dbgAndTrcAndVerboseTrc\n\t\t\tcase \"DV\":\n\t\t\t\t// Check value to support -DV=false\n\t\t\t\topts.Trace, opts.Debug = dbgAndTrace, dbgAndTrace\n\t\t\tcase \"cluster\", \"cluster_listen\":\n\t\t\t\t// Override cluster config if explicitly set via flags.\n\t\t\t\tflagErr = overrideCluster(opts)\n\t\t\tcase \"routes\":\n\t\t\t\t// Keep in mind that the flag has updated opts.RoutesStr at this point.\n\t\t\t\tif opts.RoutesStr == \"\" {\n\t\t\t\t\t// Set routes array to nil since routes string is empty\n\t\t\t\t\topts.Routes = nil\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\trouteUrls := RoutesFromStr(opts.RoutesStr)\n\t\t\t\topts.Routes = routeUrls\n\t\t\t}\n\t\t}\n\t})\n\tif flagErr != nil {\n\t\treturn nil, flagErr\n\t}\n\n\t// This will be true if some of the `-tls` params have been set and\n\t// `-tls=false` has not been set.\n\tif tlsOverride {\n\t\tif err := overrideTLS(opts); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// If we don't have cluster defined in the configuration\n\t// file and no cluster listen string override, but we do\n\t// have a routes override, we need to report misconfiguration.\n\tif opts.RoutesStr != \"\" && opts.Cluster.ListenStr == \"\" && opts.Cluster.Host == \"\" && opts.Cluster.Port == 0 {\n\t\treturn nil, errors.New(\"solicited routes require cluster capabilities, e.g. --cluster\")\n\t}\n\n\treturn opts, nil\n}",
"func New(opts ...Option) (*SegmentDisplay, error) {\n\topt := newOptions()\n\tfor _, o := range opts {\n\t\to.set(opt)\n\t}\n\tif err := opt.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &SegmentDisplay{\n\t\twOptsTracker: attrrange.NewTracker(),\n\t\topts: opt,\n\t}, nil\n}",
"func newInitOptions() *initOptions {\n\t// initialize the public kubeadm config API by applying defaults\n\texternalInitCfg := &kubeadmapiv1.InitConfiguration{}\n\tkubeadmscheme.Scheme.Default(externalInitCfg)\n\n\texternalClusterCfg := &kubeadmapiv1.ClusterConfiguration{\n\t\t// CertificatesDir: filepath.Join(homedir.HomeDir(), \".fireflyadm\", \"pki\"),\n\t}\n\tkubeadmscheme.Scheme.Default(externalClusterCfg)\n\n\t// Create the options object for the bootstrap token-related flags, and override the default value for .Description\n\tbto := options.NewBootstrapTokenOptions()\n\tbto.Description = \"The default bootstrap token generated by 'kubeadm init'.\"\n\n\tkubeconfig := os.Getenv(\"KUBECONFIG\")\n\tif kubeconfig == \"\" {\n\t\tkubeconfig = filepath.Join(homedir.HomeDir(), \".kube\", \"config\")\n\t}\n\n\treturn &initOptions{\n\t\texternalInitCfg: externalInitCfg,\n\t\texternalClusterCfg: externalClusterCfg,\n\t\tbto: bto,\n\t\tkubeconfigDir: constants.KubernetesDir,\n\t\tkubeconfigPath: kubeconfig,\n\t\tuploadCerts: false,\n\t}\n}",
"func New(opt *Options) *Config {\n\tif opt == nil {\n\t\topt = &Options{\n\t\t\tComment: DEFAULT_COMMENT,\n\t\t\tSeparator: DEFAULT_SEPARATOR,\n\t\t\tPreSpace: true,\n\t\t\tPostSpace: true,\n\t\t}\n\t}\n\tif opt.Comment == \"\" {\n\t\topt.Comment = DEFAULT_COMMENT\n\t}\n\tif opt.Separator == \"\" {\n\t\topt.Separator = DEFAULT_SEPARATOR\n\t}\n\n\tif opt.Comment != DEFAULT_COMMENT && opt.Comment != ALTERNATIVE_COMMENT {\n\t\tpanic(\"invalid comment:\" + opt.Comment)\n\t}\n\tif opt.Separator != DEFAULT_SEPARATOR && opt.Separator != ALTERNATIVE_SEPARATOR {\n\t\tpanic(\"invalid separator:\" + opt.Separator)\n\t}\n\n\tcomment := opt.Comment\n\tseparator := opt.Separator\n\n\t// Get spaces around separator\n\tif opt.PreSpace {\n\t\tseparator = \" \" + separator\n\t}\n\tif opt.PostSpace {\n\t\tseparator += \" \"\n\t}\n\n\tc := new(Config)\n\n\tc.comment = comment\n\tc.separator = separator\n\tc.idSectionMap = make(map[string]int)\n\tc.lastIdOptionMap = make(map[string]int)\n\tc.dataMap = make(map[string]map[string]*tValue)\n\n\tc.AddSection(DEFAULT_SECTION) // Default section always exists.\n\n\treturn c\n}",
"func NewOptions() IOptions {\n\treturn &Options{\n\t\tcache: 0,\n\t\tgzip: false,\n\t}\n}",
"func (s SelectStruct) Options(options ...SelectOption) SelectStruct {\n\ts.OptionsArray = options\n\treturn s\n}",
"func (s TunnelServer_registerTunnel_Params) NewOptions() (RegistrationOptions, error) {\n\tss, err := NewRegistrationOptions(s.Struct.Segment())\n\tif err != nil {\n\t\treturn RegistrationOptions{}, err\n\t}\n\terr = s.Struct.SetPtr(2, ss.Struct.ToPtr())\n\treturn ss, err\n}",
"func NewOpts(opts shared.Opts) Opts {\n\tc := Opts{\n\t\tOpts: opts,\n\t}\n\n\tif err := env.Parse(&c); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t}).Error(\"Unable to unmarshal configuration\")\n\t}\n\n\t// Defaults\n\tif c.MQTTOpts.DiscoveryName == \"\" {\n\t\tc.MQTTOpts.DiscoveryName = \"unifi\"\n\t}\n\n\tif c.MQTTOpts.TopicPrefix == \"\" {\n\t\tc.MQTTOpts.TopicPrefix = \"home/unifi\"\n\t}\n\n\treturn c\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewBuilder_build_Params creates a new list of Builder_build_Params.
|
func NewBuilder_build_Params_List(s *capnp.Segment, sz int32) (Builder_build_Params_List, error) {
l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}, sz)
return Builder_build_Params_List{l}, err
}
|
[
"func (a *busArrival) BuildParams(t *train.Train, lang language.Tag) (*TemplateParams, *TemplateParams, bool, error) {\n\n\tparams := TemplateParams{\n\t\t\"Category\": l10n.FromMetaDictionary(train.GetCategory(t), l10n.Genitive, lang),\n\t\t\"Carrier\": l10n.FromMetaDictionary(t.Carrier, l10n.Locative, lang),\n\t\t\"SubrouteStart\": t.Route.SubrouteStart.Name,\n\t\t\"Name\": t.Name,\n\t\t\"From\": t.Route.StartStation.Name,\n\t\t\"To\": t.Route.EndStation.Name,\n\t\t\"Delayed\": train.GetDelay(t) > 0,\n\t}\n\tif log.GetLevel() >= log.DebugLevel {\n\t\tlog.WithFields(log.Fields{\"logger\": \"annongen.producer.busArrival\", \"method\": \"BuildParams\", \"params\": fmt.Sprintf(\"%+v\", params)}).\n\t\t\tDebug(\"Parameters for speech generator\")\n\t}\n\treturn ¶ms, ¶ms, false, nil\n}",
"func CreateBuilder(\n\tnumberOfInputs int,\n\tlayerSizes []int,\n\tchar *neural.CellCharacter,\n\tgoalPosition *model.Vec,\n\tstartPosition model.Vec,\n\tboundryChecker CheckBoundry) (builder Builder) {\n\n\t//builder := new(Builder)\n\tbuilder.numberOfInputs = numberOfInputs\n\tbuilder.layerSizes = layerSizes\n\tbuilder.characteristics = char\n\tbuilder.goalPosition = goalPosition\n\tbuilder.startPosition = startPosition\n\tbuilder.boundryChecker = boundryChecker\n\treturn\n}",
"func Build(parameters map[string]interface{}) (api.Parser, error) {\n\tvar options []Option\n\n\tfor key, value := range parameters {\n\t\tswitch key {\n\t\tcase \"rule\":\n\t\t\toptions = append(options, WithRule(value.(string)))\n\t\tcase \"ignore_rule\":\n\t\t\toptions = append(options, WithIgnoreRule(value.(string)))\n\t\tcase \"fields\":\n\t\t\tvar fields []string\n\t\t\tfor _, f := range value.([]interface{}) {\n\t\t\t\tfields = append(fields, f.(string))\n\t\t\t}\n\t\t\toptions = append(options, WithFields(fields...))\n\t\tdefault:\n\t\t}\n\t}\n\n\treturn New(options...)\n}",
"func (b *ServiceParameterBuilder) Build() (object *ServiceParameter, err error) {\n\tobject = new(ServiceParameter)\n\tobject.bitmap_ = b.bitmap_\n\tobject.id = b.id\n\tobject.value = b.value\n\treturn\n}",
"func (s Value_Call) NewParams(n int32) (Value_List, error) {\n\tl, err := NewValue_List(capnp.Struct(s).Segment(), n)\n\tif err != nil {\n\t\treturn Value_List{}, err\n\t}\n\terr = capnp.Struct(s).SetPtr(1, l.ToPtr())\n\treturn l, err\n}",
"func NewBuilder() ListBuilder {\n\treturn &listBuilder{\n\t\telements: make(map[string]bool),\n\t}\n}",
"func BuildParameters(params interface{}) *runtime.RawExtension {\n\tparamsJSON, err := json.Marshal(params)\n\tif err != nil {\n\t\t// This should never be hit because marshalling a map[string]string is pretty safe\n\t\t// I'd rather throw a panic then force handling of an error that I don't think is possible.\n\t\tpanic(fmt.Errorf(\"unable to marshal the request parameters %v (%s)\", params, err))\n\t}\n\n\treturn &runtime.RawExtension{Raw: paramsJSON}\n}",
"func NewApp_configs_Params_List(s *capnp.Segment, sz int32) (App_configs_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn App_configs_Params_List{l}, err\n}",
"func ParseBuildParams(commands []string) map[string]string {\n\tparams := map[string]string{\n\t\t\"instance\": \"\",\n\t\t\"branch\": \"master\",\n\t\t\"country\": \"\",\n\t\t\"option\": \"primary\",\n\t}\n\n\tif len(commands) > 0 {\n\t\tparams[\"instance\"] = commands[0]\n\t}\n\tif len(commands) > 1 {\n\t\tparams[\"branch\"] = commands[1]\n\t}\n\tif len(commands) > 2 {\n\t\tparams[\"country\"] = commands[2]\n\t}\n\tif len(commands) > 3 {\n\t\tparams[\"option\"] = commands[3]\n\t}\n\n\treturn params\n}",
"func NewApp_releaseId_Params_List(s *capnp.Segment, sz int32) (App_releaseId_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn App_releaseId_Params_List{l}, err\n}",
"func NewBuilder() Builder {\n\treturn &builder{\n\t\tproduct: make(map[string]string),\n\t\tallFieldsPresent: true,\n\t}\n}",
"func (t *Template) BuildParams(url string) []string {\n\tparams := []string{\n\t\tfmt.Sprintf(\"%s/main\", url),\n\t\t// \"--disable-smart-shrinking\",\n\t}\n\n\tif t.Footer != nil {\n\t\tparams = append(params, \"--footer-html\", fmt.Sprintf(\"%s/footer\", url))\n\t}\n\tif t.Header != nil {\n\t\tparams = append(params, \"--header-html\", fmt.Sprintf(\"%s/header\", url))\n\t}\n\tparams = append(params, \"-\")\n\treturn params\n}",
"func NewApp_service_Params_List(s *capnp.Segment, sz int32) (App_service_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz)\n\treturn App_service_Params_List{l}, err\n}",
"func (b *BillingModelItemListBuilder) Build() (list *BillingModelItemList, err error) {\n\titems := make([]*BillingModelItem, len(b.items))\n\tfor i, item := range b.items {\n\t\titems[i], err = item.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tlist = new(BillingModelItemList)\n\tlist.items = items\n\treturn\n}",
"func NewConfigurationManager_updateConfiguration_Params_List(s *capnp.Segment, sz int32) (ConfigurationManager_updateConfiguration_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}, sz)\n\treturn ConfigurationManager_updateConfiguration_Params_List{l}, err\n}",
"func (b *Builder) Build() ClientInterfaces {\n\treturn b\n}",
"func BuildListOptions(page int) *buildkite.BuildsListOptions {\n\treturn &buildkite.BuildsListOptions{\n\t\tListOptions: buildkite.ListOptions{\n\t\t\tPage: page,\n\t\t\tPerPage: 100,\n\t\t},\n\t}\n}",
"func NewBatchGetItemsBuilder(t mockConstructorTestingTNewBatchGetItemsBuilder) *BatchGetItemsBuilder {\n\tmock := &BatchGetItemsBuilder{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewApp_id_Params_List(s *capnp.Segment, sz int32) (App_id_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn App_id_Params_List{l}, err\n}",
"func (t *School) CreateParams() []interface{} {\n\treturn []interface{}{\n\t\t&t.Name,\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewBuilder_build_Results creates a new list of Builder_build_Results.
|
func NewBuilder_build_Results_List(s *capnp.Segment, sz int32) (Builder_build_Results_List, error) {
l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz)
return Builder_build_Results_List{l}, err
}
|
[
"func (b *defaultBuilder) WithResults(results []Result) Builder {\n\tfor _, res := range results {\n\t\tif res == nil {\n\t\t\tcontinue\n\t\t}\n\t\tb.results[res.ID()] = res\n\t}\n\n\treturn b\n}",
"func (r *ReturnResults) Build() ast.Stmt {\n\tresultSelectors := []ast.Expr{}\n\tfor _, result := range r.method.MethodResults {\n\t\tresultSelectors = append(resultSelectors, ast.NewIdent(result.Names[0].String()))\n\t}\n\n\treturn &ast.ReturnStmt{\n\t\tResults: resultSelectors,\n\t}\n}",
"func NewApp_releaseId_Results_List(s *capnp.Segment, sz int32) (App_releaseId_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz)\n\treturn App_releaseId_Results_List{l}, err\n}",
"func newBallotResults() ballotResults {\n\treturn ballotResults{\n\t\taddrs: make(map[string]string, 40960),\n\t\treplies: make(map[string]ticketvote.CastVoteReply, 40960),\n\t}\n}",
"func NewApp_configs_Results_List(s *capnp.Segment, sz int32) (App_configs_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn App_configs_Results_List{l}, err\n}",
"func NewApp_instance_Results_List(s *capnp.Segment, sz int32) (App_instance_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn App_instance_Results_List{l}, err\n}",
"func NewResults(del []*asura.ResponseDeliverTx) AsuraResults {\n\tres := make(AsuraResults, len(del))\n\tfor i, d := range del {\n\t\tres[i] = NewResultFromResponse(d)\n\t}\n\treturn res\n}",
"func NewBatchGetItemsBuilder(t mockConstructorTestingTNewBatchGetItemsBuilder) *BatchGetItemsBuilder {\n\tmock := &BatchGetItemsBuilder{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func (b *BrowserkCrawler) buildResult(result *browserk.NavigationResult, start time.Time, browser browserk.Browser) {\n\tmessages, err := browser.GetMessages()\n\tresult.AddError(err)\n\tresult.Messages = browserk.MessagesAfterRequestTime(messages, start)\n\tresult.MessageCount = len(result.Messages)\n\tdom, err := browser.GetDOM()\n\tresult.AddError(err)\n\tresult.DOM = dom\n\tendURL, err := browser.GetURL()\n\tresult.AddError(err)\n\tresult.EndURL = endURL\n\tcookies, err := browser.GetCookies()\n\tresult.AddError(err)\n\tresult.Cookies = browserk.DiffCookies(result.Cookies, cookies)\n\tresult.StorageEvents = browser.GetStorageEvents()\n\tresult.ConsoleEvents = browser.GetConsoleEvents()\n\tresult.Hash()\n}",
"func NewApp_service_Results_List(s *capnp.Segment, sz int32) (App_service_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn App_service_Results_List{l}, err\n}",
"func (f *PaginationInfoFactory) Build(activePage int, totalHits int, pageSize int, lastPage int, urlBase *url.URL) PaginationInfo {\n\treturn BuildWith(CurrentResultInfos{\n\t\tActivePage: activePage,\n\t\tTotalHits: totalHits,\n\t\tPageSize: pageSize,\n\t\tLastPage: lastPage,\n\t}, *f.DefaultConfig, urlBase)\n}",
"func BuildListOptions(page int) *buildkite.BuildsListOptions {\n\treturn &buildkite.BuildsListOptions{\n\t\tListOptions: buildkite.ListOptions{\n\t\t\tPage: page,\n\t\t\tPerPage: 100,\n\t\t},\n\t}\n}",
"func (b *defaultBuilder) Build() []druidv1alpha1.EtcdMemberStatus {\n\tvar (\n\t\tnow = b.nowFunc()\n\n\t\tmembers []druidv1alpha1.EtcdMemberStatus\n\t)\n\n\tfor id, res := range b.results {\n\t\tmember, ok := b.old[id]\n\t\tif !ok {\n\t\t\t// Continue if we can't find an existing member because druid is not supposed to add one.\n\t\t\tcontinue\n\t\t}\n\n\t\tmember.Status = res.Status()\n\t\tmember.LastTransitionTime = now\n\t\tmember.LastUpdateTime = now\n\t\tmember.Reason = res.Reason()\n\n\t\tmembers = append(members, member)\n\t\tdelete(b.old, id)\n\t}\n\n\tfor _, member := range b.old {\n\t\t// Add existing members as they were. This needs to be changed when SSA is used.\n\t\tmembers = append(members, member)\n\t}\n\n\treturn members\n}",
"func NewApp_id_Results_List(s *capnp.Segment, sz int32) (App_id_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz)\n\treturn App_id_Results_List{l}, err\n}",
"func (b *BillingModelItemListBuilder) Build() (list *BillingModelItemList, err error) {\n\titems := make([]*BillingModelItem, len(b.items))\n\tfor i, item := range b.items {\n\t\titems[i], err = item.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tlist = new(BillingModelItemList)\n\tlist.items = items\n\treturn\n}",
"func NewBuilder() ListBuilder {\n\treturn &listBuilder{\n\t\telements: make(map[string]bool),\n\t}\n}",
"func (f *FixturesBuilder) BuildListers() *client.Listers {\n\tlisters := &client.Listers{\n\t\tStorageListers: client.StorageListers{\n\t\t\tInfrastructures: configv1listers.NewInfrastructureLister(f.infraIndexer),\n\t\t\tOpenShiftConfig: corev1listers.NewConfigMapLister(f.configMapsIndexer).ConfigMaps(\"openshift-config\"),\n\t\t\tOpenShiftConfigManaged: corev1listers.NewConfigMapLister(f.configMapsIndexer).ConfigMaps(\"openshift-config-managed\"),\n\t\t\tSecrets: corev1listers.NewSecretLister(f.secretsIndexer).Secrets(\"openshift-image-registry\"),\n\t\t},\n\t\tDeployments: appsv1listers.NewDeploymentLister(f.deploymentIndexer).Deployments(\"openshift-image-registry\"),\n\t\tServices: corev1listers.NewServiceLister(f.servicesIndexer).Services(\"openshift-image-registry\"),\n\t\tConfigMaps: corev1listers.NewConfigMapLister(f.configMapsIndexer).ConfigMaps(\"openshift-image-registry\"),\n\t\tServiceAccounts: corev1listers.NewServiceAccountLister(f.serviceAcctIndexer).ServiceAccounts(\"openshift-image-registry\"),\n\t\tRoutes: routev1listers.NewRouteLister(f.routesIndexer).Routes(\"openshift-image-registry\"),\n\t\tClusterRoles: rbacv1listers.NewClusterRoleLister(f.clusterRolesIndexer),\n\t\tClusterRoleBindings: rbacv1listers.NewClusterRoleBindingLister(f.clusterRoleBindingsIndexer),\n\t\tRegistryConfigs: regopv1listers.NewConfigLister(f.registryConfigsIndexer),\n\t\tProxyConfigs: configv1listers.NewProxyLister(f.proxyConfigsIndexer),\n\t}\n\treturn listers\n}",
"func (rb *EmailResultBuilder) Build() EmailResult {\n\treturn *rb.v\n}",
"func NewApp_kill_Results_List(s *capnp.Segment, sz int32) (App_kill_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\treturn App_kill_Results_List{l}, err\n}",
"func NewApp_startedOn_Results_List(s *capnp.Segment, sz int32) (App_startedOn_Results_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz)\n\treturn App_startedOn_Results_List{l}, err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Avg returns the average frequency between messages
|
func (a *AvgFreq) Avg() float64 {
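	// Note: the integer division below truncates to whole milliseconds before
	// the float conversion, and a zero Samples count yields NaN or +Inf.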
totalMs := float64(a.Total.Nanoseconds() / int64(time.Millisecond))
return totalMs / float64(a.Samples)
}
|
[
"func Average(msgs []*Score) float64 {\n\tvar total float64\n\tfor _, m := range msgs {\n\t\ttotal += float64(m.complexity)\n\t}\n\treturn total / float64(len(msgs))\n}",
"func (m measurementsType) avg() time.Duration {\n\tif m.count() == 0 {\n\t\treturn 0.0\n\t}\n\n\treturn m.sum() / time.Duration(m.count())\n}",
"func (p *player /* receiver */) average() float64 /* return type */ {\n\t// what about 0 at bats? that's not a player.\n\treturn float64(p.hits) / float64(p.atBats)\n}",
"func (p *player) average() float64 {\n\tif p.atBats == 0 {\n\t\treturn 0.0\n\t}\n\n\treturn float64(p.hits) / float64(p.atBats)\n}",
"func getAverage(records chan seqs.SeqRecord, errs chan error, output io.Writer) error {\n\ttotal, count := 0, 0\n\n\tfor records != nil && errs != nil {\n\t\tselect {\n\t\tcase record := <-records:\n\t\t\ttotal += record.Seq.Length()\n\t\t\tcount++\n\t\tcase err := <-errs:\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t_, err = fmt.Fprintln(output, float32(total)/float32(count))\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (s *Service) Avg(_ *int, result *float64) error {\n\tlog.Print(\"Calculating avg WTF level\")\n\tavg := 0.0\n\tfor _, level := range s.levels {\n\t\tavg += level\n\t}\n\tavg /= float64(len(s.levels))\n\t*result = avg\n\treturn nil\n}",
"func averagetime(xs []float64) float64 {\n\ttotal := 0.0\n\tfor _, v := range xs {\n\t\ttotal +=v\n\t}\n\treturn total / float64(len(xs))\n}",
"func (m *metricStore) mean() float64 {\n\tsum := m.stats()\n\n\treturn float64(sum) / float64(m.interval)\n}",
"func (sd *Sender) AverageResponseMs() float64 {\n\tif sd.stats.packetsDelivered == 0 {\n\t\treturn 0.0\n\t}\n\t// instead of using transferTime.Milliseconds(),\n\t// cast to float64 to get sub-millisecond timing\n\tret := float64(sd.stats.transferTime) /\n\t\tfloat64(time.Millisecond) /\n\t\tfloat64(sd.stats.packetsDelivered)\n\treturn ret\n}",
"func (s *Statistic) Average() float64 {\n\treturn s.sum / float64(s.n)\n}",
"func (c *Response) AverageBytesPerSecond() float64 {\n\treturn float64(c.BytesTransferred()-c.bytesResumed) / c.Duration().Seconds()\n}",
"func Average() {\n var total = 0.0;\n var count = 0.0\n for Items() {\n total = total + Pop()\n count = count + 1\n }\n ans := total / count\n Push(ans)\n}",
"func score(msg string) float64 {\n\t// soft filter on nonstandard characters\n\t// plaintext will likely have a very small number of nonstandard characters\n\tlmod := 1.0\n\tfor _, s := range msg {\n\t\tif !unicode.In(s, unicode.Letter, unicode.Punct, unicode.Space) {\n\t\t\tif (s < '0') || (s > '9') {\n\t\t\t\tif s != '\\n' {\n\t\t\t\t\tlmod *= 0.1\t\t\t\t\t\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif lmod < 0.01 {\n\t\treturn lmod\n\t}\n\n\t// calculate regularity measure for character frequency\n\t// plaintext will likely have similar character frequency to natural language\n\tfmod := 1.0\n\tappear := make(map[rune]int)\n\tcountAppearance(msg, appear)\n\tfmod = regularity(appear)\n\n\t// average word length and standard deviation\n\t// plaintext will likely not have really long words\n\twmod := 1.0\n\tspaceMsg := strings.Replace(msg, \"\\n\", \" \", -1)\n\tspaceMsg = strings.Replace(spaceMsg, \"\\t\", \" \", -1)\n\tspaceMsg = strings.Trim(spaceMsg, \" \")\n\tspaceMsg = strings.Replace(spaceMsg, \" \", \" \", -1)\n\twords := strings.Split(spaceMsg, \" \")\n\ttotal := 0\n\tfor _, word := range words {\n\t\ttotal += len(word)\n\t}\n\taverage := float64(total) / float64(len(words))\n\twmod /= (math.Abs(average - 5.08)+1.0)\n\n\t// TODO: calculate standard deviation\n\t// regular sd = 0.4305961\n\n\treturn lmod * fmod * wmod\n}",
"func (*Service) Average(stream pb.Calculator_AverageServer) error {\n\tfmt.Println(\"Run average streaming\")\n\tvar sum int32\n\tvar count float64\n\tfor {\n\t\treq, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\treturn stream.SendAndClose(\n\t\t\t\t&pb.ResponseDouble{\n\t\t\t\t\tAverage: float64(sum) / count,\n\t\t\t\t})\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error while reading client stream %v\", err)\n\t\t}\n\t\tposInt := req.PositiveInteger\n\t\tsum += posInt\n\t\tcount++\n\t}\n}",
"func Avg() int { return DefaultMyType.Avg() }",
"func (sw *SlidingWindow) Avg(now time.Time) float64 {\n\treturn sw.Sum(now) / 10\n}",
"func Average(values []float64) float64 {\n\ttotal := .0\n\tfor _, v := range values {\n\t\ttotal += v\n\t}\n\n\treturn total / float64(len(values))\n}",
"func avg(vals []float64) float64 {\n\ttotal := 0.0\n\tfor _, v := range vals {\n\t\ttotal += v\n\t}\n\tavg := total / float64(len(vals))\n\treturn roundUp(avg, 2)\n}",
"func Average(values []float64) (result float64) {\n\tfor _, value := range values {\n\t\tresult += value\n\t}\n\tresult = result / float64(len(values))\n\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Samples returns the number of collected samples so far
|
func (s *Stats) Samples() uint16 {
return s.avgFreq.Samples
}
|
[
"func CountSamples(inch Pipe) int {\n\tcount := 0\n\tfor range inch {\n\t\tcount++\n\t}\n\treturn count\n}",
"func SampleSize(sample Sample) int {\n\ttotal := 0\n\tfor k, v := range sample {\n\t\ttotal += len(k)\n\t\ttotal += len(v)\n\t}\n\treturn total\n}",
"func (c *FloatHistogramChunk) NumSamples() int {\n\treturn int(binary.BigEndian.Uint16(c.Bytes()))\n}",
"func (h Histogram) Samples() uint64 {\n\ts := uint64(0)\n\tfor i := 0; i < bucketCount; i++ {\n\t\ts += atomic.LoadUint64(&h.buckets[i])\n\t}\n\treturn s\n}",
"func (storage *Storage) GetNumSamples() int {\n\treturn storage.sampleDB.Len()\n}",
"func (s *SampleSnapshot) Count() int64 { return s.count }",
"func (c *Client) SampledCount(name string, value int64, rate float64) error {\n\tstat := fmt.Sprintf(\"%d|c|@%f\", value, rate)\n\treturn c.send(name, stat)\n}",
"func (c *connCounter) Sample() int {\n\tc.m.Lock()\n\tn := c.n\n\tc.n = 0\n\tc.m.Unlock()\n\treturn n\n}",
"func (src *StandardRateCounter) maybeSampleCount() {\n\tcurrentTimeMs := src.clock.Now().UnixNano() / 1e6\n\tcurrentSampleTimeMs := src.roundTime(currentTimeMs)\n\n\tsrc.lock.RLock()\n\ttoSample := currentSampleTimeMs > src.lastSampleTimeMs\n\tsrc.lock.RUnlock()\n\n\tif !toSample {\n\t\treturn\n\t}\n\n\tsrc.lock.Lock()\n\tdefer src.lock.Unlock()\n\n\tif currentSampleTimeMs > src.lastSampleTimeMs {\n\t\tsrc.sampleCountAndUpdateRate(currentSampleTimeMs)\n\t}\n}",
"func (sb *SttsBox) SampleCounts() []uint32 {\n\treturn sb.sampleCounts\n}",
"func (m Matrix) TotalSamples() int {\n\tnumSamples := 0\n\tfor _, series := range m {\n\t\tnumSamples += len(series.Floats) + len(series.Histograms)\n\t}\n\treturn numSamples\n}",
"func PacketGetNbSamples(packet string, len int32, fs int32) int32 {\n\tcpacket, cpacketAllocMap := unpackPUcharString(packet)\n\tclen, clenAllocMap := (C.opus_int32)(len), cgoAllocsUnknown\n\tcfs, cfsAllocMap := (C.opus_int32)(fs), cgoAllocsUnknown\n\t__ret := C.opus_packet_get_nb_samples(cpacket, clen, cfs)\n\truntime.KeepAlive(cfsAllocMap)\n\truntime.KeepAlive(clenAllocMap)\n\truntime.KeepAlive(cpacketAllocMap)\n\t__v := (int32)(__ret)\n\treturn __v\n}",
"func (s *Samples) GetSamples(offset int, sampleCount int) (samples []int16) {\n\tsamples = make([]int16, sampleCount*s.channels)\n\tfor i := 0; i < len(samples); i++ {\n\t\tsamples[i] = int16(s.samplesSlice[i])\n\t}\n\treturn\n}",
"func Sample(distribution Sampler, generator Generator, count uint) []float64 {\n\tresult := make([]float64, count)\n\tfor i := range result {\n\t\tresult[i] = distribution.Sample(generator)\n\t}\n\treturn result\n}",
"func (w *Worker) SamplesChanged() {\n\tselect {\n\tcase w.samplesChangedC <- struct{}{}:\n\tdefault:\n\t}\n}",
"func DecoderGetNbSamples(dec *Decoder, packet string, len int32) int32 {\n\tcdec, cdecAllocMap := (*C.OpusDecoder)(unsafe.Pointer(dec)), cgoAllocsUnknown\n\tcpacket, cpacketAllocMap := unpackPUcharString(packet)\n\tclen, clenAllocMap := (C.opus_int32)(len), cgoAllocsUnknown\n\t__ret := C.opus_decoder_get_nb_samples(cdec, cpacket, clen)\n\truntime.KeepAlive(clenAllocMap)\n\truntime.KeepAlive(cpacketAllocMap)\n\truntime.KeepAlive(cdecAllocMap)\n\t__v := (int32)(__ret)\n\treturn __v\n}",
"func (a *TestAgentDemultiplexer) WaitForNumberOfSamples(ontimeCount, timedCount int, timeout time.Duration) (ontime []metrics.MetricSample, timed []metrics.MetricSample) {\n\treturn a.waitForSamples(timeout, func(ontime, timed []metrics.MetricSample) bool {\n\t\treturn (len(ontime) >= ontimeCount || ontimeCount == 0) &&\n\t\t\t(len(timed) >= timedCount || timedCount == 0)\n\t})\n}",
"func (s *SampleSet) Len() int {\n\treturn len(s.Samples)\n}",
"func (s *PseudorandomSampler) Sample() []float64 {\n\tvar probs []float64\n\n\tfor i := 0; i < s.n; i++ {\n\t\tprobs = append(probs, s.random.Float64())\n\t}\n\n\treturn probs\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ActiveCount returns the number of active connections in the pool.
|
func (this *BeanstalkdPool) ActiveCount() int {
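	// Snapshot the counter under the mutex so the read cannot race with
	// concurrent connection opens and closes.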
this.mu.Lock()
active := this.active
this.mu.Unlock()
return active
}
|
[
"func (np *Pool) GetActiveCount() (count int) {\n\tnp.mmx.RLock()\n\tdefer np.mmx.RUnlock()\n\n\tfor _, node := range np.Nodes {\n\t\tif node.IsActive() {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn\n}",
"func (p *Pool) NumAvailConns() int {\n\treturn len(p.pool)\n}",
"func (p *Pool) ActiveConnection() int32 {\n\treturn p.active\n}",
"func (p *Pool) Active() int {\n\treturn int(atomic.LoadInt32(&p.active))\n}",
"func NumActiveStatements() int64 {\n\treturn atomic.LoadInt64(&activeStatements)\n}",
"func (p *Pool) Count() int {\n\treturn len(p.c)\n}",
"func (b *backend) NumOpenConnections() int {\n\treturn int(atomic.LoadInt32(b.counter))\n}",
"func (p *Server) CountOpenConnections() int {\n\treturn len(p.sigs)\n}",
"func (T *HostStatus) GetConnectionCount() int {\n\tT.RLock()\n\tdefer T.RUnlock()\n\treturn len(T.Connections)\n}",
"func (es *EventSource) ConnectionCount() int {\n\ti := 0\n\n\tfor _, channel := range es.channels {\n\t\ti += len(channel.clientsConnected)\n\t}\n\n\treturn i\n}",
"func (c *Config) GetCurrentPoolCount() (int, error) {\n\tcspList, err := apiscsp.NewKubeClient().WithNamespace(c.Namespace).List(metav1.ListOptions{LabelSelector: string(apis.CStorPoolClusterCPK) + \"=\" + c.CSPC.Name})\n\tif err != nil {\n\t\treturn 0, errors.Errorf(\"unable to get current pool count:unable to list cstor pools: %v\", err)\n\t}\n\treturn len(cspList.Items), nil\n}",
"func (c *Connector) GetConnectionsCount() int {\n\tc.lock.RLock()\n\tdefer c.lock.RUnlock()\n\n\treturn len(c.registry)\n}",
"func (s *StatsTracker) NumTCPConns() int {\n\treturn int(atomic.LoadInt64(&s.numTCPConns))\n}",
"func (obj *SSLChannel) GetNoOfConnections() int32 {\n\treturn obj.numOfConnections\n}",
"func (p Performance) ActiveJobCount() int {\n\treturn p.Dispatched.LoadProcesss + p.Dispatched.CopyJobs +\n\t\tp.Pending.LoadProcesss + p.Pending.CopyJobs +\n\t\tp.Running.LoadProcesss + p.Running.CopyJobs\n}",
"func (s *Stream) Connections() int {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\treturn len(s.pool)\n}",
"func (s *StatsTracker) NumTCPConnsClosed() int {\n\treturn int(atomic.LoadInt64(&s.numTCPConnsClosed))\n}",
"func (p Performance) ActiveQueryCount() int {\n\treturn p.Dispatched.QueryJobs + p.Dispatched.QueryJobs +\n\t\tp.Pending.QueryJobs + p.Pending.QueryJobs +\n\t\tp.Running.QueryJobs + p.Running.QueryJobs\n}",
"func (s *Scheduler) activeHostCount() int {\n\tcount := 0\n\tfor _, host := range s.hosts {\n\t\tif !host.Shutdown {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|